This article collects typical usage examples of the C function pci_map_single (the legacy Linux PCI DMA mapping helper). If you are wondering what exactly pci_map_single does, how to call it, or want real-world examples, the hand-picked snippets below may help.
Twenty code examples of pci_map_single are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better code examples.
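All of the examples share the same contract: the address returned by pci_map_single() is only valid until a matching pci_unmap_single() with the same size and direction, and on kernels that provide the two-argument pci_dma_mapping_error() the result should be checked before it is handed to hardware. The following is a minimal, hedged sketch of that life cycle; the device, buffer, and function names are placeholders and are not taken from any example below.

#include <linux/pci.h>

/* Sketch of the canonical pci_map_single() life cycle: map a CPU buffer for
 * a single device-bound transfer, check the mapping, use it, then unmap. */
static int example_dma_to_device(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(pdev, handle))
                return -ENOMEM;         /* nothing was mapped, nothing to undo */

        /* ... program 'handle' into the device and wait for the DMA ... */

        pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
        return 0;
}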
Example 1: ipath_map_single

/**
 * ipath_map_single - a safety wrapper around pci_map_single()
 *
 * Same idea as ipath_map_page().
 */
dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
                            int direction)
{
        dma_addr_t phys;

        phys = pci_map_single(hwdev, ptr, size, direction);

        if (phys == 0) {
                pci_unmap_single(hwdev, phys, size, direction);
                phys = pci_map_single(hwdev, ptr, size, direction);
                /*
                 * FIXME: If we get 0 again, we should keep this page,
                 * map another, then free the 0 page.
                 */
        }

        return phys;
}

Developer ID: 12019, Project: kernel_zte_u880, Lines: 23, Source: ipath_user_pages.c
Example 2: osl_dma_map

uint BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction)
{
        int dir;

        ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

        dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
        return (pci_map_single(osh->pdev, va, size, dir));
}

Developer ID: rhg, Project: android_kernel_lge_gelato, Lines: 9, Source: linux_osl.c
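Every buffer mapped through this wrapper has to be released with pci_unmap_single() using the same size and direction. A hypothetical counterpart sketch is shown below; the function name, the DMA_TX/ASSERT macros, and the osl_t layout simply mirror the example above and are assumptions, not code taken from the project.

/* Hypothetical counterpart sketch: undo a mapping created by osl_dma_map().
 * Names mirror the example above and are assumptions, not project code. */
void BCMFASTPATH
osl_dma_unmap(osl_t *osh, dma_addr_t pa, uint size, int direction)
{
        int dir;

        ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

        dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
        pci_unmap_single(osh->pdev, pa, size, dir);
}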
Example 3: ivtv_udma_alloc

/* User DMA Buffers */
void ivtv_udma_alloc(struct ivtv *itv)
{
        if (itv->udma.SG_handle == 0) {
                /* Map DMA Page Array Buffer */
                itv->udma.SG_handle = pci_map_single(itv->pdev, itv->udma.SGarray,
                                        sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
                ivtv_udma_sync_for_cpu(itv);
        }
}

Developer ID: work40, Project: linux-tbs-drivers, Lines: 10, Source: ivtv-udma.c
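The handle stored in itv->udma.SG_handle must eventually be released with a matching pci_unmap_single() of the same size and direction. The sketch below illustrates such a teardown; the function name is illustrative and the driver's real cleanup path may differ.

/* Sketch of the matching teardown for the mapping created above.  The
 * zero-check mirrors the allocation guard in ivtv_udma_alloc(). */
static void example_udma_free_sg(struct ivtv *itv)
{
        if (itv->udma.SG_handle) {
                pci_unmap_single(itv->pdev, itv->udma.SG_handle,
                                 sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
                itv->udma.SG_handle = 0;
        }
}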
Example 4: rtl8822be_tx_fill_special_desc

void rtl8822be_tx_fill_special_desc(struct ieee80211_hw *hw, u8 *pdesc,
                                    u8 *pbd_desc, struct sk_buff *skb,
                                    u8 hw_queue)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        u8 fw_queue;
        u8 txdesc_len = 48;

        dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
                                            PCI_DMA_TODEVICE);

        if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
                RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "DMA mapping error");
                return;
        }

        rtl8822be_pre_fill_tx_bd_desc(hw, pbd_desc, pdesc, hw_queue, skb,
                                      mapping);

        /* it should be BEACON_QUEUE or H2C_QUEUE,
         * so skb=NULL is safe to assert
         */
        fw_queue = _rtl8822be_map_hwqueue_to_fwqueue(NULL, hw_queue);

        CLEAR_PCI_TX_DESC_CONTENT(pdesc, txdesc_len);

        /* common part for BEACON and H2C */
        SET_TX_DESC_TXPKTSIZE((u8 *)pdesc, (u16)(skb->len));

        SET_TX_DESC_QSEL(pdesc, fw_queue);

        if (hw_queue == H2C_QUEUE) {
                /* fill H2C */
                SET_TX_DESC_OFFSET(pdesc, 0);
        } else {
                /* fill beacon */
                SET_TX_DESC_OFFSET(pdesc, txdesc_len);

                SET_TX_DESC_DATARATE(pdesc, DESC_RATE1M);
                SET_TX_DESC_SW_SEQ(pdesc, 0);
                SET_TX_DESC_RATE_ID(pdesc, 7);
                SET_TX_DESC_MACID(pdesc, 0);
                SET_TX_DESC_LS(pdesc, 1);
                SET_TX_DESC_OFFSET(pdesc, 48);
                SET_TX_DESC_USE_RATE(pdesc, 1);
        }

        RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
                      pdesc, txdesc_len);
}

Developer ID: mkrufky, Project: linux, Lines: 57, Source: trx.c
Example 5: alloc_list

/* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->cur_rx = np->cur_tx = 0;
        np->old_rx = np->old_tx = 0;
        np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

        /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = NULL;
                np->tx_ring[i].status = cpu_to_le64 (TFDDone);
                np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
                                        ((i+1)%TX_RING_SIZE) *
                                        sizeof (struct netdev_desc));
        }

        /* Initialize Rx descriptors */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
                                        ((i + 1) % RX_RING_SIZE) *
                                        sizeof (struct netdev_desc));
                np->rx_ring[i].status = 0;
                np->rx_ring[i].fraginfo = 0;
                np->rx_skbuff[i] = NULL;
        }

        /* Allocate the rx buffers */
        for (i = 0; i < RX_RING_SIZE; i++) {
                /* Allocated fixed size of skbuff */
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL) {
                        printk (KERN_ERR
                                "%s: alloc_list: allocate Rx buffer error! ",
                                dev->name);
                        break;
                }

                /* Rubicon now supports 40 bits of addressing space. */
                np->rx_ring[i].fraginfo =
                        cpu_to_le64 ( pci_map_single (
                                np->pdev, skb->data, np->rx_buf_sz,
                                PCI_DMA_FROMDEVICE));
                np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
        }

        /* Set RFDListPtr */
        writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
        writel (0, dev->base_addr + RFDListPtr1);

        return;
}

Developer ID: A2109devs, Project: lenovo_a2109a_kernel, Lines: 57, Source: dl2k.c
Example 6: dma_map_single

dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
                          enum dma_data_direction direction)
{
        if (dev->bus == &pci_bus_type)
                return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
        if (dev->bus == &vio_bus_type)
                return vio_map_single(to_vio_dev(dev), cpu_addr, size, direction);
        BUG();
        return (dma_addr_t)0;
}

Developer ID: 12019, Project: hg556a_source, Lines: 10, Source: dma.c
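This old per-bus dispatcher shows how pci_map_single() and the generic dma_map_single() were related. In current kernels the pci_* DMA wrappers are gone and drivers call the generic DMA API on &pdev->dev directly. The sketch below shows that modern pattern; the helper name and the buffer arguments are placeholders, not code from the project above.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Minimal sketch of the modern equivalent of pci_map_single(): map a
 * driver-owned buffer for one device-bound transfer, check the mapping,
 * use it, then unmap it. */
static int example_map_tx_buf(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, handle))
                return -ENOMEM;         /* mapping failed, nothing to undo */

        /* ... hand 'handle' to the hardware and wait for completion ... */

        dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
        return 0;
}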
Example 7: alloc_list

static void
alloc_list (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->cur_rx = np->cur_tx = 0;
        np->old_rx = np->old_tx = 0;
        np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

        /* */
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = NULL;
                np->tx_ring[i].status = cpu_to_le64 (TFDDone);
                np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
                                        ((i+1)%TX_RING_SIZE) *
                                        sizeof (struct netdev_desc));
        }

        /* */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
                                        ((i + 1) % RX_RING_SIZE) *
                                        sizeof (struct netdev_desc));
                np->rx_ring[i].status = 0;
                np->rx_ring[i].fraginfo = 0;
                np->rx_skbuff[i] = NULL;
        }

        /* */
        for (i = 0; i < RX_RING_SIZE; i++) {
                /* */
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL) {
                        printk (KERN_ERR
                                "%s: alloc_list: allocate Rx buffer error! ",
                                dev->name);
                        break;
                }

                /* */
                np->rx_ring[i].fraginfo =
                        cpu_to_le64 ( pci_map_single (
                                np->pdev, skb->data, np->rx_buf_sz,
                                PCI_DMA_FROMDEVICE));
                np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
        }

        /* */
        writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
        writel (0, dev->base_addr + RFDListPtr1);
}

Developer ID: romanbb, Project: android_kernel_lge_d851, Lines: 54, Source: dl2k.c
Example 8: alloc_buffer

static int alloc_buffer(struct vino_device *v, int size)
{
        int count, i, j, err;

        err = i = 0;
        count = (size / PAGE_SIZE + 4) & ~3;
        v->desc = (unsigned long *) kmalloc(count * sizeof(unsigned long),
                                            GFP_KERNEL);
        if (!v->desc)
                return -ENOMEM;

        v->dma_desc.cpu = pci_alloc_consistent(NULL, PAGE_RATIO * (count+4) *
                                               sizeof(dma_addr_t),
                                               &v->dma_desc.dma);
        if (!v->dma_desc.cpu) {
                err = -ENOMEM;
                goto out_free_desc;
        }

        while (i < count) {
                dma_addr_t dma;

                v->desc[i] = get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!v->desc[i])
                        break;

                dma = pci_map_single(NULL, (void *)v->desc[i], PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE);
                for (j = 0; j < PAGE_RATIO; j++)
                        v->dma_desc.cpu[PAGE_RATIO * i + j ] =
                                dma + VINO_PAGE_SIZE * j;

                mem_map_reserve(virt_to_page(v->desc[i]));
                i++;
        }

        v->dma_desc.cpu[PAGE_RATIO * count] = VINO_DESC_STOP;

        if (i-- < count) {
                while (i >= 0) {
                        mem_map_unreserve(virt_to_page(v->desc[i]));
                        pci_unmap_single(NULL, v->dma_desc.cpu[PAGE_RATIO * i],
                                         PAGE_SIZE, PCI_DMA_FROMDEVICE);
                        free_page(v->desc[i]);
                        i--;
                }
                pci_free_consistent(NULL,
                                    PAGE_RATIO * (count+4) * sizeof(dma_addr_t),
                                    (void *)v->dma_desc.cpu, v->dma_desc.dma);
                err = -ENOBUFS;
                goto out_free_desc;
        }

        v->page_count = count;

        return 0;

out_free_desc:
        kfree(v->desc);

        return err;
}

Developer ID: ProjectZeroSlackr, Project: linux-2.4.32-ipod, Lines: 54, Source: vino.c
Example 9: epic_start_xmit

static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct epic_private *ep = dev->priv;
        int entry, free_count;
        u32 ctrl_word;
        unsigned long flags;

        if (skb_padto(skb, ETH_ZLEN))
                return 0;

        /* Caution: the write order is important here, set the field with the
           "ownership" bit last. */

        /* Calculate the next Tx descriptor entry. */
        spin_lock_irqsave(&ep->lock, flags);
        free_count = ep->cur_tx - ep->dirty_tx;
        entry = ep->cur_tx % TX_RING_SIZE;

        ep->tx_skbuff[entry] = skb;
        ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
                                                    skb->len, PCI_DMA_TODEVICE);
        if (free_count < TX_QUEUE_LEN/2) {              /* Typical path */
                ctrl_word = cpu_to_le32(0x100000);      /* No interrupt */
        } else if (free_count == TX_QUEUE_LEN/2) {
                ctrl_word = cpu_to_le32(0x140000);      /* Tx-done intr. */
        } else if (free_count < TX_QUEUE_LEN - 1) {
                ctrl_word = cpu_to_le32(0x100000);      /* No Tx-done intr. */
        } else {
                /* Leave room for an additional entry. */
                ctrl_word = cpu_to_le32(0x140000);      /* Tx-done intr. */
                ep->tx_full = 1;
        }
        ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
        ep->tx_ring[entry].txstatus =
                ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
                | cpu_to_le32(DescOwn);

        ep->cur_tx++;
        if (ep->tx_full)
                netif_stop_queue(dev);

        spin_unlock_irqrestore(&ep->lock, flags);

        /* Trigger an immediate transmit demand. */
        outl(TxQueued, dev->base_addr + COMMAND);

        dev->trans_start = jiffies;
        if (debug > 4)
                printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
                       "flag %2.2x Tx status %8.8x.\n",
                       dev->name, (int)skb->len, entry, ctrl_word,
                       (int)inl(dev->base_addr + TxSTAT));

        return 0;
}

Developer ID: PennPanda, Project: linux-repo, Lines: 54, Source: epic100.c
Example 10: pci_eth_start_xmit

static netdev_tx_t pci_eth_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct pci_eth_private *priv = netdev_priv(dev);
        struct pci_eth_descriptor *descptr;
        void __iomem *ioaddr = dev->base_addr;
        unsigned long flags;

        /* Critical Section */
        spin_lock_irqsave(&priv->lock, flags);

        /* TX resource check */
        if (!priv->tx_free_desc) {
                spin_unlock_irqrestore(&priv->lock, flags);
                netif_stop_queue(dev);
                netdev_err(dev, ": no tx descriptor\n");
                return NETDEV_TX_BUSY;
        }

        /* Statistic Counter */
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;

        /* Decrement free descriptors counter */
        priv->tx_free_desc--;

        /* Set TX descriptor & Transmit it */
        descptr = priv->tx_insert_ptr;
        if (skb->len < ETH_ZLEN)
                descptr->len = ETH_ZLEN;
        else
                descptr->len = skb->len;

        descptr->skb_ptr = skb;
        descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
                skb->data, skb->len, PCI_DMA_TODEVICE));
        descptr->status = DSC_OWNER_MAC;

        skb_tx_timestamp(skb);

        /* TODO: Trigger the MAC to check the TX descriptor - start DMA
         * transaction.
         */

        /* After DMA transaction perform the following check */
        /* If no tx resource, stop */
        if (!priv->tx_free_desc)
                netif_stop_queue(dev);

        spin_unlock_irqrestore(&priv->lock, flags);

        return NETDEV_TX_OK;
}

Developer ID: agamemnon886, Project: mod, Lines: 53, Source: pci_eth.c
Example 11: ad1889_start_wav

static void ad1889_start_wav(ad1889_state_t *state)
{
        unsigned long flags;
        struct dmabuf *dmabuf = &state->dmabuf;
        int cnt;
        u16 tmp;

        spin_lock_irqsave(&state->card->lock, flags);

        if (dmabuf->dma_len)    /* DMA already in flight */
                goto skip_dma;

        /* setup dma */
        cnt = dmabuf->wr_ptr - dmabuf->rd_ptr;
        if (cnt == 0)           /* done - don't need to do anything */
                goto skip_dma;

        /* If the wr_ptr has wrapped, only map to the end */
        if (cnt < 0)
                cnt = DMA_SIZE - dmabuf->rd_ptr;

        dmabuf->dma_handle = pci_map_single(ad1889_dev->pci,
                                            dmabuf->rawbuf + dmabuf->rd_ptr,
                                            cnt, PCI_DMA_TODEVICE);
        dmabuf->dma_len = cnt;
        dmabuf->ready = 1;

        DBG("Starting playback at 0x%p for %ld bytes\n", dmabuf->rawbuf +
            dmabuf->rd_ptr, dmabuf->dma_len);

        /* load up the current register set */
        AD1889_WRITEL(ad1889_dev, AD_DMAWAVCC, cnt);
        AD1889_WRITEL(ad1889_dev, AD_DMAWAVICC, cnt);
        AD1889_WRITEL(ad1889_dev, AD_DMAWAVCA, dmabuf->dma_handle);

        /* TODO: for now we load the base registers with the same thing */
        AD1889_WRITEL(ad1889_dev, AD_DMAWAVBC, cnt);
        AD1889_WRITEL(ad1889_dev, AD_DMAWAVIBC, cnt);
        AD1889_WRITEL(ad1889_dev, AD_DMAWAVBA, dmabuf->dma_handle);

        /* and we're off to the races... */
        AD1889_WRITEL(ad1889_dev, AD_DMACHSS, 0x8);
        tmp = AD1889_READW(ad1889_dev, AD_DSWSMC);
        tmp |= 0x0400;  /* set WAEN */
        AD1889_WRITEW(ad1889_dev, AD_DSWSMC, tmp);
        (void) AD1889_READW(ad1889_dev, AD_DSWSMC); /* flush posted PCI write */

        dmabuf->enable |= DAC_RUNNING;

skip_dma:
        spin_unlock_irqrestore(&state->card->lock, flags);
}

Developer ID: KrisChaplin, Project: LRT2x4_v1.0.2.06_GPL_source, Lines: 52, Source: ad1889.c
Example 12: rtl8169_start_xmit

static int
rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct rtl8169_private *tp = dev->priv;
        void *ioaddr = tp->mmio_addr;
        int entry = tp->cur_tx % NUM_TX_DESC;
        u32 len = skb->len;

        if (unlikely(skb->len < ETH_ZLEN)) {
                skb = skb_padto(skb, ETH_ZLEN);
                if (!skb)
                        goto err_update_stats;
                len = ETH_ZLEN;
        }

        spin_lock_irq(&tp->lock);

        if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
                dma_addr_t mapping;

                mapping = pci_map_single(tp->pci_dev, skb->data, len,
                                         PCI_DMA_TODEVICE);

                tp->Tx_skbuff[entry] = skb;
                tp->TxDescArray[entry].addr = cpu_to_le64(mapping);

                tp->TxDescArray[entry].status = cpu_to_le32(OWNbit | FSbit |
                        LSbit | len | (EORbit * !((entry + 1) % NUM_TX_DESC)));

                RTL_W8(TxPoll, 0x40);   //set polling bit

                dev->trans_start = jiffies;

                tp->cur_tx++;
        } else
                goto err_drop;

        if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) {
                netif_stop_queue(dev);
        }
out:
        spin_unlock_irq(&tp->lock);

        return 0;

err_drop:
        dev_kfree_skb(skb);
err_update_stats:
        tp->stats.tx_dropped++;
        goto out;
}

Developer ID: wxlong, Project: Test, Lines: 52, Source: r8169.c
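Unlike Example 4, this older xmit path stores the DMA address in the descriptor without checking it. The sketch below shows how such a check is typically added; it is only an illustration built from the names used in the example above (it is not the upstream r8169 fix), and the helper name is hypothetical.

/* Sketch only: map an skb for TX and fail cleanly if the mapping could not
 * be created, so a bogus address is never handed to the NIC. */
static int example_map_tx_skb(struct rtl8169_private *tp, struct sk_buff *skb,
                              u32 len, dma_addr_t *mapping)
{
        *mapping = pci_map_single(tp->pci_dev, skb->data, len,
                                  PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pci_dev, *mapping)) {
                dev_kfree_skb(skb);
                tp->stats.tx_dropped++;
                return -ENOMEM;         /* caller skips the descriptor update */
        }
        return 0;
}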
Example 13: islpci_mgmt_rx_fill

/*
 * Fill the receive queue for management frames with fresh buffers.
 */
int
islpci_mgmt_rx_fill(struct net_device *ndev)
{
        islpci_private *priv = netdev_priv(ndev);
        isl38xx_control_block *cb =     /* volatile not needed */
                (isl38xx_control_block *) priv->control_block;
        u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif

        while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
                u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
                struct islpci_membuf *buf = &priv->mgmt_rx[index];
                isl38xx_fragment *frag = &cb->rx_data_mgmt[index];

                if (buf->mem == NULL) {
                        buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
                        if (!buf->mem) {
                                /* printk(KERN_WARNING ...) stripped in this
                                 * kernel tree */
                                ;
                                return -ENOMEM;
                        }
                        buf->size = MGMT_FRAME_SIZE;
                }
                if (buf->pci_addr == 0) {
                        buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
                                                       MGMT_FRAME_SIZE,
                                                       PCI_DMA_FROMDEVICE);
                        if (!buf->pci_addr) {
                                /* printk(KERN_WARNING ...) stripped in this
                                 * kernel tree */
                                ;
                                return -ENOMEM;
                        }
                }

                /* be safe: always reset control block information */
                frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
                frag->flags = 0;
                frag->address = cpu_to_le32(buf->pci_addr);
                curr++;

                /* The fragment address in the control block must have
                 * been written before announcing the frame buffer to
                 * device */
                wmb();
                cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
        }
        return 0;
}

Developer ID: nos1609, Project: Chrono_Kernel-1, Lines: 54, Source: islpci_mgt.c
Example 14: sis_init_base_struct_addr

int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        void *base_struct_unaligned;
        struct sis_base_struct *base_struct;
        struct sis_sync_cmd_params params;
        unsigned long error_buffer_paddr;
        dma_addr_t bus_address;

        base_struct_unaligned = kzalloc(sizeof(*base_struct)
                + SIS_BASE_STRUCT_ALIGNMENT - 1, GFP_KERNEL);
        if (!base_struct_unaligned)
                return -ENOMEM;

        base_struct = PTR_ALIGN(base_struct_unaligned,
                SIS_BASE_STRUCT_ALIGNMENT);
        error_buffer_paddr = (unsigned long)ctrl_info->error_buffer_dma_handle;

        put_unaligned_le32(SIS_BASE_STRUCT_REVISION, &base_struct->revision);
        put_unaligned_le32(lower_32_bits(error_buffer_paddr),
                &base_struct->error_buffer_paddr_low);
        put_unaligned_le32(upper_32_bits(error_buffer_paddr),
                &base_struct->error_buffer_paddr_high);
        put_unaligned_le32(PQI_ERROR_BUFFER_ELEMENT_LENGTH,
                &base_struct->error_buffer_element_length);
        put_unaligned_le32(ctrl_info->max_io_slots,
                &base_struct->error_buffer_num_elements);

        bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
                sizeof(*base_struct), PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
                rc = -ENOMEM;
                goto out;
        }

        memset(&params, 0, sizeof(params));
        params.mailbox[1] = lower_32_bits((u64)bus_address);
        params.mailbox[2] = upper_32_bits((u64)bus_address);
        params.mailbox[3] = sizeof(*base_struct);

        rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
                &params);

        pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
                PCI_DMA_TODEVICE);
out:
        kfree(base_struct_unaligned);

        return rc;
}

Developer ID: AshishNamdev, Project: linux, Lines: 51, Source: smartpqi_sis.c
Example 15: p54p_refill_rx_ring

static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
        int ring_index, struct p54p_desc *ring, u32 ring_limit,
        struct sk_buff **rx_buf, u32 index)
{
        struct p54p_priv *priv = dev->priv;
        struct p54p_ring_control *ring_control = priv->ring_control;
        u32 limit, idx, i;

        idx = le32_to_cpu(ring_control->host_idx[ring_index]);
        limit = idx;
        limit -= index;
        limit = ring_limit - limit;

        i = idx % ring_limit;
        while (limit-- > 1) {
                struct p54p_desc *desc = &ring[i];

                if (!desc->host_addr) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = dev_alloc_skb(priv->common.rx_mtu + 32);
                        if (!skb)
                                break;

                        mapping = pci_map_single(priv->pdev,
                                                 skb_tail_pointer(skb),
                                                 priv->common.rx_mtu + 32,
                                                 PCI_DMA_FROMDEVICE);

                        if (pci_dma_mapping_error(priv->pdev, mapping)) {
                                dev_kfree_skb_any(skb);
                                dev_err(&priv->pdev->dev,
                                        "RX DMA Mapping error\n");
                                break;
                        }

                        desc->host_addr = cpu_to_le32(mapping);
                        desc->device_addr = 0;  // FIXME: necessary?
                        desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
                        desc->flags = 0;
                        rx_buf[i] = skb;
                }

                i++;
                idx++;
                i %= ring_limit;
        }

        wmb();
        ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}

Developer ID: 383530895, Project: linux, Lines: 51, Source: p54pci.c
Example 16: linux_pci_map_single

/*
 * invalidate or write back the cache
 * and convert a virtual address to a physical address
 */
dma_addr_t linux_pci_map_single(void *handle, void *ptr, size_t size, int sd_idx, int direction)
{
        PRTMP_ADAPTER pAd;
        POS_COOKIE pObj;

        /*
                ------ Porting Information ------

                > For Tx Alloc:
                        mgmt packets => sd_idx = 0
                                SwIdx: pAd->MgmtRing.TxCpuIdx
                                pTxD : pAd->MgmtRing.Cell[SwIdx].AllocVa;

                        data packets => sd_idx = 1
                                TxIdx : pAd->TxRing[pTxBlk->QueIdx].TxCpuIdx
                                QueIdx: pTxBlk->QueIdx
                                pTxD : pAd->TxRing[pTxBlk->QueIdx].Cell[TxIdx].AllocVa;

                > For Rx Alloc:
                        sd_idx = -1
        */

        pAd = (PRTMP_ADAPTER)handle;
        pObj = (POS_COOKIE)pAd->OS_Cookie;

        if (sd_idx == 1)
        {
                PTX_BLK pTxBlk;
                pTxBlk = (PTX_BLK)ptr;
                return pci_map_single(pObj->pci_dev, pTxBlk->pSrcBufData, pTxBlk->SrcBufLen, direction);
        }
        else
        {
                return pci_map_single(pObj->pci_dev, ptr, size, direction);
        }
}

Developer ID: mrtos, Project: Logitech-Revue, Lines: 40, Source: rt_rbus_pci_util.c
Example 17: pvscsi_map_buffers

static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
                               struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
                               struct PVSCSIRingReqDesc *e)
{
        unsigned count;
        unsigned bufflen = scsi_bufflen(cmd);
        struct scatterlist *sg;

        e->dataLen = bufflen;
        e->dataAddr = 0;
        if (bufflen == 0)
                return;

        sg = scsi_sglist(cmd);
        count = scsi_sg_count(cmd);
        if (count != 0) {
                int segs = scsi_dma_map(cmd);
                if (segs > 1) {
                        pvscsi_create_sg(ctx, sg, segs);

                        e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
                        ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
                                                    SGL_SIZE, PCI_DMA_TODEVICE);
                        e->dataAddr = ctx->sglPA;
                } else
                        e->dataAddr = sg_dma_address(sg);
        } else {
                /*
                 * In case there is no S/G list, scsi_sglist points
                 * directly to the buffer.
                 */
                ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
                                             cmd->sc_data_direction);
                e->dataAddr = ctx->dataPA;
        }
}

Developer ID: Medvedroid, Project: OT_903D-kernel-2.6.35.7, Lines: 36, Source: vmw_pvscsi.c
Example 18: start_tx

static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        struct netdev_desc *txdesc;
        unsigned entry;

        /* Note: Ordering is important here, set the field with the
           "ownership" bit last, and only then increment cur_tx. */

        /* Calculate the next Tx descriptor entry. */
        entry = np->cur_tx % TX_RING_SIZE;
        np->tx_skbuff[entry] = skb;
        txdesc = &np->tx_ring[entry];

        txdesc->next_desc = 0;
        /* Note: disable the interrupt generation here before releasing. */
        txdesc->status =
                cpu_to_le32((entry<<2) | DescIntrOnDMADone | DescIntrOnTx | DisableAlign);
        txdesc->frag[0].addr = cpu_to_le32(pci_map_single(np->pci_dev,
                skb->data, skb->len, PCI_DMA_TODEVICE));
        txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
        if (np->last_tx)
                np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
                        entry*sizeof(struct netdev_desc));
        np->last_tx = txdesc;
        np->cur_tx++;

        /* On some architectures: explicitly flush cache lines here. */

        if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1) {
                /* do nothing */
        } else {
                np->tx_full = 1;
                netif_stop_queue(dev);
        }
        /* Side effect: The read wakes the potentially-idle transmit channel. */
        if (readl(dev->base_addr + TxListPtr) == 0)
                writel(np->tx_ring_dma + entry*sizeof(*np->tx_ring),
                       dev->base_addr + TxListPtr);

        dev->trans_start = jiffies;

        if (debug > 4) {
                printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
                       dev->name, np->cur_tx, entry);
        }
        return 0;
}

Developer ID: liexusong, Project: Linux-2.4.16, Lines: 48, Source: sundance.c
Example 19: mlxsw_pci_wqe_frag_map

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
                                  int index, char *frag_data, size_t frag_len,
                                  int direction)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        dma_addr_t mapaddr;

        mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
        if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
                dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
                return -EIO;
        }
        mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
        mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
        return 0;
}

Developer ID: Lyude, Project: linux, Lines: 16, Source: pci.c
Example 20: init_rxtx_rings

static void init_rxtx_rings(struct net_device *dev)
{
        struct netdev_private *np = (struct netdev_private *)dev->priv;
        int i;

        np->rx_head_desc = &np->rx_ring[0];
        np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

        /* Initial all Rx descriptors. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].length = cpu_to_le32(np->rx_buf_sz);
                np->rx_ring[i].status = 0;
                np->rx_skbuff[i] = 0;
        }
        /* Mark the last entry as wrapping the ring. */
        np->rx_ring[i-1].length |= cpu_to_le32(DescEndRing);

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                skb->dev = dev;         /* Mark as being used by this device. */
                np->rx_addr[i] = pci_map_single(np->pdev,skb->tail,
                                        skb->len,PCI_DMA_FROMDEVICE);

                np->rx_ring[i].buffer1 = cpu_to_le32(np->rx_addr[i]);
                np->rx_ring[i].status = cpu_to_le32(DescOwn);
        }

        np->cur_rx = 0;
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        /* Initialize the Tx descriptors */
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = 0;
                np->tx_ring[i].status = 0;
        }
        np->tx_full = 0;
        np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

        writel(np->ring_dma_addr, dev->base_addr + RxRingPtr);
        writel(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
               dev->base_addr + TxRingPtr);
}

Developer ID: dmgerman, Project: original, Lines: 47, Source: winbond-840.c
Note: The pci_map_single examples in this article were collected by 纯净天空 from GitHub/MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Before redistributing or reusing any snippet, please consult the corresponding project's license; do not reproduce this compilation without permission.