This article collects and summarizes typical usage examples of the dma_map_single function in C (Linux kernel code). If you have been asking yourself what exactly dma_map_single does, how to call it, or what it looks like in real code, the curated examples below should help.
The following shows 20 code examples of dma_map_single, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C code examples.
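Before diving into the examples, here is a minimal sketch of the canonical streaming-DMA calling pattern that all of them follow: map an existing kernel buffer, check the handle with dma_mapping_error(), hand the bus address to the device, then unmap with the same size and direction. The names example_tx, dev, buf and len are illustrative, not taken from any example below.

#include <linux/dma-mapping.h>

/* Minimal streaming-DMA pattern: map, check, use, unmap. */
static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* mapping failed; do not use `handle` */

	/* ... program `handle` into the device and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}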
Example 1: sound_alloc_dmap
static int sound_alloc_dmap(struct dma_buffparms *dmap)
{
	char *start_addr, *end_addr;
	int dma_pagesize;
	int sz, size;
	struct page *page;

	dmap->mapping_flags &= ~DMA_MAP_MAPPED;

	if (dmap->raw_buf != NULL)
		return 0;	/* Already done */
	if (dma_buffsize < 4096)
		dma_buffsize = 4096;
	dma_pagesize = (dmap->dma < 4) ? (64 * 1024) : (128 * 1024);

	/*
	 * Now check for the Cyrix problem.
	 */
	if (isa_dma_bridge_buggy == 2)
		dma_pagesize = 32768;

	dmap->raw_buf = NULL;
	dmap->buffsize = dma_buffsize;
	if (dmap->buffsize > dma_pagesize)
		dmap->buffsize = dma_pagesize;
	start_addr = NULL;

	/*
	 * Now loop until we get a free buffer. Try to get smaller buffer if
	 * it fails. Don't accept smaller than 8k buffer for performance
	 * reasons.
	 */
	while (start_addr == NULL && dmap->buffsize > PAGE_SIZE) {
		for (sz = 0, size = PAGE_SIZE; size < dmap->buffsize; sz++, size <<= 1)
			;
		dmap->buffsize = PAGE_SIZE * (1 << sz);
		start_addr = (char *) __get_free_pages(GFP_ATOMIC|GFP_DMA|__GFP_NOWARN, sz);
		if (start_addr == NULL)
			dmap->buffsize /= 2;
	}

	if (start_addr == NULL) {
		printk(KERN_WARNING "Sound error: Couldn't allocate DMA buffer\n");
		return -ENOMEM;
	} else {
		/* make some checks */
		end_addr = start_addr + dmap->buffsize - 1;
		if (debugmem)
			printk(KERN_DEBUG "sound: start 0x%lx, end 0x%lx\n",
			       (long) start_addr, (long) end_addr);
		/* now check if it fits into the same dma-pagesize */
		if (((long) start_addr & ~(dma_pagesize - 1)) != ((long) end_addr & ~(dma_pagesize - 1))
		    || end_addr >= (char *) (MAX_DMA_ADDRESS)) {
			printk(KERN_ERR "sound: Got invalid address 0x%lx for %db DMA-buffer\n",
			       (long) start_addr, dmap->buffsize);
			return -EFAULT;
		}
	}
	dmap->raw_buf = start_addr;
	dmap->raw_buf_phys = dma_map_single(NULL, start_addr, dmap->buffsize,
					    DMA_BIDIRECTIONAL);
	for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
		SetPageReserved(page);
	return 0;
}
Developer: 020gzh | Project: linux | Lines: 65 | Source file: dmabuf.c
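Note that this legacy OSS sound driver passes NULL as the struct device and never checks the returned handle; modern kernels require a real device here and the DMA core rejects NULL. A hedged sketch of how the mapping step might look on a current kernel, assuming the card's device were available as a hypothetical snd_dev:

	/* snd_dev is hypothetical; this driver predates per-device DMA ops */
	dmap->raw_buf_phys = dma_map_single(snd_dev, start_addr,
					    dmap->buffsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(snd_dev, dmap->raw_buf_phys)) {
		free_pages((unsigned long)start_addr, sz);
		return -ENOMEM;
	}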
Example 2: serial8250_request_dma
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	dma_cap_mask_t mask;

	/* Default slave configuration parameters */
	dma->rxconf.direction = DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr = p->port.mapbase + UART_RX;

	dma->txconf.direction = DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr = p->port.mapbase + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		dma_release_channel(dma->rxchan);
		return -ENODEV;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf)
		goto err;

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
				      p->port.state->xmit.buf,
				      UART_XMIT_SIZE,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->rxchan);
	dma_release_channel(dma->txchan);

	return -ENOMEM;
}
Developer: imcek | Project: BEAGLEBONE_BSP | Lines: 66 | Source file: 8250_dma.c
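This example pairs the two common allocation styles: dma_alloc_coherent() for the RX buffer, which CPU and device share continuously, and a long-lived dma_map_single() streaming mapping for the TX circular buffer. With a streaming mapping reused across transfers, ownership has to be handed back to the device before each DMA. A sketch of that step (syncing the whole buffer for brevity; the real driver would sync only the region about to be transmitted):

	/* CPU has written new TX bytes into xmit.buf; give the device
	 * ownership of the mapped region before starting the transfer.
	 */
	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);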
Example 3: vop_find_vq
/*
 * This routine will assign vrings allocated in host/IO memory. Code in
 * virtio_ring.c however continues to access this IO memory as if it were local
 * memory without IO accessors.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	u8 type = ioread8(&vdev->desc->type);

	if (index >= ioread8(&vdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vrings allocated in host memory */
	vqconfig = _vop_vq_config(vdev->desc) + index;
	memcpy_fromio(&config, vqconfig, sizeof(config));
	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	vdev->vr[index] = va;
	memset_io(va, 0x0, _vr_size);

	info = va + _vr_size;
	magic = ioread32(&info->magic);

	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto unmap;
	}

	vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					    sizeof(struct vring_used_elem) *
					    le16_to_cpu(config.num));
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(vdev->used_size[index]));
	vdev->used_virt[index] = used;
	if (!used) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto unmap;
	}

	vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
			       (void __force *)va, vop_notify, callback,
			       name, used);
	if (!vq) {
		err = -ENOMEM;
		goto free_used;
	}

	vdev->used[index] = dma_map_single(&vpdev->dev, used,
					   vdev->used_size[index],
					   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	writeq(vdev->used[index], &vqconfig->used_address);

	vq->priv = vdev;
	return vq;
del_vq:
	vring_del_virtqueue(vq);
free_used:
	free_pages((unsigned long)used,
		   get_order(vdev->used_size[index]));
unmap:
	vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);
	return ERR_PTR(err);
}
Developer: Anjali05 | Project: linux | Lines: 89 | Source file: vop_main.c
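The error path above unwinds in reverse order of acquisition (del_vq, then free_used, then unmap), and the teardown of a successfully created queue must mirror it, additionally undoing the dma_map_single() with the same size and direction. A hedged sketch of that matching release, assumed from the setup above:

	/* Assumed teardown mirroring the setup (not shown in this excerpt) */
	vring_del_virtqueue(vq);
	dma_unmap_single(&vpdev->dev, vdev->used[index],
			 vdev->used_size[index], DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vdev->used_virt[index],
		   get_order(vdev->used_size[index]));
	vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);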
Example 4: octeon_mgmt_open
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers. */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

	/* Disable packet I/O. */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;	/* Strip the ending CRC */
	mix_ctl.s.en = 1;		/* Enable the port */
	mix_ctl.s.nbtarb = 0;		/* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
//......... some code omitted .........
Developer: nikai3d | Project: linux | Lines: 101 | Source file: octeon_mgmt.c
Example 5: msm_bam_dmux_write
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	/* if the skb does not have any tailroom for padding,
	   copy it into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb and memcpy is probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			return -ENOMEM;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding;
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));
	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		return -ENOMEM;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
				     DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		kfree(pkt);
		return -ENOMEM;
	}

	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	INIT_WORK(&pkt->work, bam_mux_write_done);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
			      pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);

	return rc;
}
Developer: Brainiarc7 | Project: android_kernel_huawei_u8185 | Lines: 83 | Source file: bam_dmux.c
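One detail worth flagging: this driver treats a zero return value as a mapping failure, but a bus address of 0 can be valid on some platforms and failures are not guaranteed to be 0. The portable check is dma_mapping_error(). A hedged correction of that snippet, keeping the NULL device the driver already passes (assuming this platform's dma_mapping_error() tolerates it):

	dma_address = dma_map_single(NULL, skb->data, skb->len,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(NULL, dma_address)) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		kfree(pkt);
		return -ENOMEM;
	}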
Example 6: tegra_uart_dma_channel_allocate
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
					   bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;

	dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
						    dma_to_memory ? "rx" : "tx");
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		dev_err(tup->uport.dev,
			"DMA channel alloc failed: %d\n", ret);
		return ret;
	}

	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
					     TEGRA_UART_RX_DMA_BUFFER_SIZE,
					     &dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
	} else {
		dma_phys = dma_map_single(tup->uport.dev,
					  tup->uport.state->xmit.buf,
					  UART_XMIT_SIZE, DMA_TO_DEVICE);
		dma_buf = tup->uport.state->xmit.buf;
	}

	if (dma_to_memory) {
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = 4;
	} else {
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		goto scrub;
	}

	if (dma_to_memory) {
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}
	return 0;

scrub:
	dma_release_channel(dma_chan);
	return ret;
}
Developer: mikemvk | Project: linux-at91 | Lines: 67 | Source file: serial-tegra.c
Example 7: arc_emac_rx
/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev:	Pointer to the network device.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns:	Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, driver starts from this + 1
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb = rx_buff->skb;
		skb_put(skb, pktlen);
		skb->dev = ndev;
		skb->protocol = eth_type_trans(skb, ndev);

		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		/* Prepare the BD for next cycle */
		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb)) {
			stats->rx_errors++;
			/* Because receive_skb is below, increment rx_dropped */
			stats->rx_dropped++;
			continue;
		}

		/* receive_skb only if new skb was allocated to avoid holes */
		netif_receive_skb(skb);

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			stats->rx_errors++;
			continue;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}
Developer: AkyZero | Project: wrapfs-latest | Lines: 93 | Source file: emac_main.c
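The loop above unmaps and replaces the RX buffer on every received frame. An alternative some drivers use for small frames is "copybreak": sync the existing mapping to the CPU, copy the payload into a freshly allocated small skb, sync ownership back, and leave the original buffer mapped for reuse. A sketch of that variant; COPYBREAK_LEN is an illustrative threshold, not from this driver, and skb_put/protocol handling on the copy is elided:

#define COPYBREAK_LEN 256	/* illustrative threshold */

	if (pktlen < COPYBREAK_LEN) {
		struct sk_buff *copy = netdev_alloc_skb_ip_align(ndev, pktlen);

		if (copy) {
			dma_sync_single_for_cpu(&ndev->dev,
						dma_unmap_addr(rx_buff, addr),
						pktlen, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(copy, rx_buff->skb->data, pktlen);
			dma_sync_single_for_device(&ndev->dev,
						   dma_unmap_addr(rx_buff, addr),
						   pktlen, DMA_FROM_DEVICE);
			/* the original buffer stays mapped and is reused */
		}
	}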
Example 8: arc_emac_open
/**
 * arc_emac_open - Open the network device.
 * @ndev:	Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the EMAC device and starts the Tx queue.
 * It also connects to the phy device.
 */
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = priv->phy_dev;
	int i;

	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	phy_dev->advertising &= phy_dev->supported;

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		    (RX_BD_NUM << 24) |	/* RX BD table length */
		    (TX_BD_NUM << 16) |	/* TX BD table length */
		    TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start_aneg(priv->phy_dev);

	netif_start_queue(ndev);

	return 0;
}
Developer: AkyZero | Project: wrapfs-latest | Lines: 87 | Source file: emac_main.c
Example 9: rt2x00queue_alloc_rxskb
struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there is
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Make sure we have the requested bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
Developer: Lubensius | Project: MT7630e-ASUSTP500LN-ubuntu | Lines: 69 | Source file: rt2x00queue.c
Example 10: octeon_mgmt_open
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers. */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;	/* Strip the ending CRC */
	mix_ctl.s.en = 1;		/* Enable the port */
	mix_ctl.s.nbtarb = 0;		/* Arbitration mode */
//......... some code omitted .........
Developer: ReneNyffenegger | Project: linux | Lines: 101 | Source file: octeon_mgmt.c
Example 11: talitos_process
//......... some code omitted .........
		    crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd2->crd_alg == CRYPTO_MD5 ||
		    crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/* We cannot order the SEC as requested */
			printk("%s: cannot do the order\n",
			       device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
	}

	/* assign in_fifo and out_fifo based on input/output struct type */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;

		if (skb_shinfo(skb)->nr_frags) {
			printk("%s: skb frags unimplemented\n",
			       device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
		td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
						      skb->len, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = skb->len;
		td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
						       skb->len, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = skb->len;
		td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
							skb->len, DMA_TO_DEVICE);
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		struct uio *uiop = (struct uio *)crp->crp_buf;

		if (uiop->uio_iovcnt > 1) {
			printk("%s: iov frags unimplemented\n",
			       device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
		td->ptr[in_fifo].ptr = dma_map_single(NULL,
			uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = crp->crp_ilen;
		/* crp_olen is never set; always use crp_ilen */
		td->ptr[out_fifo].ptr = dma_map_single(NULL,
			uiop->uio_iov->iov_base,
			crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = crp->crp_ilen;
	} else {
		/* using contig buffers */
		td->ptr[in_fifo].ptr = dma_map_single(NULL,
			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = crp->crp_ilen;
		td->ptr[out_fifo].ptr = dma_map_single(NULL,
			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = crp->crp_ilen;
//......... some code omitted .........
Developer: patrick-ken | Project: MyNet_N900 | Lines: 67 | Source file: talitos.c
Example 12: temac_dma_bd_init
/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	int i;

	lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					  &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					  &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     XTE_MAX_JUMBO_FRAME_SIZE,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
	}

	lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
					CHNL_CTRL_IRQ_EN |
					CHNL_CTRL_IRQ_DLY_EN |
					CHNL_CTRL_IRQ_COAL_EN);
	/* 0x10220483 */
	/* 0x00100483 */
	lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
					CHNL_CTRL_IRQ_EN |
					CHNL_CTRL_IRQ_DLY_EN |
					CHNL_CTRL_IRQ_COAL_EN |
					CHNL_CTRL_IRQ_IOE);
	/* 0xff010283 */

	lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}
Developer: 03199618 | Project: linux | Lines: 75 | Source file: ll_temac_main.c
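Each RX descriptor above stores the dma_map_single() handle in rx_bd_v[i].phys, so the release path must undo those mappings with the same size and direction before freeing the skbs. A hedged sketch of what temac_dma_bd_release() is expected to do for the RX side (the real function is not shown in this excerpt):

	/* Assumed RX-side teardown matching the init loop above */
	for (i = 0; i < RX_BD_NUM; i++) {
		if (!lp->rx_skb[i])
			break;
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skb[i]);
	}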
Example 13: mali_meson_poweron
void mali_meson_poweron(void)
{
	unsigned long flags;
	u32 p, p_aligned;
	dma_addr_t p_phy;
	int i;
	unsigned int_mask;

	if ((last_power_mode != -1) && (last_power_mode != MALI_POWER_MODE_DEEP_SLEEP)) {
		return;
	}

	if (READ_MALI_REG(MALI_PP_PP_VERSION) != MALI_PP_PP_VERSION_MAGIC) {
		printk("mali_meson_poweron: Mali APB bus access failed.\n");
		return;
	}

	if (READ_MALI_REG(MALI_MMU_DTE_ADDR) != 0) {
		printk("mali_meson_poweron: Mali is not really powered off.\n");
		return;
	}

	p = (u32)kcalloc(4096 * 4, 1, GFP_KERNEL);
	if (!p) {
		printk("mali_meson_poweron: NOMEM in meson_poweron\n");
		return;
	}

	p_aligned = __ALIGN_MASK(p, 4096);

	/* DTE */
	*(u32 *)(p_aligned) = (virt_to_phys((void *)p_aligned) + OFFSET_MMU_PTE) | MMU_FLAG_DTE_PRESENT;

	/* PTE */
	for (i = 0; i < 1024; i++) {
		*(u32 *)(p_aligned + OFFSET_MMU_PTE + i*4) =
			(virt_to_phys((void *)p_aligned) + OFFSET_MMU_VIRTUAL_ZERO + 4096 * i) |
			MMU_FLAG_PTE_PAGE_PRESENT |
			MMU_FLAG_PTE_RD_PERMISSION;
	}

	/* command & data */
	memcpy((void *)(p_aligned + OFFSET_MMU_VIRTUAL_ZERO), poweron_data, 4096);

	p_phy = dma_map_single(NULL, (void *)p_aligned, 4096 * 3, DMA_TO_DEVICE);

	/* Set up Mali GP MMU */
	WRITE_MALI_REG(MALI_MMU_DTE_ADDR, p_phy);
	WRITE_MALI_REG(MALI_MMU_CMD, 0);

	if ((READ_MALI_REG(MALI_MMU_STATUS) & 1) != 1) {
		printk("mali_meson_poweron: MMU enabling failed.\n");
	}

	/* Set up Mali command registers */
	WRITE_MALI_REG(MALI_APB_GP_VSCL_START, 0);
	WRITE_MALI_REG(MALI_APB_GP_VSCL_END, 0x38);
	WRITE_MALI_REG(MALI_APB_GP_INT_MASK, 0x3ff);

	spin_lock_irqsave(&lock, flags);
	int_mask = READ_CBUS_REG(A9_0_IRQ_IN1_INTR_MASK);

	/* Set up ARM Mali interrupt */
	WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT_CLR, 1 << 16);
	SET_CBUS_REG_MASK(A9_0_IRQ_IN1_INTR_MASK, 1 << 16);

	/* Start GP */
	WRITE_MALI_REG(MALI_APB_GP_CMD, 1);

	for (i = 0; i < 100; i++)
		udelay(500);

	/* check Mali GP interrupt */
	if (READ_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT) & (1 << 16)) {
		printk("mali_meson_poweron: Interrupt received.\n");
	} else {
		printk("mali_meson_poweron: No interrupt received.\n");
	}

	WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT_CLR, 1 << 16);
	CLEAR_CBUS_REG_MASK(A9_0_IRQ_IN1_INTR_MASK, 1 << 16);

	/* force reset GP */
	WRITE_MALI_REG(MALI_APB_GP_CMD, 1 << 5);

	/* stop MMU paging and reset */
	WRITE_MALI_REG(MALI_MMU_CMD, 1);
	WRITE_MALI_REG(MALI_MMU_CMD, 1 << 6);

	WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_MASK, int_mask);
	spin_unlock_irqrestore(&lock, flags);

	dma_unmap_single(NULL, p_phy, 4096 * 3, DMA_TO_DEVICE);
	kfree((void *)p);
}
Developer: CoreTech-Development | Project: buildroot-linux-kernel-m3 | Lines: 97 | Source file: mali_platform.c
Example 14: iwl_enqueue_hcmd
//......... some code omitted .........
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */
	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
					    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
		     q->write_ptr, idx, priv->cmd_queue);

	phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(priv->bus->dev, phys_addr)) {
			iwlagn_unmap_tfd(priv, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

out:
	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
Developer: 303750856 | Project: linux-3.1 | Lines: 101 | Source file: iwl-trans-tx-pcie.c
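A pattern worth noting in this example: the mapping is recorded with dma_unmap_addr_set()/dma_unmap_len_set() so the completion path can unmap without re-deriving the address, and these helpers compile to nothing on architectures that keep no unmap state. A sketch of the unmap side using the recorded values:

	/* Later, in the completion/teardown path */
	dma_unmap_single(priv->bus->dev,
			 dma_unmap_addr(out_meta, mapping),
			 dma_unmap_len(out_meta, len),
			 DMA_BIDIRECTIONAL);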
Example 15: greth_init_rings
static int greth_init_rings(struct greth_private *greth)
{
	struct sk_buff *skb;
	struct greth_bd *rx_bd, *tx_bd;
	u32 dma_addr;
	int i;

	rx_bd = greth->rx_bd_base;
	tx_bd = greth->tx_bd_base;

	/* Initialize descriptor rings and buffers */
	if (greth->gbit_mac) {
		for (i = 0; i < GRETH_RXBD_NUM; i++) {
			skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
			if (skb == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(greth->dev,
						  skb->data,
						  MAX_FRAME_SIZE+NET_IP_ALIGN,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth->rx_skbuff[i] = skb;
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}
	} else {
		/* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
		for (i = 0; i < GRETH_RXBD_NUM; i++) {
			greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
			if (greth->rx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}
			dma_addr = dma_map_single(greth->dev,
						  greth->rx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++) {
			greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
			if (greth->tx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}
			dma_addr = dma_map_single(greth->dev,
						  greth->tx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_TO_DEVICE);
			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&tx_bd[i].addr, dma_addr);
			greth_write_bd(&tx_bd[i].stat, 0);
		}
	}
	greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
		       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

	/* Initialize pointers. */
	greth->rx_cur = 0;
	greth->tx_next = 0;
	greth->tx_last = 0;
	greth->tx_free = GRETH_TXBD_NUM;

	/* Initialize descriptor base address */
	GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
	GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

	return 0;
//......... some code omitted .........
Developer: Core2idiot | Project: Kernel-Samsung-3.0...- | Lines: 101 | Source file: greth.c
Example 16: sonic_send_packet
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	dma_addr_t laddr;
	int length;
	int entry = lp->next_tx;

	if (sonic_debug > 2)
		;	/* empty: debug printk stripped in this source tree */

	length = skb->len;
	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	/*
	 * Map the packet data into the logical DMA address space
	 */
	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (!laddr) {
		;	/* empty: debug printk stripped in this source tree */
		dev_kfree_skb(skb);
		return NETDEV_TX_BUSY;
	}

	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
	sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
	sonic_tda_put(dev, entry, SONIC_TD_LINK,
		      sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

	/*
	 * Must set tx_skb[entry] only after clearing status, and
	 * before clearing EOL and before stopping queue
	 */
	wmb();
	lp->tx_len[entry] = length;
	lp->tx_laddr[entry] = laddr;
	lp->tx_skb[entry] = skb;

	wmb();
	sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
		      sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
	lp->eol_tx = entry;

	lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
	if (lp->tx_skb[lp->next_tx] != NULL) {
		/* The ring is full, the ISR has yet to process the next TD. */
		if (sonic_debug > 3)
			;	/* empty: debug printk stripped in this source tree */
		netif_stop_queue(dev);
		/* after this packet, wait for ISR to free up some TDAs */
	} else
		netif_start_queue(dev);

	if (sonic_debug > 2)
		;	/* empty: debug printk stripped in this source tree */

	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

	return NETDEV_TX_OK;
}
Developer: nos1609 | Project: Chrono_Kernel-1 | Lines: 67 | Source file: sonic.c
Example 17: omap3_onenand_write_bufferram
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
//......... remaining code omitted .........