This article collects typical usage examples of the dma_sync_single_for_device function in C/C++. If you have been wondering how dma_sync_single_for_device is actually used, what it is for, or where to find concrete call sites, the hand-picked code examples here may help.
The following presents 20 code examples of dma_sync_single_for_device, sorted by popularity by default. Upvoting the examples you find useful helps the site recommend better C/C++ code examples.
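Before the examples, here is a minimal, hypothetical sketch (not taken from any of the projects below) of the general pattern these drivers follow: a streaming mapping is created once with dma_map_single(), the CPU fills the buffer, ownership is handed to the device with dma_sync_single_for_device() before the transfer starts, and dma_sync_single_for_cpu() reclaims it afterwards. The function name and the transfer-start step are placeholders.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

static int example_tx_once(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Create a streaming mapping; the CPU owns the buffer initially. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	memset(buf, 0xa5, len);		/* CPU fills the buffer */

	/* Hand ownership to the device; flushes CPU caches as required. */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

	/* ... program the hardware and wait for completion (device specific) ... */

	/* Take ownership back before the CPU touches the buffer again. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}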
Example 1: sgdma_async_read
/* If the hardware is busy, don't restart the async read.
 * If the status register is 0 (the initial state), restart the async read,
 * probably for the first time when populating a receive buffer.
 * If the read status indicates not busy and reports a status, restart the
 * async DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
struct sgdma_descrip *descbase =
(struct sgdma_descrip *)priv->rx_dma_desc;
struct sgdma_descrip *cdesc = &descbase[0];
struct sgdma_descrip *ndesc = &descbase[1];
struct tse_buffer *rxbuffer = NULL;
if (!sgdma_rxbusy(priv)) {
rxbuffer = queue_rx_peekhead(priv);
if (rxbuffer == NULL) {
netdev_err(priv->dev, "no rx buffers available\n");
return 0;
}
sgdma_setup_descrip(cdesc, /* current descriptor */
ndesc, /* next descriptor */
sgdma_rxphysaddr(priv, ndesc),
0, /* read addr 0 for rx dma */
rxbuffer->dma_addr, /* write addr for rx dma */
0, /* read 'til EOP */
0, /* EOP: NA for rx dma */
0, /* read fixed: NA for rx dma */
0); /* SOP: NA for rx DMA */
dma_sync_single_for_device(priv->device,
priv->rxdescphys,
priv->sgdmadesclen,
DMA_TO_DEVICE);
iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
&csr->next_descrip);
iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
&csr->control);
return 1;
}
return 0;
}
Developer ID: 7799, Project: linux, Lines of code: 50, Source file: altera_sgdma.c
Example 2: lsdma_rxqbuf
/**
* lsdma_rxqbuf - Queue a read buffer
* @dma: DMA information structure
* @bufnum: buffer number
*
* Do everything which normally follows a copy from a driver buffer
* to a user buffer.
**/
static ssize_t
lsdma_rxqbuf (struct master_dma *dma, size_t bufnum)
{
unsigned int i;
struct lsdma_desc *desc;
if (bufnum != dma->cpu_buffer) {
return -EINVAL;
}
for (i = 0; i < dma->pointers_per_buf; i++) {
desc = dma->desc[dma->cpu_buffer * dma->pointers_per_buf + i];
dma_sync_single_for_device (dma->dev,
mdma_desc_to_dma (desc->dest_addr, desc->dest_addr_h),
(desc->csr & LSDMA_DESC_CSR_TOTALXFERSIZE),
DMA_FROM_DEVICE);
}
dma->cpu_buffer = (dma->cpu_buffer + 1) % dma->buffers;
dma->cpu_offset = 0;
return dma->bufsize;
}
Developer ID: MattHung, Project: sage-graphics, Lines of code: 28, Source file: lsdma.c
Example 3: tegra_start_dma_tx
static void tegra_start_dma_tx(struct tegra_uart_port *t, unsigned long bytes)
{
struct circ_buf *xmit;
xmit = &t->uport.state->xmit;
dma_sync_single_for_device(t->uport.dev, t->xmit_dma_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
t->fcr_shadow &= ~UART_FCR_T_TRIG_11;
t->fcr_shadow |= TEGRA_UART_TX_TRIG_4B;
uart_writeb(t, t->fcr_shadow, UART_FCR);
t->tx_bytes = bytes & ~(sizeof(u32)-1);
t->tx_dma_req.source_addr = t->xmit_dma_addr + xmit->tail;
t->tx_dma_req.size = t->tx_bytes;
t->tx_in_progress = TEGRA_TX_DMA;
tegra_dma_enqueue_req(t->tx_dma, &t->tx_dma_req);
}
Developer ID: vocoderism, Project: Tegra-Note-7, Lines of code: 20, Source file: tegra_hsuart.c
Example 4: tegra_uart_copy_rx_to_tty
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
struct tty_port *tty, int count)
{
int copied;
tup->uport.icount.rx += count;
if (!tty) {
dev_err(tup->uport.dev, "No tty port\n");
return;
}
dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
copied = tty_insert_flip_string_lock(tty,
((unsigned char *)(tup->rx_dma_buf_virt)), count);
if (copied != count)
dev_err(tup->uport.dev, "RxData DMA copy to tty layer failed\n");
dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
}
Developer ID: Toradex-Apalis-TK1-AndroidTV, Project: android_kernel_nvidia_mm, Lines of code: 20, Source file: serial-tegra.c
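The example above shows the common round trip for a long-lived RX buffer: claim it for the CPU, copy the received bytes out, then return it to the device for the next transfer. Below is a hypothetical, generic version of that pattern (identifiers are illustrative, not from serial-tegra.c, and the same headers as in the opening sketch are assumed). In this sketch both sync calls use DMA_FROM_DEVICE, matching the direction such a buffer would typically be mapped with; the DMA_TO_DEVICE in the example above follows that driver's own convention.

static void rx_buf_consume(struct device *dev, dma_addr_t buf_dma,
			   const void *buf_virt, size_t buf_len,
			   void *dst, size_t count)
{
	/* Claim the buffer: make the device-written data visible to the CPU. */
	dma_sync_single_for_cpu(dev, buf_dma, buf_len, DMA_FROM_DEVICE);

	memcpy(dst, buf_virt, count);

	/* Hand it back so the device may DMA into the same buffer again. */
	dma_sync_single_for_device(dev, buf_dma, buf_len, DMA_FROM_DEVICE);
}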
Example 5: tegra_uart_start_rx_dma
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!tup->rx_dma_desc) {
dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
return -EIO;
}
tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
tup->rx_dma_desc->callback_param = tup;
dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
count, DMA_TO_DEVICE);
tup->rx_bytes_requested = count;
tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
dma_async_issue_pending(tup->rx_dma_chan);
return 0;
}
Developer ID: Lyude, Project: linux, Lines of code: 21, Source file: serial-tegra.c
Example 6: alloc_pages
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
struct page *page = alloc_pages(pool->gfp_mask, pool->order);
if (!page)
return NULL;
/*
 * This is only being used to flush the page for DMA; this API is not really
 * suitable for calling from a driver, but no better way to flush a page for
 * DMA exists at this time.
 */
#ifdef CONFIG_64BIT
dma_sync_single_for_device(NULL, (dma_addr_t)page_to_phys(page),
PAGE_SIZE << pool->order,
DMA_BIDIRECTIONAL);
#else
arm_dma_ops.sync_single_for_device(NULL,
pfn_to_dma(NULL, page_to_pfn(page)),
PAGE_SIZE << pool->order,
DMA_BIDIRECTIONAL);
#endif
return page;
}
Developer ID: ShinySide, Project: SM-G361H, Lines of code: 22, Source file: ion_page_pool.c
Example 7: tegra_rx_dma_complete_callback
/*
* It is expected that the callers take the UART lock when this API is called.
*
* There are 2 contexts when this function is called:
*
 * 1. DMA ISR - The DMA ISR triggers the threshold complete callback, which
 *    calls the dequeue API, which in turn calls this callback. The UART lock
 *    is taken during the call to the threshold callback.
 *
 * 2. UART ISR - The UART calls the dequeue API, which in turn calls this API.
 *    In this case, the UART ISR takes the UART lock.
*/
static void tegra_rx_dma_complete_callback(struct tegra_dma_req *req)
{
struct tegra_uart_port *t = req->dev;
struct uart_port *u = &t->uport;
struct tty_struct *tty = u->state->port.tty;
int copied;
/* If we are here, DMA is stopped */
dev_dbg(t->uport.dev, "%s: %d %d\n", __func__, req->bytes_transferred,
req->status);
if (req->bytes_transferred) {
t->uport.icount.rx += req->bytes_transferred;
dma_sync_single_for_cpu(t->uport.dev, req->dest_addr,
req->size, DMA_FROM_DEVICE);
copied = tty_insert_flip_string(tty,
((unsigned char *)(req->virt_addr)),
req->bytes_transferred);
if (copied != req->bytes_transferred) {
WARN_ON(1);
dev_err(t->uport.dev, "Not able to copy uart data "
"to tty layer Req %d and coped %d\n",
req->bytes_transferred, copied);
}
dma_sync_single_for_device(t->uport.dev, req->dest_addr,
req->size, DMA_TO_DEVICE);
}
do_handle_rx_pio(t);
/* Push the read data later in caller place. */
if (req->status == -TEGRA_DMA_REQ_ERROR_ABORTED)
return;
spin_unlock(&u->lock);
tty_flip_buffer_push(u->state->port.tty);
spin_lock(&u->lock);
}
Developer ID: vocoderism, Project: Tegra-Note-7, Lines of code: 50, Source file: tegra_hsuart.c
Example 8: denali_read_page_raw
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
dma_addr_t addr = denali->buf.dma_buf;
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
if (page != denali->page) {
dev_err(denali->dev, "IN %s: page %d is not"
" equal to denali->page %d, investigate!!",
__func__, page, denali->page);
BUG();
}
setup_ecc_for_xfer(denali, false, true);
denali_enable_dma(denali, true);
dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
clear_interrupts(denali);
denali_setup_dma(denali, DENALI_READ);
/* wait for operation to complete */
irq_status = wait_for_irq(denali, irq_mask);
dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
denali_enable_dma(denali, false);
memcpy(buf, denali->buf.buf, mtd->writesize);
memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
return 0;
}
Developer ID: FEDEVEL, Project: imx6rex-linux-3.10.17, Lines of code: 38, Source file: denali.c
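Note how the example above passes DMA_FROM_DEVICE to both sync calls even though the device is the writer: the direction argument describes the mapping, while the for_device/for_cpu variants describe which side currently owns the buffer. Below is a hypothetical skeleton of the same handoff around a controller-driven read; the names are illustrative and not part of the Denali driver, and kernel headers as in the opening sketch are assumed.

static int read_via_bounce(struct device *dev, dma_addr_t bounce_dma,
			   const void *bounce_virt, void *out, size_t size)
{
	/* Give the buffer to the device before it starts writing into it. */
	dma_sync_single_for_device(dev, bounce_dma, size, DMA_FROM_DEVICE);

	/* ... start the controller DMA and wait for the completion IRQ ... */

	/* Reclaim the buffer so the CPU sees what the device wrote. */
	dma_sync_single_for_cpu(dev, bounce_dma, size, DMA_FROM_DEVICE);

	memcpy(out, bounce_virt, size);
	return 0;
}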
Example 9: sgdma_async_write
static int sgdma_async_write(struct altera_tse_private *priv,
struct sgdma_descrip *desc)
{
struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
if (sgdma_txbusy(priv))
return 0;
/* clear control and status */
iowrite32(0, &csr->control);
iowrite32(0x1f, &csr->status);
dma_sync_single_for_device(priv->device, priv->txdescphys,
priv->sgdmadesclen, DMA_TO_DEVICE);
iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
&csr->next_descrip);
iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
&csr->control);
return 1;
}
Developer ID: 7799, Project: linux, Lines of code: 23, Source file: altera_sgdma.c
Example 10: mvneta_send
static int mvneta_send(struct eth_device *edev, void *data, int len)
{
struct mvneta_port *priv = edev->priv;
struct txdesc *txdesc = priv->txdesc;
int ret, error, last_desc;
/* Flush transmit data */
dma_sync_single_for_device((unsigned long)data, len, DMA_TO_DEVICE);
memset(txdesc, 0, sizeof(*txdesc));
/* Fill the Tx descriptor */
txdesc->cmd_sts = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC;
txdesc->buf_ptr = (u32)data;
txdesc->byte_cnt = len;
/* Increase the number of prepared descriptors (one), by writing
* to the 'NoOfWrittenDescriptors' field in the PTXSU register.
*/
writel(1, priv->reg + MVNETA_TXQ_UPDATE_REG(0));
/* The controller updates the number of transmitted descriptors in
* the Tx port status register (PTXS).
*/
ret = wait_on_timeout(TRANSFER_TIMEOUT, !mvneta_pending_tx(priv));
dma_sync_single_for_cpu((unsigned long)data, len, DMA_TO_DEVICE);
if (ret) {
dev_err(&edev->dev, "transmit timeout\n");
return ret;
}
last_desc = readl(&txdesc->cmd_sts) & MVNETA_TXD_L_DESC;
error = readl(&txdesc->error);
if (last_desc && error & MVNETA_TXD_ERROR) {
dev_err(&edev->dev, "transmit error %d\n",
(error & TXD_ERROR_MASK) >> TXD_ERROR_SHIFT);
return -EIO;
}
//......... rest of the function omitted .........
Developer ID: masahir0y, Project: barebox-yamada, Lines of code: 37, Source file: mvneta.c
Example 11: octeon_mgmt_rx_fill_ring
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
int port = p->port;
while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
unsigned int size;
union mgmt_port_ring_entry re;
struct sk_buff *skb;
/* CN56XX pass 1 needs 8 bytes of padding. */
size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
skb = netdev_alloc_skb(netdev, size);
if (!skb)
break;
skb_reserve(skb, NET_IP_ALIGN);
__skb_queue_tail(&p->rx_list, skb);
re.d64 = 0;
re.s.len = size;
re.s.addr = dma_map_single(p->dev, skb->data,
size,
DMA_FROM_DEVICE);
/* Put it in the ring. */
p->rx_ring[p->rx_next_fill] = re.d64;
dma_sync_single_for_device(p->dev, p->rx_ring_handle,
ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
DMA_BIDIRECTIONAL);
p->rx_next_fill =
(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
p->rx_current_fill++;
/* Ring the bell. */
cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
}
}
Developer ID: andi34, Project: Dhollmen_Kernel, Lines of code: 37, Source file: octeon_mgmt.c
Example 12: caam_jr_enqueue
/**
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
* -EBUSY if the queue is full, -EIO if it cannot map the caller's
* descriptor.
* @dev: device of the job ring to be used. This device should have
* been assigned prior by caam_jr_register().
 * @desc: points to a job descriptor that executes our request. All
* descriptors (and all referenced data) must be in a DMAable
* region, and all data references must be physical addresses
* accessible to CAAM (i.e. within a PAMU window granted
* to it).
* @cbk: pointer to a callback function to be invoked upon completion
* of this request. This has the form:
* callback(struct device *dev, u32 *desc, u32 stat, void *arg)
* where:
* @dev: contains the job ring device that processed this
* response.
 *        @desc: descriptor that initiated the request, the same as
 *               the "desc" argument passed to caam_jr_enqueue().
* @status: untranslated status received from CAAM. See the
* reference manual for a detailed description of
* error meaning, or see the JRSTA definitions in the
* register header file
 *        @areq: optional pointer to a user argument passed with the
 *               original request, for use at callback time.
**/
int caam_jr_enqueue(struct device *dev, u32 *desc,
void (*cbk)(struct device *dev, u32 *desc,
u32 status, void *areq),
void *areq)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
struct caam_jrentry_info *head_entry;
unsigned long flags;
int head, tail, desc_size;
dma_addr_t desc_dma, inpbusaddr;
desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, desc_dma)) {
dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
return -EIO;
}
dma_sync_single_for_device(dev, desc_dma, desc_size, DMA_TO_DEVICE);
inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
dma_sync_single_for_device(dev, inpbusaddr,
sizeof(dma_addr_t) * JOBR_DEPTH,
DMA_TO_DEVICE);
spin_lock_irqsave(&jrp->inplock, flags);
head = jrp->head;
tail = ACCESS_ONCE(jrp->tail);
if (!rd_reg32(&jrp->rregs->inpring_avail) ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
spin_unlock_irqrestore(&jrp->inplock, flags);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
return -EBUSY;
}
head_entry = &jrp->entinfo[head];
head_entry->desc_addr_virt = desc;
head_entry->desc_size = desc_size;
head_entry->callbk = (void *)cbk;
head_entry->cbkarg = areq;
head_entry->desc_addr_dma = desc_dma;
jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
dma_sync_single_for_device(dev, inpbusaddr,
sizeof(dma_addr_t) * JOBR_DEPTH,
DMA_TO_DEVICE);
smp_wmb();
jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
(JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
wmb();
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
spin_unlock_irqrestore(&jrp->inplock, flags);
return 0;
}
Developer ID: panfudonmx6q, Project: imx6q_fsl, Lines of code: 91, Source file: jr.c
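The enqueue path above illustrates the ordering that matters when a descriptor ring lives in streaming-mapped memory: write the ring slot, sync the ring for the device, issue a write barrier, and only then ring the doorbell. Below is a hypothetical, stripped-down version of that sequence; the register layout, the one-job doorbell value, and all names are illustrative, not the CAAM API, and <linux/io.h> plus the earlier headers are assumed.

static void ring_submit(struct device *dev, dma_addr_t *ring_virt,
			dma_addr_t ring_dma, unsigned int depth,
			unsigned int slot, dma_addr_t job_dma,
			void __iomem *doorbell)
{
	ring_virt[slot] = job_dma;

	/* Push the updated ring entry out to memory the device will read. */
	dma_sync_single_for_device(dev, ring_dma, sizeof(dma_addr_t) * depth,
				   DMA_TO_DEVICE);

	wmb();			/* ring update must land before the doorbell */
	writel(1, doorbell);	/* tell the hardware one new job is queued */
}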
Example 13: kbase_mmu_sync_pgd
static void kbase_mmu_sync_pgd(struct device *dev,
dma_addr_t handle, size_t size)
{
dma_sync_single_for_device(dev, handle, size, DMA_TO_DEVICE);
}
Developer ID: TimeMachine, Project: linux-juno_3.18, Lines of code: 5, Source file: mali_kbase_mmu.c
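A wrapper like the one above is typically called after the CPU rewrites part of a GPU page table. Since dma_sync_single_for_device() accepts a sub-range of a mapping, only the bytes actually touched need to be pushed. The illustration below is hypothetical; the types and names are not from mali_kbase_mmu.c, and <linux/types.h> plus the earlier headers are assumed.

static void push_pgd_entries(struct device *dev, u64 *pgd_virt,
			     dma_addr_t pgd_dma, unsigned int first,
			     unsigned int nr, u64 value)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		pgd_virt[first + i] = value;

	/* Partial syncs are allowed: sync only the dirty entries. */
	dma_sync_single_for_device(dev, pgd_dma + first * sizeof(u64),
				   nr * sizeof(u64), DMA_TO_DEVICE);
}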
Example 14: hsu_dma_rx
/* This is always called in spinlock protected mode, so
 * modifying the timeout timer is safe here */
void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
{
struct hsu_dma_buffer *dbuf = &up->rxbuf;
struct hsu_dma_chan *chan = up->rxc;
struct uart_port *port = &up->port;
struct tty_struct *tty = port->state->port.tty;
int count;
if (!tty)
return;
/*
 * First we need to know how many bytes have already been transferred,
 * then check whether this is a timeout DMA irq; if so, return the
 * trailing bytes, push them up and re-enable the
 * channel
*/
/* Timeout IRQ, need to wait some time, see Errata 2 */
if (int_sts & 0xf00)
udelay(2);
/* Stop the channel */
chan_writel(chan, HSU_CH_CR, 0x0);
count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
if (!count) {
/* Restart the channel before we leave */
chan_writel(chan, HSU_CH_CR, 0x3);
return;
}
del_timer(&chan->rx_timer);
dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
dbuf->dma_size, DMA_FROM_DEVICE);
/*
* Head will only wrap around when we recycle
* the DMA buffer, and when that happens, we
* explicitly set tail to 0. So head will
* always be greater than tail.
*/
tty_insert_flip_string(tty, dbuf->buf, count);
port->icount.rx += count;
dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
dbuf->dma_size, DMA_FROM_DEVICE);
/* Reprogram the channel */
chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
chan_writel(chan, HSU_CH_DCR, 0x1
| (0x1 << 8)
| (0x1 << 16)
| (0x1 << 24) /* timeout bit, see HSU Errata 1 */
);
tty_flip_buffer_push(tty);
chan_writel(chan, HSU_CH_CR, 0x3);
chan->rx_timer.expires = jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ;
add_timer(&chan->rx_timer);
}
Developer ID: ANFS, Project: ANFS-kernel, Lines of code: 65, Source file: mfd.c
Example 15: myri_rx
static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
struct recvq __iomem *rq = mp->rq;
struct recvq __iomem *rqa = mp->rqack;
int entry = sbus_readl(&rqa->head);
int limit = sbus_readl(&rqa->tail);
int drops;
DRX(("entry[%d] limit[%d] ", entry, limit));
if (entry == limit)
return;
drops = 0;
DRX(("\n"));
while (entry != limit) {
struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
u32 csum = sbus_readl(&rxdack->csum);
int len = sbus_readl(&rxdack->myri_scatters[0].len);
int index = sbus_readl(&rxdack->ctx);
struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
struct sk_buff *skb = mp->rx_skbs[index];
/* Ack it. */
sbus_writel(NEXT_RX(entry), &rqa->head);
/* Check for errors. */
DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
dma_sync_single_for_cpu(&mp->myri_op->dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE, DMA_FROM_DEVICE);
if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
DRX(("ERROR["));
dev->stats.rx_errors++;
if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
DRX(("BAD_LENGTH] "));
dev->stats.rx_length_errors++;
} else {
DRX(("NO_PADDING] "));
dev->stats.rx_frame_errors++;
}
/* Return it to the LANAI. */
drop_it:
drops++;
DRX(("DROP "));
dev->stats.rx_dropped++;
dma_sync_single_for_device(&mp->myri_op->dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE,
DMA_FROM_DEVICE);
sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
sbus_writel(index, &rxd->ctx);
sbus_writel(1, &rxd->num_sg);
sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
goto next;
}
DRX(("len[%d] ", len));
if (len > RX_COPY_THRESHOLD) {
struct sk_buff *new_skb;
u32 dma_addr;
DRX(("BIGBUFF "));
new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
if (new_skb == NULL) {
DRX(("skb_alloc(FAILED) "));
goto drop_it;
}
dma_unmap_single(&mp->myri_op->dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE,
DMA_FROM_DEVICE);
mp->rx_skbs[index] = new_skb;
new_skb->dev = dev;
skb_put(new_skb, RX_ALLOC_SIZE);
dma_addr = dma_map_single(&mp->myri_op->dev,
new_skb->data,
RX_ALLOC_SIZE,
DMA_FROM_DEVICE);
sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
sbus_writel(index, &rxd->ctx);
sbus_writel(1, &rxd->num_sg);
sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
/* Trim the original skb for the netif. */
DRX(("trim(%d) ", len));
skb_trim(skb, len);
} else {
struct sk_buff *copy_skb = dev_alloc_skb(len);
DRX(("SMALLBUFF "));
if (copy_skb == NULL) {
DRX(("dev_alloc_skb(FAILED) "));
goto drop_it;
}
/* DMA sync already done above. */
copy_skb->dev = dev;
DRX(("resv_and_put "));
skb_put(copy_skb, len);
skb_copy_from_linear_data(skb, copy_skb->data, len);
//......... part of the code omitted .........
Developer ID: nos1609, Project: Chrono_Kernel-1, Lines of code: 101, Source file: myri_sbus.c
Example 16: greth_rx
static int greth_rx(struct net_device *dev, int limit)
{
struct greth_private *greth;
struct greth_bd *bdp;
struct sk_buff *skb;
int pkt_len;
int bad, count;
u32 status, dma_addr;
unsigned long flags;
greth = netdev_priv(dev);
for (count = 0; count < limit; ++count) {
bdp = greth->rx_bd_base + greth->rx_cur;
GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
mb();
status = greth_read_bd(&bdp->stat);
if (unlikely(status & GRETH_BD_EN)) {
break;
}
dma_addr = greth_read_bd(&bdp->addr);
bad = 0;
/* Check status for errors. */
if (unlikely(status & GRETH_RXBD_STATUS)) {
if (status & GRETH_RXBD_ERR_FT) {
dev->stats.rx_length_errors++;
bad = 1;
}
if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
dev->stats.rx_frame_errors++;
bad = 1;
}
if (status & GRETH_RXBD_ERR_CRC) {
dev->stats.rx_crc_errors++;
bad = 1;
}
}
if (unlikely(bad)) {
dev->stats.rx_errors++;
} else {
pkt_len = status & GRETH_BD_LEN;
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
if (unlikely(skb == NULL)) {
if (net_ratelimit())
dev_warn(&dev->dev, "low on memory - " "packet dropped\n");
dev->stats.rx_dropped++;
} else {
skb_reserve(skb, NET_IP_ALIGN);
dma_sync_single_for_cpu(greth->dev,
dma_addr,
pkt_len,
DMA_FROM_DEVICE);
if (netif_msg_pktdata(greth))
greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_bytes += pkt_len;
dev->stats.rx_packets++;
netif_receive_skb(skb);
}
}
status = GRETH_BD_EN | GRETH_BD_IE;
if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
status |= GRETH_BD_WR;
}
wmb();
greth_write_bd(&bdp->stat, status);
dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
greth_enable_rx(greth);
spin_unlock_irqrestore(&greth->devlock, flags);
greth->rx_cur = NEXT_RX(greth->rx_cur);
}
return count;
}
Developer ID: 513855417, Project: linux, Lines of code: 96, Source file: greth.c
Example 17: dev_alloc_skb
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
struct skb_frag_struct *skb_frags,
struct mlx4_en_rx_alloc *page_alloc,
unsigned int length)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct sk_buff *skb;
void *va;
int used_frags;
dma_addr_t dma;
skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
if (!skb) {
en_dbg(RX_ERR, priv, "Failed allocating skb\n");
return NULL;
}
skb->dev = priv->dev;
skb_reserve(skb, NET_IP_ALIGN);
skb->len = length;
skb->truesize = length + sizeof(struct sk_buff);
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
* synch buffers for the copy */
dma = be64_to_cpu(rx_desc->data[0].addr);
dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, va, length);
dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
DMA_FROM_DEVICE);
skb->tail += length;
} else {
/* Move relevant fragments to skb */
used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
skb_shinfo(skb)->frags,
page_alloc, length);
if (unlikely(!used_frags)) {
kfree_skb(skb);
return NULL;
}
skb_shinfo(skb)->nr_frags = used_frags;
/* Copy headers into the skb linear buffer */
memcpy(skb->data, va, HEADER_COPY_SIZE);
skb->tail += HEADER_COPY_SIZE;
/* Skip headers in first fragment */
skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
/* Adjust size of first fragment */
skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
skb->data_len = length - HEADER_COPY_SIZE;
}
return skb;
}
Developer ID: andi34, Project: Dhollmen_Kernel, Lines of code: 61, Source file: en_rx.c
Example 18: omap_8250_tx_dma
static int omap_8250_tx_dma(struct uart_8250_port *p)
{
struct uart_8250_dma *dma = p->dma;
struct omap8250_priv *priv = p->port.private_data;
struct circ_buf *xmit = &p->port.state->xmit;
struct dma_async_tx_descriptor *desc;
unsigned int skip_byte = 0;
int ret;
if (dma->tx_running)
return 0;
if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
/*
* Even if no data, we need to return an error for the two cases
* below so serial8250_tx_chars() is invoked and properly clears
* THRI and/or runtime suspend.
*/
if (dma->tx_err || p->capabilities & UART_CAP_RPM) {
ret = -EBUSY;
goto err;
}
if (p->ier & UART_IER_THRI) {
p->ier &= ~UART_IER_THRI;
serial_out(p, UART_IER, p->ier);
}
return 0;
}
dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
if (priv->habit & OMAP_DMA_TX_KICK) {
u8 tx_lvl;
/*
* We need to put the first byte into the FIFO in order to start
* the DMA transfer. For transfers smaller than four bytes we
 * don't bother doing DMA at all. It seems not to matter if there
* are still bytes in the FIFO from the last transfer (in case
* we got here directly from omap_8250_dma_tx_complete()). Bytes
* leaving the FIFO seem not to trigger the DMA transfer. It is
* really the byte that we put into the FIFO.
* If the FIFO is already full then we most likely got here from
* omap_8250_dma_tx_complete(). And this means the DMA engine
* just completed its work. We don't have to wait the complete
* 86us at 115200,8n1 but around 60us (not to mention lower
* baudrates). So in that case we take the interrupt and try
* again with an empty FIFO.
*/
tx_lvl = serial_in(p, UART_OMAP_TX_LVL);
if (tx_lvl == p->tx_loadsz) {
ret = -EBUSY;
goto err;
}
if (dma->tx_size < 4) {
ret = -EINVAL;
goto err;
}
skip_byte = 1;
}
desc = dmaengine_prep_slave_single(dma->txchan,
dma->tx_addr + xmit->tail + skip_byte,
dma->tx_size - skip_byte, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
ret = -EBUSY;
goto err;
}
dma->tx_running = 1;
desc->callback = omap_8250_dma_tx_complete;
desc->callback_param = p;
dma->tx_cookie = dmaengine_submit(desc);
dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
dma_async_issue_pending(dma->txchan);
if (dma->tx_err)
dma->tx_err = 0;
if (p->ier & UART_IER_THRI) {
p->ier &= ~UART_IER_THRI;
serial_out(p, UART_IER, p->ier);
}
if (skip_byte)
serial_out(p, UART_TX, xmit->buf[xmit->tail]);
return 0;
err:
dma->tx_err = 1;
return ret;
}
Developer ID: 383530895, Project: linux, Lines of code: 94, Source file: 8250_omap.c
Example 19: dwmci_cmd
static int
dwmci_cmd(struct mci_host *mci, struct mci_cmd *cmd, struct mci_data *data)
{
struct dwmci_host *host = to_dwmci_host(mci);
int flags = 0;
uint32_t mask;
uint32_t ctrl;
uint64_t start;
int ret;
unsigned int num_bytes = 0;
start = get_time_ns();
while (1) {
if (!(dwmci_readl(host, DWMCI_STATUS) & DWMCI_STATUS_BUSY))
break;
if (is_timeout(start, 100 * MSECOND)) {
dev_dbg(host->dev, "Timeout on data busy\n");
return -ETIMEDOUT;
}
}
dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);
if (data) {
num_bytes = data->blocks * data->blocksize;
if (data->flags & MMC_DATA_WRITE)
dma_sync_single_for_device((unsigned long)data->src,
num_bytes, DMA_TO_DEVICE);
else
dma_sync_single_for_device((unsigned long)data->dest,
num_bytes, DMA_FROM_DEVICE);
ret = dwmci_prepare_data(host, data);
if (ret)
return ret;
}
dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);
if (data)
flags = dwmci_set_transfer_mode(host, data);
if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
return -EINVAL;
if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
flags |= DWMCI_CMD_ABORT_STOP;
else
flags |= DWMCI_CMD_PRV_DAT_WAIT;
if (cmd->resp_type & MMC_RSP_PRESENT) {
flags |= DWMCI_CMD_RESP_EXP;
if (cmd->resp_type & MMC_RSP_136)
flags |= DWMCI_CMD_RESP_LENGTH;
}
if (cmd->resp_type & MMC_RSP_CRC)
flags |= DWMCI_CMD_CHECK_CRC;
flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);
dev_dbg(host->dev, "Sending CMD%d\n", cmd->cmdidx);
dwmci_writel(host, DWMCI_CMD, flags);
start = get_time_ns();
while (1) {
mask = dwmci_readl(host, DWMCI_RINTSTS);
if (mask & DWMCI_INTMSK_CDONE) {
if (!data)
dwmci_writel(host, DWMCI_RINTSTS, mask);
break;
}
if (is_timeout(start, 100 * MSECOND)) {
dev_dbg(host->dev, "Send command timeout..\n");
return -ETIMEDOUT;
}
}
if (mask & DWMCI_INTMSK_RTO) {
dev_dbg(host->dev, "Response Timeout..\n");
return -ETIMEDOUT;
} else if (mask & DWMCI_INTMSK_RE) {
dev_dbg(host->dev, "Response Error..\n");
return -EIO;
}
if (cmd->resp_type & MMC_RSP_PRESENT) {
if (cmd->resp_type & MMC_RSP_136) {
cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
} else {
cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
}
}
//......... part of the code omitted .........
Developer ID: cherojeong, Project: barebox, Lines of code: 101, Source file: dw_mmc.c
Example 20: caam_drv_ctx_update
int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
int ret;
u32 num_words;
struct qman_fq *new_fq, *old_fq;
struct device *qidev = drv_ctx->qidev;
num_words = desc_len(sh_desc);
if (num_words > MAX_SDLEN) {
dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
return -EINVAL;
}
/* Note down older req FQ */
old_fq = drv_ctx->req_fq;
/* Create a new req FQ in parked state */
new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
drv_ctx->context_a, 0);
if (unlikely(IS_ERR_OR_NULL(new_fq))) {
dev_err(qidev, "FQ allocation for shdesc update failed\n");
/* PTR_ERR(NULL) would be 0; make sure a NULL result still fails */
return new_fq ? PTR_ERR(new_fq) : -ENOMEM;
}
/* Hook up new FQ to context so that new requests keep queuing */
drv_ctx->req_fq = new_fq;
/* Empty and remove the older FQ */
ret = empty_caam_fq(old_fq);
if (ret) {
dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
/* We can revert to older FQ */
drv_ctx->req_fq = old_fq;
if (kill_fq(qidev, new_fq))
dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
new_fq->fqid);
return ret;
}
/*
* Re-initialise pre-header. Set RSLS and SDLEN.
* Update the shared descriptor for driver context.
*/
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
num_words);
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
dma_sync_single_for_device(qidev, drv_ctx->context_a,
sizeof(drv_ctx->sh_desc) +
sizeof(drv_ctx->prehdr),
DMA_BIDIRECTIONAL);
/* Put the new FQ in scheduled state */
ret = qman_schedule_fq(new_fq);
if (ret) {
dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
/*
* We can kill new FQ and revert to old FQ.
* Since the desc is already modified, it is success case
*/
drv_ctx->req_fq = old_fq;
if (kill_fq(qidev, new_fq))
dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
new_fq->fqid);
} else if (kill_fq(qidev, old_fq)) {
dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid);
}
return 0;
}
Developer ID: 01org, Project: thunderbolt-software-kernel-tree, Lines of code: 75, Source file: qi.c
Note: The dma_sync_single_for_device examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.