
C++ dma_map_page Function Code Examples


This article collects typical usage examples of the C++ dma_map_page function. If you are struggling with questions such as how dma_map_page is used in practice, what its parameters look like at real call sites, or simply want working examples to learn from, the curated snippets below should help.



The following section presents 20 code examples of the dma_map_page function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
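Before turning to the examples, note the pattern that nearly all of them share: map a page with dma_map_page(), check the returned handle with dma_mapping_error(), hand the bus address to the device, and later release it with dma_unmap_page() using the same length and direction. The sketch below distills that pattern for a recent kernel; my_map_rx_page() and my_unmap_rx_page() are hypothetical helper names for illustration only, not functions from any of the projects quoted below.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm_types.h>

/* Map one page for device writes (RX); returns 0 on success, -ENOMEM on failure. */
static int my_map_rx_page(struct device *dev, struct page *page,
			  size_t len, dma_addr_t *addr)
{
	*addr = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);

	/* A streaming mapping can fail; never use or unmap a bad handle. */
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;

	return 0;
}

/* Length and direction must match the original dma_map_page() call. */
static void my_unmap_rx_page(struct device *dev, dma_addr_t addr, size_t len)
{
	dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
}

Every driver below follows some variant of this sequence; the interesting differences lie in how mapping failures are unwound and in whether the direction is DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_BIDIRECTIONAL.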

Example 1: do_async_pqxor

/**
 * do_async_pqxor - asynchronously calculate P and/or Q
 */
static struct dma_async_tx_descriptor *
do_async_pqxor(struct dma_device *device,
	struct dma_chan *chan,
	struct page *pdest, struct page *qdest,
	struct page **src_list, unsigned char *scoef_list,
	unsigned int offset, unsigned int src_cnt, size_t len,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct page *dest;
	dma_addr_t dma_dest[2];
	dma_addr_t *dma_src = (dma_addr_t *) src_list;
	unsigned char *scf = qdest ? scoef_list : NULL;
	struct dma_async_tx_descriptor *tx;
	int i, dst_cnt = 0, zdst = flags & ASYNC_TX_XOR_ZERO_DST ? 1 : 0;
	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

	if (flags & ASYNC_TX_XOR_ZERO_DST)
		dma_prep_flags |= DMA_PREP_ZERO_DST;

	/*  One parity (P or Q) calculation is initiated always;
	 * first always try Q
	 */
	dest = qdest ? qdest : pdest;
	dma_dest[dst_cnt++] = dma_map_page(device->dev, dest, offset, len,
					    DMA_FROM_DEVICE);

	/* Switch to the next destination */
	if (qdest && pdest) {
		/* Both destinations are set, thus here we deal with P */
		dma_dest[dst_cnt++] = dma_map_page(device->dev, pdest, offset,
						len, DMA_FROM_DEVICE);
	}

	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(device->dev, src_list[i],
			offset, len, DMA_TO_DEVICE);

	/* Since we have clobbered the src_list we are committed
	 * to doing this asynchronously.  Drivers force forward progress
	 * in case they can not provide a descriptor
	 */
	tx = device->device_prep_dma_pqxor(chan, dma_dest, dst_cnt, dma_src,
					   src_cnt, scf, len, dma_prep_flags);
	if (!tx) {
		if (depend_tx)
			dma_wait_for_async_tx(depend_tx);

		while (!tx)
			tx = device->device_prep_dma_pqxor(chan,
							   dma_dest, dst_cnt,
							   dma_src, src_cnt,
							   scf, len,
							   dma_prep_flags);
	}

	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);

	return tx;
}
Developer: ska-sa, Project: borph_linux_devel, Lines: 63, Source: async_pqxor.c


Example 2: dcp_dma_map

static int dcp_dma_map(struct dcp_dev *dev,
	struct ablkcipher_walk *walk, struct dcp_hw_packet *pkt)
{
	dev_dbg(dev->dev, "map packet %x", (unsigned int) pkt);
	/* align to length = 16 */
	pkt->size = walk->nbytes - (walk->nbytes % 16);

	pkt->src = dma_map_page(dev->dev, walk->src.page, walk->src.offset,
		pkt->size, DMA_TO_DEVICE);

	if (pkt->src == 0) {
		dev_err(dev->dev, "Unable to map src");
		return -ENOMEM;
	}

	pkt->dst = dma_map_page(dev->dev, walk->dst.page, walk->dst.offset,
		pkt->size, DMA_FROM_DEVICE);

	if (pkt->dst == 0) {
		dev_err(dev->dev, "Unable to map dst");
		dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}
Developer: BozkurTR, Project: kernel, Lines: 26, Source: dcp.c


Example 3: async_memcpy

/**
 * async_memcpy - attempt to copy memory with a dma engine.
 * @dest: destination page
 * @src: src page
 * @dest_offset: offset into 'dest' to start transaction
 * @src_offset: offset into 'src' to start transaction
 * @len: length in bytes
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK
 */
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
	     unsigned int src_offset, size_t len,
	     struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
						      &dest, 1, &src, 1, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
		dma_addr_t dma_dest, dma_src;
		unsigned long dma_prep_flags = 0;

		if (submit->cb_fn)
			dma_prep_flags |= DMA_PREP_INTERRUPT;
		if (submit->flags & ASYNC_TX_FENCE)
			dma_prep_flags |= DMA_PREP_FENCE;
		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
					DMA_FROM_DEVICE);

		dma_src = dma_map_page(device->dev, src, src_offset, len,
				       DMA_TO_DEVICE);

		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
						    len, dma_prep_flags);
		if (!tx) {
			dma_unmap_page(device->dev, dma_dest, len,
				       DMA_FROM_DEVICE);
			dma_unmap_page(device->dev, dma_src, len,
				       DMA_TO_DEVICE);
		}
	}

	if (tx) {
		pr_debug("%s: (async) len: %zu\n", __func__, len);
		async_tx_submit(chan, tx, submit);
	} else {
		void *dest_buf, *src_buf;
		pr_debug("%s: (sync) len: %zu\n", __func__, len);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
		src_buf = kmap_atomic(src, KM_USER1) + src_offset;

		memcpy(dest_buf, src_buf, len);

		kunmap_atomic(src_buf, KM_USER1);
		kunmap_atomic(dest_buf, KM_USER0);

		async_tx_sync_epilog(submit);
	}

	return tx;
}
Developer: 3null, Project: fastsocket, Lines: 68, Source: async_memcpy.c


Example 4: async_sum_product

static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
		  size_t len, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &dest, 1, srcs, 2, len);
	struct dma_device *dma = chan ? chan->device : NULL;
	const u8 *amul, *bmul;
	u8 ax, bx;
	u8 *a, *b, *c;

	if (dma) {
		dma_addr_t dma_dest[2];
		dma_addr_t dma_src[2];
		struct device *dev = dma->dev;
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
		dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
		dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
		tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
					     len, dma_flags);
		if (tx) {
			async_tx_submit(chan, tx, submit);
			return tx;
		}

		/* could not get a descriptor, unmap and fall through to
		 * the synchronous path
		 */
		dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
		dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
		dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
	}

	/* run the operation synchronously */
	async_tx_quiesce(&submit->depend_tx);
	amul = raid6_gfmul[coef[0]];
	bmul = raid6_gfmul[coef[1]];
	a = page_address(srcs[0]);
	b = page_address(srcs[1]);
	c = page_address(dest);

	while (len--) {
		ax    = amul[*a++];
		bx    = bmul[*b++];
		*c++ = ax ^ bx;
	}

	return NULL;
}
Developer: 12019, Project: kernel_zte_u880, Lines: 54, Source: async_raid6_recov.c


Example 5: async_mult

static struct dma_async_tx_descriptor *
async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
	   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &dest, 1, &src, 1, len);
	struct dma_device *dma = chan ? chan->device : NULL;
	const u8 *qmul; /* Q multiplier table */
	u8 *d, *s;

	if (dma) {
		dma_addr_t dma_dest[2];
		dma_addr_t dma_src[1];
		struct device *dev = dma->dev;
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
		dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
		tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef,
					     len, dma_flags);
		if (tx) {
			async_tx_submit(chan, tx, submit);
			return tx;
		}

		/* could not get a descriptor, unmap and fall through to
		 * the synchronous path
		 */
		dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
		dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
	}

	/* no channel available, or failed to allocate a descriptor, so
	 * perform the operation synchronously
	 */
	async_tx_quiesce(&submit->depend_tx);
	qmul  = raid6_gfmul[coef];
	d = page_address(dest);
	s = page_address(src);

	while (len--)
		*d++ = qmul[*s++];

	return NULL;
}
Developer: 12019, Project: kernel_zte_u880, Lines: 48, Source: async_raid6_recov.c


Example 6: iwl_pcie_rxq_alloc_rbs

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}
Developer: andy-shev, Project: linux, Lines: 69, Source: rx.c


Example 7: inv_cache_vmalloc

static void inv_cache_vmalloc(const struct LinuxMemArea *mem_area)
{
	struct page *pg;
	size_t chunk;
	u32 pg_cnt;
	u32 pg_ofs;
	u32 vaddr, vaddr_end;

	extern void ___dma_single_dev_to_cpu(const void *, size_t,
			                enum dma_data_direction);

	vaddr = (u32)mem_area->uData.sVmalloc.pvVmallocAddress;
	vaddr_end = vaddr + mem_area->ui32ByteSize;
	pg_cnt = (PAGE_ALIGN(vaddr_end) - (vaddr & PAGE_MASK)) / PAGE_SIZE;

	while (pg_cnt--) {
		pg = pfn_to_page(VMallocToPhys((void *)vaddr) >> PAGE_SHIFT);
		pg_ofs = vaddr & ~PAGE_MASK;
		chunk = min_t(ssize_t, vaddr_end - vaddr, PAGE_SIZE - pg_ofs);
		dma_unmap_page(NULL,
			       dma_map_page(NULL, pg, pg_ofs, chunk,
					    DMA_FROM_DEVICE),
			       chunk, DMA_FROM_DEVICE);
		vaddr += chunk;
	}
}
Developer: 7hunderbug, Project: kernel-adaptation-n950-n9, Lines: 26, Source: mm.c


Example 8: ftmac100_alloc_rx_page

/******************************************************************************
 * internal functions (buffer)
 *****************************************************************************/
static int ftmac100_alloc_rx_page(struct ftmac100 *priv,
				  struct ftmac100_rxdes *rxdes, gfp_t gfp)
{
	struct net_device *netdev = priv->netdev;
	struct page *page;
	dma_addr_t map;

	page = alloc_page(gfp);
	if (!page) {
		if (net_ratelimit())
			netdev_err(netdev, "failed to allocate rx page\n");
		return -ENOMEM;
	}

	map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, map))) {
		if (net_ratelimit())
			netdev_err(netdev, "failed to map rx page\n");
		__free_page(page);
		return -ENOMEM;
	}

	ftmac100_rxdes_set_page(rxdes, page);
	ftmac100_rxdes_set_dma_addr(rxdes, map);
	ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE);
	ftmac100_rxdes_set_dma_own(rxdes);
	return 0;
}
Developer: ParrotSec, Project: linux-psec, Lines: 31, Source: ftmac100.c


Example 9: xgbe_alloc_pages

static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, gfp_t gfp, int order)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	int ret;

	/* Try to obtain pages, decreasing order if necessary */
	gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages(gfp, order);
		if (pages)
			break;

		order--;
	}
	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	ret = dma_mapping_error(pdata->dev, pages_dma);
	if (ret) {
		put_page(pages);
		return ret;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}
Developer: ParrotSec, Project: linux-psec, Lines: 35, Source: xgbe-desc.c


Example 10: ivp_smmu_flush_pgtable

/* Flush cache for the page tables after pgt updated */
void ivp_smmu_flush_pgtable(struct ivp_smmu_dev *smmu_dev, void *addr, size_t size)
{
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
	unsigned int  idr0 = 0;

	if ((NULL == smmu_dev) || (NULL == addr)) {
		pr_err("smmu_dev or addr is null!");
		return;
	}

	/* IDR0 */
	idr0 = readl(smmu_dev->reg_base + SMMU_NS_IDR0);

	/* Coherent translation table walks are supported */
	if (idr0 & SMMU_IDR0_CTTW) {
		dsb(ishst);
	} else {
		/*
		 * If the SMMU can't walk tables in the CPU caches, treat them
		 * like non-coherent DMA since we need to flush the new entries
		 * all the way out to memory. There's no possibility of
		 * recursion here as the SMMU table walker will not be wired
		 * through another SMMU.
		 */
		dma_map_page(smmu_dev->dev, virt_to_page(addr), offset, size,
				DMA_TO_DEVICE);
	}
}
Developer: XePeleato, Project: android_kernel_huawei_venus, Lines: 29, Source: ivp_smmu.c


Example 11: omap_gem_attach_pages

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;

	WARN_ON(omap_obj->pages);

	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
	 * we actually want CMA memory for it all anyways..
	 */
	pages = _drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		dma_addr_t *addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(obj->dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
		omap_obj->addrs = addrs;
	}
Developer: OneOfMany07, Project: fjord-kernel, Lines: 30, Source: omap_gem.c


Example 12: efx_init_rx_buffers

/**
 * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
Developer: 3bsa, Project: linux, Lines: 67, Source: rx.c


Example 13: do_async_xor

/* do_async_xor - dma map the pages and perform the xor with an engine.
 * 	This routine is marked __always_inline so it can be compiled away
 * 	when CONFIG_DMA_ENGINE=n
 */
static __always_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_device *device,
	struct dma_chan *chan, struct page *dest, struct page **src_list,
	unsigned int offset, unsigned int src_cnt, size_t len,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	dma_addr_t dma_dest;
	dma_addr_t *dma_src = (dma_addr_t *) src_list;
	struct dma_async_tx_descriptor *tx;
	int i;
	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

	pr_debug("%s: len: %zu\n", __func__, len);

	dma_dest = dma_map_page(device->dev, dest, offset, len,
				DMA_FROM_DEVICE);

	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
					  len, DMA_TO_DEVICE);

	/* Since we have clobbered the src_list we are committed
	 * to doing this asynchronously.  Drivers force forward progress
	 * in case they can not provide a descriptor
	 */
	tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
					 dma_prep_flags);
	if (!tx) {
		if (depend_tx)
			dma_wait_for_async_tx(depend_tx);

		while (!tx)
			tx = device->device_prep_dma_xor(chan, dma_dest,
							 dma_src, src_cnt, len,
							 dma_prep_flags);
	}

	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);

	return tx;
}
Developer: janrinze, Project: loox7xxport, Lines: 46, Source: async_xor.c


Example 14: dma_map_sg

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int BCMFASTPATH_HOST dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset, s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
Developer: 1703011, Project: asuswrt-merlin, Lines: 27, Source: dma-mapping.c


Example 15: efx_init_rx_buffers_page

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
					efx_rx_buf_size(efx),
					DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		state = page_address(page);
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->page_offset = page_offset;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->flags = EFX_RX_BUF_PAGE;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_offset += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}
Developer: AiWinters, Project: linux, Lines: 63, Source: rx.c


Example 16: arc_dma_map_sg

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					       s->length, dir);

	return nents;
}
Developer: AlexanderStein, Project: linux, Lines: 12, Source: dma.c


Example 17: qib_user_sdma_coalesce

/* we've too many pages in the iovec, coalesce to a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
				  struct qib_user_sdma_pkt *pkt,
				  const struct iovec *iov,
				  unsigned long niov)
{
	int ret = 0;
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int i;
	int len = 0;
	dma_addr_t dma_addr;

	if (!page) {
		ret = -ENOMEM;
		goto done;
	}

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		ret = -ENOMEM;
		goto free_unmap;
	}

	qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
				dma_addr);
	pkt->naddr = 2;

	goto done;

free_unmap:
	kunmap(page);
	__free_page(page);
done:
	return ret;
}
Developer: HobbesOSR, Project: kitten, Lines: 54, Source: qib_user_sdma.c


Example 18: async_memset

/**
 * async_memset - attempt to fill memory with a dma engine.
 * @dest: destination page
 * @val: fill value
 * @offset: offset in pages to start transaction
 * @len: length in bytes
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: memset depends on the result of this transaction
 * @cb_fn: function to call when the memcpy completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_memset(struct page *dest, int val, unsigned int offset,
    size_t len, enum async_tx_flags flags,
    struct dma_async_tx_descriptor *depend_tx,
    dma_async_tx_callback cb_fn, void *cb_param)
{
    struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
                              &dest, 1, NULL, 0, len);
    struct dma_device *device = chan ? chan->device : NULL;
    struct dma_async_tx_descriptor *tx = NULL;

    if (device) {
        dma_addr_t dma_dest;
        unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

        dma_dest = dma_map_page(device->dev, dest, offset, len,
                    DMA_FROM_DEVICE);

        tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
                            dma_prep_flags);
    }

    if (tx) {
        pr_debug("%s: (async) len: %zu\n", __func__, len);
        async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
    } else { /* run the memset synchronously */
        void *dest_buf;
        pr_debug("%s: (sync) len: %zu\n", __func__, len);

        dest_buf = (void *) (((char *) page_address(dest)) + offset);

        /* wait for any prerequisite operations */
        if (depend_tx) {
            /* if ack is already set then we cannot be sure
             * we are referring to the correct operation
             */
            BUG_ON(depend_tx->ack);
            if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
                panic("%s: DMA_ERROR waiting for depend_tx\n",
                    __func__);
        }

        memset(dest_buf, val, len);

        async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
    }

    return tx;
}
Developer: 274914765, Project: C, Lines: 60, Source: async_memset.c


Example 19: omap_gem_attach_pages

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}
Developer: 19Dan01, Project: linux, Lines: 50, Source: omap_gem.c


Example 20: inv_cache_page_list

static void inv_cache_page_list(const struct LinuxMemArea *mem_area)
{
	u32 pg_cnt;
	struct page **pg_list;

	extern void ___dma_single_dev_to_cpu(const void *, size_t,
			                enum dma_data_direction);

	pg_cnt = RANGE_TO_PAGES(mem_area->ui32ByteSize);
	pg_list = mem_area->uData.sPageList.pvPageList;
	while (pg_cnt--)
		dma_unmap_page(NULL,
			       dma_map_page(NULL, *pg_list++, 0, PAGE_SIZE,
					    DMA_FROM_DEVICE),
			       PAGE_SIZE, DMA_FROM_DEVICE);
}
Developer: 7hunderbug, Project: kernel-adaptation-n950-n9, Lines: 16, Source: mm.c



Note: The dma_map_page examples in this article were collected by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult the license of the corresponding project before using or redistributing this code, and do not reproduce this article without permission.

