C dma_map_sg Function Code Examples


This article collects typical usage examples of the dma_map_sg function in C (the Linux kernel DMA-mapping API). If you are wondering what dma_map_sg does, how to call it, or what real-world uses look like, the selected examples below should help.



The following presents 20 code examples of the dma_map_sg function, sorted by popularity by default.
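
Before diving into the examples, here is a minimal sketch of the calling pattern most of the drivers below share: map a scatterlist with dma_map_sg(), check that the returned segment count is non-zero, program the hardware using sg_dma_address()/sg_dma_len() on the mapped entries, and later release the mapping with dma_unmap_sg() using the original entry count. The function name is a hypothetical placeholder and is not taken from any of the examples.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/printk.h>
#include <linux/errno.h>

/* Minimal usage sketch; the surrounding driver context is assumed, not shown. */
static int sketch_map_and_submit(struct device *dev, struct scatterlist *sgl,
				 int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int mapped, i;

	/*
	 * dma_map_sg() returns the number of DMA segments actually mapped
	 * (0 on failure); it may be less than nents if entries were merged.
	 */
	mapped = dma_map_sg(dev, sgl, nents, dir);
	if (!mapped)
		return -ENOMEM;

	/* Program the hardware with the mapped bus addresses and lengths. */
	for_each_sg(sgl, sg, mapped, i)
		pr_debug("seg %d: addr %pad len %u\n",
			 i, &sg_dma_address(sg), sg_dma_len(sg));

	/* ... start the transfer and wait for it to complete ... */

	/* Unmap with the original nents, not the count returned by dma_map_sg(). */
	dma_unmap_sg(dev, sgl, nents, dir);
	return 0;
}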

Example 1: eesoxscsi_dma_setup

/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
	struct device *dev = scsi_get_device(host);
	int dmach = info->info.scsi.dma;

	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int bufs, map_dir, dma_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		if (direction == DMA_OUT)
			map_dir = DMA_TO_DEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			map_dir = DMA_FROM_DEVICE,
			dma_dir = DMA_MODE_READ;

		dma_map_sg(dev, info->sg, bufs, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}
	/*
	 * We don't do DMA, we only do slow PIO
	 *
	 * Some day, we will do Pseudo DMA
	 */
	return fasdma_pseudo;
}
Developer ID: 03199618, Project: linux, Lines of code: 44, Source file: eesox.c


Example 2: powertecscsi_dma_setup

/* Prototype: fasdmatype_t powertecscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
powertecscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct powertec_info *info = (struct powertec_info *)host->hostdata;
	struct device *dev = scsi_get_device(host);
	int dmach = info->info.scsi.dma;

	if (info->info.ifcfg.capabilities & FASCAP_DMA &&
	    min_type == fasdma_real_all) {
		int bufs, map_dir, dma_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		if (direction == DMA_OUT)
			map_dir = DMA_TO_DEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			map_dir = DMA_FROM_DEVICE,
			dma_dir = DMA_MODE_READ;

		dma_map_sg(dev, info->sg, bufs + 1, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs + 1);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}

	/*
	 * If we're not doing DMA,
	 *  we'll do slow PIO
	 */
	return fasdma_pio;
}
Developer ID: Dronevery, Project: JetsonTK1-kernel, Lines of code: 44, Source file: powertec.c


Example 3: sun3x_esp_map_sg

static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
				  int num_sg, int dir)
{
	return dma_map_sg(esp->dev, sg, num_sg, dir);
}
Developer ID: BackupTheBerlios, Project: grasshopper-svn, Lines of code: 5, Source file: sun3x_esp.c
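
In the same driver, a mapping hook like the one above is normally paired with a matching unmap hook that releases the mapping once the transfer completes. A minimal sketch of such a counterpart, assuming the same esp hook interface (the exact name and signature in sun3x_esp.c may differ):

/* Hypothetical counterpart to the map hook above (name assumed). */
static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			       int num_sg, int dir)
{
	dma_unmap_sg(esp->dev, sg, num_sg, dir);
}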


Example 4: srp_indirect_data

static int srp_indirect_data(struct scst_cmd *sc, struct srp_cmd *cmd,
			     struct srp_indirect_buf *id,
			     enum dma_data_direction dir, srp_rdma_t rdma_io,
			     int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct srp_direct_buf *md = NULL;
	struct scatterlist dummy, *sg = NULL;
	dma_addr_t token = 0;
	int err = 0;
	int nmd, nsg = 0, len, sg_cnt = 0;
	u32 tsize = 0;
	enum dma_data_direction dma_dir;

	iue = scst_cmd_get_tgt_priv(sc);
	if (dir == DMA_TO_DEVICE) {
		scst_cmd_get_write_fields(sc, &sg, &sg_cnt);
		tsize = scst_cmd_get_bufflen(sc);
		dma_dir = DMA_FROM_DEVICE;
	} else {
		sg = scst_cmd_get_sg(sc);
		sg_cnt = scst_cmd_get_sg_cnt(sc);
		tsize = scst_cmd_get_adjusted_resp_data_len(sc);
		dma_dir = DMA_TO_DEVICE;
	}

	dprintk("%p %u %u %d %d\n", iue, tsize, be32_to_cpu(id->len),
		be32_to_cpu(cmd->data_in_desc_cnt),
		be32_to_cpu(cmd->data_out_desc_cnt));

	len = min(tsize, be32_to_cpu(id->len));

	nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);

	if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
	    (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
		md = &id->desc_list[0];
		goto rdma;
	}

	if (ext_desc && dma_map) {
		md = dma_alloc_coherent(iue->target->dev,
					be32_to_cpu(id->table_desc.len),
					&token, GFP_KERNEL);
		if (!md) {
			eprintk("Can't get dma memory %u\n", id->table_desc.len);
			return -ENOMEM;
		}

		sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
		sg_dma_address(&dummy) = token;
		sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
		err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
			      be32_to_cpu(id->table_desc.len));
		if (err) {
			eprintk("Error copying indirect table %d\n", err);
			goto free_mem;
		}
	} else {
		eprintk("This command uses external indirect buffer\n");
		return -EINVAL;
	}

rdma:
	if (dma_map) {
		nsg = dma_map_sg(iue->target->dev, sg, sg_cnt, dma_dir);
		if (!nsg) {
			eprintk("fail to map %p %d\n", iue, sg_cnt);
			err = -ENOMEM;
			goto free_mem;
		}
	}

	err = rdma_io(sc, sg, nsg, md, nmd, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, dma_dir);

free_mem:
	if (token && dma_map)
		dma_free_coherent(iue->target->dev,
				  be32_to_cpu(id->table_desc.len), md, token);

	return err;
}
Developer ID: Chilledheart, Project: scst, Lines of code: 85, Source file: libsrp.c


Example 5: exynos_gem_map_dma_buf

static struct sg_table *
		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int nents, ret;

	/* just return current sgt if already requested. */
	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
		return &exynos_attach->sgt;

	buf = gem_obj->buffer;
	if (!buf) {
		DRM_ERROR("buffer is null.\n");
		return ERR_PTR(-ENOMEM);
	}

	sgt = &exynos_attach->sgt;

	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&dev->struct_mutex);

	rd = buf->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	exynos_attach->is_mapped = true;
	exynos_attach->dir = dir;
	attach->priv = exynos_attach;

	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
Developer ID: Astralix, Project: mainline-dss11, Lines of code: 61, Source file: exynos_drm_dmabuf.c


Example 6: mmc_omap_prepare_data

static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int i, use_dma, block_size;
	unsigned sg_len;

	host->data = data;
	if (data == NULL) {
		OMAP_MMC_WRITE(host, BLEN, 0);
		OMAP_MMC_WRITE(host, NBLK, 0);
		OMAP_MMC_WRITE(host, BUF, 0);
		host->dma_in_use = 0;
		set_cmd_timeout(host, req);
		return;
	}

	block_size = data->blksz;

	OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
	OMAP_MMC_WRITE(host, BLEN, block_size - 1);
	set_data_timeout(host, req);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->sg_idx = 0;
	if (use_dma) {
		if (mmc_omap_get_dma_channel(host, data) == 0) {
			enum dma_data_direction dma_data_dir;

			if (data->flags & MMC_DATA_WRITE)
				dma_data_dir = DMA_TO_DEVICE;
			else
				dma_data_dir = DMA_FROM_DEVICE;

			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
						sg_len, dma_data_dir);
			host->total_bytes_left = 0;
			mmc_omap_prepare_dma(host, req->data);
			host->brs_received = 0;
			host->dma_done = 0;
			host->dma_in_use = 1;
		} else
			use_dma = 0;
	}

	/* Revert to PIO? */
	if (!use_dma) {
		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
		host->total_bytes_left = data->blocks * block_size;
		host->sg_len = sg_len;
		mmc_omap_sg_to_buf(host);
		host->dma_in_use = 0;
	}
}
Developer ID: A2109devs, Project: lenovo_a2109a_kernel, Lines of code: 69, Source file: omap.c


Example 7: imxmci_setup_data

static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasz = nob * blksz;
	int i;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

	MMC_NOB = nob;
	MMC_BLK_LEN = blksz;

	/*
	 * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise.
	 * We are in big troubles for non-512 byte transfers according to note in the paragraph
	 * 20.6.7 of User Manual anyway, but we need to be able to transfer SCR at least.
	 * The situation is even more complex in reality. The SDHC is not able to handle
	 * partial FIFO fills and reads well. The length has to be rounded up to a burst size multiple.
	 * This is required for SCR read at least.
	 */
	if (datasz < 512) {
		host->dma_size = datasz;
		if (data->flags & MMC_DATA_READ) {
			host->dma_dir = DMA_FROM_DEVICE;

			/* Hack to enable read SCR */
			MMC_NOB = 1;
			MMC_BLK_LEN = 512;
		} else {
			host->dma_dir = DMA_TO_DEVICE;
		}

		/* Convert back to virtual address */
		host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset);
		host->data_cnt = 0;

		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

		return;
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
						data->sg_len,  host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
			host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
	} else {
		host->dma_dir = DMA_TO_DEVICE;

		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
						data->sg_len,  host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
			host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
	}

#if 1	/* This code is there only for consistency checking and can be disabled in future */
	host->dma_size = 0;
	for(i=0; i<host->dma_nents; i++)
		host->dma_size+=data->sg[i].length;

	if (datasz > host->dma_size) {
		dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
		       datasz, host->dma_size);
	}
#endif

	host->dma_size = datasz;

	wmb();

	if(host->actual_bus_width == MMC_BUS_WIDTH_4)
		BLR(host->dma) = 0;	/* burst 64 byte read / 64 bytes write */
	else
		BLR(host->dma) = 16;	/* burst 16 byte read / 16 bytes write */

	RSSR(host->dma) = DMA_REQ_SDHC;

	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
	clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

	/* start DMA engine for read, write is delayed after initial response */
	if (host->dma_dir == DMA_FROM_DEVICE) {
		imx_dma_enable(host->dma);
	}
}
Developer ID: ivucica, Project: linux, Lines of code: 99, Source file: imxmmc.c


Example 8: ixgbe_fcoe_ddp_setup

/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);


	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset of length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
//.........(remaining code omitted).........
Developer ID: insop, Project: linux, Lines of code: 101, Source file: ixgbe_fcoe.c


Example 9: qce_ablkcipher_async_req_handle

static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->info;
	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	rctx->cryptlen = req->nbytes;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
		return -rctx->dst_nents;
	}

	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (ret < 0)
		goto error_free;

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (ret < 0)
			goto error_unmap_dst;
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_ablkcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}
Developer ID: AlexShiLucky, Project: linux, Lines of code: 100, Source file: ablkcipher.c


Example 10: ox800_aeslrw_gencrypt

/**
 * Generic LRW-AES en/decryption
 * @param encrypt non-zero to encrypt, zero to decrypt
 * @param in Source of data
 * @param out Location to place en/decrypted data
 * @param nents Number of entries in scatter list, in and out must have the same
 *              number of entries
 * @param iv 8 byte array containing the I-Value
 * @return error code or 0 for success
 */
static int ox800_aeslrw_gencrypt(  u8  encrypt,
                            struct scatterlist* in,
                            struct scatterlist* out,
                            unsigned int nents,
                            u8  iv[])
{
    oxnas_dma_channel_t* dma_in;
    oxnas_dma_channel_t* dma_out;
    struct scatterlist* out_;
    char same_buffer;
    int status = 0;

    /* get dma resources (non blocking) */
    dma_in = oxnas_dma_request(0);
    dma_out = oxnas_dma_request(0);
    
    VPRINTK("dma in %d out %d \n", 
        dma_in->channel_number_,  
        dma_out->channel_number_);  

    if ((dma_in) && (dma_out)) {
        u32 reg;
        
        // shouldn't be busy or full
        reg = readl( OX800DPE_STATUS );
        if (! (reg & OX800DPE_STAT_IDLE) )
            printk("not idle after abort toggle");
        if (reg & OX800DPE_STAT_TX_NOTEMPTY)
            printk("tx fifo not empty after abort toggle");
        if (! (reg & OX800DPE_STAT_RX_SPACE) )
            printk("rx not empty after abort toggle");
        
        /* check to see if the destination buffer is the same as the source */
        same_buffer = (sg_phys(in) == sg_phys(out));
        
        /* map transfers */
        if (same_buffer) {
            dma_map_sg(NULL, in, nents, DMA_BIDIRECTIONAL);
            out_ = in;
        } else {
            /* map transfers */
            dma_map_sg(NULL, in, nents, DMA_TO_DEVICE);
            dma_map_sg(NULL, out, nents, DMA_FROM_DEVICE);
            out_ = out;
        }
#ifdef CIPHER_USE_SG_DMA        
        /* setup DMA transfers */ 
        oxnas_dma_device_set_sg(
            dma_in,
            OXNAS_DMA_TO_DEVICE,
            in,
            nents,
            &oxnas_dpe_rx_dma_settings,
            OXNAS_DMA_MODE_INC);
            
        oxnas_dma_device_set_sg(
            dma_out,
            OXNAS_DMA_FROM_DEVICE,
            out_,
            nents,
            &oxnas_dpe_tx_dma_settings,
            OXNAS_DMA_MODE_INC);

#else
        oxnas_dma_device_set(
            dma_in,
            OXNAS_DMA_TO_DEVICE,
            (unsigned char* )sg_dma_address(in),
            sg_dma_len(in),
            &oxnas_dpe_rx_dma_settings,
            OXNAS_DMA_MODE_INC,
            1 /*paused */ );
            
        oxnas_dma_device_set(
            dma_out,
            OXNAS_DMA_FROM_DEVICE,
            (unsigned char* )sg_dma_address(out_),
            sg_dma_len(out_),
            &oxnas_dpe_tx_dma_settings,
            OXNAS_DMA_MODE_INC,
            1 /*paused */ );
#endif

        /* set dma callbacks */
        oxnas_dma_set_callback(
            dma_in,
            OXNAS_DMA_CALLBACK_ARG_NUL,
            OXNAS_DMA_CALLBACK_ARG_NUL);
        
        oxnas_dma_set_callback(
//.........(remaining code omitted).........
Developer ID: aircross, Project: ray, Lines of code: 101, Source file: cipher.c


Example 11: sslsd_request

static void sslsd_request(struct mmc_host *mmc, struct mmc_request *req)
{
	sslsd_host			*host = mmc_priv(mmc);
	struct mmc_command	*cmd = req->cmd;
	sd_cmd_p			c = &host->tcmd;
	struct mmc_data		*d;
	int					flag;
	unsigned long		iflags;

	if (!sdhc_is_in(&host->hw))
	{
		cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, req);
		return;
	}

	c->dat = 0;
	switch (mmc_resp_type(cmd))
	{
		case MMC_RSP_R1: /* & R5, R6 */
			flag = SDCMD_F_R1;
			break;

		case MMC_RSP_R1B: /* & R5b */
			flag = SDCMD_F_R1B;
			break;

		case MMC_RSP_R2:
			flag = SDCMD_F_R2;
			c->dat = &host->tdat;
			c->dat->buf = (uint8_t *)cmd->resp;
			break;

		case MMC_RSP_R3:
			flag = SDCMD_F_R3;
			break;

		default:
			flag = 0;
			break;
	}

	c->cmd = cmd->opcode;
	c->arg = cmd->arg;
	host->pcmd = cmd;

	d = cmd->data;
	if (d)
	{
		struct scatterlist	*sg;
		sd_dat_p			dat;

		if (d->flags & MMC_DATA_STREAM) 
		{
			/* not supported */
			cmd->error = -EINVAL;
			mmc_request_done(mmc, req);
			return;
		}

		flag |= SDCMD_F_DAT;
		if (d->flags & MMC_DATA_WRITE)
		{
			flag |= SDCMD_F_WR;
		}
		if (d->blocks > 1)
		{
			flag |= SDCMD_F_MULTI;
		}
#if SD_DMA
		if (host->hw.fdma)
		{
			flag |= SDCMD_F_DMA;
		}
#endif

		dat = c->dat = &host->tdat;
		dat->blk = d->blksz;
#if 1
		c->tout = (d->timeout_ns + 1000000 - 1) / 1000000 + 
			d->timeout_clks / (mmc->ios.clock / 1000);
#endif

		sg = d->sg;
		host->sg = sg;
		host->sgc = d->sg_len;
		host->sgn = sg + d->sg_len - 1;
		host->sgofs = 0;

#if SD_DMA && IO_MAP == 1
		if (flag & SDCMD_F_DMA)
		{
			int	count;

			count = dma_map_sg(mmc_dev(mmc), sg, d->sg_len, 
						(flag & SDCMD_F_WR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (!count)
			{
				/* failed to map even 1 page */
				cmd->error = -ENOMEM;
//.........(remaining code omitted).........
Developer ID: acassis, Project: emlinux-ssd1935, Lines of code: 101, Source file: ssl_sd.c


Example 12: hi_mci_setup_data

static int hi_mci_setup_data(struct himci_host *host, struct mmc_data *data)
{
	unsigned int sg_phyaddr, sg_length;
	unsigned int i, ret = 0;
	unsigned int data_size;
	unsigned int max_des, des_cnt;
	struct himci_des *des;

	himci_trace(2, "begin");
	himci_assert(host);
	himci_assert(data);

	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dma_dir = DMA_FROM_DEVICE;
	else
		host->dma_dir = DMA_TO_DEVICE;

	host->dma_sg = data->sg;
	host->dma_sg_num = dma_map_sg(mmc_dev(host->mmc),
			data->sg, data->sg_len, host->dma_dir);
	himci_assert(host->dma_sg_num);
	himci_trace(2, "host->dma_sg_num is %d\n", host->dma_sg_num);

	data_size = data->blksz * data->blocks;
	if (data_size > (DMA_BUFFER * MAX_DMA_DES)) {
		himci_error("mci request data_size is too big!\n");
		ret = -1;
		goto out;
	}

	himci_trace(2, "host->dma_paddr is 0x%08X,host->dma_vaddr is 0x%08X\n",
			(unsigned int)host->dma_paddr,
			(unsigned int)host->dma_vaddr);

	max_des = (PAGE_SIZE/sizeof(struct himci_des));
	des = (struct himci_des *)host->dma_vaddr;
	des_cnt = 0;

	for (i = 0; i < host->dma_sg_num; i++) {
		sg_length = sg_dma_len(&data->sg[i]);
		sg_phyaddr = sg_dma_address(&data->sg[i]);
		himci_trace(2, "sg[%d] sg_length is 0x%08X, "
				"sg_phyaddr is 0x%08X\n",
				i, (unsigned int)sg_length,
				(unsigned int)sg_phyaddr);
		while (sg_length) {
			des[des_cnt].idmac_des_ctrl = DMA_DES_OWN
				| DMA_DES_NEXT_DES;
			des[des_cnt].idmac_des_buf_addr = sg_phyaddr;
			/* idmac_des_next_addr is paddr for dma */
			des[des_cnt].idmac_des_next_addr = host->dma_paddr
				+ (des_cnt + 1) * sizeof(struct himci_des);

			if (sg_length >= 0x1F00) {
				des[des_cnt].idmac_des_buf_size = 0x1F00;
				sg_length -= 0x1F00;
				sg_phyaddr += 0x1F00;
			} else {
				/* FIXME:data alignment */
				des[des_cnt].idmac_des_buf_size = sg_length;
				sg_length = 0;
			}

			himci_trace(2, "des[%d] vaddr  is 0x%08X", i,
					(unsigned int)&des[i]);
			himci_trace(2, "des[%d].idmac_des_ctrl is 0x%08X",
			       i, (unsigned int)des[i].idmac_des_ctrl);
			himci_trace(2, "des[%d].idmac_des_buf_size is 0x%08X",
				i, (unsigned int)des[i].idmac_des_buf_size);
			himci_trace(2, "des[%d].idmac_des_buf_addr 0x%08X",
				i, (unsigned int)des[i].idmac_des_buf_addr);
			himci_trace(2, "des[%d].idmac_des_next_addr is 0x%08X",
				i, (unsigned int)des[i].idmac_des_next_addr);
			des_cnt++;
		}

		himci_assert(des_cnt < max_des);
	}
	des[0].idmac_des_ctrl |= DMA_DES_FIRST_DES;
	des[des_cnt - 1].idmac_des_ctrl |= DMA_DES_LAST_DES;
	des[des_cnt - 1].idmac_des_next_addr = 0;
out:
	return ret;
}
Developer ID: 119-org, Project: hi3518-osdrv, Lines of code: 86, Source file: himci.c


Example 13: pmpmci_prepare_data

static int pmpmci_prepare_data(struct pmpmci_host *host,
				struct mmc_data *data)
{
    
	int datalen = data->blocks * data->blksz;
    struct sd_data_s *sd_data= host->platdata;		
	static int blksz = 0;
	
	if (data->flags & MMC_DATA_READ)
		host->flags |= HOST_F_RECV;
	else
		host->flags |= HOST_F_XMIT;

	if (host->mrq->stop)
		host->flags |= HOST_F_STOP;

	host->dma.dir = DMA_BIDIRECTIONAL;

	host->dma.sgmap_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
				   data->sg_len, host->dma.dir);
	if (host->dma.sgmap_len == 0)
		return -EINVAL;
  
    if(blksz != data->blksz)
    {
      sd_data->ops->setBlkLen(&(sd_data->info), data->blksz);			  
      blksz = data->blksz;
    }
/*************************************************************/
	if ((host->use_dma) && ((data->blksz % 16) == 0)) {  /* DMA transfer size must be a multiple of 16 */
	    if(~(host->flags & HOST_F_DMA))
	    {
          host->flags |= HOST_F_DMA;		
	    }
        host->dma.dmatogo = 0;
	    host->dma.totalxfer = 0;
    	host->dma.offset = 0;	
    	host->dma.prexfer =0; 			
//      	if(pmpmci_prepare_dma(host,data))
//			goto dataerr; 		
	} 
	else {
	    if(host->flags & HOST_F_DMA)
	    {		
          host->flags &= ~HOST_F_DMA;					  
	    }
		host->pio.index = 0;
		host->pio.offset = 0;
		host->pio.len = datalen;

		
//		if (host->flags & HOST_F_XMIT)
//        	sd_data->ops->intrpt_enable(&(sd_data->info), SD_MMC_INT_DATABUFEMPTY , 1);	
//		else
//        	sd_data->ops->intrpt_enable(&(sd_data->info), SD_MMC_INT_DATABUFFULL , 1);	
	}
	
/***************************************************/

	return 0;

dataerr:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			host->dma.dir);
	return -ETIMEDOUT;
}
Developer ID: WayWingsDev, Project: Gplus_2159_0801, Lines of code: 66, Source file: gp_sdio_module.c


Example 14: mmc_dma_tx_start

/*
 * Prepare and enable DMA Tx channel (on STM32)
 */
static void mmc_dma_tx_start(struct mmci_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *reqdata = mrq->data;
	int dma_len;
	int rv;

	/* Scatter/gather DMA is not supported */
	BUG_ON(reqdata->sg_len > 1);

	dma_len = dma_map_sg(
		mmc_dev(host->mmc), reqdata->sg, reqdata->sg_len,
		DMA_TO_DEVICE);
	if (dma_len == 0) {
		dev_err(mmc_dev(host->mmc), "could not map DMA Tx buffer\n");
		goto out;
	}

	/*
	 * Direction: memory-to-peripheral
	 * Flow controller: peripheral
	 * Priority: very high (3)
	 * Double buffer mode: disabled
	 * Circular mode: disabled
	 */
	rv = stm32_dma_ch_init(STM32F2_DMACH_SDIO, 1, 1, 3, 0, 0);
	if (rv < 0)
		goto err;

	/*
	 * Enable burst mode; set FIFO threshold to "full FIFO"
	 */
	rv = stm32_dma_ch_init_fifo(STM32F2_DMACH_SDIO, 1, 3);
	if (rv < 0)
		goto err;

	/*
	 * Peripheral address: SDIO controller FIFO data register
	 * Peripheral increment: disabled
	 * Peripheral data size: 32-bit
	 * Burst transfer configuration: incremental burst of 4 beats
	 */
	rv = stm32_dma_ch_set_periph(STM32F2_DMACH_SDIO,
		SD_FIFO((u32)host->base), 0, 2, 1);
	if (rv < 0)
		goto err;

	/*
	 * Memory address: DMA buffer address
	 * Memory incremental: enabled
	 * Memory data size: 32-bit
	 * Burst transfer configuration: incremental burst of 4 beats
	 */
	rv = stm32_dma_ch_set_memory(STM32F2_DMACH_SDIO,
		sg_dma_address(&reqdata->sg[0]), 1, 2, 1);
	if (rv < 0)
		goto err;

	/*
	 * Set number of items to transfer to zero, because we use peripheral
	 * flow controller, and therefore the SDIO controller will stop
	 * the transfer when the whole block data has been transferred.
	 */
	rv = stm32_dma_ch_set_nitems(STM32F2_DMACH_SDIO, 0);
	if (rv < 0)
		goto err;

	/*
	 * Enable the DMA channel. After this point, the DMA transfer will
	 * be able to start.
	 */
	rv = stm32_dma_ch_enable(STM32F2_DMACH_SDIO);
	if (rv < 0)
		goto err;

	goto out;

err:
	dev_err(mmc_dev(host->mmc), "Tx DMA channel initialization failed\n");
out:
	;
}
Developer ID: KroMignon, Project: linux-emcraft, Lines of code: 85, Source file: mmci.c


Example 15: __mmci_dma_prep_data

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}

static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job were prepared thus do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	host->dma_in_progress = true;
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	if (host->variant->qcom_dml)
//.........(remaining code omitted).........
Developer ID: Lyude, Project: linux, Lines of code: 101, Source file: mmci.c


Example 16: rk_load_data

static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	dev->aligned = dev->aligned ?
		check_alignment(sg_src, sg_dst, dev->align_size) :
		dev->aligned;
	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src)  error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst)  error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp)  error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp)  error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}
Developer ID: 01org, Project: thunderbolt-software-kernel-tree, Lines of code: 67, Source file: rk3288_crypto.c


Example 17: sahara_hw_descriptor_create

static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[0]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[0]->len1 = 0;
			dev->hw_desc[0]->p1 = 0;
		}
		dev->hw_desc[0]->len2 = ctx->keylen;
		dev->hw_desc[0]->p2 = dev->key_phys_base;
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
	}
	dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
	dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		goto unmap_in;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_out;
	}

	/* Create input links */
	dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[1]->len1 = dev->total;
	dev->hw_desc[1]->len2 = dev->total;
	dev->hw_desc[1]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	/* Start processing descriptor chain. */
	mod_timer(&dev->watchdog,
		  jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_out:
	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_TO_DEVICE);
unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_FROM_DEVICE);

	return -EINVAL;
}
Developer ID: AdaLovelance, Project: lxcGrsecKernels, Lines of code: 99, Source file: sahara.c


Example 18: mxs_i2c_dma_setup_xfer

static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
			struct i2c_msg *msg, uint32_t flags)
{
	struct dma_async_tx_descriptor *desc;
	struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);

	if (msg->flags & I2C_M_RD) {
		i2c->dma_read = 1;
		i2c->addr_data = (msg->addr << 1) | I2C_SMBUS_READ;

		/*
		 * SELECT command.
		 */

		/* Queue the PIO register write transfer. */
		i2c->pio_data[0] = MXS_CMD_I2C_SELECT;
		desc = dmaengine_prep_slave_sg(i2c->dmach,
					(struct scatterlist *)&i2c->pio_data[0],
					1, DMA_TRANS_NONE, 0);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get PIO reg. write descriptor.\n");
			goto select_init_pio_fail;
		}

		/* Queue the DMA data transfer. */
		sg_init_one(&i2c->sg_io[0], &i2c->addr_data, 1);
		dma_map_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
		desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[0], 1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get DMA data write descriptor.\n");
			goto select_init_dma_fail;
		}

		/*
		 * READ command.
		 */

		/* Queue the PIO register write transfer. */
		i2c->pio_data[1] = flags | MXS_CMD_I2C_READ |
				MXS_I2C_CTRL0_XFER_COUNT(msg->len);
		desc = dmaengine_prep_slave_sg(i2c->dmach,
					(struct scatterlist *)&i2c->pio_data[1],
					1, DMA_TRANS_NONE, DMA_PREP_INTERRUPT);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get PIO reg. write descriptor.\n");
			goto select_init_dma_fail;
		}

		/* Queue the DMA data transfer. */
//.........(remaining code omitted).........