This page collects typical usage examples of the dma_alloc_coherent function in C/C++ (the Linux kernel DMA mapping API). If you are unsure what exactly dma_alloc_coherent does or how to use it, the curated code examples here may help.
Below, 20 code examples of dma_alloc_coherent are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C/C++ code examples.
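Before turning to the examples, here is a minimal sketch of the canonical allocate/free pairing. It is illustrative only: the struct and function names are invented for this sketch (they do not come from any example below), and it assumes a platform device whose DMA mask has already been configured.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

struct demo_ring {
	void *va;        /* CPU virtual address of the coherent buffer */
	dma_addr_t dma;  /* bus address to program into the device */
	size_t size;
};

static int demo_ring_alloc(struct platform_device *pdev, struct demo_ring *r)
{
	r->size = PAGE_SIZE;
	/* Coherent ("consistent") memory: CPU and device see the same
	 * contents without explicit cache synchronization. */
	r->va = dma_alloc_coherent(&pdev->dev, r->size, &r->dma, GFP_KERNEL);
	if (!r->va)
		return -ENOMEM;
	return 0;
}

static void demo_ring_free(struct platform_device *pdev, struct demo_ring *r)
{
	/* Free with the same size, virtual address and DMA handle. */
	dma_free_coherent(&pdev->dev, r->size, r->va, r->dma);
}

Every example that follows is a variation on this pattern: allocate a device-visible region, keep both the CPU pointer and the dma_addr_t handle, and free with the exact same triple on teardown or on an error path.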
Example 1: sonic_probe1
static int __devinit sonic_probe1(struct net_device *dev)
{
static unsigned version_printed;
unsigned int silicon_revision;
unsigned int val;
struct sonic_local *lp = netdev_priv(dev);
int err = -ENODEV;
int i;
if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
return -EBUSY;
/*
* get the Silicon Revision ID. If this is one of the known
* ones, assume that we found a SONIC ethernet controller at
* the expected location.
*/
silicon_revision = SONIC_READ(SONIC_SR);
if (sonic_debug > 1)
printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
i = 0;
while (known_revisions[i] != 0xffff &&
known_revisions[i] != silicon_revision)
i++;
if (known_revisions[i] == 0xffff) {
printk("SONIC ethernet controller not found (0x%4x)\n",
silicon_revision);
goto out;
}
if (sonic_debug && version_printed++ == 0)
printk(version);
printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ",
dev_name(lp->device), dev->base_addr);
/*
* Put the sonic into software reset, then
* retrieve and print the ethernet address.
*/
SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
SONIC_WRITE(SONIC_CEP,0);
for (i=0; i<3; i++) {
val = SONIC_READ(SONIC_CAP0-i);
dev->dev_addr[i*2] = val;
dev->dev_addr[i*2+1] = val >> 8;
}
err = -ENOMEM;
/* Initialize the device structure. */
lp->dma_bitmode = SONIC_BITMODE32;
/* Allocate the entire chunk of memory for the descriptors.
Note that this cannot cross a 64K boundary. */
if ((lp->descriptors = dma_alloc_coherent(lp->device,
SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
&lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
dev_name(lp->device));
goto out;
}
/* Now set up the pointers to point to the appropriate places */
lp->cda = lp->descriptors;
lp->tda = lp->cda + (SIZEOF_SONIC_CDA
* SONIC_BUS_SCALE(lp->dma_bitmode));
lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
* SONIC_BUS_SCALE(lp->dma_bitmode));
lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
* SONIC_BUS_SCALE(lp->dma_bitmode));
lp->cda_laddr = lp->descriptors_laddr;
lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
* SONIC_BUS_SCALE(lp->dma_bitmode));
lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
* SONIC_BUS_SCALE(lp->dma_bitmode));
lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
* SONIC_BUS_SCALE(lp->dma_bitmode));
dev->netdev_ops = &sonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
/*
* clear tally counter
*/
SONIC_WRITE(SONIC_CRCT,0xffff);
SONIC_WRITE(SONIC_FAET,0xffff);
SONIC_WRITE(SONIC_MPT,0xffff);
return 0;
out:
release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
return err;
}
Developer ID: openube, Project: android_kernel_sony_c2305, Lines: 98, Source file: jazzsonic.c
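The allocation above is undone in the driver's teardown path with a matching dma_free_coherent(); a sketch of that call (the surrounding cleanup function is driver-specific and not shown in this example):

dma_free_coherent(lp->device,
		  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
		  lp->descriptors, lp->descriptors_laddr);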
Example 2: create_qp
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct t4_cq *rcq, struct t4_cq *scq,
struct c4iw_dev_ucontext *uctx)
{
int user = (uctx != &rdev->uctx);
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
struct c4iw_wr_wait wr_wait;
struct sk_buff *skb;
int ret = 0;
int eqsize;
wq->sq.qid = c4iw_get_qpid(rdev, uctx);
if (!wq->sq.qid)
return -ENOMEM;
wq->rq.qid = c4iw_get_qpid(rdev, uctx);
if (!wq->rq.qid) {
ret = -ENOMEM;
goto free_sq_qid;
}
if (!user) {
wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
GFP_KERNEL);
if (!wq->sq.sw_sq) {
ret = -ENOMEM;
goto free_rq_qid;
}
wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
GFP_KERNEL);
if (!wq->rq.sw_rq) {
ret = -ENOMEM;
goto free_sw_sq;
}
}
/*
* RQT must be a power of 2.
*/
wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
if (!wq->rq.rqt_hwaddr) {
ret = -ENOMEM;
goto free_sw_rq;
}
ret = alloc_sq(rdev, &wq->sq, user);
if (ret)
goto free_hwaddr;
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
wq->rq.memsize, &(wq->rq.dma_addr),
GFP_KERNEL);
if (!wq->rq.queue) {
ret = -ENOMEM;
goto free_sq;
}
PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
__func__, wq->sq.queue,
(unsigned long long)virt_to_phys(wq->sq.queue),
wq->rq.queue,
(unsigned long long)virt_to_phys(wq->rq.queue));
memset(wq->rq.queue, 0, wq->rq.memsize);
dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
wq->db = rdev->lldi.db_reg;
wq->gts = rdev->lldi.gts_reg;
if (user) {
wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
(wq->sq.qid << rdev->qpshift);
wq->sq.udb &= PAGE_MASK;
wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
(wq->rq.qid << rdev->qpshift);
wq->rq.udb &= PAGE_MASK;
}
wq->rdev = rdev;
wq->rq.msn = 1;
/* build fw_ri_res_wr */
wr_len = sizeof *res_wr + 2 * sizeof *res;
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto free_dma;
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP(FW_RI_RES_WR) |
V_FW_RI_RES_WR_NRES(2) |
FW_WR_COMPL(1));
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
//......... portions of this code omitted .........
Developer ID: Distefano, Project: linux, Lines: 101, Source file: qp.c
Example 3: ath_descdma_setup
/*
* This function will allocate both the DMA descriptor structure, and the
* buffers it contains. These are used to contain the descriptors used
* by the system.
*/
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
int nbuf, int ndesc, bool is_tx)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u8 *ds;
struct ath_buf *bf;
int i, bsize, error, desc_len;
ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
name, nbuf, ndesc);
INIT_LIST_HEAD(head);
if (is_tx)
desc_len = sc->sc_ah->caps.tx_desc_len;
else
desc_len = sizeof(struct ath_desc);
/* ath_desc must be a multiple of DWORDs */
if ((desc_len % 4) != 0) {
ath_err(common, "ath_desc not DWORD aligned\n");
BUG_ON((desc_len % 4) != 0);
error = -ENOMEM;
goto fail;
}
dd->dd_desc_len = desc_len * nbuf * ndesc;
/*
* Need additional DMA memory because we can't use
* descriptors that cross the 4K page boundary. Assume
* one skipped descriptor per 4K page.
*/
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
u32 ndesc_skipped =
ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
u32 dma_len;
while (ndesc_skipped) {
dma_len = ndesc_skipped * desc_len;
dd->dd_desc_len += dma_len;
ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
}
}
/* allocate descriptors */
dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
&dd->dd_desc_paddr, GFP_KERNEL);
if (dd->dd_desc == NULL) {
error = -ENOMEM;
goto fail;
}
ds = (u8 *) dd->dd_desc;
ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
name, ds, (u32) dd->dd_desc_len,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
/* allocate buffers */
bsize = sizeof(struct ath_buf) * nbuf;
bf = kzalloc(bsize, GFP_KERNEL);
if (bf == NULL) {
error = -ENOMEM;
goto fail2;
}
dd->dd_bufptr = bf;
for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(dd, ds);
if (!(sc->sc_ah->caps.hw_caps &
ATH9K_HW_CAP_4KB_SPLITTRANS)) {
/*
* Skip descriptor addresses which can cause 4KB
* boundary crossing (addr + length) with a 32 dword
* descriptor fetch.
*/
while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
BUG_ON((caddr_t) bf->bf_desc >=
((caddr_t) dd->dd_desc +
dd->dd_desc_len));
ds += (desc_len * ndesc);
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(dd, ds);
}
}
list_add_tail(&bf->list, head);
}
return 0;
fail2:
dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
dd->dd_desc_paddr);
//......... portions of this code omitted .........
Developer ID: jue-jiang, Project: rc3-linux, Lines: 101, Source file: init.c
Example 4: dma_alloc_coherent
/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
* @chan: Channel to allocate for
* @size: Size of buffer in bytes
* @handle: DMA handle
*
* Allocate a buffer to be used by the DMA engine for read/write,
* similar to dma_alloc_coherent().
*
* Returns the virtual address of the buffer, or NULL in case of failure.
*/
void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
dma_addr_t *handle)
{
return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
Developer ID: E-LLP, Project: n900, Lines: 15, Source file: dma_lib.c
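A hedged usage sketch of this wrapper; the channel variable and buffer size here are hypothetical, and teardown is assumed to go through the driver's matching free helper:

/* Sketch only: 'chan' is a previously configured pasemi_dmachan. */
dma_addr_t ring_dma;
void *ring = pasemi_dma_alloc_buf(chan, 64 * 1024, &ring_dma);
if (!ring)
	return -ENOMEM;
/* ... program ring_dma into the channel's base-address register ... */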
Example 5: sonic_probe1
static int __init sonic_probe1(struct net_device *dev)
{
static unsigned version_printed = 0;
unsigned int silicon_revision;
struct sonic_local *lp = netdev_priv(dev);
unsigned int base_addr = dev->base_addr;
int i;
int err = 0;
if (!request_mem_region(base_addr, 0x100, xtsonic_string))
return -EBUSY;
/*
* get the Silicon Revision ID. If this is one of the known
* ones, assume that we found a SONIC ethernet controller at
* the expected location.
*/
silicon_revision = SONIC_READ(SONIC_SR);
if (sonic_debug > 1)
printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
i = 0;
while ((known_revisions[i] != 0xffff) &&
(known_revisions[i] != silicon_revision))
i++;
if (known_revisions[i] == 0xffff) {
printk("SONIC ethernet controller not found (0x%4x)\n",
silicon_revision);
return -ENODEV;
}
if (sonic_debug && version_printed++ == 0)
printk(version);
/*
* Put the sonic into software reset, then retrieve ethernet address.
* Note: we are assuming that the boot-loader has initialized the cam.
*/
SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
SONIC_WRITE(SONIC_DCR,
SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS);
SONIC_WRITE(SONIC_CEP,0);
SONIC_WRITE(SONIC_IMR,0);
SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
SONIC_WRITE(SONIC_CEP,0);
for (i=0; i<3; i++) {
unsigned int val = SONIC_READ(SONIC_CAP0-i);
dev->dev_addr[i*2] = val;
dev->dev_addr[i*2+1] = val >> 8;
}
/* Initialize the device structure. */
lp->dma_bitmode = SONIC_BITMODE32;
/*
* Allocate local private descriptor areas in uncached space.
* The entire structure must be located within the same 64 KB segment.
* A simple way to ensure this is to allocate twice the
* size of the structure -- given that the structure is
* much less than 64 kB, at least one of the halves of
* the allocated area will be contained entirely in 64 kB.
* We also allocate extra space for a pointer to allow freeing
* this structure later on (in xtsonic_cleanup_module()).
*/
lp->descriptors =
dma_alloc_coherent(lp->device,
SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
&lp->descriptors_laddr, GFP_KERNEL);
if (lp->descriptors == NULL) {
printk(KERN_ERR "%s: couldn't alloc DMA memory for "
"descriptors.\n", dev_name(lp->device));
goto out;
}
lp->cda = lp->descriptors;
lp->tda = lp->cda + (SIZEOF_SONIC_CDA
* SONIC_BUS_SCALE(lp->dma_bitmode));
lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
* SONIC_BUS_SCALE(lp->dma_bitmode));
lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
* SONIC_BUS_SCALE(lp->dma_bitmode));
/* get the virtual dma address */
lp->cda_laddr = lp->descriptors_laddr;
lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
* SONIC_BUS_SCALE(lp->dma_bitmode));
lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
* SONIC_BUS_SCALE(lp->dma_bitmode));
lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
* SONIC_BUS_SCALE(lp->dma_bitmode));
dev->netdev_ops = &xtsonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
//......... portions of this code omitted .........
Developer ID: 0xroot, Project: Blackphone-BP1-Kernel, Lines: 101, Source file: xtsonic.c
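The comment in this example describes an old trick for the 64 KB constraint: allocate twice the needed size, then use whichever portion does not cross a 64 KB boundary (the code as shown relies on dma_alloc_coherent()'s alignment instead). A sketch of the described trick, with illustrative names; the original va/laddr pair must still be kept for dma_free_coherent():

/* Allocate 2*sz, then pick a base so [base, base+sz) stays in one 64 KB segment. */
dma_addr_t laddr;
size_t sz = SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode);
void *va = dma_alloc_coherent(lp->device, 2 * sz, &laddr, GFP_KERNEL);
if (va && (laddr & 0xffff) + sz > 0x10000) {
	size_t skip = 0x10000 - (laddr & 0xffff); /* advance to the next 64 KB boundary */
	va = (char *)va + skip;
	laddr += skip;
}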
Example 6: mmc_dma_setup
static int mmc_dma_setup(struct mmci_platform_data *plat)
{
u32 llptrrx, llptrtx;
int ret = 0;
/*
* There is a quirk with the LPC32XX and SD burst DMA. DMA sg
* transfers where DMA is the flow controller will not transfer
* the last few bytes to or from the SD card controller and
* memory. For RX, the last few bytes in the SD transfer can be
* forced out with a software DMA burst request. For TX, this
* can't be done, so TX sg transfers are not supported. For TX,
* a temporary bounce buffer is used if more than 1 sg segment
* is passed in the data request. The bounce buffer receives a
* contiguous copy of the TX data and is used instead.
*/
if (plat->dma_tx_size) {
/* Use pre-allocated memory for the DMA Tx buffer */
dmac_drvdat.dma_handle_tx = (dma_addr_t)plat->dma_tx_v_base;
dmac_drvdat.dma_v_base = plat->dma_tx_v_base;
dmac_drvdat.preallocated_tx_buf = 1;
} else {
/* Allocate a chunk of memory for the DMA TX buffers */
dmac_drvdat.dma_v_base = dma_alloc_coherent(dmac_drvdat.dev,
DMA_BUFF_SIZE, &dmac_drvdat.dma_handle_tx, GFP_KERNEL);
dmac_drvdat.preallocated_tx_buf = 0;
}
if (dmac_drvdat.dma_v_base == NULL) {
dev_err(dmac_drvdat.dev, "error getting DMA region\n");
ret = -ENOMEM;
goto dma_no_tx_buff;
}
dev_info(dmac_drvdat.dev, "DMA buffer: phy:%p, virt:%p\n",
(void *) dmac_drvdat.dma_handle_tx,
dmac_drvdat.dma_v_base);
/* Setup TX DMA channel */
dmac_drvdat.dmacfgtx.ch = DMA_CH_SDCARD_TX;
dmac_drvdat.dmacfgtx.tc_inten = 0;
dmac_drvdat.dmacfgtx.err_inten = 0;
dmac_drvdat.dmacfgtx.src_size = 4;
dmac_drvdat.dmacfgtx.src_inc = 1;
dmac_drvdat.dmacfgtx.src_bsize = DMAC_CHAN_SRC_BURST_8;
dmac_drvdat.dmacfgtx.src_prph = DMAC_SRC_PERIP(DMA_PERID_SDCARD);
dmac_drvdat.dmacfgtx.dst_size = 4;
dmac_drvdat.dmacfgtx.dst_inc = 0;
dmac_drvdat.dmacfgtx.dst_bsize = DMAC_CHAN_DEST_BURST_8;
dmac_drvdat.dmacfgtx.dst_prph = DMAC_DEST_PERIP(DMA_PERID_SDCARD);
dmac_drvdat.dmacfgtx.flowctrl = DMAC_CHAN_FLOW_P_M2P;
if (lpc178x_dma_ch_get(
&dmac_drvdat.dmacfgtx, "dma_sd_tx", NULL, NULL) < 0)
{
dev_err(dmac_drvdat.dev,
"Error setting up SD card TX DMA channel\n");
ret = -ENODEV;
goto dma_no_txch;
}
/* Allocate a linked list for DMA support */
llptrtx = lpc178x_dma_alloc_llist(
dmac_drvdat.dmacfgtx.ch, NR_SG * 2);
if (llptrtx == 0) {
dev_err(dmac_drvdat.dev,
"Error allocating list buffer (MMC TX)\n");
ret = -ENOMEM;
goto dma_no_txlist;
}
/* Setup RX DMA channel */
dmac_drvdat.dmacfgrx.ch = DMA_CH_SDCARD_RX;
dmac_drvdat.dmacfgrx.tc_inten = 0;
dmac_drvdat.dmacfgrx.err_inten = 0;
dmac_drvdat.dmacfgrx.src_size = 4;
dmac_drvdat.dmacfgrx.src_inc = 0;
dmac_drvdat.dmacfgrx.src_bsize = DMAC_CHAN_SRC_BURST_8;
dmac_drvdat.dmacfgrx.src_prph = DMAC_SRC_PERIP(DMA_PERID_SDCARD);
dmac_drvdat.dmacfgrx.dst_size = 4;
dmac_drvdat.dmacfgrx.dst_inc = 1;
dmac_drvdat.dmacfgrx.dst_bsize = DMAC_CHAN_DEST_BURST_8;
dmac_drvdat.dmacfgrx.dst_prph = DMAC_DEST_PERIP(DMA_PERID_SDCARD);
dmac_drvdat.dmacfgrx.flowctrl = DMAC_CHAN_FLOW_D_P2M;
if (lpc178x_dma_ch_get(
&dmac_drvdat.dmacfgrx, "dma_sd_rx", NULL, NULL) < 0)
{
dev_err(dmac_drvdat.dev,
"Error setting up SD card RX DMA channel\n");
ret = -ENODEV;
goto dma_no_rxch;
}
/* Allocate a linked list for DMA support */
llptrrx = lpc178x_dma_alloc_llist(
dmac_drvdat.dmacfgrx.ch, NR_SG * 2);
if (llptrrx == 0) {
dev_err(dmac_drvdat.dev,
"Error allocating list buffer (MMC RX)\n");
ret = -ENOMEM;
goto dma_no_rxlist;
//......... portions of this code omitted .........
Developer ID: KroMignon, Project: linux-emcraft, Lines: 101, Source file: mmci.c
Example 7: temac_dma_bd_init
/**
* temac_dma_bd_init - Setup buffer descriptor rings
*/
static int temac_dma_bd_init(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct sk_buff *skb;
int i;
lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
if (!lp->rx_skb) {
dev_err(&ndev->dev,
"can't allocate memory for DMA RX buffer\n");
goto out;
}
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a DMA (bus) address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v) {
dev_err(&ndev->dev,
"unable to allocate DMA TX buffer descriptors");
goto out;
}
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v) {
dev_err(&ndev->dev,
"unable to allocate DMA RX buffer descriptors");
goto out;
}
memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
}
memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
for (i = 0; i < RX_BD_NUM; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
if (!skb) {
dev_err(&ndev->dev, "alloc_skb error %d\n", i);
goto out;
}
lp->rx_skb[i] = skb;
/* returns the DMA address of skb->data */
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
}
lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN);
/* 0x10220483 */
/* 0x00100483 */
lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN |
CHNL_CTRL_IRQ_IOE);
/* 0xff010283 */
lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
lp->dma_out(lp, RX_TAILDESC_PTR,
lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
return 0;
out:
temac_dma_bd_release(ndev);
return -ENOMEM;
}
Developer ID: CSCLOG, Project: beaglebone, Lines: 86, Source file: ll_temac_main.c
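One caveat with this example: the streaming mappings created by dma_map_single() are never checked. Current kernels expect each mapping to be verified with dma_mapping_error(); a hedged sketch of that check, reusing the loop's variables:

dma_addr_t pa = dma_map_single(ndev->dev.parent, skb->data,
			       XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(ndev->dev.parent, pa))
	goto out;	/* unwind exactly as for the other failures */
lp->rx_bd_v[i].phys = pa;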
Example 8: cfv_create_genpool
static int cfv_create_genpool(struct cfv_info *cfv)
{
int err;
/* dma_alloc can only allocate whole pages, and we need a more
* fine-grained allocation so we use genpool. We ask for space needed
* by IP and a full ring. If the dma allocation fails we retry with a
* smaller allocation size.
*/
err = -ENOMEM;
cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
(ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
return -EINVAL;
for (;;) {
if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
netdev_info(cfv->ndev, "Not enough device memory\n");
return -ENOMEM;
}
cfv->alloc_addr = dma_alloc_coherent(
cfv->vdev->dev.parent->parent,
cfv->allocsz, &cfv->alloc_dma,
GFP_ATOMIC);
if (cfv->alloc_addr)
break;
cfv->allocsz = (cfv->allocsz * 3) >> 2;
}
netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
cfv->allocsz);
/* Allocate on 128-byte boundaries (1 << 7) */
cfv->genpool = gen_pool_create(7, -1);
if (!cfv->genpool)
goto err;
err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
(phys_addr_t)virt_to_phys(cfv->alloc_addr),
cfv->allocsz, -1);
if (err)
goto err;
/* Reserve some memory for low memory situations. If we hit the roof
* in the memory pool, we stop TX flow and release the reserve.
*/
cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
cfv->reserved_size);
if (!cfv->reserved_mem) {
err = -ENOMEM;
goto err;
}
cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
return 0;
err:
cfv_destroy_genpool(cfv);
return err;
}
Developer ID: ReneNyffenegger, Project: linux, Lines: 62, Source file: caif_virtio.c
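Because dma_alloc_coherent() works at page granularity, this driver sub-allocates individual frame buffers from the genpool carved out of one large coherent block. A hedged sketch of how a single buffer might be taken and returned, reusing the fields set up above:

/* Sketch: one frame buffer, sized as in the allocation above. */
size_t len = ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr;
unsigned long buf = gen_pool_alloc(cfv->genpool, len);
if (!buf)
	return NULL;	/* pool exhausted; TX flow is stopped */
/* ... fill the buffer and post its address to the virtqueue ... */
gen_pool_free(cfv->genpool, buf, len);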
Example 9: sh_eth_ring_init
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int rx_ringsize, tx_ringsize, ret = 0;
/*
* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
* card needs room to do 8 byte alignment, +2 so we can reserve
* the first 2 bytes, and +16 gets room for the status word from the
* card.
*/
mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
(((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
if (mdp->cd->rpadir)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
GFP_KERNEL);
if (!mdp->rx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
ret = -ENOMEM;
return ret;
}
mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
GFP_KERNEL);
if (!mdp->tx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
ret = -ENOMEM;
goto skb_ring_free;
}
/* Allocate all Rx descriptors. */
rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
GFP_KERNEL);
if (!mdp->rx_ring) {
dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
rx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
mdp->dirty_rx = 0;
/* Allocate all Tx descriptors. */
tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
GFP_KERNEL);
if (!mdp->tx_ring) {
dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
tx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
return ret;
desc_ring_free:
/* free DMA buffer */
dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
skb_ring_free:
/* Free Rx and Tx skb ring buffer */
sh_eth_ring_free(ndev);
return ret;
}
Developer ID: ANFS, Project: ANFS-kernel, Lines: 70, Source file: sh_eth.c
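Note that this example passes NULL as the struct device, which very old kernels tolerated; modern kernels reject it, since the device's DMA constraints cannot be known without a real device. A hypothetical modernized form of the same call (assuming the platform device is reachable as mdp->pdev):

mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
				  &mdp->rx_desc_dma, GFP_KERNEL);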
Example 10: scc_enet_init
//......... portions of this code omitted .........
eap = (unsigned char *)&(ep->sen_paddrh);
for (i=5; i>=0; i--)
*eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i];
ep->sen_pper = 0; /* 'cause the book says so */
ep->sen_taddrl = 0; /* temp address (LSB) */
ep->sen_taddrm = 0;
ep->sen_taddrh = 0; /* temp address (MSB) */
/* Now allocate the host memory pages and initialize the
* buffer descriptors.
*/
bdp = cep->tx_bd_base;
for (i=0; i<TX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page.
*/
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
bdp++;
}
/* Set the last buffer to wrap.
*/
bdp--;
bdp->cbd_sc |= BD_SC_WRAP;
bdp = cep->rx_bd_base;
k = 0;
for (i=0; i<CPM_ENET_RX_PAGES; i++) {
/* Allocate a page.
*/
ba = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE,
&mem_addr, GFP_KERNEL);
/* BUG: no check for failure */
/* Initialize the BD for every fragment in the page.
*/
for (j=0; j<CPM_ENET_RX_FRPPG; j++) {
bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
bdp->cbd_bufaddr = mem_addr;
cep->rx_vaddr[k++] = ba;
mem_addr += CPM_ENET_RX_FRSIZE;
ba += CPM_ENET_RX_FRSIZE;
bdp++;
}
}
/* Set the last buffer to wrap.
*/
bdp--;
bdp->cbd_sc |= BD_SC_WRAP;
/* Let's re-initialize the channel now. We have to do it later
* than the manual describes because we have just now finished
* the BD initialization.
*/
cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_TRX) | CPM_CR_FLG;
while (cp->cp_cpcr & CPM_CR_FLG);
cep->skb_cur = cep->skb_dirty = 0;
sccp->scc_scce = 0xffff; /* Clear any pending events */
/* Enable interrupts for transmit error, complete frame ... */
//......... portions of this code omitted .........
Developer ID: FatSunHYS, Project: OSCourseDesign, Lines: 67, Source file: enet.c
Example 11: nusmart_pcm_probe
static int nusmart_pcm_probe(struct snd_soc_platform *platform)
{
int ret = 0;
DBG_PRINT("nusmart_pcm_probe\n");
pl330_data.base = __io_address(NS115_DMA_330S_BASE);
ret = request_irq(IRQ_NS115_DMA330_INTR6, nusmart_pl330_interrupt, IRQF_SHARED,
"alsa_pl330_dmac", &dma_chan_param);
if (ret)
{
DBG_PRINT("request irq failed\n");
goto out;
}
ret = request_irq(IRQ_NS115_DMA330_INTR7, nusmart_pl330_interrupt, IRQF_SHARED,
"alsa_pl330_dmac", &dma_chan_param);
if (ret)
{
DBG_PRINT("request irq failed\n");
goto err_free_irq1;
}
prtd_record = kzalloc(sizeof(*prtd_record), GFP_KERNEL);
if (prtd_record == NULL) {
DBG_PRINT("nusmart_pcm_probe can not alloc nusmart_runtime_data for record\n");
ret = -ENOMEM;
goto err_free_irq0;
}
prtd_record->desc_pool_virt = dma_alloc_coherent(NULL, PL330_POOL_SIZE, &(prtd_record->lli_start), GFP_KERNEL);
if(prtd_record->desc_pool_virt == NULL)
{
DBG_PRINT("nusmart_pcm_probe can not alloc dma descriptor for record\n");
ret = -ENOMEM;
goto err_free_prtd_record;
}
spin_lock_init(&prtd_record->lock);
prtd_playback = kzalloc(sizeof(*prtd_playback), GFP_KERNEL);
if (prtd_playback == NULL) {
DBG_PRINT("nusmart_pcm_probe can not alloc nusmart_runtime_data for playback\n");
ret = -ENOMEM;
goto err_free_prtd_record_pool;
}
prtd_playback->desc_pool_virt = dma_alloc_coherent(NULL, PL330_POOL_SIZE, &(prtd_playback->lli_start), GFP_KERNEL);
if(prtd_playback->desc_pool_virt == NULL)
{
DBG_PRINT("nusmart_pcm_probe can not alloc dma descriptor for record\n");
ret = -ENOMEM;
goto err_free_prtd_playback;
}
spin_lock_init(&prtd_playback->lock);
goto out;
err_free_prtd_playback:
kfree(prtd_playback);
err_free_prtd_record_pool:
dma_free_coherent(NULL, PL330_POOL_SIZE, prtd_record->desc_pool_virt, prtd_record->lli_start);
err_free_prtd_record:
kfree(prtd_record);
err_free_irq0:
free_irq(IRQ_NS115_DMA330_INTR7, &dma_chan_param);
err_free_irq1:
free_irq(IRQ_NS115_DMA330_INTR6, &dma_chan_param);
out:
return ret;
}
Developer ID: alessandroste, Project: testBSP, Lines: 78, Source file: dma.c
Example 12: rpmsg_probe
static int rpmsg_probe(struct virtio_device *vdev)
{
vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
const char *names[] = { "input", "output" };
struct virtqueue *vqs[2];
struct virtproc_info *vrp;
struct rproc *vrp_rproc;
void *bufs_va;
void *cpu_addr; /* buffer virtual address */
void *cpu_addr_dma; /* buffer DMA address' virtual address conversion */
void *rbufs_guest_addr_kva;
int err = 0, i;
size_t total_buf_space;
vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
if (!vrp)
return -ENOMEM;
vrp->vdev = vdev;
idr_init(&vrp->endpoints);
mutex_init(&vrp->endpoints_lock);
mutex_init(&vrp->tx_lock);
init_waitqueue_head(&vrp->sendq);
/* We expect two virtqueues, rx and tx (and in this order) */
err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
if (err)
goto free_vrp;
vrp->rvq = vqs[0];
vrp->svq = vqs[1];
/* we expect symmetric tx/rx vrings */
WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
virtqueue_get_vring_size(vrp->svq));
/* we need less buffers if vrings are small */
if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
else
vrp->num_bufs = MAX_RPMSG_NUM_BUFS;
total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
/* allocate coherent memory for the buffers */
bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
total_buf_space, &vrp->bufs_dma,
GFP_KERNEL);
if (!bufs_va) {
err = -ENOMEM;
goto vqs_del;
}
dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
(unsigned long long)vrp->bufs_dma);
/* half of the buffers is dedicated for RX */
vrp->rbufs = bufs_va;
/* and half is dedicated for TX */
vrp->sbufs = bufs_va + total_buf_space / 2;
vrp_rproc = vdev_to_rproc(vdev);
rbufs_guest_addr_kva = vrp->rbufs;
if (vrp_rproc->ops->kva_to_guest_addr_kva) {
rbufs_guest_addr_kva = vrp_rproc->ops->kva_to_guest_addr_kva(vrp_rproc, vrp->rbufs, vrp->rvq);
}
/* set up the receive buffers */
for (i = 0; i < vrp->num_bufs / 2; i++) {
struct scatterlist sg;
cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;
cpu_addr_dma = rbufs_guest_addr_kva + i*RPMSG_BUF_SIZE;
sg_init_one(&sg, cpu_addr_dma, RPMSG_BUF_SIZE);
err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
GFP_KERNEL);
WARN_ON(err); /* sanity check; this can't really happen */
}
/* suppress "tx-complete" interrupts */
virtqueue_disable_cb(vrp->svq);
vdev->priv = vrp;
/* if supported by the remote processor, enable the name service */
if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
/* a dedicated endpoint handles the name service msgs */
vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
vrp, RPMSG_NS_ADDR);
if (!vrp->ns_ept) {
dev_err(&vdev->dev, "failed to create the ns ept\n");
err = -ENOMEM;
goto free_coherent;
}
}
/* tell the remote processor it can start sending messages */
virtqueue_kick(vrp->rvq);
//......... portions of this code omitted .........
Developer ID: SovanKundu, Project: linux-xlnx, Lines: 101, Source file: virtio_rpmsg_bus.c
Example 13: csi_enc_enabling_tasks
//......... portions of this code omitted .........
}
/*!
* Enable encoder task
* @param private struct cam_data * mxc capture instance
*
* @return status
*/
static int csi_enc_enabling_tasks(void *private)
{
cam_data *cam = (cam_data *) private;
int err = 0;
CAMERA_TRACE("IPU:In csi_enc_enabling_tasks\n");
cam->dummy_frame.vaddress = dma_alloc_coherent(0,
PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
&cam->dummy_frame.paddress,
GFP_DMA | GFP_KERNEL);
if (cam->dummy_frame.vaddress == 0) {
pr_err("ERROR: v4l2 capture: Allocate dummy frame "
"failed.\n");
return -ENOBUFS;
}
cam->dummy_frame.buffer.type = V4L2_BUF_TYPE_PRIVATE;
cam->dummy_frame.buffer.length =
PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
cam->dummy_frame.buffer.m.offset = cam->dummy_frame.paddress;
ipu_clear_irq(IPU_IRQ_CSI0_OUT_EOF);
err = ipu_request_irq(IPU_IRQ_CSI0_OUT_EOF,
csi_enc_callback, 0, "Mxc Camera", cam);
if (err != 0) {
//......... portions of this code omitted .........
Developer ID: YCsuperlife, Project: imx53_kernel, Lines: 32, Source file: ipu_csi_enc.c
Example 14: srp_indirect_data
static int srp_indirect_data(struct scst_cmd *sc, struct srp_cmd *cmd,
struct srp_indirect_buf *id,
enum dma_data_direction dir, srp_rdma_t rdma_io,
int dma_map, int ext_desc)
{
struct iu_entry *iue = NULL;
struct srp_direct_buf *md = NULL;
struct scatterlist dummy, *sg = NULL;
dma_addr_t token = 0;
int err = 0;
int nmd, nsg = 0, len, sg_cnt = 0;
u32 tsize = 0;
enum dma_data_direction dma_dir;
iue = scst_cmd_get_tgt_priv(sc);
if (dir == DMA_TO_DEVICE) {
scst_cmd_get_write_fields(sc, &sg, &sg_cnt);
tsize = scst_cmd_get_bufflen(sc);
dma_dir = DMA_FROM_DEVICE;
} else {
sg = scst_cmd_get_sg(sc);
sg_cnt = scst_cmd_get_sg_cnt(sc);
tsize = scst_cmd_get_adjusted_resp_data_len(sc);
dma_dir = DMA_TO_DEVICE;
}
dprintk("%p %u %u %d %d\n", iue, tsize, be32_to_cpu(id->len),
be32_to_cpu(cmd->data_in_desc_cnt),
be32_to_cpu(cmd->data_out_desc_cnt));
len = min(tsize, be32_to_cpu(id->len));
nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);
if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
(dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
md = &id->desc_list[0];
goto rdma;
}
if (ext_desc && dma_map) {
md = dma_alloc_coherent(iue->target->dev,
be32_to_cpu(id->table_desc.len),
&token, GFP_KERNEL);
if (!md) {
eprintk("Can't get dma memory %u\n", id->table_desc.len);
return -ENOMEM;
}
sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
sg_dma_address(&dummy) = token;
sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
be32_to_cpu(id->table_desc.len));
if (err) {
eprintk("Error copying indirect table %d\n", err);
goto free_mem;
}
} else {
eprintk("This command uses external indirect buffer\n");
return -EINVAL;
}
rdma:
if (dma_map) {
nsg = dma_map_sg(iue->target->dev, sg, sg_cnt, dma_dir);
if (!nsg) {
eprintk("fail to map %p %d\n", iue, sg_cnt);
err = -ENOMEM;
goto free_mem;
}
}
err = rdma_io(sc, sg, nsg, md, nmd, dir, len);
if (dma_map)
dma_unmap_sg(iue->target->dev, sg, nsg, dma_dir);
free_mem:
if (token && dma_map)
dma_free_coherent(iue->target->dev,
be32_to_cpu(id->table_desc.len), md, token);
return err;
}
Developer ID: Chilledheart, Project: scst, Lines: 85, Source file: libsrp.c
Example 15: iwl_pcie_ctxt_info_init
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_context_info *ctxt_info;
struct iwl_context_info_rbd_cfg *rx_cfg;
u32 control_flags = 0, rb_size;
int ret;
ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
&trans_pcie->ctxt_info_dma_addr,
GFP_KERNEL);
if (!ctxt_info)
return -ENOMEM;
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
/* size is in DWs */
ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
switch (trans_pcie->rx_buf_size) {
case IWL_AMSDU_2K:
rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
break;
case IWL_AMSDU_4K:
rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
break;
case IWL_AMSDU_8K:
rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
break;
case IWL_AMSDU_12K:
rb_size = IWL_CTXT_INFO_RB_SIZE_12K;
break;
default:
WARN_ON(1);
rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
}
BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
IWL_CTXT_INFO_RB_CB_SIZE_POS) |
(rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
ctxt_info->control.control_flags = cpu_to_le32(control_flags);
/* initialize RX default queue */
rx_cfg = &ctxt_info->rbd_cfg;
rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
if (ret) {
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
ctxt_info, trans_pcie->ctxt_info_dma_addr);
return ret;
}
trans_pcie->ctxt_info = ctxt_info;
iwl_enable_interrupts(trans);
/* Configure debug, if exists */
if (iwl_pcie_dbg_on(trans))
iwl_pcie_apply_destination(trans);
/* kick FW self load */
iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
/* Context info will be released upon alive or failure to get one */
return 0;
}
Developer ID: Anjali05, Project: linux, Lines: 82, Source file: ctxt-info.c
Example 16: q6audio_init
static int q6audio_init(void)
{
struct audio_client *ac = 0;
int res;
mutex_lock(&audio_lock);
if (ac_control) {
res = 0;
goto done;
}
pr_info("audio: init: codecs\n");
icodec_rx_clk = clk_get(0, "icodec_rx_clk");
icodec_tx_clk = clk_get(0, "icodec_tx_clk");
ecodec_clk = clk_get(0, "ecodec_clk");
sdac_clk = clk_get(0, "sdac_clk");
audio_data = dma_alloc_coherent(NULL, 4096, &audio_phys, GFP_KERNEL);
adsp = dal_attach(AUDIO_DAL_DEVICE, AUDIO_DAL_PORT,
callback, 0);
if (!adsp) {
pr_err("audio_init: cannot attach to adsp\n");
res = -ENODEV;
goto done;
}
pr_info("audio: init: INIT\n");
audio_init(adsp);
dal_trace(adsp);
ac = audio_client_alloc(0);
if (!ac) {
pr_err("audio_init: cannot allocate client\n");
res = -ENOMEM;
goto done;
}
pr_info("audio: init: OPEN control\n");
if (audio_open_control(ac)) {
pr_err("audio_init: cannot open control channel\n");
res = -ENODEV;
goto done;
}
pr_info("audio: init: attach ACDB\n");
acdb = dal_attach(ACDB_DAL_DEVICE, ACDB_DAL_PORT, 0, 0);
if (!acdb) {
pr_err("audio_init: cannot attach to acdb channel\n");
res = -ENODEV;
goto done;
}
pr_info("audio: init: attach ADIE\n");
adie = dal_attach(ADIE_DAL_DEVICE, ADIE_DAL_PORT, 0, 0);
if (!adie) {
pr_err("audio_init: cannot attach to adie\n");
res = -ENODEV;
goto done;
}
if (analog_ops->init)
analog_ops->init();
res = 0;
ac_control = ac;
wake_lock_init(&idlelock, WAKE_LOCK_IDLE, "audio_pcm_idle");
wake_lock_init(&wakelock, WAKE_LOCK_SUSPEND, "audio_pcm_suspend");
done:
if ((res < 0) && ac)
audio_client_free(ac);
mutex_unlock(&audio_lock);
return res;
}
Developer ID: Savaged-Zen, Project: Savaged-Zen-Inc, Lines: 73, Source file: q6audio.c
Example 17: pruss_probe
static int pruss_probe(struct platform_device *dev)
{
struct uio_info *p;
struct uio_pruss_dev *gdev;
struct resource *regs_prussio;
int ret = -ENODEV, cnt = 0, len;
struct uio_pruss_pdata *pdata = dev->dev.platform_data;
gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL);
if (!gdev)
return -ENOMEM;
gdev->info = kzalloc(sizeof(*p) * MAX_PRUSS_EVT, GFP_KERNEL);
if (!gdev->info) {
kfree(gdev);
return -ENOMEM;
}
/* Power on PRU in case its not done as part of boot-loader */
gdev->pruss_clk = clk_get(&dev->dev, "pruss");
if (IS_ERR(gdev->pruss_clk)) {
dev_err(&dev->dev, "Failed to get clock\n");
kfree(gdev->info);
kfree(gdev);
ret = PTR_ERR(gdev->pruss_clk);
return ret;
} else {
clk_enable(gdev->pruss_clk);
}
regs_prussio = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!regs_prussio) {
dev_err(&dev->dev, "No PRUSS I/O resource specified\n");
goto out_free;
}
if (!regs_prussio->start) {
dev_err(&dev->dev, "Invalid memory resource\n");
goto out_free;
}
if (pdata->sram_pool) {
gdev->sram_pool = pdata->sram_pool;
gdev->sram_vaddr =
gen_pool_alloc(gdev->sram_pool, sram_pool_sz);
if (!gdev->sram_vaddr) {
dev_err(&dev->dev, "Could not allocate SRAM pool\n");
goto out_free;
}
gdev->sram_paddr =
gen_pool_virt_to_phys(gdev->sram_pool,
gdev->sram_vaddr);
}
gdev->ddr_vaddr = dma_alloc_coherent(&dev->dev, extram_pool_sz,
&(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA);
if (!gdev->ddr_vaddr) {
dev_err(&dev->dev, "Could not allocate external memory\n");
goto out_free;
}
len = resource_size(regs_prussio);
gdev->prussio_vaddr = ioremap(regs_prussio->start, len);
if (!gdev->prussio_vaddr) {
dev_err(&dev->dev, "Can't remap PRUSS I/O address range\n");
goto out_free;
}
gdev->pintc_base = pdata->pintc_base;
gdev->hostirq_start = platform_get_irq(dev, 0);
for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) {
p->mem[0].addr = regs_prussio->start;
p->mem[0].size = resource_size(regs_prussio);
p->mem[0].memtype = UIO_MEM_PHYS;
p->mem[1].addr = gdev->sram_paddr;
p->mem[1].size = sram_pool_sz;
p->mem[1].memtype = UIO_MEM_PHYS;
p->mem[2].addr = gdev->ddr_paddr;
p->mem[2].size = extram_pool_sz;
p->mem[2].
//......... remainder of this code omitted .........