/**
* sg_next - return the next scatterlist entry in a list
* @sg: The current sg entry
*
* Description:
* Usually the next entry will be @sg@ + 1, but if this sg element is part
* of a chained scatterlist, it could jump to the start of a new
* scatterlist array.
*
**/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
if (sg_is_last(sg))
return NULL;
sg++;
if (unlikely(sg_is_chain(sg)))
sg = sg_chain_ptr(sg);
return sg;
}
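/*
 * Minimal usage sketch (not from the original source): walk a possibly
 * chained scatterlist with sg_next() and sum the segment lengths.
 * sg_next() returns NULL after the terminating entry, so the loop ends
 * on its own.
 */
static unsigned int sg_total_len(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;

	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}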
/**
* sg_last - return the last scatterlist entry in a list
* @sgl: First entry in the scatterlist
* @nents: Number of entries in the scatterlist
*
* Description:
* Should only be used casually, it (currently) scans the entire list
* to get the last entry.
*
* Note that the @sgl@ pointer passed in need not be the first one,
* the important bit is that @nents@ denotes the number of entries that
* exist from @sgl@.
*
**/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
struct scatterlist *ret = &sgl[nents - 1];
#else
struct scatterlist *sg, *ret = NULL;
unsigned int i;
for_each_sg(sgl, sg, nents, i)
ret = sg;
#endif
#ifdef CONFIG_DEBUG_SG
BUG_ON(sgl[0].sg_magic != SG_MAGIC);
BUG_ON(!sg_is_last(ret));
#endif
return ret;
}
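/*
 * Minimal sketch (illustrative only): without chaining, sg_last() on a
 * freshly initialized table simply returns the final array element,
 * which sg_init_table() has already marked as the list terminator.
 */
static void sg_last_demo(void)
{
	struct scatterlist table[4];

	sg_init_table(table, ARRAY_SIZE(table));
	BUG_ON(sg_last(table, ARRAY_SIZE(table)) != &table[3]);
}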
static int __init example_init(void)
{
int i;
unsigned int ret;
unsigned int nents;
struct scatterlist sg[10];
printk(KERN_INFO "DMA fifo test start\n");
if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) {
printk(KERN_WARNING "error kfifo_alloc\n");
return -ENOMEM;
}
printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo));
kfifo_in(&fifo, "test", 4);
for (i = 0; i != 9; i++)
kfifo_put(&fifo, i);
/* kick away first byte */
kfifo_skip(&fifo);
printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
/*
* Configure the kfifo buffer to receive data from DMA input.
*
* .--------------------------------------.
* | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 |
* |---|------------------|---------------|
* \_/ \________________/ \_____________/
* \ \ \
* \ \_allocated data \
* \_*free space* \_*free space*
*
* We need two different SG entries: one for the free space area at the
* end of the kfifo buffer (19 bytes) and another for the first free
* byte at the beginning, after the kfifo_skip().
*/
sg_init_table(sg, ARRAY_SIZE(sg));
nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
printk(KERN_INFO "DMA sgl entries: %d\n", nents);
if (!nents) {
/* fifo is full and no sgl was created */
printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
return -EIO;
}
/* receive data */
printk(KERN_INFO "scatterlist for receive:\n");
for (i = 0; i < nents; i++) {
printk(KERN_INFO
"sg[%d] -> "
"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
i, sg[i].page_link, sg[i].offset, sg[i].length);
if (sg_is_last(&sg[i]))
break;
}
/* put here your code to set up and execute the dma operation */
/* ... */
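/*
 * A hedged sketch of what the elided step could look like with the
 * dmaengine API; "chan" would be a previously requested DMA channel
 * (hypothetical here, not part of the original example):
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_slave_sg(chan, sg, nents,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */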
/* example: zero bytes received */
ret = 0;
/* finish the dma operation and update the received data */
kfifo_dma_in_finish(&fifo, ret);
/* Prepare to transmit data, example: 8 bytes */
nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
printk(KERN_INFO "DMA sgl entries: %d\n", nents);
if (!nents) {
/* no data was available and no sgl was created */
printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
return -EIO;
}
printk(KERN_INFO "scatterlist for transmit:\n");
for (i = 0; i < nents; i++) {
printk(KERN_INFO
"sg[%d] -> "
"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
i, sg[i].page_link, sg[i].offset, sg[i].length);
if (sg_is_last(&sg[i]))
break;
}
/* put here your code to set up and execute the dma operation */
/* ... */
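/*
 * Mirror image of the receive sketch above, again hedged and with a
 * hypothetical "chan": prepare the descriptor with DMA_MEM_TO_DEV for
 * the transmit direction, then dmaengine_submit() and
 * dma_async_issue_pending() as before.
 */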
/* example: 5 bytes transmitted */
ret = 5;
/* finish the dma operation and update the transmitted data */
kfifo_dma_out_finish(&fifo, ret);
//......... some code omitted here .........
/*
 * Preps EP pointers & data counters for the next packet
 * (fragment of the request) xfer; returns true if there
 * is a next transfer, and false if all bytes in the
 * current request have been xfered.
 */
static inline bool prep_for_next_xfer(cy_as_hal_device_tag tag, uint8_t ep)
{
if (!end_points[ep].sg_list_enabled) {
/*
* no further transfers for non storage EPs
* (like EP2 during firmware download, done
* in 64 byte chunks)
*/
if (end_points[ep].req_xfer_cnt >= end_points[ep].req_length) {
DBGPRN("<1> %s():RQ sz:%d non-_sg EP:%d completed\n",
__func__, end_points[ep].req_length, ep);
/*
* no more transfers, we are done with the request
*/
return false;
}
/*
* calculate size of the next DMA xfer, corner
* case for non-storage EPs where the transfer size
* is not equal to N * HAL_DMA_PKT_SZ
*/
if ((end_points[ep].req_length - end_points[ep].req_xfer_cnt)
>= HAL_DMA_PKT_SZ) {
end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
} else {
/*
* that would be the last chunk less
* than P-port max size
*/
end_points[ep].dma_xfer_sz = end_points[ep].req_length -
end_points[ep].req_xfer_cnt;
}
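		/*
		 * Worked example (illustrative): with HAL_DMA_PKT_SZ == 512,
		 * a 1000-byte request is sent as 512 + 488; the second pass
		 * through here takes the "last chunk" branch above.
		 */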
return true;
}
/*
* for SG_list assisted dma xfers
* are we done with current SG ?
*/
if (end_points[ep].seg_xfer_cnt == end_points[ep].sg_p->length) {
/*
* was it the Last SG segment on the list ?
*/
if (sg_is_last(end_points[ep].sg_p)) {
DBGPRN("<1> %s: EP:%d completed,"
"%d bytes xfered\n",
__func__,
ep,
end_points[ep].req_xfer_cnt
);
return false;
} else {
/*
* There are more SG segments in the current
* request's sg list; set up the next segment.
*/
end_points[ep].seg_xfer_cnt = 0;
end_points[ep].sg_p = sg_next(end_points[ep].sg_p);
/* set data pointer for next DMA sg transfer */
end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
DBGPRN("<1> %s new SG:_va:%p\n\n",
__func__, end_points[ep].data_p);
}
}
/*
* for sg list xfers it will always be 512 or 1024
*/
end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
/*
* next transfer is required
*/
return true;
}
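/*
 * Hedged usage sketch, not from the original driver: a DMA-completion
 * path that updates the counters prep_for_next_xfer() relies on, then
 * either arms the next chunk or retires the request. Both helpers are
 * hypothetical stand-ins for the HAL's real routines.
 */
void start_dma_chunk(cy_as_hal_device_tag tag, uint8_t ep);	/* hypothetical */
void finish_request(cy_as_hal_device_tag tag, uint8_t ep);	/* hypothetical */

static void dma_done_handler(cy_as_hal_device_tag tag, uint8_t ep)
{
	end_points[ep].req_xfer_cnt += end_points[ep].dma_xfer_sz;
	end_points[ep].seg_xfer_cnt += end_points[ep].dma_xfer_sz;

	if (prep_for_next_xfer(tag, ep))
		start_dma_chunk(tag, ep);	/* next dma_xfer_sz bytes from data_p */
	else
		finish_request(tag, ep);
}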
/**
* bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
* @master: SPI master
* @tfr: SPI transfer
* @bs: BCM2835 SPI controller
* @cs: CS register
*
* A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
* Only the final write access is permitted to transmit less than 4 bytes, the
* SPI controller deduces its intended size from the DLEN register.
*
* If a TX or RX sglist contains multiple entries, one per page, and the first
* entry starts in the middle of a page, that first entry's length may not be
* a multiple of 4. Subsequent entries are fine because they span an entire
* page, hence do have a length that's a multiple of 4.
*
* This cannot happen with kmalloc'ed buffers (which is what most clients use)
* because they are contiguous in physical memory and therefore not split on
* page boundaries by spi_map_buf(). But it *can* happen with vmalloc'ed
* buffers.
*
* The DMA engine is incapable of combining sglist entries into a continuous
* stream of 4 byte chunks, it treats every entry separately: A TX entry is
* rounded up to a multiple of 4 bytes by transmitting surplus bytes, an RX
* entry is rounded up by throwing away received bytes.
*
* Overcome this limitation by transferring the first few bytes without DMA:
* E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
* write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
* The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
* the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
*
* Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
* write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
* Caution, the additional 4 bytes spill over to the second TX sglist entry
* if the length of the first is *exactly* 1.
*
* At most 6 bytes are written and at most 3 bytes read. Do we know the
* transfer has this many bytes? Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
*
* The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
* by the DMA engine. Toggling the DMA Enable flag in the CS register switches
* the width but also garbles the FIFO's contents. The prologue must therefore
* be transmitted in 32-bit width to ensure that the following DMA transfer can
* pick up the residue in the RX FIFO in ungarbled form.
*/
static void bcm2835_spi_transfer_prologue(struct spi_master *master,
struct spi_transfer *tfr,
struct bcm2835_spi *bs,
u32 cs)
{
int tx_remaining;
bs->tfr = tfr;
bs->tx_prologue = 0;
bs->rx_prologue = 0;
bs->tx_spillover = false;
if (!sg_is_last(&tfr->tx_sg.sgl[0]))
bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
if (!sg_is_last(&tfr->rx_sg.sgl[0])) {
bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;
if (bs->rx_prologue > bs->tx_prologue) {
if (sg_is_last(&tfr->tx_sg.sgl[0])) {
bs->tx_prologue = bs->rx_prologue;
} else {
bs->tx_prologue += 4;
bs->tx_spillover =
!(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
}
}
}
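	/*
	 * Worked example (illustrative, using the numbers from the
	 * comment above): first TX entry 23 bytes, first RX entry 42
	 * bytes -> tx_prologue = 23 & 3 = 3, rx_prologue = 42 & 3 = 2;
	 * rx_prologue <= tx_prologue, so nothing more to do. Had
	 * rx_prologue been 3 with tx_prologue 1, tx_prologue would
	 * become 1 + 4 = 5, with tx_spillover true iff the first TX
	 * entry is shorter than 4 bytes (here, exactly 1, since
	 * 1 & ~3 == 0).
	 */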
/* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
if (!bs->tx_prologue)
return;
/* Write and read RX prologue. Adjust first entry in RX sglist. */
if (bs->rx_prologue) {
bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
| BCM2835_SPI_CS_DMAEN);
bcm2835_wr_fifo_count(bs, bs->rx_prologue);
bcm2835_wait_tx_fifo_empty(bs);
bcm2835_rd_fifo_count(bs, bs->rx_prologue);
bcm2835_spi_reset_hw(master);
dma_sync_single_for_device(master->dma_rx->device->dev,
sg_dma_address(&tfr->rx_sg.sgl[0]),
bs->rx_prologue, DMA_FROM_DEVICE);
sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
}
/*
 * Write remaining TX prologue. Adjust first entry in TX sglist.
 * Also adjust second entry if prologue spills over to it.
 */
//......... some code omitted here .........