C++ prefetchw Function Code Examples


This article collects typical usage examples of the C++ prefetchw function. If you have been wondering what prefetchw actually does, how to call it, or what real-world uses of it look like, the hand-picked code samples below may help.



Below are 20 code examples of the prefetchw function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
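As a quick orientation before the examples: on Linux, prefetchw() is declared in <linux/prefetch.h> and hints to the CPU that the given address will soon be written, so the cache line can be fetched in an exclusive (writable) state ahead of the store; on architectures without a write-prefetch instruction it compiles to a no-op. The snippet below is a minimal illustrative sketch written for this article, not taken from any of the projects cited here; bump_counters and the prefetch stride of 4 are assumptions chosen purely for illustration.

#include <linux/prefetch.h>

/* Illustrative sketch: increment every element of an array of counters.
 * Prefetching an element a few iterations ahead *for write* can hide the
 * read-for-ownership latency of the upcoming store.  The stride of 4 is an
 * arbitrary example value, not a tuned one.
 */
static void bump_counters(unsigned long *counters, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (i + 4 < n)
			prefetchw(&counters[i + 4]);
		counters[i]++;
	}
}

The examples that follow use the same idea in various forms: prefetching a destination buffer before copying data into it, or prefetching page flags before updating them.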

Example 1: isp1362_read_ptd

static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
			 ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the callers transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
Developer: stretched, Project: linux, Lines of code: 31, Source file: isp1362-hcd.c


Example 2: lcpit_advance_one_byte_huge

/* Like lcpit_advance_one_byte(), but for buffers larger than
 * MAX_NORMAL_BUFSIZE.  */
static inline u32
lcpit_advance_one_byte_huge(const u32 cur_pos,
			    u32 pos_data[restrict],
			    u64 intervals64[restrict],
			    u32 prefetch_next[restrict],
			    struct lz_match matches[restrict],
			    const bool record_matches)
{
	u32 interval_idx;
	u32 next_interval_idx;
	u64 cur;
	u64 next;
	u32 match_pos;
	struct lz_match *matchptr;

	interval_idx = pos_data[cur_pos];

	prefetchw(&intervals64[pos_data[prefetch_next[0]] & HUGE_POS_MASK]);

	prefetch_next[0] = intervals64[prefetch_next[1]] & HUGE_POS_MASK;
	prefetchw(&pos_data[prefetch_next[0]]);

	prefetch_next[1] = pos_data[cur_pos + 3] & HUGE_POS_MASK;
	prefetchw(&intervals64[prefetch_next[1]]);

	pos_data[cur_pos] = 0;

	while ((next = intervals64[interval_idx]) & HUGE_UNVISITED_TAG) {
		intervals64[interval_idx] = (next & HUGE_LCP_MASK) | cur_pos;
		interval_idx = next & HUGE_POS_MASK;
	}

	matchptr = matches;
	while (next & HUGE_LCP_MASK) {
		cur = next;
		do {
			match_pos = next & HUGE_POS_MASK;
			next_interval_idx = pos_data[match_pos];
			next = intervals64[next_interval_idx];
		} while (next > cur);
		intervals64[interval_idx] = (cur & HUGE_LCP_MASK) | cur_pos;
		pos_data[match_pos] = interval_idx;
		if (record_matches) {
			matchptr->length = cur >> HUGE_LCP_SHIFT;
			matchptr->offset = cur_pos - match_pos;
			matchptr++;
		}
		interval_idx = next_interval_idx;
	}
	return matchptr - matches;
}
Developer: AudienceScience, Project: wimlib, Lines of code: 53, Source file: lcpit_matchfinder.c


Example 3: read_fifo

/** Read to request from FIFO (max read == bytes in fifo)
 *  Return:  0 = still running, 1 = completed, negative = errno
 *  NOTE: INDEX register must be set for EP
 */
static int read_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
{
	u32 csr;
	u8 *buf;
	unsigned bufferspace, count, is_short;
	volatile u32 *fifo = (volatile u32 *)ep->fifo;

	/* make sure there's a packet in the FIFO. */
	csr = usb_read(ep->csr1);
	if (!(csr & USB_OUT_CSR1_OUT_PKT_RDY)) {
		DEBUG("%s: Packet NOT ready!\n", __func__);
		return -EINVAL;
	}

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);
	bufferspace = req->req.length - req->req.actual;

	/* read all bytes from this packet */
	count = usb_read(USB_OUT_FIFO_WC1);
	req->req.actual += min(count, bufferspace);

	is_short = (count < ep->ep.maxpacket);
	DEBUG("read %s %02x, %d bytes%s req %p %d/%d\n",
	      ep->ep.name, csr, count,
	      is_short ? "/S" : "", req, req->req.actual, req->req.length);

	while (likely(count-- != 0)) {
		u8 byte = (u8) (*fifo & 0xff);

		if (unlikely(bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				printk(KERN_WARNING "%s overflow %d\n",
				       ep->ep.name, count);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			bufferspace--;
		}
	}

	usb_clear(USB_OUT_CSR1_OUT_PKT_RDY, ep->csr1);

	/* completion */
	if (is_short || req->req.actual == req->req.length) {
		done(ep, req, 0);
		usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1);

		if (list_empty(&ep->queue))
			pio_irq_disable(ep_index(ep));
		return 1;
	}

	/* finished that packet.  the next one may be waiting... */
	return 0;
}
Developer: ANFS, Project: ANFS-kernel, Lines of code: 64, Source file: lh7a40x_udc.c


Example 4: mpage_end_io_read

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		if (bio_flagged(bio, BIO_BAIO)) {
			struct ba_iocb *baiocb =
				(struct ba_iocb *)bio->bi_private2;
			BUG_ON(!PageBaio(page));
			ClearPageBaio(page);
			if (!uptodate)
				baiocb->io_error = -EIO;
			baiocb->result += bvec->bv_len;
			baiocb_put(baiocb);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
Developer: 285452612, Project: ali_kernel, Lines of code: 43, Source file: mpage.c


Example 5: bi_write_complete

/* completion handler for BIO writes */
static int bi_write_complete(struct bio *bio, unsigned int bytes_done, int error)
{
    const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
    struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

    if (bio->bi_size)
        return 1;

    if(!uptodate)
        err("bi_write_complete: not uptodate\n");

    do {
        struct page *page = bvec->bv_page;
        DEBUG(3, "Cleaning up page %ld\n", page->index);
        if (--bvec >= bio->bi_io_vec)
            prefetchw(&bvec->bv_page->flags);

        if (uptodate) {
            SetPageUptodate(page);
        } else {
            ClearPageUptodate(page);
            SetPageError(page);
        }
        ClearPageDirty(page);
        unlock_page(page);
        page_cache_release(page);
    } while (bvec >= bio->bi_io_vec);

    complete((struct completion*)bio->bi_private);
    return 0;
}
Developer: iPodLinux, Project: linux-2.6.7-ipod, Lines of code: 32, Source file: blkmtd.c


Example 6: build_LCPIT

/*
 * Use the suffix array accompanied with the longest-common-prefix array ---
 * which in combination can be called the "enhanced suffix array" --- to
 * simulate a bottom-up traversal of the corresponding suffix tree, or
 * equivalently the lcp-interval tree.  Do so in suffix rank order, but save the
 * superinterval references needed for later bottom-up traversal of the tree in
 * suffix position order.
 *
 * To enumerate the lcp-intervals, this algorithm scans the suffix array and its
 * corresponding LCP array linearly.  While doing so, it maintains a stack of
 * lcp-intervals that are currently open, meaning that their left boundaries
 * have been seen but their right boundaries have not.  The bottom of the stack
 * is the interval which covers the entire suffix array (this has lcp=0), and
 * the top of the stack is the deepest interval that is currently open (this has
 * the greatest lcp of any interval on the stack).  When this algorithm opens an
 * lcp-interval, it assigns it a unique index in intervals[] and pushes it onto
 * the stack.  When this algorithm closes an interval, it pops it from the stack
 * and sets the intervals[] entry of that interval to the index and lcp of that
 * interval's superinterval, which is the new top of the stack.
 *
 * This algorithm also set pos_data[pos] for each suffix position 'pos' to the
 * index and lcp of the deepest lcp-interval containing it.  Alternatively, we
 * can interpret each suffix as being associated with a singleton lcp-interval,
 * or leaf of the suffix tree.  With this interpretation, an entry in pos_data[]
 * is the superinterval reference for one of these singleton lcp-intervals and
 * therefore is not fundamentally different from an entry in intervals[].
 *
 * To reduce memory usage, this algorithm re-uses the suffix array's memory to
 * store the generated intervals[] array.  This is possible because SA and LCP
 * are accessed linearly, and no more than one interval is generated per suffix.
 *
 * The techniques used in this algorithm are described in various published
 * papers.  The generation of lcp-intervals from the suffix array (SA) and the
 * longest-common-prefix array (LCP) is given as Algorithm BottomUpTraverse in
 * Kasai et al. (2001) and Algorithm 4.1 ("Computation of lcp-intervals") in
 * Abouelhoda et al. (2004).  Both these papers note the equivalence between
 * lcp-intervals (including the singleton lcp-interval for each suffix) and
 * nodes of the suffix tree.  Abouelhoda et al. (2004) furthermore applies
 * bottom-up traversal of the lcp-interval tree to Lempel-Ziv factorization, as
 * does Chen at al. (2008).  Algorithm CPS1b of Chen et al. (2008) dynamically
 * re-uses the suffix array during bottom-up traversal of the lcp-interval tree.
 *
 * References:
 *
 *	Kasai et al. Linear-Time Longest-Common-Prefix Computation in Suffix
 *	Arrays and Its Applications.  2001.  CPM '01 Proceedings of the 12th
 *	Annual Symposium on Combinatorial Pattern Matching pp. 181-192.
 *
 *	M.I. Abouelhoda, S. Kurtz, E. Ohlebusch.  2004.  Replacing Suffix Trees
 *	With Enhanced Suffix Arrays.  Journal of Discrete Algorithms Volume 2
 *	Issue 1, March 2004, pp. 53-86.
 *
 *	G. Chen, S.J. Puglisi, W.F. Smyth.  2008.  Lempel-Ziv Factorization
 *	Using Less Time & Space.  Mathematics in Computer Science June 2008,
 *	Volume 1, Issue 4, pp. 605-623.
 */
static void
build_LCPIT(u32 intervals[restrict], u32 pos_data[restrict], const u32 n)
{
	u32 * const SA_and_LCP = intervals;
	u32 next_interval_idx;
	u32 open_intervals[LCP_MAX + 1];
	u32 *top = open_intervals;
	u32 prev_pos = SA_and_LCP[0] & POS_MASK;

	*top = 0;
	intervals[0] = 0;
	next_interval_idx = 1;

	for (u32 r = 1; r < n; r++) {
		const u32 next_pos = SA_and_LCP[r] & POS_MASK;
		const u32 next_lcp = SA_and_LCP[r] & LCP_MASK;
		const u32 top_lcp = *top & LCP_MASK;

		prefetchw(&pos_data[SA_and_LCP[r + PREFETCH_SAFETY] & POS_MASK]);

		if (next_lcp == top_lcp) {
			/* Continuing the deepest open interval  */
			pos_data[prev_pos] = *top;
		} else if (next_lcp > top_lcp) {
			/* Opening a new interval  */
			*++top = next_lcp | next_interval_idx++;
			pos_data[prev_pos] = *top;
		} else {
			/* Closing the deepest open interval  */
			pos_data[prev_pos] = *top;
			for (;;) {
				const u32 closed_interval_idx = *top-- & POS_MASK;
				const u32 superinterval_lcp = *top & LCP_MASK;

				if (next_lcp == superinterval_lcp) {
					/* Continuing the superinterval */
					intervals[closed_interval_idx] = *top;
					break;
				} else if (next_lcp > superinterval_lcp) {
					/* Creating a new interval that is a
					 * superinterval of the one being
					 * closed, but still a subinterval of
					 * its superinterval  */
					*++top = next_lcp | next_interval_idx++;
//......... remainder of this function omitted .........
Developer: AudienceScience, Project: wimlib, Lines of code: 101, Source file: lcpit_matchfinder.c


Example 7: build_LCP

/*
 * Build the LCP (Longest Common Prefix) array in linear time.
 *
 * LCP[r] will be the length of the longest common prefix between the suffixes
 * with positions SA[r - 1] and SA[r].  LCP[0] will be undefined.
 *
 * Algorithm taken from Kasai et al. (2001), but modified slightly:
 *
 *  - With bytes there is no realistic way to reserve a unique symbol for
 *    end-of-buffer, so use explicit checks for end-of-buffer.
 *
 *  - For decreased memory usage and improved memory locality, pack the two
 *    logically distinct SA and LCP arrays into a single array SA_and_LCP.
 *
 *  - Since SA_and_LCP is accessed randomly, improve the cache behavior by
 *    reading several entries ahead in ISA and prefetching the upcoming
 *    SA_and_LCP entry.
 *
 *  - If an LCP value is less than the minimum match length, then store 0.  This
 *    avoids having to do comparisons against the minimum match length later.
 *
 *  - If an LCP value is greater than the "nice match length", then store the
 *    "nice match length".  This caps the number of bits needed to store each
 *    LCP value, and this caps the depth of the LCP-interval tree, without
 *    usually hurting the compression ratio too much.
 *
 * References:
 *
 *	Kasai et al.  2001.  Linear-Time Longest-Common-Prefix Computation in
 *	Suffix Arrays and Its Applications.  CPM '01 Proceedings of the 12th
 *	Annual Symposium on Combinatorial Pattern Matching pp. 181-192.
 */
static void
build_LCP(u32 SA_and_LCP[restrict], const u32 ISA[restrict],
	  const u8 T[restrict], const u32 n,
	  const u32 min_lcp, const u32 max_lcp)
{
	u32 h = 0;
	for (u32 i = 0; i < n; i++) {
		const u32 r = ISA[i];
		prefetchw(&SA_and_LCP[ISA[i + PREFETCH_SAFETY]]);
		if (r > 0) {
			const u32 j = SA_and_LCP[r - 1] & POS_MASK;
			const u32 lim = min(n - i, n - j);
			while (h < lim && T[i + h] == T[j + h])
				h++;
			u32 stored_lcp = h;
			if (stored_lcp < min_lcp)
				stored_lcp = 0;
			else if (stored_lcp > max_lcp)
				stored_lcp = max_lcp;
			SA_and_LCP[r] |= stored_lcp << LCP_SHIFT;
			if (h > 0)
				h--;
		}
	}
}
Developer: AudienceScience, Project: wimlib, Lines of code: 57, Source file: lcpit_matchfinder.c


Example 8: fb_counter_netrx

static int fb_counter_netrx(const struct fblock * const fb,
			    struct sk_buff * const skb,
			    enum path_type * const dir)
{
	int drop = 0;
	unsigned int seq;
	struct fb_counter_priv __percpu *fb_priv_cpu;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(fb->private_data));
	prefetchw(skb->cb);
	do {
		seq = read_seqbegin(&fb_priv_cpu->lock);
		write_next_idp_to_skb(skb, fb->idp, fb_priv_cpu->port[*dir]);
		if (fb_priv_cpu->port[*dir] == IDP_UNKNOWN)
			drop = 1;
	} while (read_seqretry(&fb_priv_cpu->lock, seq));

	u64_stats_update_begin(&fb_priv_cpu->syncp);
	fb_priv_cpu->packets++;
	fb_priv_cpu->bytes += skb->len;
	u64_stats_update_end(&fb_priv_cpu->syncp);

	if (drop) {
		kfree_skb(skb);
		return PPE_DROPPED;
	}
	return PPE_SUCCESS;
}
Developer: digideskio, Project: lana, Lines of code: 28, Source file: fb_counter.c


Example 9: mpage_end_io

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (bio_data_dir(bio) == READ) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else { /* bio_data_dir(bio) == WRITE */
			if (!uptodate) {
				SetPageError(page);
				if (page->mapping)
					set_bit(AS_EIO, &page->mapping->flags);
			}
			end_page_writeback(page);
		}
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
Developer: kgdhost, Project: kernel-lge-e400-stock, Lines of code: 41, Source file: mpage.c


Example 10: mpage_readpages

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 * 	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do it to allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index,
					gfp)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block, gfp);
		}
		page_cache_release(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
Developer: Chong-Li, Project: cse522, Lines of code: 77, Source file: mpage.c


Example 11: __generic_copy_from_user

unsigned long
__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	prefetchw(to);
	if (access_ok(VERIFY_READ, from, n))
		__copy_user_zeroing(to,from,n);
	else
		memset(to, 0, n);
	return n;
}
Developer: Lord-Devices, Project: cm_kernel_samsung_hlte, Lines of code: 10, Source file: usercopy.c


Example 12: read_fifo

/** Read to request from FIFO (max read == bytes in fifo)
 *  Return:  0 = still running, 1 = completed, negative = errno
 */
static int read_fifo(struct elfin_ep *ep, struct elfin_request *req)
{
	u32 csr;
	u8 *buf;
	unsigned bufferspace, count, is_short;
	void* fifo = ep->fifo;

	/* make sure there's a packet in the FIFO. */
	csr = usb_read(ep->csr1, ep_index(ep));
	if (!(csr & S3C2410_UDC_OCSR1_PKTRDY)) {
		DPRINTK("%s: Packet NOT ready!\n", __FUNCTION__);
		return -EINVAL;
	}

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);
	bufferspace = req->req.length - req->req.actual;

	/* read all bytes from this packet */
	count = (( (usb_read(S3C2410_UDC_OUT_FIFO_CNT2_REG, ep_index(ep)) & 0xff ) << 8) | (usb_read(S3C2410_UDC_OUT_FIFO_CNT1_REG, ep_index(ep)) & 0xff));
	req->req.actual += min(count, bufferspace);

	is_short = (count < ep->ep.maxpacket);
	DPRINTK("read %s %02x, %d bytes%s req %p %d/%d\n",
	      ep->ep.name, csr, count,
	      is_short ? "/S" : "", req, req->req.actual, req->req.length);

	while (likely(count-- != 0)) {
		u8 byte = (u8) __raw_readl(fifo);

		if (unlikely(bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				printk("%s overflow %d\n", ep->ep.name, count);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			bufferspace--;
		}
	}

	usb_clear(S3C2410_UDC_OCSR1_PKTRDY, ep->csr1, ep_index(ep));

	/* completion */
	if (is_short || req->req.actual == req->req.length) {
		done(ep, req, 0);
		return 1;
	}

	/* finished that packet.  the next one may be waiting... */
	return 0;
}
Developer: kzlin129, Project: tt-gpl, Lines of code: 58, Source file: s3c24xx_udc.c


Example 13: gru_get_cb_exception_detail

int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;

	cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
	prefetchw(cbe);         /* Harmless on hardware, required for emulator */
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	return 0;
}
Developer: 458941968, Project: mini2440-kernel-2.6.29, Lines of code: 14, Source file: grukservices.c


Example 14: __atomic_add

static inline void __atomic_add(int i, __atomic_t *v)
{
    unsigned long tmp;
    int result;

    prefetchw(&v->counter);
    __asm__ __volatile__("@ __atomic_add\n"
                         "1:     ldrex   %0, [%3]\n"
                         "       add     %0, %0, %4\n"
                         "       strex   %1, %0, [%3]\n"
                         "       teq     %1, #0\n"
                         "       bne     1b"
                         : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
                         : "r" (&v->counter), "Ir" (i)
                         : "cc");
}
Developer: 21cnbao, Project: training, Lines of code: 16, Source file: ldrex-strex-test.c


Example 15: __atomic_add_hang

static inline void __atomic_add_hang(int i, __atomic_t *v)
{
    unsigned long tmp;
    int result;

    prefetchw(&v->counter);
    __asm__ __volatile__("@ __atomic_add\n"
                         "1:     ldrex   %0, [%3]\n"
                         "       ldrex   %0, [%3]\n"
                         "       add     %0, %0, %4\n"
                         "       strex   %1, %0, [%3]\n"
                         "       strex   %1, %0, [%3]\n"
                         "       teq     %1, #0\n"
                         "       bne     1b" /* the 2nd strex should fail, the whole program will hang */
                         : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
                         : "r" (&v->counter), "Ir" (i)
                         : "cc");
}
Developer: 21cnbao, Project: training, Lines of code: 18, Source file: ldrex-strex-test.c


Example 16: mpage_end_io_write

static void mpage_end_io_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate){
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
		}
		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
Developer: 3null, Project: fastsocket, Lines of code: 19, Source file: mpage.c


Example 17: setdma_rx

static int setdma_rx(struct s3c_ep *ep, struct s3c_request *req)
{
	u32 *buf, ctrl;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);
	struct s3c_udc *udc = ep->dev;
	struct device *dev = &udc->dev->dev;

	aligned_map_buf(req, ep_is_in(ep));
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	length = req->req.length - req->req.actual;

	req->req.dma = dma_map_single(dev, buf,
				length, DMA_FROM_DEVICE);
	req->mapped = 1;

	if (length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;

	ctrl =  __raw_readl(udc->regs + S3C_UDC_OTG_DOEPCTL(ep_num));

	__raw_writel(virt_to_phys(buf),
		udc->regs + S3C_UDC_OTG_DOEPDMA(ep_num));
	__raw_writel((pktcnt<<19) | (length<<0),
		udc->regs + S3C_UDC_OTG_DOEPTSIZ(ep_num));
	__raw_writel(DEPCTL_EPENA | DEPCTL_CNAK | ctrl,
		udc->regs + S3C_UDC_OTG_DOEPCTL(ep_num));

	DEBUG_OUT_EP("%s: EP%d RX DMA start : DOEPDMA = 0x%x,"
			"DOEPTSIZ = 0x%x, DOEPCTL = 0x%x\n"
			"\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
			__func__, ep_num,
			__raw_readl(udc->regs + S3C_UDC_OTG_DOEPDMA(ep_num)),
			__raw_readl(udc->regs + S3C_UDC_OTG_DOEPTSIZ(ep_num)),
			__raw_readl(udc->regs + S3C_UDC_OTG_DOEPCTL(ep_num)),
			buf, pktcnt, length);
	return 0;
}
Developer: ShedrockN4, Project: wiliteneo, Lines of code: 42, Source file: s3c_udc_otg_xfer_dma.c


Example 18: free_one_pgd

static inline void free_one_pgd(pgd_t * dir)
{
	int j;
	pmd_t * pmd;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, 0);
	pgd_clear(dir);
	for (j = 0; j < PTRS_PER_PMD ; j++) {
		prefetchw(pmd+j+(PREFETCH_STRIDE/16));
		free_one_pmd(pmd+j);
	}
	pmd_free(pmd);
}
Developer: fgeraci, Project: cs518-sched, Lines of code: 20, Source file: memory.c


Example 19: mpage_end_io

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct compressed_bio *cb = bio->bi_private;
	struct page *page;
	int page_idx, ret = 0;
	
	printk(KERN_INFO "\n==> IN MPAGE_END_IO");
	do {
		struct page *page = bvec->bv_page;		
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
			
	} while (bvec >= bio->bi_io_vec);

	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;
	
	/* Last bio...start decompression */
	ret = decompress_stride(cb);
	
	for (page_idx = 0; page_idx < cb->nr_pages; page_idx++) {
		page = cb->compressed_pages[page_idx];
		page->mapping = NULL;
		page_cache_release(page);
	}
	
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
Developer: AbhijeetPawar, Project: tux3-kernel, Lines of code: 53, Source file: mpage_compress.c


Example 20: writeseg_end_io

static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
Developer: rrowicki, Project: Chrono_Kernel-1, Lines of code: 23, Source file: dev_bdev.c



Note: The prefetchw examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to each project's license before redistributing or reusing the code; do not republish this article without permission.

