
C++ READ_ONCE Function Code Examples


This article collects typical usage examples of the READ_ONCE function in C++ code. If you have been wondering how exactly READ_ONCE is used, what it is for, or what real call sites look like, the hand-picked examples below may help.



The following shows 20 code examples of the READ_ONCE function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples. A minimal sketch of the basic READ_ONCE pattern is shown right after this introduction.
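Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them follow: one thread polls a shared variable through READ_ONCE() so the compiler must perform a fresh, untorn load on every iteration, while another thread publishes the value with WRITE_ONCE(). This sketch is for illustration only and is not taken from any of the projects below; the simplified READ_ONCE/WRITE_ONCE macros merely mimic the kernel's volatile-access trick, and stop_flag, worker, and the pthread scaffolding are hypothetical names chosen for the example.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros: a volatile access forces the
 * compiler to emit exactly one load/store and keeps it from hoisting the
 * access out of a loop. (The real kernel macros are more elaborate.) */
#define READ_ONCE(x)      (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)  (*(volatile typeof(x) *)&(x) = (v))

static int stop_flag;	/* shared between the two threads */

static void *worker(void *arg)
{
	/* Without READ_ONCE the compiler could legally load stop_flag once
	 * and spin forever on the cached value. */
	while (!READ_ONCE(stop_flag))
		;	/* poll until the main thread publishes the flag */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	WRITE_ONCE(stop_flag, 1);	/* publish with a single store */
	pthread_join(tid, NULL);
	printf("worker observed stop_flag\n");
	return 0;
}

Keep in mind that READ_ONCE()/WRITE_ONCE() only constrain the compiler; they do not order accesses to different variables on weakly ordered CPUs. That is why several of the examples below pair READ_ONCE() with explicit barriers such as virt_rmb(), virt_mb(), or smp_load_acquire().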

Example 1: mce_chrdev_poll

static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_chrdev_wait, wait);
	if (READ_ONCE(mcelog.next))
		return POLLIN | POLLRDNORM;
	if (!mce_apei_read_done && apei_check_mce())
		return POLLIN | POLLRDNORM;
	return 0;
}
Developer: 01org, Project: thunderbolt-software-kernel-tree, Lines: 9, Source: dev-mcelog.c


Example 2: array_map_lookup_elem

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
Developer: asmalldev, Project: linux, Lines: 9, Source: arraymap.c


Example 3: hrtimer_active

/*
 * A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
	struct hrtimer_cpu_base *cpu_base;
	unsigned int seq;

	do {
		cpu_base = READ_ONCE(timer->base->cpu_base);
		seq = raw_read_seqcount_begin(&cpu_base->seq);

		if (timer->state != HRTIMER_STATE_INACTIVE ||
		    cpu_base->running == timer)
			return true;

	} while (read_seqcount_retry(&cpu_base->seq, seq) ||
		 cpu_base != READ_ONCE(timer->base->cpu_base));

	return false;
}
Developer: DenisLug, Project: mptcp, Lines: 25, Source: hrtimer.c


Example 4: hv_pkt_iter_close

/*
 * Update host ring buffer after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * Here is the reason for having this barrier:
	 * If the reading of the pend_sz (in this function)
	 * were to be reordered and read before we commit the new read
	 * index (in the calling function)  we could
	 * have a problem. If the host were to set the pending_sz after we
	 * have sampled pending_sz and go to sleep before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * If there was space before we began iteration,
	 * then host was not blocked.
	 */

	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/* If pending write will not fit, don't give false hope. */
	if (curr_write_sz <= pending_sz)
		return;

	vmbus_setevent(channel);
}
Developer: lumag, Project: linux, Lines: 59, Source: ring_buffer.c


Example 5: srcu_drive_gp

/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPT operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *sp;

	sp = container_of(wp, struct srcu_struct, srcu_work);
	if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(sp->srcu_gp_running, true);
	local_irq_disable();
	lh = sp->srcu_cb_head;
	sp->srcu_cb_head = NULL;
	sp->srcu_cb_tail = &sp->srcu_cb_head;
	local_irq_enable();
	idx = sp->srcu_idx;
	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(sp->srcu_gp_running, false);
	if (READ_ONCE(sp->srcu_cb_head))
		schedule_work(&sp->srcu_work);
}
Developer: markus-oberhumer, Project: linux, Lines: 48, Source: srcutiny.c


Example 6: hv_pkt_iter_avail

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
Developer: austriancoder, Project: linux, Lines: 17, Source: ring_buffer.c


Example 7: rxrpc_fill_out_ack

/*
 * Fill out an ACK packet.
 */
static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
				 struct rxrpc_ack_buffer *pkt,
				 rxrpc_seq_t *_hard_ack,
				 rxrpc_seq_t *_top,
				 u8 reason)
{
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top, seq;
	int ix;
	u32 mtu, jmax;
	u8 *ackp = pkt->acks;

	/* Barrier against rxrpc_input_data(). */
	serial = call->ackr_serial;
	hard_ack = READ_ONCE(call->rx_hard_ack);
	top = smp_load_acquire(&call->rx_top);
	*_hard_ack = hard_ack;
	*_top = top;

	pkt->ack.bufferSpace	= htons(8);
	pkt->ack.maxSkew	= htons(call->ackr_skew);
	pkt->ack.firstPacket	= htonl(hard_ack + 1);
	pkt->ack.previousPacket	= htonl(call->ackr_prev_seq);
	pkt->ack.serial		= htonl(serial);
	pkt->ack.reason		= reason;
	pkt->ack.nAcks		= top - hard_ack;

	if (reason == RXRPC_ACK_PING)
		pkt->whdr.flags |= RXRPC_REQUEST_ACK;

	if (after(top, hard_ack)) {
		seq = hard_ack + 1;
		do {
			ix = seq & RXRPC_RXTX_BUFF_MASK;
			if (call->rxtx_buffer[ix])
				*ackp++ = RXRPC_ACK_TYPE_ACK;
			else
				*ackp++ = RXRPC_ACK_TYPE_NACK;
			seq++;
		} while (before_eq(seq, top));
	}

	mtu = call->conn->params.peer->if_mtu;
	mtu -= call->conn->params.peer->hdrsize;
	jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
	pkt->ackinfo.rxMTU	= htonl(rxrpc_rx_mtu);
	pkt->ackinfo.maxMTU	= htonl(mtu);
	pkt->ackinfo.rwind	= htonl(call->rx_winsize);
	pkt->ackinfo.jumbo_max	= htonl(jmax);

	*ackp++ = 0;
	*ackp++ = 0;
	*ackp++ = 0;
	return top - hard_ack + 3;
}
Developer: AshishNamdev, Project: linux, Lines: 58, Source: output.c


Example 8: WRITE_ONCE

void *thread_update(void *arg)
{
	WRITE_ONCE(y, 1);
#ifndef FORCE_FAILURE_2
	synchronize_srcu(&ss);
#endif
	might_sleep();
	__unbuffered_tpr_x = READ_ONCE(x);

	return NULL;
}
Developer: AlexShiLucky, Project: linux, Lines: 11, Source: test.c


Example 9: kasan_pud_populate

static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}
Developer: 150balbes, Project: Amlogic_s905-kernel, Lines: 11, Source: kasan_init.c


Example 10: run_on

void *test_xchg_lock(void *arg)
{
	int me = (long)arg;

	run_on(me);
	atomic_inc(&nthreadsrunning);
	while (READ_ONCE(goflag) == GOFLAG_INIT)
		poll(NULL, 0, 1);
	while (READ_ONCE(goflag) == GOFLAG_RUN) {
		xchg_lock(&testlock);
		if (owner != -1)
			lockerr++;
		lockacqs++;
		owner = me;
		poll(NULL, 0, 1);
		owner = -1;
		xchg_unlock(&testlock);
	}
	return NULL;
}
Developer: akiyks, Project: perfbook, Lines: 20, Source: xchglock.c


Example 11: kasan_copy_shadow

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}
Developer: 150balbes, Project: Amlogic_s905-kernel, Lines: 14, Source: kasan_init.c


Example 12: process_shares_mm

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
static bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}
Developer: 513855417, Project: linux, Lines: 17, Source: oom_kill.c


Example 13: hv_signal_on_write

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
			       bool kick_q)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);

	return;
}
Developer: forgivemyheart, Project: linux, Lines: 20, Source: ring_buffer.c


Example 14: prepare_exit_to_usermode

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	addr_limit_user_check();

	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

	/* Reload ti->flags; we may have rescheduled above. */
	cached_flags = READ_ONCE(ti->flags);

	fpregs_assert_state_consistent();
	if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
		switch_fpu_return();

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();

	mds_user_clear_cpu_buffers();
}
Developer: avagin, Project: linux, Lines: 42, Source: common.c


Example 15: quarantine_reduce

void quarantine_reduce(void)
{
	size_t new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	struct qlist_head to_free = QLIST_INIT;
	size_t size_to_free = 0;
	struct qlist_node *last;

	if (likely(READ_ONCE(global_quarantine.bytes) <=
		   READ_ONCE(quarantine_size)))
		return;

	spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (new_quarantine_size < percpu_quarantines) ?
		0 : new_quarantine_size - percpu_quarantines;
	WRITE_ONCE(quarantine_size, new_quarantine_size);

	last = global_quarantine.head;
	while (last) {
		struct kmem_cache *cache = qlink_to_cache(last);

		size_to_free += cache->size;
		if (!last->next || size_to_free >
		    global_quarantine.bytes - QUARANTINE_LOW_SIZE)
			break;
		last = last->next;
	}
	qlist_move(&global_quarantine, last, &to_free, size_to_free);

	spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
}
Developer: acton393, Project: linux, Lines: 41, Source: quarantine.c


Example 16: __sanitizer_cov_trace_pc

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of a syscall inputs,
	 * so we ignore code executed in interrupts.
	 * The checks for whether we are in an interrupt are open-coded, because
	 * 1. We can't use in_interrupt() here, since it also returns true
	 *    when we are inside local_bh_disable() section.
	 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
	 *    since that leads to slower generated code (three separate tests,
	 *    one for each of the flags).
	 */
	if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
							| NMI_MASK)))
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts, there are paired barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = _RET_IP_;
			WRITE_ONCE(area[0], pos);
		}
	}
}
Developer: plaes, Project: linux, Lines: 45, Source: kcov.c


Example 17: kasan_pud_offset

static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
				      bool early)
{
	if (pgd_none(READ_ONCE(*pgdp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
}
Developer: 150balbes, Project: Amlogic_s905-kernel, Lines: 12, Source: kasan_init.c


Example 18: change_page_range

static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}
Developer: 150balbes, Project: Amlogic_s905-kernel, Lines: 12, Source: pageattr.c


Example 19: send_sigio_to_task

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, int group)
{
	/*
	 * F_SETSIG can change ->signum lockless in parallel, make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = READ_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not 
			   SI_KERNEL, since kernel signals always get 
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			clear_siginfo(&si);
			si.si_signo = signum;
			si.si_errno = 0;
		        si.si_code  = reason;
			/*
			 * Posix defines POLL_IN and friends to be signal
			 * specific si_codes for SIG_POLL.  Linux extended
			 * these si_codes to other signals in a way that is
			 * ambiguous if other signals also have signal
			 * specific si_codes.  In that case use SI_SIGIO instead
			 * to remove the ambiguity.
			 */
			if ((signum != SIGPOLL) && sig_specific_sicodes(signum))
				si.si_code = SI_SIGIO;

			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason < POLL_IN) || ((reason - POLL_IN) >= NSIGPOLL));
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = mangle_poll(band_table[reason - POLL_IN]);
			si.si_fd    = fd;
			if (!do_send_sig_info(signum, &si, p, group))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
	}
}
Developer: ReneNyffenegger, Project: linux, Lines: 53, Source: fcntl.c


Example 20: READ_ONCE

struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!adapter->xsk_umems || !adapter->xsk_umems[qid] ||
	    qid >= adapter->num_xsk_umems || !xdp_on)
		return NULL;

	return adapter->xsk_umems[qid];
}
Developer: AlexShiLucky, Project: linux, Lines: 12, Source: ixgbe_xsk.c



Note: The READ_ONCE function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers, and copyright remains with the original authors. Please refer to the License of the corresponding project before distributing or using the code; do not reproduce without permission.

