This page collects typical usage examples of the raw_smp_processor_id function (C code, taken mostly from the Linux kernel and related open-source projects). If you are wondering what raw_smp_processor_id does, how to call it, or what real-world uses look like, the hand-picked examples below should help.
Twenty code examples of raw_smp_processor_id are shown below, ordered by popularity by default.
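Before the examples, here is a minimal, hypothetical sketch (kernel-style C, not taken from any of the projects below) of the difference between raw_smp_processor_id() and its checked counterpart: the raw variant skips the preemption-safety check, so it is used where preemption is already disabled or where an occasionally stale CPU number is acceptable, such as debug output.

#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/printk.h>

static void example_report_cpu(void)
{
        /* Safe even with preemption enabled, but the value may already be stale. */
        pr_info("running somewhere around CPU %d\n", raw_smp_processor_id());

        /* The checked variant expects preemption to be disabled. */
        preempt_disable();
        pr_info("definitely running on CPU %d\n", smp_processor_id());
        preempt_enable();
}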
Example 1: set_mtrr
/**
* set_mtrr - update mtrrs on all processors
* @reg: mtrr in question
* @base: mtrr base
* @size: mtrr size
* @type: mtrr type
*
* This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
*
* 1. Queue work to do the following on all processors:
* 2. Disable Interrupts
* 3. Wait for all procs to do so
* 4. Enter no-fill cache mode
* 5. Flush caches
* 6. Clear PGE bit
* 7. Flush all TLBs
* 8. Disable all range registers
* 9. Update the MTRRs
* 10. Enable all range registers
* 11. Flush all TLBs and caches again
* 12. Enter normal cache mode and reenable caching
* 13. Set PGE
* 14. Wait for buddies to catch up
* 15. Enable interrupts.
*
* What does that mean for us? Well, first we set data.count to the number
* of CPUs. As each CPU announces that it started the rendezvous handler by
* decrementing the count, we reset data.count and set the data.gate flag,
* allowing all the CPUs to proceed with the work. As each CPU disables
* interrupts, it'll decrement data.count once. We wait until it hits 0 and
* proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
* are waiting for that flag to be cleared. Once it's cleared, each
* CPU goes through the transition of updating MTRRs.
* The CPU vendors may each do it differently,
* so we call mtrr_if->set() callback and let them take care of it.
* When they're done, they again decrement data->count and wait for data.gate
* to be set.
* When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
* Everyone then enables interrupts and we all continue on.
*
* Note that the mechanism is the same for UP systems, too; all the SMP stuff
* becomes nops.
*/
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
struct set_mtrr_data data;
unsigned long flags;
int cpu;
#ifdef CONFIG_SMP
/*
* If this cpu is not yet active, we are in the cpu online path. There
* can be no stop_machine() in parallel, as stop machine ensures this
* by using get_online_cpus(). We can skip taking the stop_cpus_mutex,
* as we don't need it and also we can't afford to block while waiting
* for the mutex.
*
* If this cpu is active, we need to prevent stop_machine() happening
* in parallel by taking the stop cpus mutex.
*
* Also, this is called in the context of cpu online path or in the
* context where cpu hotplug is prevented. So checking the active status
* of the raw_smp_processor_id() is safe.
*/
if (cpu_active(raw_smp_processor_id()))
mutex_lock(&stop_cpus_mutex);
#endif
preempt_disable();
data.smp_reg = reg;
data.smp_base = base;
data.smp_size = size;
data.smp_type = type;
atomic_set(&data.count, num_booting_cpus() - 1);
/* Make sure data.count is visible before unleashing other CPUs */
smp_wmb();
atomic_set(&data.gate, 0);
/* Start the ball rolling on other CPUs */
for_each_online_cpu(cpu) {
struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);
if (cpu == smp_processor_id())
continue;
stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
}
while (atomic_read(&data.count))
cpu_relax();
/* Ok, reset count and toggle gate */
atomic_set(&data.count, num_booting_cpus() - 1);
smp_wmb();
atomic_set(&data.gate, 1);
//......... remaining code omitted .........
Contributor: 1111saeid, Project: jb_kernel_3.0.16_htc_golfu, Lines: 101, Source file: main.c
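To make the count/gate rendezvous described in the doc-comment above more concrete, here is a small userspace analogue built on C11 atomics and pthreads. Every name in it is invented, it models only a single gate phase rather than the full MTRR sequence, and plain busy-waiting stands in for the kernel's cpu_relax() loop.

/* Illustrative userspace analogue of the data.count / data.gate rendezvous. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int count;
static atomic_int gate;

static void *worker(void *arg)
{
        long id = (long)arg;

        /* Announce arrival, then wait for the coordinator to open the gate. */
        atomic_fetch_sub(&count, 1);
        while (!atomic_load(&gate))
                ;                       /* cpu_relax() in the kernel version */

        printf("worker %ld: doing the protected update\n", id);

        /* Announce completion the same way. */
        atomic_fetch_sub(&count, 1);
        return NULL;
}

int main(void)
{
        pthread_t tid[NCPUS - 1];
        long i;

        atomic_store(&count, NCPUS - 1);
        atomic_store(&gate, 0);

        for (i = 0; i < NCPUS - 1; i++)
                pthread_create(&tid[i], NULL, worker, (void *)i);

        while (atomic_load(&count))     /* wait until every worker has arrived */
                ;
        atomic_store(&count, NCPUS - 1);        /* reset for the completion phase */
        atomic_store(&gate, 1);                 /* open the gate */

        while (atomic_load(&count))     /* wait until every worker is done */
                ;

        for (i = 0; i < NCPUS - 1; i++)
                pthread_join(tid[i], NULL);
        return 0;
}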
Example 2: smp_callin
/*
* Report back to the Boot Processor.
* Running on AP.
*/
static void __cpuinit smp_callin(void)
{
int cpuid, phys_id;
unsigned long timeout;
/*
* If woken up by an INIT in an 82489DX configuration
* we may get here before an INIT-deassert IPI reaches
* our local APIC. We have to wait for the IPI or we'll
* lock up on an APIC access.
*/
if (apic->wait_for_init_deassert)
apic->wait_for_init_deassert(&init_deasserted);
/*
* (This works even if the APIC is not enabled.)
*/
phys_id = read_apic_id();
cpuid = smp_processor_id();
if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
phys_id, cpuid);
}
pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
/*
* STARTUP IPIs are fragile beasts as they might sometimes
* trigger some glue motherboard logic. Complete APIC bus
* silence for 1 second, this overestimates the time the
* boot CPU is spending to send the up to 2 STARTUP IPIs
* by a factor of two. This should be enough.
*/
/*
* Waiting 2s total for startup (udelay is not yet working)
*/
timeout = jiffies + 2*HZ;
while (time_before(jiffies, timeout)) {
/*
* Has the boot CPU finished its STARTUP sequence?
*/
if (cpumask_test_cpu(cpuid, cpu_callout_mask))
break;
cpu_relax();
}
if (!time_before(jiffies, timeout)) {
panic("%s: CPU%d started up but did not get a callout!\n",
__func__, cpuid);
}
/*
* the boot CPU has finished the init stage and is spinning
* on callin_map until we finish. We are free to set up this
* CPU, first the APIC. (this is probably redundant on most
* boards)
*/
pr_debug("CALLIN, before setup_local_APIC().\n");
if (apic->smp_callin_clear_local_apic)
apic->smp_callin_clear_local_apic();
setup_local_APIC();
end_local_APIC_setup();
/*
* Need to setup vector mappings before we enable interrupts.
*/
setup_vector_irq(smp_processor_id());
/*
* Save our processor parameters. Note: this information
* is needed for clock calibration.
*/
smp_store_cpu_info(cpuid);
/*
* Get our bogomips.
* Update loops_per_jiffy in cpu_data. Previous call to
* smp_store_cpu_info() stored a value that is close but not as
* accurate as the value just calculated.
*/
calibrate_delay();
cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
pr_debug("Stack at about %p\n", &cpuid);
/*
* This must be done before setting cpu_online_mask
* or calling notify_cpu_starting.
*/
set_cpu_sibling_map(raw_smp_processor_id());
wmb();
notify_cpu_starting(cpuid);
/*
* Allow the master to continue.
//......... remaining code omitted .........
Contributor: AsherBond, Project: ceph-client, Lines: 101, Source file: smpboot.c
Example 3: round_jiffies_up
/**
* round_jiffies_up - function to round jiffies up to a full second
* @j: the time in (absolute) jiffies that should be rounded
*
* This is the same as round_jiffies() except that it will never
* round down. This is useful for timeouts for which the exact time
* of firing does not matter too much, as long as they don't fire too
* early.
*/
unsigned long round_jiffies_up(unsigned long j)
{
return round_jiffies_common(j, raw_smp_processor_id(), true);
}
Contributor: longqzh, Project: chronnOS, Lines: 13, Source file: timer.c
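A typical use, sketched below with invented names, is re-arming a non-critical polling timer so that its expiry lands on a full second and can be coalesced with other wakeups. The timer is assumed to have been initialized elsewhere with setup_timer()/timer_setup().

#include <linux/timer.h>
#include <linux/jiffies.h>

#define MY_POLL_SECS 5                  /* hypothetical polling interval */

static struct timer_list my_timer;      /* assumed to be set up elsewhere */

static void my_rearm_poll_timer(void)
{
        /*
         * The timeout may fire slightly later than requested, but never
         * earlier, and it lands on a full second so wakeups can batch.
         */
        mod_timer(&my_timer, round_jiffies_up(jiffies + MY_POLL_SECS * HZ));
}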
Example 4: cpu_vsyscall_init
static void cpu_vsyscall_init(void *arg)
{
/* preemption should be already off */
vsyscall_set_cpu(raw_smp_processor_id());
}
Contributor: tinocyngn, Project: sofia-kernel, Lines: 5, Source file: vsyscall_64.c
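A per-CPU callback like this is normally executed on every CPU through a cross-call helper, which runs it with preemption disabled, so raw_smp_processor_id() is stable inside the callback. Below is a hedged sketch of how it might be driven; the wrapper function name is invented.

#include <linux/init.h>
#include <linux/smp.h>

static void __init my_vsyscall_init_all_cpus(void)
{
        /* Run cpu_vsyscall_init() on every online CPU and wait for completion. */
        on_each_cpu(cpu_vsyscall_init, NULL, 1);
}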
Example 5: debug_write_lock_after
static inline void debug_write_lock_after(rwlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
Contributor: AudioGod, Project: MediaTek-HelioX10-Kernel, Lines: 5, Source file: spinlock_debug.c
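The owner and owner_cpu fields recorded here exist so that the debug unlock path can verify the write lock is released by the same task and CPU that took it. The sketch below is a hypothetical version of such a check, not code copied from spinlock_debug.c; the fields are only present when spinlock debugging is configured.

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/bug.h>

static inline void debug_write_unlock_check(rwlock_t *lock)
{
        /* Hypothetical consistency checks before ownership is cleared. */
        WARN_ON(lock->owner != current);
        WARN_ON(lock->owner_cpu != raw_smp_processor_id());

        lock->owner = NULL;
        lock->owner_cpu = -1;
}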
Example 6: nps_clksrc_read
static u64 nps_clksrc_read(struct clocksource *clksrc)
{
int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET;
return (u64)ioread32be(nps_msu_reg_low_addr[cluster]);
}
Contributor: AlexShiLucky, Project: linux, Lines: 6, Source file: timer-nps.c
Example 7: wait_slt_scu_state_sync
static int wait_slt_scu_state_sync(unsigned long state, int wait)
{
int ret = 0, i;
unsigned long retry = 0;
static volatile int get_exit = 0;
static volatile int cpu_num = 0;
unsigned long all_cpu_mask = 0;
int cpu = raw_smp_processor_id();
//printk("wait_slt_scu_state_sync, cpu%d wait state=%d\n", cpu, state);
if(cpu_num & (0x1 << cpu))
{
//printk(KERN_ERR, "cpu%d already waitting\n", cpu);
return 0;
}
while(cpu_num && get_exit)
{
//printk(KERN_INFO, "wait other cpu to finish waiting loop\n");
mdelay(10);
}
spin_lock(&scu_wait_sync_lock);
cpu_num |= 0x1 << cpu;
get_exit = 0;
__cpuc_flush_dcache_area(&get_exit, sizeof(int));
__cpuc_flush_dcache_area(&cpu_num, sizeof(int));
spin_unlock(&scu_wait_sync_lock);
for(i = 0; i < NR_CPUS; i++)
{
all_cpu_mask |= (0x1 << i);
}
/* wait all cpu in sync loop */
while(cpu_num != all_cpu_mask)
{
retry++;
if(retry > 0x10000)
{
//printk(KERN_INFO, "scu wait sync state (%d) timeout\n", state);
goto wait_sync_out;
}
if(get_exit)
break;
//printk(KERN_INFO, "\n\nretry=0x%08x wait state = %d\n", retry, state);
//slt_scu_print_state();
mdelay(1);
}
spin_lock(&scu_wait_sync_lock);
get_exit |= 0x1 << cpu;
__cpuc_flush_dcache_area(&get_exit, sizeof(int));
spin_unlock(&scu_wait_sync_lock);
ret = is_slt_scu_state_sync(state);
/* make sure all cpu exit wait sync loop
* check cpu_num is for the case retry timeout
*/
while(1)
{
//printk(KERN_INFO, "wait exit retry\n");
if(!get_exit ||
get_exit == all_cpu_mask ||
cpu_num != all_cpu_mask)
{
break;
}
mdelay(1);
}
wait_sync_out:
spin_lock(&scu_wait_sync_lock);
cpu_num &= ~(0x01 << cpu);
__cpuc_flush_dcache_area(&cpu_num, sizeof(int));
spin_unlock(&scu_wait_sync_lock);
//printk("cpu%d exit fun, ret=%s\n", cpu, ret ? "pass" : "fail");
return ret;
}
Contributor: Lesozav25, Project: mtk_6572, Lines: 88, Source file: slt_scu_func.c
Example 8: slt_scu_test_func
static int slt_scu_test_func(void *data)
{
int ret = 0, loop, pass;
int cpu = raw_smp_processor_id();
unsigned long irq_flag;
int cpu_cnt;
unsigned long buf;
unsigned long *mem_buf = (unsigned long *)data;
unsigned long retry;
//spin_lock(&scu_thread_lock[cpu]);
//local_irq_save(irq_flag);
#if 0
if(cpu == 0)
{
mtk_wdt_enable(WK_WDT_DIS);
}
#endif
if(!mem_buf)
{
printk(KERN_ERR "failed to allocate memory for CPU SCU test\n");
g_iCPU_PassFail = -1;
goto scu_thread_out;
}
printk("\n>>slt_scu_test_func -- cpu id = %d, mem_buf = 0x%08x <<\n", cpu, mem_buf);
msleep(50);
if(!wait_slt_scu_state_sync(SCU_STATE_START, 1))
{
printk("cpu%d wait SCU_STATE_START timeout\n", cpu);
goto scu_thread_out;
}
g_iCPU_PassFail = 0;
g_iSCU_PassFail[cpu] = 1;
for (loop = 0; loop < g_iScuLoopCount; loop++) {
slt_scu_write_state(cpu, SCU_STATE_EXECUTE);
spin_lock_irqsave(&scu_thread_irq_lock[cpu], irq_flag);
if(!wait_slt_scu_state_sync(SCU_STATE_EXECUTE, 1))
{
spin_unlock_irqrestore(&scu_thread_irq_lock[cpu], irq_flag);
printk("cpu%d wait SCU_STATE_EXECUTE timeout\n", cpu);
goto scu_thread_out;
}
g_iSCU_PassFail[cpu] = fp6_scu_start(mem_buf);
spin_unlock_irqrestore(&scu_thread_irq_lock[cpu], irq_flag);
__cpuc_flush_dcache_area(g_iSCU_PassFail, 2*sizeof(int));
printk("\n>>cpu%d scu : fp6_scu_start %s ret=0x%x<<\n", cpu, g_iSCU_PassFail[cpu] != 0xA? "fail" : "pass", g_iSCU_PassFail[cpu]);
slt_scu_write_state(cpu, SCU_STATE_EXEEND);
if(!wait_slt_scu_state_sync(SCU_STATE_EXEEND, 1))
{
printk("cpu%d wait SCU_STATE_EXEEND timeout\n", cpu);
goto scu_thread_out;
}
if(cpu == 0)
{
pass = 1;
for(cpu_cnt = 0; cpu_cnt < NR_CPUS; cpu_cnt++)
{
if(g_iSCU_PassFail[cpu_cnt] != 0xA)
{
pass = 0;
}
}
if(pass)
{
g_iCPU_PassFail += 1;
}
}
}
scu_thread_out:
slt_scu_write_state(cpu, SCU_STATE_IDEL);
if(cpu == 0)
{
if (g_iCPU_PassFail == g_iScuLoopCount) {
printk("\n>> CPU scu test pass <<\n\n");
}else {
printk("\n>> CPU scu test fail (loop count = %d)<<\n\n", g_iCPU_PassFail);
}
//mtk_wdt_enable(WK_WDT_EN);
}
wait_slt_scu_state_sync(SCU_STATE_IDEL, 1);
//......... remaining code omitted .........
Contributor: Lesozav25, Project: mtk_6572, Lines: 101, Source file: slt_scu_func.c
Example 9: priv_ev_loop_run
/*---------------------------------------------------------------------------*/
int priv_ev_loop_run(void *loop_hndl)
{
struct xio_ev_loop *loop = loop_hndl;
struct xio_ev_data *tev;
struct llist_node *node;
int cpu;
clear_bit(XIO_EV_LOOP_STOP, &loop->states);
switch (loop->flags) {
case XIO_LOOP_GIVEN_THREAD:
if (loop->ctx->worker != (uint64_t) get_current()) {
ERROR_LOG("worker kthread(%p) is not current(%p).\n",
(void *) loop->ctx->worker, get_current());
goto cleanup0;
}
/* no need to disable preemption */
cpu = raw_smp_processor_id();
if (loop->ctx->cpuid != cpu) {
TRACE_LOG("worker on core(%d) scheduled to(%d).\n",
cpu, loop->ctx->cpuid);
set_cpus_allowed_ptr(get_current(),
cpumask_of(loop->ctx->cpuid));
}
break;
case XIO_LOOP_TASKLET:
/* were events added to list while in STOP state ? */
if (!llist_empty(&loop->ev_llist))
priv_kick_tasklet(loop_hndl);
return 0;
case XIO_LOOP_WORKQUEUE:
/* were events added to list while in STOP state ? */
while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
node = llist_reverse_order(node);
while (node) {
tev = llist_entry(node, struct xio_ev_data,
ev_llist);
node = llist_next(node);
tev->work.func = priv_ev_loop_run_work;
queue_work_on(loop->ctx->cpuid, loop->workqueue,
&tev->work);
}
}
return 0;
default:
/* undo */
set_bit(XIO_EV_LOOP_STOP, &loop->states);
return -1;
}
retry_wait:
wait_event_interruptible(loop->wait,
test_bit(XIO_EV_LOOP_WAKE, &loop->states));
retry_dont_wait:
while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
node = llist_reverse_order(node);
while (node) {
tev = llist_entry(node, struct xio_ev_data, ev_llist);
node = llist_next(node);
tev->handler(tev->data);
}
}
/* "race point" */
clear_bit(XIO_EV_LOOP_WAKE, &loop->states);
if (unlikely(test_bit(XIO_EV_LOOP_STOP, &loop->states)))
return 0;
/* if a new entry was added while we were at the "race point"
* then wait_event might block forever as the condition is false */
if (llist_empty(&loop->ev_llist))
goto retry_wait;
/* race detected */
if (!test_and_set_bit(XIO_EV_LOOP_WAKE, &loop->states))
goto retry_dont_wait;
/* a wakeup was already called */
goto retry_wait;
cleanup0:
set_bit(XIO_EV_LOOP_STOP, &loop->states);
return -1;
}
Contributor: maxgurtovoy, Project: accelio, Lines: 88, Source file: xio_ev_loop.c
Example 10: raw_smp_processor_id
static struct kvm_para_state *kvm_para_state(void)
{
return &per_cpu(para_state, raw_smp_processor_id());
}
Contributor: Medvedroid, Project: OT_903D-kernel-2.6.35.7, Lines: 4, Source file: kvm.c
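Indexing per_cpu() with the raw CPU id is tolerable here because the returned pointer stays valid even if the task migrates; it may simply refer to a neighbouring CPU's copy. The more common preemption-safe pattern looks like the hedged sketch below, where the placeholder struct stands in for the real one defined in kvm.c.

#include <linux/percpu.h>

/* Placeholder for the real structure defined in kvm.c. */
struct kvm_para_state { int dummy; };

static DEFINE_PER_CPU(struct kvm_para_state, para_state);

static void touch_para_state(void)
{
        struct kvm_para_state *state;

        state = &get_cpu_var(para_state);       /* disables preemption */
        /* ... use *state while preemption is off ... */
        put_cpu_var(para_state);                /* re-enables preemption */
}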
Example 11: cpu_request_microcode
static int cpu_request_microcode(int cpu, const void *buf, size_t bufsize)
{
struct microcode_amd *mc_amd, *mc_old;
size_t offset = bufsize;
size_t last_offset, applied_offset = 0;
int error = 0;
struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
/* We should bind the task to the CPU */
BUG_ON(cpu != raw_smp_processor_id());
if ( *(const uint32_t *)buf != UCODE_MAGIC )
{
printk(KERN_ERR "microcode: Wrong microcode patch file magic\n");
error = -EINVAL;
goto out;
}
mc_amd = xmalloc(struct microcode_amd);
if ( !mc_amd )
{
printk(KERN_ERR "microcode: Cannot allocate memory for microcode patch\n");
error = -ENOMEM;
goto out;
}
error = install_equiv_cpu_table(mc_amd, buf, &offset);
if ( error )
{
xfree(mc_amd);
printk(KERN_ERR "microcode: installing equivalent cpu table failed\n");
error = -EINVAL;
goto out;
}
mc_old = uci->mc.mc_amd;
/* implicitly validates uci->mc.mc_valid */
uci->mc.mc_amd = mc_amd;
/*
* It's possible the data file has multiple matching ucode,
* lets keep searching till the latest version
*/
mc_amd->mpb = NULL;
mc_amd->mpb_size = 0;
last_offset = offset;
while ( (error = get_ucode_from_buffer_amd(mc_amd, buf, bufsize,
&offset)) == 0 )
{
if ( microcode_fits(mc_amd, cpu) )
{
error = apply_microcode(cpu);
if ( error )
break;
applied_offset = last_offset;
}
last_offset = offset;
if ( offset >= bufsize )
break;
}
/* On success keep the microcode patch for
* re-apply on resume.
*/
if ( applied_offset )
{
int ret = get_ucode_from_buffer_amd(mc_amd, buf, bufsize,
&applied_offset);
if ( ret == 0 )
xfree(mc_old);
else
error = ret;
}
if ( !applied_offset || error )
{
xfree(mc_amd);
uci->mc.mc_amd = mc_old;
}
out:
svm_host_osvw_init();
/*
* In some cases we may return an error even if processor's microcode has
* been updated. For example, the first patch in a container file is loaded
* successfully but subsequent container file processing encounters a
* failure.
*/
return error;
}
Contributor: HackLinux, Project: xen, Lines: 93, Source file: microcode_amd.c
Example 12: dump_bfin_trace_buffer
void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
int tflags, i = 0, fault = 0;
char buf[150];
unsigned short *addr;
unsigned int cpu = raw_smp_processor_id();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
int j, index;
#endif
trace_buffer_save(tflags);
pr_notice("Hardware Trace:\n");
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
pr_notice("WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif
if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
addr = (unsigned short *)bfin_read_TBUF();
decode_address(buf, (unsigned long)addr);
pr_notice("%4i Target : %s\n", i, buf);
if (!fault && addr == ((unsigned short *)evt_ivhw)) {
addr = (unsigned short *)bfin_read_TBUF();
decode_address(buf, (unsigned long)addr);
pr_notice(" FAULT : %s ", buf);
decode_instruction(addr);
pr_cont("\n");
fault = 1;
continue;
}
if (!fault && addr == (unsigned short *)trap &&
(cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE) > VEC_EXCPT15) {
decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
pr_notice(" FAULT : %s ", buf);
decode_instruction((unsigned short *)cpu_pda[cpu].icplb_fault_addr);
pr_cont("\n");
fault = 1;
}
addr = (unsigned short *)bfin_read_TBUF();
decode_address(buf, (unsigned long)addr);
pr_notice(" Source : %s ", buf);
decode_instruction(addr);
pr_cont("\n");
}
}
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
if (trace_buff_offset)
index = trace_buff_offset / 4;
else
index = EXPAND_LEN;
j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
while (j) {
decode_address(buf, software_trace_buff[index]);
pr_notice("%4i Target : %s\n", i, buf);
index -= 1;
if (index < 0)
index = EXPAND_LEN;
decode_address(buf, software_trace_buff[index]);
pr_notice(" Source : %s ", buf);
decode_instruction((unsigned short *)software_trace_buff[index]);
pr_cont("\n");
index -= 1;
if (index < 0)
index = EXPAND_LEN;
j--;
i++;
}
#endif
trace_buffer_restore(tflags);
#endif
}
Contributor: Alex-V2, Project: One_M8_4.4.3_kernel, Lines: 77, Source file: trace.c
Example 13: __const_udelay
inline void __const_udelay(unsigned long xloops)
{
__delay(xloops * (HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy));
}
Contributor: johnny, Project: CobraDroidBeta, Lines: 4, Source file: udelay.c
Example 14: nft_meta_get_eval
void nft_meta_get_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
const struct net_device *in = pkt->in, *out = pkt->out;
u32 *dest = &regs->data[priv->dreg];
switch (priv->key) {
case NFT_META_LEN:
*dest = skb->len;
break;
case NFT_META_PROTOCOL:
*dest = 0;
*(__be16 *)dest = skb->protocol;
break;
case NFT_META_NFPROTO:
*dest = pkt->ops->pf;
break;
case NFT_META_L4PROTO:
*dest = pkt->tprot;
break;
case NFT_META_PRIORITY:
*dest = skb->priority;
break;
case NFT_META_MARK:
*dest = skb->mark;
break;
case NFT_META_IIF:
if (in == NULL)
goto err;
*dest = in->ifindex;
break;
case NFT_META_OIF:
if (out == NULL)
goto err;
*dest = out->ifindex;
break;
case NFT_META_IIFNAME:
if (in == NULL)
goto err;
strncpy((char *)dest, in->name, IFNAMSIZ);
break;
case NFT_META_OIFNAME:
if (out == NULL)
goto err;
strncpy((char *)dest, out->name, IFNAMSIZ);
break;
case NFT_META_IIFTYPE:
if (in == NULL)
goto err;
*dest = 0;
*(u16 *)dest = in->type;
break;
case NFT_META_OIFTYPE:
if (out == NULL)
goto err;
*dest = 0;
*(u16 *)dest = out->type;
break;
case NFT_META_SKUID:
if (skb->sk == NULL || !sk_fullsock(skb->sk))
goto err;
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->sk->sk_socket == NULL ||
skb->sk->sk_socket->file == NULL) {
read_unlock_bh(&skb->sk->sk_callback_lock);
goto err;
}
*dest = from_kuid_munged(&init_user_ns,
skb->sk->sk_socket->file->f_cred->fsuid);
read_unlock_bh(&skb->sk->sk_callback_lock);
break;
case NFT_META_SKGID:
if (skb->sk == NULL || !sk_fullsock(skb->sk))
goto err;
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->sk->sk_socket == NULL ||
skb->sk->sk_socket->file == NULL) {
read_unlock_bh(&skb->sk->sk_callback_lock);
goto err;
}
*dest = from_kgid_munged(&init_user_ns,
skb->sk->sk_socket->file->f_cred->fsgid);
read_unlock_bh(&skb->sk->sk_callback_lock);
break;
#ifdef CONFIG_IP_ROUTE_CLASSID
case NFT_META_RTCLASSID: {
const struct dst_entry *dst = skb_dst(skb);
if (dst == NULL)
goto err;
*dest = dst->tclassid;
break;
}
#endif
//......... remaining code omitted .........
Contributor: DenisLug, Project: mptcp, Lines: 101, Source file: nft_meta.c
Example 15: do_page_fault
//......... earlier part of this function omitted .........
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = address;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
smp_processor_id(), field, address, field, regs->cp0_epc,
field, regs->regs[31]);
die("Oops", regs);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (tsk->pid == 1) {
yield();
down_read(&mm->mmap_sem);
goto survive;
}
printk("VM: killing process %s\n", tsk->comm);
if (user_mode(regs))
do_exit(SIGKILL);
goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
else
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
#if 0
printk("do_page_fault() #3: sending SIGBUS to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
#endif
tsk->thread.cp0_badvaddr = address;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *) address;
force_sig_info(SIGBUS, &info, tsk);
return;
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
int offset = __pgd_offset(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
if (!pgd_present(*pgd_k))
goto no_context;
set_pgd(pgd, *pgd_k);
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto no_context;
set_pmd(pmd, *pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
}
Contributor: mrtos, Project: Logitech-Revue, Lines: 101, Source file: fault.c
Example 16: boost_mig_sync_thread
static int boost_mig_sync_thread(void *data)
{
int dest_cpu = (int) data;
int src_cpu, ret;
struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
struct cpufreq_policy dest_policy;
struct cpufreq_policy src_policy;
unsigned long flags;
unsigned int req_freq;
while (1) {
wait_event(s->sync_wq, s->pending || kthread_should_stop());
#ifdef CONFIG_IRLED_GPIO
if (unlikely(gir_boost_disable)) {
pr_debug("[GPIO_IR][%s] continue~!(cpu:%d)\n",
__func__, raw_smp_processor_id());
continue;
}
#endif
if (kthread_should_stop())
break;
spin_lock_irqsave(&s->lock, flags);
s->pending = false;
src_cpu = s->src_cpu;
spin_unlock_irqrestore(&s->lock, flags);
ret = cpufreq_get_policy(&src_policy, src_cpu);
if (ret)
continue;
ret = cpufreq_get_policy(&dest_policy, dest_cpu);
if (ret)
continue;
if (s->task_load < migration_load_threshold)
continue;
req_freq = load_based_syncs ?
(dest_policy.max * s->task_load) / 100 : src_policy.cur;
if (req_freq <= dest_policy.cpuinfo.min_freq) {
pr_debug("No sync. Sync Freq:%u\n", req_freq);
continue;
}
if (sync_threshold)
req_freq = min(sync_threshold, req_freq);
cancel_delayed_work_sync(&s->boost_rem);
#ifdef CONFIG_CPUFREQ_HARDLIMIT
s->boost_min = check_cpufreq_hardlimit(req_freq);
#else
#ifdef CONFIG_CPUFREQ_LIMIT
s->boost_min = check_cpufreq_limit(req_freq);
#else
s->boost_min = req_freq;
#endif
#endif
/* Force policy re-evaluation to trigger adjust notifier. */
get_online_cpus();
if (cpu_online(src_cpu))
/*
* Send an unchanged policy update to the source
* CPU. Even though the policy isn't changed from
* its existing boosted or non-boosted state
* notifying the source CPU will let the governor
* know a boost happened on another CPU and that it
* should re-evaluate the frequency at the next timer
* event without interference from a min sample time.
*/
cpufreq_update_policy(src_cpu);
if (cpu_online(dest_cpu)) {
cpufreq_update_policy(dest_cpu);
queue_delayed_work_on(dest_cpu, cpu_boost_wq,
&s->boost_rem, msecs_to_jiffies(boost_ms));
} else {
s->boost_min = 0;
}
put_online_cpus();
}
return 0;
}
Contributor: ffolkes, Project: plasmakernel_note4_tw_lp511, Lines: 87, Source file: cpu-boost.c
Example 17: padlock_store_cword
static inline void padlock_store_cword(struct cword *cword)
{
per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}
Contributor: 1111saeid, Project: jb_kernel_3.0.16_htc_golfu, Lines: 4, Source file: padlock-aes.c
Example 18: kgdb_call_nmi_hook
static int kgdb_call_nmi_hook(struct pt_regs *regs)
{
kgdb_nmicallback(raw_smp_processor_id(), regs);
return 0;
}
Contributor: 7L, Project: pi_plus, Lines: 5, Source file: kgdb.c
Example 19: sk_run_filter
//......... earlier part of this function omitted .........
A = K;
continue;
case BPF_S_LDX_IMM:
X = K;
continue;
case BPF_S_LD_MEM:
A = mem[K];
continue;
case BPF_S_LDX_MEM:
X = mem[K];
continue;
case BPF_S_MISC_TAX:
X = A;
continue;
case BPF_S_MISC_TXA:
A = X;
continue;
case BPF_S_RET_K:
return K;
case BPF_S_RET_A:
return A;
case BPF_S_ST:
mem[K] = A;
continue;
case BPF_S_STX:
mem[K] = X;
continue;
case BPF_S_ANC_PROTOCOL:
A = ntohs(skb->protocol);
continue;
case BPF_S_ANC_PKTTYPE:
A = skb->pkt_type;
continue;
case BPF_S_ANC_IFINDEX:
if (!skb->dev)
return 0;
A = skb->dev->ifindex;
continue;
case BPF_S_ANC_MARK:
A = skb->mark;
continue;
case BPF_S_ANC_QUEUE:
A = skb->queue_mapping;
continue;
case BPF_S_ANC_HATYPE:
if (!skb->dev)
return 0;
A = skb->dev->type;
continue;
case BPF_S_ANC_RXHASH:
A = skb->rxhash;
continue;
case BPF_S_ANC_CPU:
A = raw_smp_processor_id();
continue;
case BPF_S_ANC_NLATTR: {
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = nla_find((struct nlattr *)&skb->data[A],
skb->len - A, X);
if (nla)
A = (void *)nla - (void *)skb->data;
else
A = 0;
continue;
}
case BPF_S_ANC_NLATTR_NEST: {
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = (struct nlattr *)&skb->data[A];
if (nla->nla_len > A - skb->len)
return 0;
nla = nla_find_nested(nla, X);
if (nla)
A = (void *)nla - (void *)skb->data;
else
A = 0;
continue;
}
default:
WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
fentry->code, fentry->jt,
fentry->jf, fentry->k);
return 0;
}
}
return 0;
}
Contributor: ARMP, Project: android_kernel_lge_x3, Lines: 101, Source file: filter.c
Example 20: gameport_measure_speed
static int gameport_measure_speed(struct gameport *gameport)
{
#if defined(__i386__)
unsigned int i, t, t1, t2, t3, tx;
unsigned long flags;
if (gameport_open(gameport, NULL, GAMEPORT_MODE_RAW))
return 0;
tx = 1 << 30;
for(i = 0; i < 50; i++) {
local_irq_save(flags);
GET_TIME(t1);
for (t = 0; t < 50; t++) gameport_read(gameport);
GET_TIME(t2);
GET_TIME(t3);
local_irq_restore(flags);
udelay(i * 10);
if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
}
gameport_close(gameport);
return 59659 / (tx < 1 ? 1 : tx);
#elif defined (__x86_64__)
unsigned int i, t;
unsigned long tx, t1, t2, flags;
if (gameport_open(gameport, NULL, GAMEPORT_MODE_RAW))
return 0;
tx = 1 << 30;
for(i = 0; i < 50; i++) {
local_irq_save(flags);
rdtscl(t1);
for (t = 0; t < 50; t++) gameport_read(gameport);
rdtscl(t2);
local_irq_restore(flags);
udelay(i * 10);
if (t2 - t1 < tx) tx = t2 - t1;
}
gameport_close(gameport);
return (cpu_data(raw_smp_processor_id()).loops_per_jiffy *
(unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
#else
unsigned int j, t = 0;
if (gameport_open(gameport, NULL, GAMEPORT_MODE_RAW))
return 0;
j = jiffies; while (j == jiffies);
j = jiffies; while (j == jiffies) { t++; gameport_read(gameport); }
gameport_close(gameport);
return t * HZ / 1000;
#endif
}
Contributor: E-LLP, Project: n900, Lines: 65, Source file: gameport.c
Note: the raw_smp_processor_id examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not republish without permission.