C++ smp_processor_id Function Code Examples


This article collects and summarizes typical usage examples of the smp_processor_id function in C/C++. If you have been struggling with questions such as: what exactly is smp_processor_id used for? How do you call smp_processor_id? What do real uses of smp_processor_id look like? Then the hand-picked code examples below should help.



The sections below present 20 code examples of the smp_processor_id function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better code examples.
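Before the excerpts, a minimal sketch of the usual calling convention: smp_processor_id() is only meaningful while preemption is disabled, so ad-hoc callers typically wrap it in get_cpu()/put_cpu() (or an explicit preempt_disable() section). The helper name below is made up for illustration; get_cpu(), put_cpu() and pr_info() are standard kernel APIs.

#include <linux/smp.h>
#include <linux/printk.h>

/* Illustrative helper (hypothetical name): report which CPU we are running on. */
static void report_current_cpu(void)
{
	int cpu = get_cpu();	/* disables preemption and returns smp_processor_id() */

	pr_info("running on CPU %d\n", cpu);
	put_cpu();		/* re-enables preemption */
}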

Example 1: ia64_sync_itc

/*
 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
 * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
 * unaccounted-for errors (such as getting a machine check in the middle of a calibration
 * step).  The basic idea is for the slave to ask the master what itc value it has and to
 * read its own itc before and after the master responds.  Each iteration gives us three
 * timestamps:
 *
 *	slave		master
 *
 *	t0 ---\
 *             ---\
 *		   --->
 *			tm
 *		   /---
 *	       /---
 *	t1 <---
 *
 *
 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
 * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
 * between the slave and the master is symmetric.  Even if the interconnect were
 * asymmetric, we would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
 * within one or two cycles.  However, we can only *guarantee* that the synchronization is
 * accurate to within a round-trip time, which is typically in the range of several
 * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itc's are usually
 * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
 * than half a micro second or so.
 */
void
ia64_sync_itc (unsigned int master)
{
    long i, delta, adj, adjust_latency = 0, done = 0;
    unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_ITC_SYNC
    struct {
        long rt;	/* roundtrip time */
        long master;	/* master's timestamp */
        long diff;	/* difference between midpoint and master's timestamp */
        long lat;	/* estimate of itc adjustment latency */
    } t[NUM_ROUNDS];
#endif

    /*
     * Make sure local timer ticks are disabled while we sync.  If
     * they were enabled, we'd have to worry about nasty issues
     * like setting the ITC ahead of (or a long time before) the
     * next scheduled tick.
     */
    BUG_ON((ia64_get_itv() & (1 << 16)) == 0);

    go[MASTER] = 1;

    if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
        printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
        return;
    }

    while (go[MASTER])
        cpu_relax();	/* wait for master to be ready */

    spin_lock_irqsave(&itc_sync_lock, flags);
    {
        for (i = 0; i < NUM_ROUNDS; ++i) {
            delta = get_delta(&rt, &master_time_stamp);
            if (delta == 0) {
                done = 1;	/* let's lock on to this... */
                bound = rt;
            }

            if (!done) {
                if (i > 0) {
                    adjust_latency += -delta;
                    adj = -delta + adjust_latency/4;
                } else
                    adj = -delta;

                ia64_set_itc(ia64_get_itc() + adj);
            }
#if DEBUG_ITC_SYNC
            t[i].rt = rt;
            t[i].master = master_time_stamp;
            t[i].diff = delta;
            t[i].lat = adjust_latency/4;
#endif
        }
    }
    spin_unlock_irqrestore(&itc_sync_lock, flags);

#if DEBUG_ITC_SYNC
    for (i = 0; i < NUM_ROUNDS; ++i)
        printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
               t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

    printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
           "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
//......... some code omitted here .........
Developer ID: nandra, Project: omap_850_kernel, Lines of code: 101, Source file: smpboot.c
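Example 1 calls get_delta() without showing it. As a reading aid for the t0/tm/t1 reasoning in the comment above, here is a hedged sketch of what such a helper looks like, reconstructed from that description; NUM_ITERS, the go[MASTER]/go[SLAVE] handshake flags and the master-side sync_master() handler are assumed to be defined elsewhere in the same file and are not part of the excerpt.

static inline long
get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	long i;

	for (i = 0; i < NUM_ITERS; ++i) {
		t0 = ia64_get_itc();		/* slave timestamp before the request */
		go[MASTER] = 1;			/* ask the master for its itc value */
		while (!(tm = go[SLAVE]))
			cpu_relax();		/* master writes its itc into go[SLAVE] */
		go[SLAVE] = 0;
		t1 = ia64_get_itc();		/* slave timestamp after the reply */

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;		/* roundtrip time */
	*master = best_tm - best_t0;		/* master's timestamp relative to t0 */

	/* average best_t0 and best_t1 without overflow: */
	tcenter = best_t0/2 + best_t1/2;
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;		/* > 0 when the slave's midpoint leads the master */
}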


Example 2: tick_check_new_device

/*
 * Check, if the new registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {

		/*
		 * If the cpu affinity of the device interrupt can not
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Prefer one shot capable devices !
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}
Developer ID: 0xroot, Project: Blackphone-BP1-Kernel, Lines of code: 83, Source file: tick-common.c


Example 3: cris_mmu_init

/*
 * The kernel is already mapped with linear mapping at kseg_c so there's no
 * need to map it with a page table. However, head.S also temporarily mapped it
 * at kseg_4 thus the ksegs are set up again. Also clear the TLB and do various
 * other paging stuff.
 */
void __init
cris_mmu_init(void)
{
	unsigned long mmu_config;
	unsigned long mmu_kbase_hi;
	unsigned long mmu_kbase_lo;
	unsigned short mmu_page_id;

	/*
	 * Make sure the current pgd table points to something sane, even if it
	 * is most probably not used until the next switch_mm.
	 */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

#ifdef CONFIG_SMP
	{
		pgd_t **pgd;
		pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
		SUPP_BANK_SEL(1);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
		SUPP_BANK_SEL(2);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	}
#endif

	/* Initialise the TLB. Function found in tlb.c. */
	tlb_init();

	/* Enable exceptions and initialize the kernel segments. */
	mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on)        |
		       REG_STATE(mmu, rw_mm_cfg, acc, on)       |
		       REG_STATE(mmu, rw_mm_cfg, ex, on)        |
		       REG_STATE(mmu, rw_mm_cfg, inv, on)       |
		       REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_d, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
#ifndef CONFIG_ETRAXFS_SIM
                       REG_STATE(mmu, rw_mm_cfg, seg_a, page)   |
#else
		       REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
#endif
		       REG_STATE(mmu, rw_mm_cfg, seg_9, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_8, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_7, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_6, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_5, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_4, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_3, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_2, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_1, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_0, page));

	mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
#ifndef CONFIG_ETRAXFS_SIM
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
#else
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x0) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
#ifndef CONFIG_ETRAXFS_SIM
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
#else
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));

	mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));

	mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);

	/* Update the instruction MMU. */
	SUPP_BANK_SEL(BANK_IM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Update the data MMU. */
	SUPP_BANK_SEL(BANK_DM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
//......... some code omitted here .........
Developer ID: 1x23, Project: unifi-gpl, Lines of code: 101, Source file: init.c


Example 4: vprintk

asmlinkage int vprintk(const char *fmt, va_list args)
{
	int printed_len = 0;
	int current_log_level = default_message_loglevel;
	unsigned long flags;
	int this_cpu;
	char *p;

	boot_delay_msec();

	preempt_disable();
	/* This stops the holder of console_sem just where we want him */
	raw_local_irq_save(flags);
	this_cpu = smp_processor_id();

	/*
	 * Ouch, printk recursed into itself!
	 */
	if (unlikely(printk_cpu == this_cpu)) {
		/*
		 * If a crash is occurring during printk() on this CPU,
		 * then try to get the crash message out but make sure
		 * we can't deadlock. Otherwise just return to avoid the
		 * recursion and return - but flag the recursion so that
		 * it can be printed at the next appropriate moment:
		 */
		if (!oops_in_progress) {
			recursion_bug = 1;
			goto out_restore_irqs;
		}
		zap_locks();
	}

	lockdep_off();
	spin_lock(&logbuf_lock);
	printk_cpu = this_cpu;

	if (recursion_bug) {
		recursion_bug = 0;
		strcpy(printk_buf, recursion_bug_msg);
		printed_len = sizeof(recursion_bug_msg);
	}
	/* Emit the output into the temporary buffer */
	printed_len += vscnprintf(printk_buf + printed_len,
				  sizeof(printk_buf) - printed_len, fmt, args);


	/*
	 * Copy the output into log_buf.  If the caller didn't provide
	 * appropriate log level tags, we insert them here
	 */
	for (p = printk_buf; *p; p++) {
		if (new_text_line) {
			/* If a token, set current_log_level and skip over */
			if (p[0] == '<' && p[1] >= '0' && p[1] <= '7' &&
			    p[2] == '>') {
				current_log_level = p[1] - '0';
				p += 3;
				printed_len -= 3;
			}

			/* Always output the token */
			emit_log_char('<');
			emit_log_char(current_log_level + '0');
			emit_log_char('>');
#ifdef CONFIG_KERNEL_LOGGING
			{
				char tbuf[3];

				// check kernel start
				if( (b_first_call_after_booting == 0) &&
					(logging_mode & LOGGING_RAM_MASK) && 
					(!ioremapped && log_buf_base  ) )
				{
//					char tempStar[] = "********************************************\n";
					char tempChar[] = "============== start kernel logging !! ==============\n";
					
					b_first_call_after_booting = 1;

//					emit_log_char_RAMbuf(tempStar, sizeof(tempStar));
					emit_log_char_RAMbuf(tempChar, sizeof(tempChar));
//					emit_log_char_RAMbuf(tempStar, sizeof(tempStar));
				}
				
				sprintf(tbuf, "<%1d>",default_message_loglevel);
				emit_log_char_RAMbuf(tbuf, 3);
			}
#endif
			printed_len += 3;
			new_text_line = 0;

			if (printk_time) {
				/* Follow the token with the time */
				char tbuf[50], *tp;
				unsigned tlen;
				unsigned long long t;
				unsigned long nanosec_rem;

				t = cpu_clock(printk_cpu);
				nanosec_rem = do_div(t, 1000000000);
//......... some code omitted here .........
Developer ID: argentinos, Project: o2droid, Lines of code: 101, Source file: printk.c


Example 5: mcheck_cmn_handler

/* Shared #MC handler. */
void mcheck_cmn_handler(const struct cpu_user_regs *regs)
{
    struct mca_banks *bankmask = mca_allbanks;
    struct mca_banks *clear_bank = __get_cpu_var(mce_clear_banks);
    uint64_t gstatus;
    mctelem_cookie_t mctc = NULL;
    struct mca_summary bs;

    mce_spin_lock(&mce_logout_lock);

    if (clear_bank != NULL) {
        memset( clear_bank->bank_map, 0x0,
            sizeof(long) * BITS_TO_LONGS(clear_bank->num));
    }
    mctc = mcheck_mca_logout(MCA_MCE_SCAN, bankmask, &bs, clear_bank);

    if (bs.errcnt) {
        /*
         * Uncorrected errors must be dealt with in softirq context.
         */
        if (bs.uc || bs.pcc) {
            add_taint(TAINT_MACHINE_CHECK);
            if (mctc != NULL)
                mctelem_defer(mctc);
            /*
             * For PCC=1 and can't be recovered, context is lost, so
             * reboot now without clearing the banks, and deal with
             * the telemetry after reboot (the MSRs are sticky)
             */
            if (bs.pcc || !bs.recoverable)
                cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
        } else {
            if (mctc != NULL)
                mctelem_commit(mctc);
        }
        atomic_set(&found_error, 1);

        /* The last CPU will take care of check/clean-up etc. */
        atomic_set(&severity_cpu, smp_processor_id());

        mce_printk(MCE_CRITICAL, "MCE: clear_bank map %lx on CPU%d\n",
                *((unsigned long*)clear_bank), smp_processor_id());
        if (clear_bank != NULL)
            mcheck_mca_clearbanks(clear_bank);
    } else {
        if (mctc != NULL)
            mctelem_dismiss(mctc);
    }
    mce_spin_unlock(&mce_logout_lock);

    mce_barrier_enter(&mce_trap_bar);
    if ( mctc != NULL && mce_urgent_action(regs, mctc))
        cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
    mce_barrier_exit(&mce_trap_bar);

    /*
     * Wait until everybody has processed the trap.
     */
    mce_barrier_enter(&mce_trap_bar);
    if (atomic_read(&severity_cpu) == smp_processor_id())
    {
        /* According to SDM, if no error bank found on any cpus,
         * something unexpected happening, we can't do any
         * recovery job but to reset the system.
         */
        if (atomic_read(&found_error) == 0)
            mc_panic("MCE: No CPU found valid MCE, need reset");
        if (!cpumask_empty(&mce_fatal_cpus))
        {
            char *ebufp, ebuf[96] = "MCE: Fatal error happened on CPUs ";
            ebufp = ebuf + strlen(ebuf);
            cpumask_scnprintf(ebufp, 95 - strlen(ebuf), &mce_fatal_cpus);
            mc_panic(ebuf);
        }
        atomic_set(&found_error, 0);
    }
    mce_barrier_exit(&mce_trap_bar);

    /* Clear flags after above fatal check */
    mce_barrier_enter(&mce_trap_bar);
    gstatus = mca_rdmsr(MSR_IA32_MCG_STATUS);
    if ((gstatus & MCG_STATUS_MCIP) != 0) {
        mce_printk(MCE_CRITICAL, "MCE: Clear MCIP@ last step");
        mca_wrmsr(MSR_IA32_MCG_STATUS, 0);
    }
    mce_barrier_exit(&mce_trap_bar);

    raise_softirq(MACHINE_CHECK_SOFTIRQ);
}
Developer ID: sheep, Project: xen, Lines of code: 90, Source file: mce.c


Example 6: xics_migrate_irqs_away

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
	unsigned int irq, virq;
	struct irq_desc *desc;

	/* If we used to be the default server, move to the new "boot_cpuid" */
	if (hw_cpu == xics_default_server)
		xics_update_irq_servers();

	/* Reject any interrupt that was queued to us... */
	icp_ops->set_priority(0);

	/* Remove ourselves from the global interrupt queue */
	xics_set_cpu_giq(xics_default_distrib_server, 0);

	/* Allow IPIs again... */
	icp_ops->set_priority(DEFAULT_PRIORITY);

	for_each_irq_desc(virq, desc) {
		struct irq_chip *chip;
		long server;
		unsigned long flags;
		struct ics *ics;

		/* We can't set affinity on ISA interrupts */
		if (virq < NUM_ISA_INTERRUPTS)
			continue;
		/* We only need to migrate enabled IRQS */
		if (!desc->action)
			continue;
		if (desc->irq_data.domain != xics_host)
			continue;
		irq = desc->irq_data.hwirq;
		/* We need to get IPIs still. */
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
			continue;
		chip = irq_desc_get_chip(desc);
		if (!chip || !chip->irq_set_affinity)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		/* Locate interrupt server */
		server = -1;
		ics = irq_get_chip_data(virq);
		if (ics)
			server = ics->get_server(ics, irq);
		if (server < 0) {
			printk(KERN_ERR "%s: Can't find server for irq %d\n",
			       __func__, irq);
			goto unlock;
		}

		/* We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (server != hw_cpu)
			goto unlock;

		/* This is expected during cpu offline. */
		if (cpu_online(cpu))
			pr_warning("IRQ %u affinity broken off cpu %u\n",
			       virq, cpu);

		/* Reset affinity to all cpus */
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		irq_set_affinity(virq, cpu_all_mask);
		continue;
unlock:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
Developer ID: 168519, Project: linux, Lines of code: 75, Source file: xics-common.c


Example 7: timer_interrupt

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 * We set it up to overflow again in 1/HZ seconds.
 */
void timer_interrupt(struct pt_regs * regs)
{
	int next_dec;
	unsigned long cpu = smp_processor_id();
	unsigned jiffy_stamp = last_jiffy_stamp(cpu);
	extern void do_IRQ(struct pt_regs *);

	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);

	irq_enter();

	while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) {
		jiffy_stamp += tb_ticks_per_jiffy;
		
		profile_tick(CPU_PROFILING, regs);
		update_process_times(user_mode(regs));

	  	if (smp_processor_id())
			continue;

		/* We are in an interrupt, no need to save/restore flags */
		write_seqlock(&xtime_lock);
		tb_last_stamp = jiffy_stamp;
		do_timer(regs);

		/*
		 * update the rtc when needed, this should be performed on the
		 * right fraction of a second. Half or full second ?
		 * Full second works on mk48t59 clocks, others need testing.
		 * Note that this update is basically only used through
		 * the adjtimex system calls. Setting the HW clock in
		 * any other way is a /dev/rtc and userland business.
		 * This is still wrong by -0.5/+1.5 jiffies because of the
		 * timer interrupt resolution and possible delay, but here we
		 * hit a quantization limit which can only be solved by higher
		 * resolution timers and decoupling time management from timer
		 * interrupts. This is also wrong on the clocks
		 * which require being written at the half second boundary.
		 * We should have an rtc call that only sets the minutes and
		 * seconds like on Intel to avoid problems with non UTC clocks.
		 */
		if ( ppc_md.set_rtc_time && ntp_synced() &&
		     xtime.tv_sec - last_rtc_update >= 659 &&
		     abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
		     jiffies - wall_jiffies == 1) {
		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
				last_rtc_update = xtime.tv_sec+1;
			else
				/* Try again one minute later */
				last_rtc_update += 60;
		}
		write_sequnlock(&xtime_lock);
	}
	if ( !disarm_decr[smp_processor_id()] )
		set_dec(next_dec);
	last_jiffy_stamp(cpu) = jiffy_stamp;

	if (ppc_md.heartbeat && !ppc_md.heartbeat_count--)
		ppc_md.heartbeat();

	irq_exit();
}
Developer ID: FatSunHYS, Project: OSCourseDesign, Lines of code: 68, Source file: time.c


Example 8: vprintk

asmlinkage int vprintk(const char *fmt, va_list args)
{
	int printed_len = 0;
	int current_log_level = default_message_loglevel;
	unsigned long flags;
	int this_cpu;
	char *p;

	boot_delay_msec();
	printk_delay();

	preempt_disable();
	/* This stops the holder of console_sem just where we want him */
	raw_local_irq_save(flags);
	this_cpu = smp_processor_id();

	/*
	 * Ouch, printk recursed into itself!
	 */
	if (unlikely(printk_cpu == this_cpu)) {
		/*
		 * If a crash is occurring during printk() on this CPU,
		 * then try to get the crash message out but make sure
		 * we can't deadlock. Otherwise just return to avoid the
		 * recursion and return - but flag the recursion so that
		 * it can be printed at the next appropriate moment:
		 */
		if (!oops_in_progress) {
			recursion_bug = 1;
			goto out_restore_irqs;
		}
		zap_locks();
	}

	lockdep_off();
	spin_lock(&logbuf_lock);
	printk_cpu = this_cpu;

	if (recursion_bug) {
		recursion_bug = 0;
		strcpy(printk_buf, recursion_bug_msg);
		printed_len = strlen(recursion_bug_msg);
	}
	/* Emit the output into the temporary buffer */
	printed_len += vscnprintf(printk_buf + printed_len,
				  sizeof(printk_buf) - printed_len, fmt, args);

#ifdef	CONFIG_DEBUG_LL
	printascii(printk_buf);
#endif

	p = printk_buf;

	/* Do we have a loglevel in the string? */
	if (p[0] == '<') {
		unsigned char c = p[1];
		if (c && p[2] == '>') {
			switch (c) {
			case '0' ... '7': /* loglevel */
				current_log_level = c - '0';
			/* Fallthrough - make sure we're on a new line */
			case 'd': /* KERN_DEFAULT */
				if (!new_text_line) {
					emit_log_char('\n');
					new_text_line = 1;
				}
			/* Fallthrough - skip the loglevel */
			case 'c': /* KERN_CONT */
				p += 3;
				break;
			}
		}
	}
Developer ID: garwedgess, Project: LuPuS-STOCK-ICS-Xperia2011, Lines of code: 73, Source file: printk.c


Example 9: check_irq_vectors_for_cpu_disable

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;
	int cpu;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;
		/*
		 * Protect against concurrent action removal, affinity
		 * changes etc.
		 */
		raw_spin_lock(&desc->lock);
		data = irq_desc_get_irq_data(desc);
		cpumask_copy(&affinity_new,
			     irq_data_get_affinity_mask(data));
		cpumask_clear_cpu(this_cpu, &affinity_new);

		/* Do not count inactive or per-cpu irqs. */
		if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		raw_spin_unlock(&desc->lock);
		/*
		 * A single irq may be mapped to multiple cpu's
		 * vector_irq[] (for example IOAPIC cluster mode).  In
		 * this case we have two possibilities:
		 *
		 * 1) the resulting affinity mask is empty; that is
		 * this the down'd cpu is the last cpu in the irq's
		 * affinity mask, or
		 *
		 * 2) the resulting affinity mask is no longer a
		 * subset of the online cpus but the affinity mask is
		 * not zero; that is the down'd cpu is the last online
		 * cpu in a user set affinity mask.
		 */
		if (cpumask_empty(&affinity_new) ||
		    !cpumask_subset(&affinity_new, &online_new))
			this_count++;
	}

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We scan from FIRST_EXTERNAL_VECTOR to first system
		 * vector. If the vector is marked in the used vectors
		 * bitmap or an irq is assigned to it, we don't count
		 * it as available.
		 *
		 * As this is an inaccurate snapshot anyway, we can do
		 * this w/o holding vector_lock.
		 */
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < first_system_vector; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
			    count++;
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}
	return 0;
}
Developer ID: 020gzh, Project: linux, Lines of code: 85, Source file: irq.c


Example 10: __profile_flip_buffers

/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING profile_hit() may be called from process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}
Developer ID: GodFox, Project: magx_kernel_xpixl, Lines of code: 36, Source file: profile.c
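The function above only toggles this CPU's flip flag. The flip protocol described in the comment is driven by a caller that takes profile_flip_mutex and IPIs every CPU; a hedged sketch of that driver is shown below (the flush of the now-idle hashtable halves into prof_buffer is elided, and on_each_cpu() is used with its current mainline signature).

/* Sketch of the assumed caller, e.g. invoked from read_profile(). */
static void profile_flip_buffers(void)
{
	mutex_lock(&profile_flip_mutex);		/* serialize flip requests */
	on_each_cpu(__profile_flip_buffers, NULL, 1);	/* every CPU toggles its cpu_profile_flip */
	/* ... flush pending hits from the now-idle halves into prof_buffer ... */
	mutex_unlock(&profile_flip_mutex);
}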


Example 11: xen_play_dead

static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}
Developer ID: 10x-Amin, Project: x10_Th_kernel, Lines of code: 6, Source file: smp.c


Example 12: sched_clock_idle_sleep_event

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
Developer ID: miettal, Project: armadillo420_standard, Lines of code: 7, Source file: sched_clock.c


Example 13: smp_callin

static void __cpuinit
smp_callin (void)
{
    int cpuid, phys_id, itc_master;
    struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
    extern void ia64_init_itm(void);
    extern volatile int time_keeper_id;

#ifdef CONFIG_PERFMON
    extern void pfm_init_percpu(void);
#endif

    cpuid = smp_processor_id();
    phys_id = hard_smp_processor_id();
    itc_master = time_keeper_id;

    if (cpu_online(cpuid)) {
        printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
               phys_id, cpuid);
        BUG();
    }

    fix_b0_for_bsp();

    lock_ipi_calllock();
    spin_lock(&vector_lock);
    /* Setup the per cpu irq handling data structures */
    __setup_vector_irq(cpuid);
    cpu_set(cpuid, cpu_online_map);
    unlock_ipi_calllock();
    per_cpu(cpu_state, cpuid) = CPU_ONLINE;
    spin_unlock(&vector_lock);

    smp_setup_percpu_timer();

    ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
    pfm_init_percpu();
#endif

    local_irq_enable();

    if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
        /*
         * Synchronize the ITC with the BP.  Need to do this after irqs are
         * enabled because ia64_sync_itc() calls smp_call_function_single(), which
         * calls spin_unlock_bh(), which calls local_bh_enable(), which bugs
         * out if irqs are not enabled...
         */
        Dprintk("Going to syncup ITC with ITC Master.\n");
        ia64_sync_itc(itc_master);
    }

    /*
     * Get our bogomips.
     */
    ia64_init_itm();

    /*
     * Delay calibration can be skipped if new processor is identical to the
     * previous processor.
     */
    last_cpuinfo = cpu_data(cpuid - 1);
    this_cpuinfo = local_cpu_data;
    if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
            last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
            last_cpuinfo->features != this_cpuinfo->features ||
            last_cpuinfo->revision != this_cpuinfo->revision ||
            last_cpuinfo->family != this_cpuinfo->family ||
            last_cpuinfo->archrev != this_cpuinfo->archrev ||
            last_cpuinfo->model != this_cpuinfo->model)
        calibrate_delay();
    local_cpu_data->loops_per_jiffy = loops_per_jiffy;

#ifdef CONFIG_IA32_SUPPORT
    ia32_gdt_init();
#endif

    /*
     * Allow the master to continue.
     */
    cpu_set(cpuid, cpu_callin_map);
    Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
Developer ID: nandra, Project: omap_850_kernel, Lines of code: 85, Source file: smpboot.c


Example 14: nmi_cpu_setup

static void nmi_cpu_setup(void * dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs * msrs = &cpu_msrs[cpu];
	model->setup_ctrs(msrs);
}
Developer ID: CrazyXen, Project: XEN_CODE, Lines of code: 6, Source file: nmi_int.c


Example 15: nmi_save_registers

static void nmi_save_registers(void *dummy)
{
    int cpu = smp_processor_id();
    struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
    nmi_cpu_save_registers(msrs);
}
Developer ID: 274914765, Project: C, Lines of code: 6, Source file: nmi_int.c


Example 16: nmi_cpu_shutdown

static void nmi_cpu_shutdown(void * dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs * msrs = &cpu_msrs[cpu];
	nmi_restore_registers(msrs);
}
开发者ID:CrazyXen,项目名称:XEN_CODE,代码行数:6,代码来源:nmi_int.c


Example 17: ERR_PTR


//......... some code omitted here .........
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids alot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * it's process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		p->group_leader = current->group_leader;
Developer ID: mymas, Project: transcall-3.5.1, Lines of code: 67, Source file: fork.c


Example 18: bridge_probe

int __init bridge_probe(nasid_t nasid, int widget_id, int masterwid)
{
    unsigned long offset = NODE_OFFSET(nasid);
    struct bridge_controller *bc;
    static int num_bridges = 0;
    bridge_t *bridge;
    int slot;

    printk("a bridge\n");

    /* XXX: kludge alert.. */
    if (!num_bridges)
        ioport_resource.end = ~0UL;

    bc = &bridges[num_bridges];

    bc->pc.pci_ops		= &bridge_pci_ops;
    bc->pc.mem_resource	= &bc->mem;
    bc->pc.io_resource	= &bc->io;

    bc->pc.index		= num_bridges;

    bc->mem.name		= "Bridge PCI MEM";
    bc->pc.mem_offset	= offset;
    bc->mem.start		= 0;
    bc->mem.end		= ~0UL;
    bc->mem.flags		= IORESOURCE_MEM;

    bc->io.name		= "Bridge IO MEM";
    bc->pc.io_offset	= offset;
    bc->io.start		= 0UL;
    bc->io.end		= ~0UL;
    bc->io.flags		= IORESOURCE_IO;

    bc->irq_cpu = smp_processor_id();
    bc->widget_id = widget_id;
    bc->nasid = nasid;

    bc->baddr = (u64)masterwid << 60 | PCI64_ATTR_BAR;

    /*
     * point to this bridge
     */
    bridge = (bridge_t *) RAW_NODE_SWIN_BASE(nasid, widget_id);

    /*
     * Clear all pending interrupts.
     */
    bridge->b_int_rst_stat = BRIDGE_IRR_ALL_CLR;

    /*
     * Until otherwise set up, assume all interrupts are from slot 0
     */
    bridge->b_int_device = 0x0;

    /*
     * swap pio's to pci mem and io space (big windows)
     */
    bridge->b_wid_control |= BRIDGE_CTRL_IO_SWAP |
                             BRIDGE_CTRL_MEM_SWAP;

    /*
     * Hmm...  IRIX sets additional bits in the address which
     * are documented as reserved in the bridge docs.
     */
    bridge->b_wid_int_upper = 0x8000 | (masterwid << 16);
    bridge->b_wid_int_lower = 0x01800090;	/* PI_INT_PEND_MOD off*/
    bridge->b_dir_map = (masterwid << 20);	/* DMA */
    bridge->b_int_enable = 0;

    for (slot = 0; slot < 8; slot ++) {
        bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR;
        bc->pci_int[slot] = -1;
    }
    bridge->b_wid_tflush;     /* wait until Bridge PIO complete */

    bc->base = bridge;

    register_pci_controller(&bc->pc);

    num_bridges++;

    return 0;
}
Developer ID: b3rnik, Project: dsl-n55u-bender, Lines of code: 84, Source file: pci-ip27.c


Example 19: switch_mmu_context

void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}
Developer ID: adis1313, Project: android_kernel_samsung_msm8974, Lines of code: 92, Source file: mmu_context_nohash.c


Example 20: osq_lock

bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
//......... some code omitted here .........
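The source page cuts this excerpt off early. For completeness, here is a hedged sketch of the encode_cpu()/decode_cpu() helpers that osq_lock() relies on, following the mainline convention of storing CPU numbers off-by-one so that the value 0 can mean "no CPU"; treat the exact definitions as an assumption rather than part of the excerpt.

static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;			/* 0 is reserved for the "unlocked" tail value */
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}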
