
C++ cpu_online Function Code Examples


This article collects typical usage examples of the C++ cpu_online function. If you have been asking yourself how cpu_online is used in practice, what it is for, or where to find working examples, the hand-picked code samples below should help.



Below are 20 code examples of the cpu_online function, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better C++ code examples.
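Before the examples, here is a minimal sketch of the basic pattern they all share (illustrative only, not taken from any project below): cpu_online(cpu) tests whether a CPU is set in cpu_online_mask, and it is usually paired with one of the cpumask iterators.

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical helper: walk every possible CPU and report which ones
 * are currently online. Note that cpu_online() can race with hotplug
 * unless the caller holds a hotplug lock (e.g. get_online_cpus()). */
static void report_online_cpus(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		if (cpu_online(cpu))
			pr_info("cpu%u: online\n", cpu);
		else
			pr_info("cpu%u: offline\n", cpu);
	}
}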

Example 1: kzalloc

/* ktap mainthread initialization, main entry for ktap */
ktap_state *kp_newstate(struct ktap_parm *parm, struct dentry *dir, char **argv)
{
	ktap_state *ks;
	pid_t pid;
	int cpu;

	ks = kzalloc(sizeof(ktap_state) + sizeof(ktap_global_state),
		     GFP_KERNEL);
	if (!ks)
		return NULL;

	ks->stack = kp_malloc(ks, KTAP_STACK_SIZE);
	G(ks) = (ktap_global_state *)(ks + 1);
	G(ks)->mainthread = ks;
	G(ks)->seed = 201236; /* todo: make more random in future */
	G(ks)->task = current;
	G(ks)->verbose = parm->verbose; /* for debug use */
	G(ks)->print_timestamp = parm->print_timestamp;
	G(ks)->workload = parm->workload;
	INIT_LIST_HEAD(&(G(ks)->timers));
	INIT_LIST_HEAD(&(G(ks)->probe_events_head));
	G(ks)->exit = 0;

	if (kp_transport_init(ks, dir))
		goto out;

	pid = (pid_t)parm->trace_pid;
	if (pid != -1) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(find_vpid(pid), PIDTYPE_PID);
		if (!task) {
			kp_error(ks, "cannot find pid %d\n", pid);
			rcu_read_unlock();
			goto out;
		}
		G(ks)->trace_task = task;
		get_task_struct(task);
		rcu_read_unlock();
	}

	if (!alloc_cpumask_var(&G(ks)->cpumask, GFP_KERNEL))
		goto out;

	cpumask_copy(G(ks)->cpumask, cpu_online_mask);

	cpu = parm->trace_cpu;
	if (cpu != -1) {
		if (!cpu_online(cpu)) {
			printk(KERN_INFO "ktap: cpu %d is not online\n", cpu);
			goto out;
		}

		cpumask_clear(G(ks)->cpumask);
		cpumask_set_cpu(cpu, G(ks)->cpumask);
	}

	if (cfunction_cache_init(ks))
		goto out;

	kp_tstring_resize(ks, 512); /* set initial string hashtable size */

	ktap_init_state(ks);
	ktap_init_registry(ks);
	ktap_init_arguments(ks, parm->argc, argv);

	/* init library */
	kp_init_baselib(ks);
	kp_init_kdebuglib(ks);
	kp_init_timerlib(ks);
	kp_init_ansilib(ks);

	if (alloc_kp_percpu_data())
		goto out;

	if (kp_probe_init(ks))
		goto out;

	return ks;

 out:
	G(ks)->exit = 1;
	kp_final_exit(ks);
	return NULL;
}
Author: joelagnel | Project: ktap | Lines: 87 | Source file: vm.c


Example 2: can_use_console

/*
 * Can we actually use the console at this time on this cpu?
 *
 * Console drivers may assume that per-cpu resources have
 * been allocated. So unless they're explicitly marked as
 * being able to cope (CON_ANYTIME) don't call them until
 * this CPU is officially up.
 */
static inline int can_use_console(unsigned int cpu)
{
	return cpu_online(cpu) || have_callable_console();
}
Author: argentinos | Project: o2droid | Lines: 12 | Source file: printk(OLD).c
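For comparison, here is a sketch of the other side of this check: a console driver that sets CON_ANYTIME, so that printk may call it even from a CPU that is not yet online, which is what makes have_callable_console() return true. The name and write hook are hypothetical; a real driver would drive its hardware here.

#include <linux/console.h>

static void myser_write(struct console *con, const char *s, unsigned int n)
{
	/* push characters to the (hypothetical) UART FIFO */
}

static struct console myser_console = {
	.name	= "myser",
	.write	= myser_write,
	/* CON_ANYTIME: callable before this CPU is officially up */
	.flags	= CON_PRINTBUFFER | CON_ANYTIME,
	.index	= -1,
};

/* register_console(&myser_console) would add it to the console list */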


Example 3: hotplug_decision_work_fn

static void hotplug_decision_work_fn(struct work_struct *work)
{
	unsigned int running, disable_load, sampling_rate, avg_running = 0;
	unsigned int online_cpus, available_cpus, i, j;
	bool hotplug_flag_on = false;
	bool hotplug_flag_off = false;
#if DEBUG
	unsigned int k;
#endif
	if (!isEnabled)
		return;
		
	online_cpus = num_online_cpus();
	available_cpus = CPUS_AVAILABLE;
	disable_load = DISABLE_LOAD_THRESHOLD; // * online_cpus;
	//enable_load = ENABLE_LOAD_THRESHOLD; // * online_cpus;
	/*
	 * Multiply nr_running() by 100 so we don't have to
	 * use fp division to get the average.
	 */
	running = nr_running() * 100;

	history[index] = running;

#if DEBUG
	pr_info("online_cpus is: %d\n", online_cpus);
	//pr_info("enable_load is: %d\n", enable_load);
	pr_info("disable_load is: %d\n", disable_load);
	pr_info("index is: %d\n", index);
	pr_info("running is: %d\n", running);
#endif

	/*
	 * Use a circular buffer to calculate the average load
	 * over the sampling periods.
	 * This will absorb load spikes of short duration where
	 * we don't want additional cores to be onlined because
	 * the cpufreq driver should take care of those load spikes.
	 */
	for (i = 0, j = index; i < SAMPLING_PERIODS; i++) {
		avg_running += history[j];
		/* wrap as a single step so history[INDEX_MAX_VALUE] is not
		 * skipped and history[index] is not counted twice */
		j = (j == 0) ? INDEX_MAX_VALUE : j - 1;
	}

	/*
	 * If we are at the end of the buffer, return to the beginning.
	 */
	if (unlikely(index++ == INDEX_MAX_VALUE))
		index = 0;

#if DEBUG
	pr_info("array contents: ");
	for (k = 0; k < SAMPLING_PERIODS; k++) {
		 pr_info("%d: %d\t",k, history[k]);
	}
	pr_info("\n");
	pr_info("avg_running before division: %d\n", avg_running);
#endif

	avg_running = avg_running / SAMPLING_PERIODS;

#if DEBUG
	pr_info("average_running is: %d\n", avg_running);
#endif

	if (likely(!(flags & HOTPLUG_DISABLED))) {
		int cpu;
		for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++)
		{
			if (avg_running >= enable_load[cpu] && (!cpu_online(cpu)))
			{
				hotplug_cpu_single_on[cpu] = 1;
				hotplug_flag_on = true;
			}
			else if (avg_running < enable_load[cpu] && (cpu_online(cpu)))
			{
				hotplug_cpu_single_off[cpu] = 1;
				hotplug_flag_off = true;
			}
		}

		if (unlikely((avg_running >= ENABLE_ALL_LOAD_THRESHOLD) && (online_cpus < available_cpus))) {
			pr_info("auto_hotplug: Onlining all CPUs, avg running: %d\n", avg_running);
			/*
			 * Flush any delayed offlining work from the workqueue.
			 * No point in having expensive unnecessary hotplug transitions.
			 * We still online after flushing, because load is high enough to
			 * warrant it.
			 * We set the paused flag so the sampling can continue but no more
			 * hotplug events will occur.
			 */
			flags |= HOTPLUG_PAUSED;
			if (delayed_work_pending(&aphotplug_offline_work))
				cancel_delayed_work(&aphotplug_offline_work);
			hotplug_flag_on = false;
			schedule_work_on(0, &hotplug_online_all_work);
			return;
		} else if (flags & HOTPLUG_PAUSED) {
			schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
//......... portions of this code omitted .........
Author: ShinySide | Project: HispAsian_Kernel_GNG8 | Lines: 101 | Source file: auto_hotplug.c
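The circular-buffer average above is simple enough to lift out and test in userspace. A self-contained sketch (names hypothetical), using a single wrap step so each of the SAMPLING_PERIODS slots is summed exactly once:

#include <stdio.h>

#define SAMPLING_PERIODS 10
#define INDEX_MAX_VALUE (SAMPLING_PERIODS - 1)

static unsigned int history[SAMPLING_PERIODS];
static unsigned int idx;

/* Record one load sample and return the average over the window. */
static unsigned int sample_and_average(unsigned int running)
{
	unsigned int avg = 0, i, j;

	history[idx] = running;
	for (i = 0, j = idx; i < SAMPLING_PERIODS; i++) {
		avg += history[j];
		j = (j == 0) ? INDEX_MAX_VALUE : j - 1;
	}
	if (idx++ == INDEX_MAX_VALUE)
		idx = 0;
	return avg / SAMPLING_PERIODS;
}

int main(void)
{
	unsigned int samples[] = { 100, 300, 200, 600 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("avg after %u: %u\n", samples[i],
		       sample_and_average(samples[i]));
	return 0;
}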


Example 4: ixgbe_alloc_q_vector

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = -1;
	int cpu = -1;
	int ring_count, size;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		if (cpu_online(v_idx)) {
			cpu = v_idx;
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	else
		cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
	q_vector->numa_node = node;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
//......... portions of this code omitted .........
Author: 404992361 | Project: mi1_kernel | Lines: 101 | Source file: ixgbe_lib.c
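The node-aware allocation at the top of ixgbe_alloc_q_vector() is a general pattern worth isolating: try to place the memory on the NUMA node of the CPU that will service the vector, and fall back to an unconstrained allocation rather than failing. A hedged kernel-side sketch (helper name hypothetical):

#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/cpumask.h>

/* Allocate zeroed memory near @cpu if it is online, else anywhere. */
static void *kzalloc_near_cpu(size_t size, int cpu)
{
	void *p = NULL;

	if (cpu >= 0 && cpu_online(cpu))
		p = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (!p)
		p = kzalloc(size, GFP_KERNEL);
	return p;
}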


Example 5: tick_nohz_stop_sched_tick

static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	now = tick_nohz_start_idle(cpu, ts);

	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return;

	if (need_resched())
		return;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return;
	}

	ts->idle_calls++;
	
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	if (!ts->tick_stopped && delta_jiffies <= 1)
		goto out;

	
	if ((long)delta_jiffies >= 1) {

		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		if (!ts->tick_stopped) {
			select_nohz_load_balancer(1);
			calc_load_enter_idle();

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		ts->idle_sleeps++;

		
		ts->idle_expires = expires;

		 if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
//......... portions of this code omitted .........
Author: smokin901 | Project: m7-GPE-5.0.1 | Lines: 101 | Source file: tick-sched.c


Example 6: show_cpuinfo

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
	unsigned long n = (unsigned long) v - 1;
	unsigned int version = cpu_data[n].processor_id;
	unsigned int fp_vers = cpu_data[n].fpu_id;
	char fmt [64];
	int i;

#ifdef CONFIG_SMP
	if (!cpu_online(n))
		return 0;
#endif

	/*
	 * For the first processor also print the system type
	 */
	if (n == 0) {
		seq_printf(m, "system type\t\t: %s\n", get_system_type());
		if (mips_get_machine_name())
			seq_printf(m, "machine\t\t\t: %s\n",
				   mips_get_machine_name());
	}

	seq_printf(m, "processor\t\t: %ld\n", n);
	sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
		      cpu_data[n].options & MIPS_CPU_FPU ? "  FPU V%d.%d" : "");
	seq_printf(m, fmt, __cpu_name[n],
		      (version >> 4) & 0x0f, version & 0x0f,
		      (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
	seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
		      cpu_data[n].udelay_val / (500000/HZ),
		      (cpu_data[n].udelay_val / (5000/HZ)) % 100);
	seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
	seq_printf(m, "microsecond timers\t: %s\n",
		      cpu_has_counter ? "yes" : "no");
	seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
	seq_printf(m, "extra interrupt vector\t: %s\n",
		      cpu_has_divec ? "yes" : "no");
	seq_printf(m, "hardware watchpoint\t: %s",
		      cpu_has_watch ? "yes, " : "no\n");
	if (cpu_has_watch) {
		seq_printf(m, "count: %d, address/irw mask: [",
		      cpu_data[n].watch_reg_count);
		for (i = 0; i < cpu_data[n].watch_reg_count; i++)
			seq_printf(m, "%s0x%04x", i ? ", " : "" ,
				cpu_data[n].watch_reg_masks[i]);
		seq_printf(m, "]\n");
	}

	seq_printf(m, "isa\t\t\t:"); 
	if (cpu_has_mips_r1)
		seq_printf(m, " mips1");
	if (cpu_has_mips_2)
		seq_printf(m, "%s", " mips2");
	if (cpu_has_mips_3)
		seq_printf(m, "%s", " mips3");
	if (cpu_has_mips_4)
		seq_printf(m, "%s", " mips4");
	if (cpu_has_mips_5)
		seq_printf(m, "%s", " mips5");
	if (cpu_has_mips32r1)
		seq_printf(m, "%s", " mips32r1");
	if (cpu_has_mips32r2)
		seq_printf(m, "%s", " mips32r2");
	if (cpu_has_mips32r6)
		seq_printf(m, "%s", " mips32r6");
	if (cpu_has_mips64r1)
		seq_printf(m, "%s", " mips64r1");
	if (cpu_has_mips64r2)
		seq_printf(m, "%s", " mips64r2");
	if (cpu_has_mips64r6)
		seq_printf(m, "%s", " mips64r6");
	seq_printf(m, "\n");

	seq_printf(m, "ASEs implemented\t:");
	if (cpu_has_mips16)	seq_printf(m, "%s", " mips16");
	if (cpu_has_mdmx)	seq_printf(m, "%s", " mdmx");
	if (cpu_has_mips3d)	seq_printf(m, "%s", " mips3d");
	if (cpu_has_smartmips)	seq_printf(m, "%s", " smartmips");
	if (cpu_has_dsp)	seq_printf(m, "%s", " dsp");
	if (cpu_has_dsp2)	seq_printf(m, "%s", " dsp2");
	if (cpu_has_mipsmt)	seq_printf(m, "%s", " mt");
	if (cpu_has_mmips)	seq_printf(m, "%s", " micromips");
	if (cpu_has_vz)		seq_printf(m, "%s", " vz");
	if (cpu_has_msa)	seq_printf(m, "%s", " msa");
	if (cpu_has_eva)	seq_printf(m, "%s", " eva");
	if (cpu_has_htw)	seq_printf(m, "%s", " htw");
	seq_printf(m, "\n");

	if (cpu_has_mmips) {
		seq_printf(m, "micromips kernel\t: %s\n",
		      (read_c0_config3() & MIPS_CONF3_ISA_OE) ?  "yes" : "no");
	}
	seq_printf(m, "shadow register sets\t: %d\n",
		      cpu_data[n].srsets);
	seq_printf(m, "kscratch registers\t: %d\n",
		      hweight8(cpu_data[n].kscratch_mask));
	seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
	seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
//......... portions of this code omitted .........
Author: 168519 | Project: linux | Lines: 101 | Source file: proc.c
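The CONFIG_SMP guard above, which skips seq output for offline CPUs, is the standard way /proc/cpuinfo handles hotplug. As a small companion sketch, here is a hypothetical module exposing /proc/online_cpus with the same seq_file machinery (assumes a kernel recent enough to have proc_create_single(), v4.18+):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>

static int online_cpus_show(struct seq_file *m, void *v)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		seq_printf(m, "cpu%u\n", cpu);
	return 0;
}

static int __init online_cpus_init(void)
{
	if (!proc_create_single("online_cpus", 0444, NULL, online_cpus_show))
		return -ENOMEM;
	return 0;
}

static void __exit online_cpus_exit(void)
{
	remove_proc_entry("online_cpus", NULL);
}

module_init(online_cpus_init);
module_exit(online_cpus_exit);
MODULE_LICENSE("GPL");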


Example 7: hotplug_timer

static void hotplug_timer(struct work_struct *work)
{
	struct cpu_hotplug_info tmp_hotplug_info[4];
	int i;
	unsigned int load = 0;
	unsigned int cpu_rq_min=0;
	unsigned long nr_rq_min = -1UL;
	unsigned int select_off_cpu = 0;
	enum flag flag_hotplug;

	mutex_lock(&hotplug_lock);

	if (user_lock == 1)
		goto no_hotplug;

	for_each_online_cpu(i) {
		struct cpu_time_info *tmp_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		tmp_info = &per_cpu(hotplug_cpu_time, i);

		cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

		idle_time = (unsigned int)cputime64_sub(cur_idle_time,
							tmp_info->prev_cpu_idle);
		tmp_info->prev_cpu_idle = cur_idle_time;

		wall_time = (unsigned int)cputime64_sub(cur_wall_time,
							tmp_info->prev_cpu_wall);
		tmp_info->prev_cpu_wall = cur_wall_time;

		if (wall_time < idle_time)
			goto no_hotplug;

		tmp_info->load = 100 * (wall_time - idle_time) / wall_time;

		load += tmp_info->load;
		/*find minimum runqueue length*/
		tmp_hotplug_info[i].nr_running = get_cpu_nr_running(i);

		if (i && nr_rq_min > tmp_hotplug_info[i].nr_running) {
			nr_rq_min = tmp_hotplug_info[i].nr_running;

			cpu_rq_min = i;
		}
	}

	for (i = NUM_CPUS - 1; i > 0; --i) {
		if (cpu_online(i) == 0) {
			select_off_cpu = i;
			break;
		}
	}

	/* standalone hotplug */
	flag_hotplug = standalone_hotplug(load, nr_rq_min, cpu_rq_min);

	/*do not ever hotplug out CPU 0*/
	if ((cpu_rq_min == 0) && (flag_hotplug == HOTPLUG_OUT))
		goto no_hotplug;

	/*cpu hotplug*/
	if (flag_hotplug == HOTPLUG_IN && cpu_online(select_off_cpu) == CPU_OFF) {
		DBG_PRINT("cpu%d turning on!\n", select_off_cpu);
		cpu_up(select_off_cpu);
		DBG_PRINT("cpu%d on\n", select_off_cpu);
		hotpluging_rate = CHECK_DELAY * 4;
	} else if (flag_hotplug == HOTPLUG_OUT && cpu_online(cpu_rq_min) == CPU_ON) {
		DBG_PRINT("cpu%d turnning off!\n", cpu_rq_min);
		cpu_down(cpu_rq_min);
		DBG_PRINT("cpu%d off!\n", cpu_rq_min);
		hotpluging_rate = CHECK_DELAY;
	} 

no_hotplug:

	queue_delayed_work_on(0, hotplug_wq, &hotplug_work, hotpluging_rate);

	mutex_unlock(&hotplug_lock);
}
Author: cattleprod | Project: XCeLL-X2 | Lines: 81 | Source file: stand-hotplug.c
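The load figure used above is just the busy fraction of the elapsed wall time between two samples. Pulled out as a standalone helper (hypothetical, userspace-testable):

#include <stdio.h>

/* load% = 100 * (wall - idle) / wall, over the deltas since last sample */
static unsigned int load_percent(unsigned long long idle_delta,
				 unsigned long long wall_delta)
{
	if (wall_delta == 0 || wall_delta < idle_delta)
		return 0;	/* mirrors the driver's bail-out path */
	return (unsigned int)(100 * (wall_delta - idle_delta) / wall_delta);
}

int main(void)
{
	/* 40 ms idle in a 100 ms window -> 60% load */
	printf("%u%%\n", load_percent(40000, 100000));
	return 0;
}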


Example 8: _cpu_down

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
Author: 383530895 | Project: linux | Lines: 82 | Source file: cpu.c
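_cpu_down() is what eventually runs when userspace writes 0 to a CPU's sysfs online file. A hedged userspace sketch of triggering that path (needs root; cpu0 is typically not hot-removable):

#include <stdio.h>

/* Write 0 or 1 to /sys/devices/system/cpu/cpuN/online. */
static int set_cpu_online(unsigned int cpu, int online)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%u/online", cpu);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", online ? 1 : 0);
	return fclose(f);
}

int main(void)
{
	return set_cpu_online(1, 0) ? 1 : 0;	/* offline cpu1 */
}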


Example 9: switch_L2

int switch_L2(enum options option)
{
	int i, cpu;
	int err = 0;
	int retry=0;    
	u64 t1;
	u64 t2;
	unsigned long mask = (1<<0);

	if(option >= BORROW_NONE) {
		pr_err("wrong option %d\n", option);
		return -1;
	}

	t1 = sched_clock();

	/* bind this process to main cpu */    
	while(sched_setaffinity(0, (struct cpumask*) &mask) < 0)
	{
		pr_err("Could not set cpu 0 affinity for current process(%d).\n", retry);
		retry++;
		if(retry > 100)
		{
			return -1;
		}
	}

	/*disable hot-plug*/
	hps_set_enabled(0);

	is_l2_borrowed = 0;

	for(i=1; i<NR_CPUS; i++)
	{
		if(cpu_online(i))
		{
			err = cpu_down(i);
			if(err < 0)
			{
				pr_err("[L2$ sharing] disable cpu %d failed!\n", i);
				
				hps_set_enabled(1);
				return -1;
			}
		}
	}

	/* disable preemption */
	cpu = get_cpu();
	
	/* enable other clusters' power */
	enable_secondary_clusters_pwr();

	config_L2_size(option);

	if(option == BORROW_L2)
	{
		is_l2_borrowed = 1;        
	}
	else // if(option == RETURN_L2)
	{
		is_l2_borrowed = 0;
		/* Disable other clusters' power */
		disable_secondary_clusters_pwr();
	}

	/*enable hot-plug*/
	hps_set_enabled(1);	
	put_cpu();

	t2 = sched_clock();
    
	if(option == BORROW_L2)
	{
		pr_notice("[%s]: borrow L2$ cost %llu ns\n", __func__, t2 - t1);
	}
	else
	{
		pr_notice("[%s]: return L2$ cost %llu ns\n", __func__, t2 - t1);
	}

	return err;
}
Author: AudioGod | Project: MediaTek-HelioX10-Kernel | Lines: 83 | Source file: l2c_share_normal.c
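The sched_setaffinity() call above is the in-kernel variant; the equivalent userspace pattern for pinning the current process to CPU 0 looks like this (standard glibc API, shown for context):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);	/* allow CPU 0 only */
	if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("pinned to cpu0\n");
	return 0;
}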


Example 10: __cpu_hotplug

static int __ref __cpu_hotplug(bool out_flag, enum hotplug_cmd cmd)
{
	int i = 0;
	int ret = 0;

	if (exynos_dm_hotplug_disabled())
		return 0;

#if defined(CONFIG_SCHED_HMP)
	if (out_flag) {
		if (do_disable_hotplug)
			goto blk_out;

		if (cmd == CMD_BIG_OUT && !in_low_power_mode) {
			for (i = setup_max_cpus - 1; i >= NR_CA7; i--) {
				if (cpu_online(i)) {
					ret = cpu_down(i);
					if (ret)
						goto blk_out;
				}
			}
		} else {
			for (i = setup_max_cpus - 1; i > 0; i--) {
				if (cpu_online(i)) {
					ret = cpu_down(i);
					if (ret)
						goto blk_out;
				}
			}
		}
	} else {
		if (in_suspend_prepared)
			goto blk_out;

		if (cmd == CMD_BIG_IN) {
			if (in_low_power_mode)
				goto blk_out;

			for (i = NR_CA7; i < setup_max_cpus; i++) {
				if (!cpu_online(i)) {
					ret = cpu_up(i);
					if (ret)
						goto blk_out;
				}
			}
		} else {
			if ((big_hotpluged && !do_disable_hotplug) ||
				(cmd == CMD_LITTLE_IN)) {
				for (i = 1; i < NR_CA7; i++) {
					if (!cpu_online(i)) {
						ret = cpu_up(i);
						if (ret)
							goto blk_out;
					}
				}
			} else {
				if (lcd_is_on) {
					for (i = NR_CA7; i < setup_max_cpus; i++) {
						if (!cpu_online(i)) {
							if (i == NR_CA7)
								set_hmp_boostpulse(100000);

							ret = cpu_up(i);
							if (ret)
								goto blk_out;
						}
					}

					for (i = 1; i < NR_CA7; i++) {
						if (!cpu_online(i)) {
							ret = cpu_up(i);
							if (ret)
								goto blk_out;
						}
					}
				} else {
					for (i = 1; i < setup_max_cpus; i++) {
						if (!cpu_online(i)) {
							ret = cpu_up(i);
							if (ret)
								goto blk_out;
						}
					}
				}
			}
		}
	}
#else
	if (out_flag) {
		if (do_disable_hotplug)
			goto blk_out;

		for (i = setup_max_cpus - 1; i > 0; i--) {
			if (cpu_online(i)) {
				ret = cpu_down(i);
				if (ret)
					goto blk_out;
			}
		}
	} else {
//......... portions of this code omitted .........
Author: 4pao | Project: android_kernel_hardkernel_odroidxu3 | Lines: 101 | Source file: dm_cpu_hotplug.c


Example 11: smp_callin

static void __devinit
smp_callin (void)
{
#ifdef XEN
	/* work around for spinlock irq assert. */
	unsigned long flags;
#endif
	int cpuid, phys_id;
	extern void ia64_init_itm(void);

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();

	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	fix_b0_for_bsp();

#ifdef XEN
	notify_cpu_starting(cpuid);
	lock_ipi_calllock(&flags);
#else
	lock_ipi_calllock();
#endif
	cpu_set(cpuid, cpu_online_map);
#ifdef XEN
	unlock_ipi_calllock(flags);
#else
	unlock_ipi_calllock();
#endif
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;

	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP.  Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(),
		 * which calls spin_unlock_bh(), which calls local_bh_enable(),
		 * which bugs out if irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with BP.\n");
		ia64_sync_itc(0);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();
#ifndef XEN
	calibrate_delay();
#endif
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
Author: Angel666 | Project: android_hardware_intel | Lines: 79 | Source file: smpboot.c


Example 12: ERR_PTR


//......... portions of this code omitted .........
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
	p->ioprio = current->ioprio;

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids alot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * it's process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
 	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_cleanup_namespaces;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
Author: cilynx | Project: dd-wrt | Lines: 67 | Source file: fork.c


Example 13: show_cpuinfo

/*
 *	Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
    struct cpuinfo_m32r *c = v;
    unsigned long cpu = c - cpu_data;

#ifdef CONFIG_SMP
    if (!cpu_online(cpu))
        return 0;
#endif	/* CONFIG_SMP */

    seq_printf(m, "processor\t: %ld\n", cpu);

#if defined(CONFIG_CHIP_VDEC2)
    seq_printf(m, "cpu family\t: VDEC2\n"
               "cache size\t: Unknown\n");
#elif defined(CONFIG_CHIP_M32700)
    seq_printf(m,"cpu family\t: M32700\n"
               "cache size\t: I-8KB/D-8KB\n");
#elif defined(CONFIG_CHIP_M32102)
    seq_printf(m,"cpu family\t: M32102\n"
               "cache size\t: I-8KB\n");
#elif defined(CONFIG_CHIP_OPSP)
    seq_printf(m,"cpu family\t: OPSP\n"
               "cache size\t: I-8KB/D-8KB\n");
#elif defined(CONFIG_CHIP_MP)
    seq_printf(m, "cpu family\t: M32R-MP\n"
               "cache size\t: I-xxKB/D-xxKB\n");
#elif  defined(CONFIG_CHIP_M32104)
    seq_printf(m,"cpu family\t: M32104\n"
               "cache size\t: I-8KB/D-8KB\n");
#else
    seq_printf(m, "cpu family\t: Unknown\n");
#endif
    seq_printf(m, "bogomips\t: %lu.%02lu\n",
               c->loops_per_jiffy/(500000/HZ),
               (c->loops_per_jiffy/(5000/HZ)) % 100);
#if defined(CONFIG_PLAT_MAPPI)
    seq_printf(m, "Machine\t\t: Mappi Evaluation board\n");
#elif defined(CONFIG_PLAT_MAPPI2)
    seq_printf(m, "Machine\t\t: Mappi-II Evaluation board\n");
#elif defined(CONFIG_PLAT_MAPPI3)
    seq_printf(m, "Machine\t\t: Mappi-III Evaluation board\n");
#elif defined(CONFIG_PLAT_M32700UT)
    seq_printf(m, "Machine\t\t: M32700UT Evaluation board\n");
#elif defined(CONFIG_PLAT_OPSPUT)
    seq_printf(m, "Machine\t\t: OPSPUT Evaluation board\n");
#elif defined(CONFIG_PLAT_USRV)
    seq_printf(m, "Machine\t\t: uServer\n");
#elif defined(CONFIG_PLAT_OAKS32R)
    seq_printf(m, "Machine\t\t: OAKS32R\n");
#elif  defined(CONFIG_PLAT_M32104UT)
    seq_printf(m, "Machine\t\t: M3T-M32104UT uT Engine board\n");
#else
    seq_printf(m, "Machine\t\t: Unknown\n");
#endif

#define PRINT_CLOCK(name, value)				\
	seq_printf(m, name " clock\t: %d.%02dMHz\n",		\
		((value) / 1000000), ((value) % 1000000)/10000)

    PRINT_CLOCK("CPU", (int)c->cpu_clock);
    PRINT_CLOCK("Bus", (int)c->bus_clock);

    seq_printf(m, "\n");

    return 0;
}
Author: BackupTheBerlios | Project: arp2-svn | Lines: 70 | Source file: setup.c


Example 14: cpufreq_governor_interactivex

static int cpufreq_governor_interactivex(struct cpufreq_policy *new_policy,
		unsigned int event)
{
	int rc;
	unsigned int min_freq = ~0;
	unsigned int max_freq = 0;
	unsigned int i;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(new_policy->cpu))
			return -EINVAL;

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactivex_attr_group);
		if (rc)
			return rc;

		pm_idle_old = pm_idle;
		pm_idle = cpufreq_idle;
		policy = new_policy;
		enabled = 1;
        	register_early_suspend(&interactivex_power_suspend);
        	pr_info("[imoseyon] interactiveX active\n");
		freq_table = cpufreq_frequency_get_table(new_policy->cpu);
		for (i = 0; (freq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
			unsigned int freq = freq_table[i].frequency;
			if (freq == CPUFREQ_ENTRY_INVALID) {
				continue;
			}
			if (freq < min_freq)	
				min_freq = freq;
			if (freq > max_freq)
				max_freq = freq;
		}
		resum_speed = freq_table[(i-1)/2].frequency > min_freq ? freq_table[(i-1)/2].frequency : max_freq;		//Value in midrange of available CPU frequencies if sufficient number of freq bins available
		freq_threshld = max_freq;
		break;

	case CPUFREQ_GOV_STOP:
		if (atomic_dec_return(&active_count) > 1)
			return 0;

		sysfs_remove_group(cpufreq_global_kobject,
				&interactivex_attr_group);

		pm_idle = pm_idle_old;
		del_timer(&per_cpu(cpu_timer, new_policy->cpu));
		enabled = 0;
        	unregister_early_suspend(&interactivex_power_suspend);
        	pr_info("[imoseyon] interactiveX inactive\n");
			break;

	case CPUFREQ_GOV_LIMITS:
		if (new_policy->max < new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->max, CPUFREQ_RELATION_H);
		else if (new_policy->min > new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}
Author: Oleg-k | Project: Cranium_Kernel | Lines: 72 | Source file: cpufreq_interactivex.c


Example 15: hps_algo_hmp

/*
 * hps algo - hmp
 */
void hps_algo_hmp(void)
{
    unsigned int cpu;
    unsigned int val;
    struct cpumask little_online_cpumask;
    struct cpumask big_online_cpumask;
    unsigned int little_num_base, little_num_limit, little_num_online;
    unsigned int big_num_base, big_num_limit, big_num_online;
    //log purpose
    char str1[64];
    char str2[64];
    int i, j;
    char * str1_ptr = str1;
    char * str2_ptr = str2;

    /*
     * run algo or not by hps_ctxt.enabled
     */
    if (!hps_ctxt.enabled)
    {
        atomic_set(&hps_ctxt.is_ondemand, 0);
        return;
    }

    /*
     * calculate cpu loading
     */
    hps_ctxt.cur_loads = 0;
    str1_ptr = str1;
    str2_ptr = str2;

    for_each_possible_cpu(cpu)
    {
        per_cpu(hps_percpu_ctxt, cpu).load = hps_cpu_get_percpu_load(cpu);
        hps_ctxt.cur_loads += per_cpu(hps_percpu_ctxt, cpu).load;

        if (hps_ctxt.cur_dump_enabled)
        {
            if (cpu_online(cpu))
                i = sprintf(str1_ptr, "%4u", 1);
            else
                i = sprintf(str1_ptr, "%4u", 0);
            str1_ptr += i;
            j = sprintf(str2_ptr, "%4u", per_cpu(hps_percpu_ctxt, cpu).load);
            str2_ptr += j;
        }
    }
    hps_ctxt.cur_nr_heavy_task = hps_cpu_get_nr_heavy_task();
    hps_cpu_get_tlp(&hps_ctxt.cur_tlp, &hps_ctxt.cur_iowait);

    /*
     * algo - begin
     */
    mutex_lock(&hps_ctxt.lock);
    hps_ctxt.action = ACTION_NONE;
    atomic_set(&hps_ctxt.is_ondemand, 0);

    /*
     * algo - get boundary
     */
    little_num_limit = min(hps_ctxt.little_num_limit_thermal, hps_ctxt.little_num_limit_low_battery);
    little_num_limit = min3(little_num_limit, hps_ctxt.little_num_limit_ultra_power_saving, hps_ctxt.little_num_limit_power_serv);
    little_num_base = hps_ctxt.little_num_base_perf_serv;
    cpumask_and(&little_online_cpumask, &hps_ctxt.little_cpumask, cpu_online_mask);
    little_num_online = cpumask_weight(&little_online_cpumask);
    //TODO: no need if is_hmp
    big_num_limit = min(hps_ctxt.big_num_limit_thermal, hps_ctxt.big_num_limit_low_battery);
    big_num_limit = min3(big_num_limit, hps_ctxt.big_num_limit_ultra_power_saving, hps_ctxt.big_num_limit_power_serv);
    big_num_base = max(hps_ctxt.cur_nr_heavy_task, hps_ctxt.big_num_base_perf_serv);
    cpumask_and(&big_online_cpumask, &hps_ctxt.big_cpumask, cpu_online_mask);
    big_num_online = cpumask_weight(&big_online_cpumask);
    if (hps_ctxt.cur_dump_enabled)
    {
        hps_debug(" CPU:%s\n", str1);
        hps_debug("LOAD:%s\n", str2);
        hps_debug("loads(%u), hvy_tsk(%u), tlp(%u), iowait(%u), limit_t(%u)(%u), limit_lb(%u)(%u), limit_ups(%u)(%u), limit_pos(%u)(%u), base_pes(%u)(%u)\n", 
            hps_ctxt.cur_loads, hps_ctxt.cur_nr_heavy_task, hps_ctxt.cur_tlp, hps_ctxt.cur_iowait,
            hps_ctxt.little_num_limit_thermal, hps_ctxt.big_num_limit_thermal,
            hps_ctxt.little_num_limit_low_battery, hps_ctxt.big_num_limit_low_battery,
            hps_ctxt.little_num_limit_ultra_power_saving, hps_ctxt.big_num_limit_ultra_power_saving,
            hps_ctxt.little_num_limit_power_serv, hps_ctxt.big_num_limit_power_serv,
            hps_ctxt.little_num_base_perf_serv, hps_ctxt.big_num_base_perf_serv);
    }

//ALGO_LIMIT:
    /*
     * algo - thermal, low battery
     */
    if (big_num_online > big_num_limit)
    {
        val =  big_num_online - big_num_limit;
        for (cpu = hps_ctxt.big_cpu_id_max; cpu >= hps_ctxt.big_cpu_id_min; --cpu)
        {
            if (cpumask_test_cpu(cpu, &big_online_cpumask))
            {
                cpu_down(cpu);
                cpumask_clear_cpu(cpu, &big_online_cpumask);
//......... portions of this code omitted .........
Author: John677 | Project: Kernal_k3note | Lines: 101 | Source file: mt_hotplug_strategy_algo.c


Example 16: cpufreq_frequency_table_target

int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table,
				   unsigned int target_freq,
				   unsigned int relation,
				   unsigned int *index)
{
	struct cpufreq_frequency_table optimal = {
		.index = ~0,
		.frequency = 0,
	};
	struct cpufreq_frequency_table suboptimal = {
		.index = ~0,
		.frequency = 0,
	};
	unsigned int i;

	pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
					target_freq, relation, policy->cpu);

	switch (relation) {
	case CPUFREQ_RELATION_H:
		suboptimal.frequency = ~0;
		break;
	case CPUFREQ_RELATION_L:
		optimal.frequency = ~0;
		break;
	}

	if (!cpu_online(policy->cpu))
		return -EINVAL;

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((freq < policy->min) || (freq > policy->max))
			continue;
		switch (relation) {
		case CPUFREQ_RELATION_H:
			if (freq <= target_freq) {
				if (freq >= optimal.frequency) {
					optimal.frequency = freq;
					optimal.index = i;
				}
			} else {
				if (freq <= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.index = i;
				}
			}
			break;
		case CPUFREQ_RELATION_L:
			if (freq >= target_freq) {
				if (freq <= optimal.frequency) {
					optimal.frequency = freq;
					optimal.index = i;
				}
			} else {
				if (freq >= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.index = i;
				}
			}
			break;
		}
	}
	if (optimal.index > i) {
		if (suboptimal.index > i)
			return -EINVAL;
		*index = suboptimal.index;
	} else
		*index = optimal.index;

	pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
		table[*index].index);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);

static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
{
	unsigned int i = 0;
	unsigned int cpu = policy->cpu;
	ssize_t count = 0;
	struct cpufreq_frequency_table *table;

	if (!per_cpu(cpufreq_show_table, cpu))
		return -ENODEV;

	table = per_cpu(cpufreq_show_table, cpu);

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;
		count += sprintf(&buf[count], "%d ", table[i].frequency);
	}
	count += sprintf(&buf[count], "\n");

//......... portions of this code omitted .........
Author: Albinoman887 | Project: pyramid-3.4.10 | Lines: 101 | Source file: freq_table.c
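The optimal/suboptimal bookkeeping above reduces to: for RELATION_H, pick the highest in-range frequency at or below the target (falling back to the lowest one above it); for RELATION_L, the mirror image. A standalone restatement (the sentinel and names are this sketch's own, not the cpufreq API):

#include <stdio.h>

#define TABLE_END 0u	/* sketch-local sentinel, not CPUFREQ_TABLE_END */

enum relation { RELATION_H, RELATION_L };

static unsigned int pick_freq(const unsigned int *table,
			      unsigned int target, enum relation rel)
{
	unsigned int optimal = (rel == RELATION_L) ? ~0u : 0;
	unsigned int suboptimal = (rel == RELATION_H) ? ~0u : 0;
	unsigned int i, f;

	for (i = 0; (f = table[i]) != TABLE_END; i++) {
		if (rel == RELATION_H) {
			if (f <= target) {
				if (f >= optimal)
					optimal = f;
			} else if (f <= suboptimal) {
				suboptimal = f;
			}
		} else {
			if (f >= target) {
				if (f <= optimal)
					optimal = f;
			} else if (f >= suboptimal) {
				suboptimal = f;
			}
		}
	}
	/* fall back to the suboptimal side if nothing matched */
	if (rel == RELATION_H && optimal == 0)
		return suboptimal;
	if (rel == RELATION_L && optimal == ~0u)
		return suboptimal;
	return optimal;
}

int main(void)
{
	unsigned int table[] = { 300000, 600000, 1200000, TABLE_END };

	printf("%u\n", pick_freq(table, 800000, RELATION_H)); /* 600000 */
	printf("%u\n", pick_freq(table, 800000, RELATION_L)); /* 1200000 */
	return 0;
}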


Example 17: _cpu_down

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
Author: dwander | Project: eas-backports | Lines: 62 | Source file: cpu.c

