
C++ cpumask_empty Function Code Examples


This article collects typical usage examples of the cpumask_empty function in C/C++. If you are wondering what cpumask_empty does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.



Listed below are 20 code examples of the cpumask_empty function, sorted by popularity by default. Upvoting the examples you find useful helps the site recommend better C++ code examples. A short usage sketch precedes the collected examples.
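
Before the collected examples, here is a minimal sketch of the basic pattern (a hypothetical kernel-module snippet, not taken from any of the projects below; it assumes a Linux kernel build environment): allocate a cpumask, check cpumask_empty() on the fresh mask, populate it from cpu_online_mask, and check again.

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init cpumask_empty_demo_init(void)
{
	cpumask_var_t mask;

	/* zalloc_cpumask_var() hands back an all-clear mask. */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* True: no bit is set yet. */
	pr_info("fresh mask empty: %d\n", cpumask_empty(mask));

	/* Copy in the online CPUs; on a running system this sets at least one bit. */
	cpumask_copy(mask, cpu_online_mask);

	/* False now: the mask contains every online CPU. */
	pr_info("mask empty after copy: %d\n", cpumask_empty(mask));

	free_cpumask_var(mask);
	return 0;
}

static void __exit cpumask_empty_demo_exit(void)
{
}

module_init(cpumask_empty_demo_init);
module_exit(cpumask_empty_demo_exit);
MODULE_LICENSE("GPL");

The same check-before-acting shape recurs in every example below: build or receive a mask, bail out (or shut a device down) when cpumask_empty() reports that no CPU is left in it, and only then do the per-CPU work.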

Example 1: irq_move_masked_irq

void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = desc->irq_data.chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	irqd_clr_move_pending(&desc->irq_data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in a edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid i guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}
Developer ID: 020gzh, Project: linux, Lines of code: 43, Source file: migration.c


Example 2: tick_shutdown_broadcast

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
Developer ID: Master-Traders, Project: linux, Lines of code: 22, Source file: tick-broadcast.c


Example 3: _percpu_write_lock

void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
                percpu_rwlock_t *percpu_rwlock)
{
    unsigned int cpu;
    cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);

    /* Validate the correct per_cpudata variable has been provided. */
    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);

    /*
     * First take the write lock to protect against other writers or slow
     * path readers.
     */
    write_lock(&percpu_rwlock->rwlock);

    /* Now set the global variable so that readers start using read_lock. */
    percpu_rwlock->writer_activating = 1;
    smp_mb();

    /* Using a per cpu cpumask is only safe if there is no nesting. */
    ASSERT(!in_irq());
    cpumask_copy(rwlock_readers, &cpu_online_map);

    /* Check if there are any percpu readers in progress on this rwlock. */
    for ( ; ; )
    {
        for_each_cpu(cpu, rwlock_readers)
        {
            /*
             * Remove any percpu readers not contending on this rwlock
             * from our check mask.
             */
            if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
                __cpumask_clear_cpu(cpu, rwlock_readers);
        }
        /* Check if we've cleared all percpu readers from check mask. */
        if ( cpumask_empty(rwlock_readers) )
            break;
        /* Give the coherency fabric a break. */
        cpu_relax();
    };
}
Developer ID: 0day-ci, Project: xen, Lines of code: 42, Source file: rwlock.c


Example 4: flush_area_mask

void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
{
    ASSERT(local_irq_is_enabled());

    if ( cpumask_test_cpu(smp_processor_id(), mask) )
        flush_area_local(va, flags);

    if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
    {
        spin_lock(&flush_lock);
        cpumask_and(&flush_cpumask, mask, &cpu_online_map);
        cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
        flush_va      = va;
        flush_flags   = flags;
        send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
        while ( !cpumask_empty(&flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}
Developer ID: CrazyXen, Project: XEN_CODE, Lines of code: 20, Source file: smp.c


Example 5: tick_do_broadcast

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * We only run the local handler, if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *	     expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}
Developer ID: markus-oberhumer, Project: linux, Lines of code: 43, Source file: tick-broadcast.c


Example 6: rtas_cpu_state_change_mask

/* On return cpumask will be altered to indicate CPUs changed.
 * CPUs with states changed will be set in the mask,
 * CPUs with status unchanged will be unset in the mask. */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				cpumask_var_t cpus)
{
	int cpu;
	int cpuret = 0;
	int ret = 0;

	if (cpumask_empty(cpus))
		return 0;

	for_each_cpu(cpu, cpus) {
		switch (state) {
		case DOWN:
			cpuret = cpu_down(cpu);
			break;
		case UP:
			cpuret = cpu_up(cpu);
			break;
		}
		if (cpuret) {
			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
					__func__,
					((state == UP) ? "up" : "down"),
					cpu, cpuret);
			if (!ret)
				ret = cpuret;
			if (state == UP) {
				/* clear bits for unchanged cpus, return */
				cpumask_shift_right(cpus, cpus, cpu);
				cpumask_shift_left(cpus, cpus, cpu);
				break;
			} else {
				/* clear bit for unchanged cpu, continue */
				cpumask_clear_cpu(cpu, cpus);
			}
		}
	}

	return ret;
}
Developer ID: cloudlinuxadmin, Project: cl7-kernel, Lines of code: 43, Source file: rtas.c


Example 7: get_cur_val

static u32 get_cur_val(const cpumask_t *mask)
{
    struct cpufreq_policy *policy;
    struct processor_performance *perf;
    struct drv_cmd cmd;
    unsigned int cpu = smp_processor_id();

    if (unlikely(cpumask_empty(mask)))
        return 0;

    if (!cpumask_test_cpu(cpu, mask))
        cpu = cpumask_first(mask);
    if (cpu >= nr_cpu_ids || !cpu_online(cpu))
        return 0;

    policy = per_cpu(cpufreq_cpu_policy, cpu);
    if (!policy || !cpufreq_drv_data[policy->cpu])
        return 0;    

    switch (cpufreq_drv_data[policy->cpu]->arch_cpu_flags) {
    case SYSTEM_INTEL_MSR_CAPABLE:
        cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
        cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
        break;
    case SYSTEM_IO_CAPABLE:
        cmd.type = SYSTEM_IO_CAPABLE;
        perf = cpufreq_drv_data[policy->cpu]->acpi_data;
        cmd.addr.io.port = perf->control_register.address;
        cmd.addr.io.bit_width = perf->control_register.bit_width;
        break;
    default:
        return 0;
    }

    cmd.mask = cpumask_of(cpu);

    drv_read(&cmd);
    return cmd.val;
}
Developer ID: 0day-ci, Project: xen, Lines of code: 39, Source file: cpufreq.c


Example 8: enable_nonboot_cpus

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;
#if defined (CONFIG_MACH_APQ8064_OMEGA) || defined (CONFIG_MACH_APQ8064_OMEGAR)
	static int first = 0;

	if (!first) {
		init_timer(&boost_freq_timer);
		first = 1;
	}
	if (timer_pending(&boost_freq_timer))
		del_timer(&boost_freq_timer);
	boost_freq_timer.function = boost_freq_timer_cb;
	boost_freq_timer.expires =
		jiffies + msecs_to_jiffies(BOOST_FREQ_TIME_MS);
	add_timer(&boost_freq_timer);
	boost_freq = 1;
#endif

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
Developer ID: Loller79, Project: Solid_Kernel-STOCK-KK-CAF, Lines of code: 37, Source file: cpu.c


Example 9: opal_flash_term_callback

/* This gets called just before system reboots */
void opal_flash_term_callback(void)
{
	struct cpumask mask;

	if (update_flash_data.status != FLASH_IMG_READY)
		return;

	pr_alert("FLASH: Flashing new firmware\n");
	pr_alert("FLASH: Image is %u bytes\n", image_data.size);
	pr_alert("FLASH: Performing flash and reboot/shutdown\n");
	pr_alert("FLASH: This will take several minutes. Do not power off!\n");

	/* Small delay to help getting the above message out */
	msleep(500);

	/* Return secondary CPUs to firmware */
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_call_function_many(&mask,
				       flash_return_cpu, NULL, false);
	/* Hard disable interrupts */
	hard_irq_disable();
}
Developer ID: mikuhatsune001, Project: linux2.6.32, Lines of code: 25, Source file: opal-flash.c


Example 10: __bind_irq_vector

static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu(cpu, &mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
	return 0;
}
Developer ID: 0x7f454c46, Project: linux, Lines of code: 24, Source file: irq_ia64.c


Example 11: arch_get_hmp_domains

void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
{
	struct hmp_domain *domain;

	arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);

	/*
	 * Initialize hmp_domains
	 * Must be ordered with respect to compute capacity.
	 * Fastest domain at head of list.
	 */
	if(!cpumask_empty(&hmp_slow_cpu_mask)) {
		domain = (struct hmp_domain *)
			kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
		cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
		cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
		list_add(&domain->hmp_domains, hmp_domains_list);
	}
	domain = (struct hmp_domain *)
		kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
	cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
	cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
	list_add(&domain->hmp_domains, hmp_domains_list);
}
Developer ID: Fnoeoe, Project: android_kernel_samsung_grandprimevelte, Lines of code: 24, Source file: topology.c


Example 12: enable_nonboot_cpus

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}
Developer ID: tsj123, Project: androidx86_remix, Lines of code: 24, Source file: cpu.c


Example 13: tick_handle_oneshot_broadcast

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event = KTIME_MAX;
	next_event = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		/*
		 * Required for !SMP because for_each_cpu() reports
		 * unconditionally CPU0 as set on UP kernels.
		 */
		if (!IS_ENABLED(CONFIG_SMP) &&
		    cpumask_empty(tick_broadcast_oneshot_mask))
			break;

		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event <= now) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event < next_event) {
			next_event = td->evtdev->next_event;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}
Developer ID: markus-oberhumer, Project: linux, Lines of code: 82, Source file: tick-broadcast.c


Example 14: tick_broadcast_control

/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;
	unsigned long flags;

	/* Protects also the local clockevent device. */
	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * Only shutdown the cpu local device, if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not a hrtimer based one
			 * - the broadcast device is in periodic mode to
			 *   avoid a hickup during switch to oneshot mode
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (bc) {
		if (cpumask_empty(tick_broadcast_mask)) {
			if (!bc_stopped)
				clockevents_shutdown(bc);
		} else if (bc_stopped) {
			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				tick_broadcast_start_periodic(bc);
			else
				tick_broadcast_setup_oneshot(bc);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
Developer ID: markus-oberhumer, Project: linux, Lines of code: 78, Source file: tick-broadcast.c


Example 15: tick_device_uses_broadcast

/*
 * Check, if the device is disfunctional and a place holder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device, if
			 * the broadcast device exists and is not
			 * hrtimer based.
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
Developer ID: markus-oberhumer, Project: linux, Lines of code: 84, Source file: tick-broadcast.c


Example 16: check_irq_vectors_for_cpu_disable

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	int irq, cpu;
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpu_clear(this_cpu, online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq >= 0) {
			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			cpumask_copy(&affinity_new, data->affinity);
			cpu_clear(this_cpu, affinity_new);

			/* Do not count inactive or per-cpu irqs. */
			if (!irq_has_action(irq) || irqd_is_per_cpu(data))
				continue;

			/*
			 * A single irq may be mapped to multiple
			 * cpu's vector_irq[] (for example IOAPIC cluster
			 * mode).  In this case we have two
			 * possibilities:
			 *
			 * 1) the resulting affinity mask is empty; that is
			 * this the down'd cpu is the last cpu in the irq's
			 * affinity mask, or
			 *
			 * 2) the resulting affinity mask is no longer
			 * a subset of the online cpus but the affinity
			 * mask is not zero; that is the down'd cpu is the
			 * last online cpu in a user set affinity mask.
			 */
			if (cpumask_empty(&affinity_new) ||
			    !cpumask_subset(&affinity_new, &online_new))
				this_count++;
		}
	}

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] < 0)
				count++;
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}
	return 0;
}
Developer ID: AdaLovelance, Project: lxcGrsecKernels, Lines of code: 68, Source file: irq.c


Example 17: drv_read

static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.func.read = data->cpu_freq_read,
	};
	int err;

	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
	return cmd.val;
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->func.write(cmd->reg, cmd->val);
}

static void drv_write(struct acpi_cpufreq_data *data,
		      const struct cpumask *mask, u32 val)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.val = val,
		.func.write = data->cpu_freq_write,
	};
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);

	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();
}

static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	u32 val;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	val = drv_read(data, mask);

	pr_debug("get_cur_val = %u\n", val);

	return val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !policy->freq_table))
		return 0;

	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

static unsigned int check_freqs(struct cpufreq_policy *policy,
				const struct cpumask *mask, unsigned int freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
//......... part of the code omitted here .........
Developer ID: kishore1006, Project: linux, Lines of code: 101, Source file: acpi-cpufreq.c


Example 18: kernel_physical_mapping_init


//......... part of the code omitted here .........
		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	address = MEM_SV_INTRPT;
	pmd = get_pmd(pgtables, address);
	pfn = 0;  /* code starts at PA 0 */
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
Developer ID: AllenWeb, Project: linux, Lines of code: 67, Source file: init.c


Example 19: tick_do_broadcast_on_off

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (tick_broadcast_force)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
Developer ID: GSandeep24, Project: android_kernel_lenovo_msm8916, Lines of code: 67, Source file: tick-broadcast.c


Example 20: viridian_hypercall


//......... part of the code omitted here .........
    case HvNotifyLongSpinWait:
        /*
         * See Microsoft Hypervisor Top Level Spec. section 18.5.1.
         */
        perfc_incr(mshv_call_long_wait);
        do_sched_op(SCHEDOP_yield, guest_handle_from_ptr(NULL, void));
        status = HV_STATUS_SUCCESS;
        break;

    case HvFlushVirtualAddressSpace:
    case HvFlushVirtualAddressList:
    {
        cpumask_t *pcpu_mask;
        struct vcpu *v;
        struct {
            uint64_t address_space;
            uint64_t flags;
            uint64_t vcpu_mask;
        } input_params;

        /*
         * See Microsoft Hypervisor Top Level Spec. sections 12.4.2
         * and 12.4.3.
         */
        perfc_incr(mshv_call_flush);

        /* These hypercalls should never use the fast-call convention. */
        status = HV_STATUS_INVALID_PARAMETER;
        if ( input.fast )
            break;

        /* Get input parameters. */
        if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa,
                                      sizeof(input_params)) != HVMCOPY_okay )
            break;

        /*
         * It is not clear from the spec. if we are supposed to
         * include current virtual CPU in the set or not in this case,
         * so err on the safe side.
         */
        if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
            input_params.vcpu_mask = ~0ul;

        pcpu_mask = &this_cpu(ipi_cpumask);
        cpumask_clear(pcpu_mask);

        /*
         * For each specified virtual CPU flush all ASIDs to invalidate
         * TLB entries the next time it is scheduled and then, if it
         * is currently running, add its physical CPU to a mask of
         * those which need to be interrupted to force a flush.
         */
        for_each_vcpu ( currd, v )
        {
            if ( v->vcpu_id >= (sizeof(input_params.vcpu_mask) * 8) )
                break;

            if ( !(input_params.vcpu_mask & (1ul << v->vcpu_id)) )
                continue;

            hvm_asid_flush_vcpu(v);
            if ( v != curr && v->is_running )
                __cpumask_set_cpu(v->processor, pcpu_mask);
        }

        /*
         * Since ASIDs have now been flushed it just remains to
         * force any CPUs currently running target vCPUs out of non-
         * root mode. It's possible that re-scheduling has taken place
         * so we may unnecessarily IPI some CPUs.
         */
        if ( !cpumask_empty(pcpu_mask) )
            smp_send_event_check_mask(pcpu_mask);

        output.rep_complete = input.rep_count;

        status = HV_STATUS_SUCCESS;
        break;
    }

    default:
        status = HV_STATUS_INVALID_HYPERCALL_CODE;
        break;
    }

out:
    output.result = status;
    switch (mode) {
    case 8:
        regs->rax = output.raw;
        break;
    default:
        regs->rdx = output.raw >> 32;
        regs->rax = (uint32_t)output.raw;
        break;
    }

    return HVM_HCALL_completed;
}
Developer ID: djs55, Project: xen, Lines of code: 101, Source file: viridian.c



Note: The cpumask_empty function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open source projects contributed by various developers; copyright of the source code remains with the original authors. Follow the corresponding project's License when redistributing or using the code; do not reproduce without permission.

