
C++ cpumask_clear Function Code Examples


This article collects typical usage examples of the C++ cpumask_clear function, drawn from real projects. If you are unsure what cpumask_clear does, how to call it, or what idiomatic uses look like, the curated code examples below should help.



Twenty code examples of cpumask_clear are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better C++ samples.
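
Most of the examples below follow the same basic pattern: clear the mask, set the CPUs of interest, then pass the mask to an API or iterate it. As orientation, here is a minimal sketch of that pattern in kernel-style C; it is illustrative only and not taken from any of the projects below.

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Build a mask containing the first two present CPUs and walk it. */
static void cpumask_clear_demo(void)
{
	struct cpumask mask;
	int cpu;

	cpumask_clear(&mask);			/* start from an empty mask */
	cpumask_set_cpu(0, &mask);		/* add CPU 0 */
	if (num_present_cpus() > 1)
		cpumask_set_cpu(1, &mask);	/* add CPU 1 if present */

	for_each_cpu(cpu, &mask)		/* iterate only the CPUs we set */
		pr_info("selected cpu %d\n", cpu);
}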

Example 1: config_L2

int config_L2(int size)
{
    int i;
    struct cpumask mask; 
    int cur_size = get_l2c_size();
	
    if (size != SZ_256K && size != SZ_512K) {
        printk("inlvalid input size %x\n", size);
        return -1;
    }
    if (in_interrupt()) {
        printk(KERN_ERR "Cannot use %s in interrupt/softirq context\n",
               __func__);
        return -1;
    }
    if (size == cur_size) {
        printk("Config L2 size %x is equal to current L2 size %x\n",
               size, cur_size);
        return 0;
    }
    cpumask_clear(&mask);
	for(i = 0; i < get_cluster_core_count(); i++)
		cpumask_set_cpu(i, &mask);
	
    atomic_set(&L1_flush_done, 0);
    get_online_cpus();
    //printk("[Config L2] Config L2 start, on line cpu = %d\n",num_online_cpus());    
    
    /* disable cache and flush L1 on Cluster0*/
    on_each_cpu_mask(&mask, (smp_call_func_t)atomic_flush, NULL, true);
    //while(atomic_read(&L1_flush_done) != num_online_cpus());    
    //printk("[Config L2] L1 flush done\n");
    
    /* Only need to flush Cluster0's L2 */    
    smp_call_function_any(&mask, (smp_call_func_t)inner_dcache_flush_L2, NULL, true);
    //printk("[Config L2] L2 flush done\n");
    
    /* change L2 size */    
    config_L2_size(size);
    //printk("[Config L2] Change L2 flush size done(size = %d)\n",size);
        
    /* enable Cluster0's cache */
    atomic_set(&L1_flush_done, 0);
    on_each_cpu_mask(&mask, (smp_call_func_t)__enable_cache, NULL, true);
    
    //update cr_alignment for other kernel function usage 
    cr_alignment = cr_alignment | (0x4); //C1_CBIT
    put_online_cpus();
    printk("Config L2 size %x done\n", size);
    return 0;
}
Developer: Elnter, Project: j608_kernel, Lines: 51, Source: mt_l2c.c


Example 2: desc_smp_init

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}
Developer: avagin, Project: linux, Lines: 14, Source: irqdesc.c


Example 3: constrict_mask_to_node

/*
 * On XLP, the cpumask used to set the affinity of an interrupt cannot span
 * multiple nodes. This is not a h/w restriction, but the effort to implement
 * the feature does not justify the potential benefit: handling non-local
 * interrupts is not only slightly slower, it can also be expensive in terms
 * of memory access and other resource utilization.
 *
 * @node	: node to which the mask is to be restricted
 * @dst		: restricted mask (result)
 * @src		: mask to restrict
 */
void constrict_mask_to_node(u8 node, struct cpumask *dst, const struct cpumask *src)
{
//	char buf[140];
    int i;

    cpumask_clear(dst);
    for (i = NLM_MAX_CPU_PER_NODE * node;
            i < (NLM_MAX_CPU_PER_NODE *(node + 1)); i++) {
        cpumask_set_cpu(i, dst);
    }
    cpumask_and(dst, dst, &phys_cpu_present_map);
    cpumask_and(dst, dst, src);
    return;
}
Developer: akennedy-adtran, Project: linux_mmc_2.6.32.9, Lines: 25, Source: ite.c
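
As a usage sketch: the helper below is hypothetical (not from the project above), but shows how a caller would clamp a requested affinity mask to a single node before programming it.

/* Hypothetical caller: clamp a requested affinity to node 0. */
static void example_clamp_to_node0(const struct cpumask *requested)
{
	struct cpumask local;

	constrict_mask_to_node(0, &local, requested);
	if (cpumask_empty(&local))
		pr_warn("no present CPU of node 0 in the requested mask\n");
}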


Example 4: set_cpu_min_freq

static int set_cpu_min_freq(const char *buf, const struct kernel_param *kp)
{
	int i, j, ntokens = 0;
	unsigned int val, cpu;
	const char *cp = buf;
	struct cpu_status *i_cpu_stats;
	struct cpufreq_policy policy;
	cpumask_var_t limit_mask;
	int ret;

	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	
	if (!(ntokens % 2))
		return -EINVAL;

	cp = buf;
	/*
	 * Note: limit_mask is a cpumask_var_t; clearing it directly like this
	 * assumes CONFIG_CPUMASK_OFFSTACK=n (mask embedded on the stack).
	 * With offstack cpumasks it would need alloc_cpumask_var() first.
	 */
	cpumask_clear(limit_mask);
	for (i = 0; i < ntokens; i += 2) {
		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
			return -EINVAL;
		if (cpu > (num_present_cpus() - 1))
			return -EINVAL;

		i_cpu_stats = &per_cpu(cpu_stats, cpu);

		i_cpu_stats->min = val;
		cpumask_set_cpu(cpu, limit_mask);

		cp = strchr(cp, ' ');
		cp++;
	}

	get_online_cpus();
	for_each_cpu(i, limit_mask) {
		i_cpu_stats = &per_cpu(cpu_stats, i);

		if (cpufreq_get_policy(&policy, i))
			continue;

		if (cpu_online(i) && (policy.min != i_cpu_stats->min)) {
			ret = cpufreq_update_policy(i);
			if (ret)
				continue;
		}
		for_each_cpu(j, policy.related_cpus)
			cpumask_clear_cpu(j, limit_mask);
	}
Developer: Clumsy-Kernel-Development, Project: M9_Kernel, Lines: 49, Source: msm_performance.c


Example 5: desc_smp_init

// ARM10C 20141004
// desc: kmem_cache#28-o0, node: 0
static void desc_smp_init(struct irq_desc *desc, int node)
{
	// desc->irq_data.node: (kmem_cache#28-o0)->irq_data.node, node: 0
	desc->irq_data.node = node;
	// desc->irq_data.node: (kmem_cache#28-o0)->irq_data.node: 0

	// desc->irq_data.affinity: (kmem_cache#28-o0)->irq_data.affinity,
	// irq_default_affinity->bits[0]: 0xF
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
	// desc->irq_data.affinity: (kmem_cache#28-o0)->irq_data.affinity.bits[0]: 0xF

#ifdef CONFIG_GENERIC_PENDING_IRQ // CONFIG_GENERIC_PENDING_IRQ=n
	cpumask_clear(desc->pending_mask);
#endif
}
Developer: BozkurTR, Project: kernel, Lines: 17, Source: irqdesc.c


Example 6: __stp_alloc_ring_buffer

static int __stp_alloc_ring_buffer(void)
{
	int i;
	unsigned long buffer_size = _stp_bufsize * 1024 * 1024;

	if (!alloc_cpumask_var(&_stp_relay_data.trace_reader_cpumask,
			       (GFP_KERNEL & ~__GFP_WAIT)))
		goto fail;
	cpumask_clear(_stp_relay_data.trace_reader_cpumask);

	if (buffer_size == 0) {
		dbug_trans(1, "using default buffer size...\n");
		buffer_size = _stp_nsubbufs * _stp_subbuf_size;
	}
        dbug_trans(1, "using buffer size %lu...\n", buffer_size);

	/* The number passed to ring_buffer_alloc() is per cpu.  Our
	 * 'buffer_size' is a total number of bytes to allocate.  So,
	 * we need to divide buffer_size by the number of cpus. */
	buffer_size /= num_online_cpus();
	dbug_trans(1, "%lu\n", buffer_size);
	_stp_relay_data.rb = ring_buffer_alloc(buffer_size, 0);
	if (!_stp_relay_data.rb)
		goto fail;

        /* Increment _stp_allocated_memory and
           _stp_allocated_net_memory to approximately account for
           buffers allocated by ring_buffer_alloc. */
        {
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#endif
                u64 relay_pages;
                relay_pages = DIV_ROUND_UP (buffer_size, PAGE_SIZE);
                if (relay_pages < 2) relay_pages = 2;
                relay_pages *= num_online_cpus();
                _stp_allocated_net_memory += relay_pages * PAGE_SIZE;
                _stp_allocated_memory += relay_pages * PAGE_SIZE;
        }

	dbug_trans(0, "size = %lu\n", ring_buffer_size(_stp_relay_data.rb));
	return 0;

fail:
	__stp_free_ring_buffer();
	return -ENOMEM;
}
Developer: Open-Party, Project: systemtap, Lines: 47, Source: ring_buffer.c


Example 7: setup_node_to_cpumask_map

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate and clear the mapping */
	for (node = 0; node < nr_node_ids; node++) {
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
		cpumask_clear(node_to_cpumask_map[node]);
	}

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
Developer: AlexShiLucky, Project: linux, Lines: 24, Source: numa.c
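
Once the map is populated, cpumask_of_node() becomes valid, as the comment above notes. A minimal consumer sketch (illustrative, not part of numa.c):

/* Sketch: count the online CPUs of one NUMA node via cpumask_of_node(). */
static int online_cpus_on_node(int node)
{
	struct cpumask mask;

	cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask);
	return cpumask_weight(&mask);
}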


Example 8: crash_kexec_prepare_cpus

static void crash_kexec_prepare_cpus(int cpu)
{
	unsigned int msecs;

	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */

	crash_send_ipi(crash_ipi_callback);
	smp_wmb();

	/*
	 * FIXME: Until we will have the way to stop other CPUs reliably,
	 * the crash CPU will send an IPI and wait for other CPUs to
	 * respond.
	 * Delay of at least 10 seconds.
	 */
	printk(KERN_EMERG "Sending IPI to other cpus...\n");
	msecs = 10000;
	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
		cpu_relax();
		mdelay(1);
	}

	/* Would it be better to replace the trap vector here? */

	/*
	 * FIXME: In case if we do not get all CPUs, one possibility: ask the
	 * user to do soft reset such that we get all.
	 * Soft-reset will be used until better mechanism is implemented.
	 */
	if (cpumask_weight(&cpus_in_crash) < ncpus) {
		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
			ncpus - cpumask_weight(&cpus_in_crash));
		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
		cpumask_clear(&cpus_in_sr);
		atomic_set(&enter_on_soft_reset, 0);
		while (cpumask_weight(&cpus_in_crash) < ncpus)
			cpu_relax();
	}
	/*
	 * Make sure all CPUs are entered via soft-reset if the kdump is
	 * invoked using soft-reset.
	 */
	if (cpumask_test_cpu(cpu, &cpus_in_sr))
		crash_soft_reset_check(cpu);
	/* Leave the IPI callback set */
}
Developer: 007kumarraja, Project: rockchip-rk3188-mk908, Lines: 46, Source: crash.c


Example 9: blk_mq_update_queue_map

int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
			    const struct cpumask *online_mask)
{
	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
		return 1;

	cpumask_clear(cpus);
	nr_cpus = nr_uniq_cpus = 0;
	for_each_cpu(i, online_mask) {
		nr_cpus++;
		first_sibling = get_first_sibling(i);
		if (!cpumask_test_cpu(first_sibling, cpus))
			nr_uniq_cpus++;
		cpumask_set_cpu(i, cpus);
	}
Developer: 020gzh, Project: linux, Lines: 18, Source: blk-mq-cpumap.c


Example 10: xlp_ites_init

/*
 * Initializes PIC ITE entries (PRM 9.5.6.26).
 * XLP restricts CPU affinity to 8 groups. Though configurable, they are
 * programmed with the following patterns:
 * 0 => only cpu0 of the node
 * 1 => all local threads of the node; mask = 0xffffffff on the node
 * 2 => cpu0-15 of the node;  mask = 0x0000ffff & online cpu mask on the node
 * 3 => cpu16-31 of the node; mask = 0xffff0000 & online cpu mask on the node
 * 4 => all cpus on all nodes, i.e.
 *      mask = 0xffffffff_ffffffff_ffffffff_ffffffff & physical online cpu map
 * These are programmer-defined groups and can be changed as warranted.
 * Added 5 => CPUs 0-7 of the node (mask 0x000000ff)
 * Added 6 => CPUs 4-7 of the node (mask 0x000000f0)
 * Added 7 => CPUs 0-3 of the node (mask 0x0000000f)
 * The value actually programmed also takes cpu_online_mask into account.
 * For example, for node 1 the group-2 mask 0x0000ffff is shifted left by
 * NLM_MAX_CPU_PER_NODE and thus covers cpus 32-47 (assuming 32 cpus per node).
 *
 * One major issue needs addressing when running in multi-node mode: the
 * number of nodes must be determined and programmed correctly. If a bit in an
 * ITE is programmed while the corresponding physical thread is not present,
 * and an interrupt is dispatched to that CPU under the global scheme, the
 * system hangs, so this scenario must be avoided. That is why
 * phys_cpu_present_map is used.
 *
 * This function simply initializes the xlp_ites entries with the proposed
 * cpumasks.
 */
static void xlp_ites_init(void)
{
    u64 bm = 0x1;
    u8 node;
    struct cpumask m;

    cpumask_clear(&m);
    for_each_online_node(node) {
        /* Simply set the static pattern in all */
        bm = 1;
        u32_to_cpumask(&xlp_ites[node][0], bm);
        cpumask_shift_left(&xlp_ites[node][0], &xlp_ites[node][0], NLM_MAX_CPU_PER_NODE * node); /* directs only to cpu0 of node `node` */

        bm = 0xffffffff;
        u32_to_cpumask(&xlp_ites[node][1], bm);
        cpumask_shift_left(&xlp_ites[node][1], &xlp_ites[node][1], NLM_MAX_CPU_PER_NODE * node); /* directs to all cpus of node `node` */
        cpumask_or(&m, &m, &xlp_ites[node][1]);

        bm = 0x0000ffff;
        u32_to_cpumask(&xlp_ites[node][2], bm);
        cpumask_shift_left(&xlp_ites[node][2], &xlp_ites[node][2], NLM_MAX_CPU_PER_NODE * node); /* directs to specified cpus of node `node` */

        bm = 0xffff0000;
        u32_to_cpumask(&xlp_ites[node][3], bm);
        cpumask_shift_left(&xlp_ites[node][3], &xlp_ites[node][3], NLM_MAX_CPU_PER_NODE * node); /* directs to specified cpus of node `node` */

        bm = 0x000000ff;
        u32_to_cpumask(&xlp_ites[node][5], bm);
        cpumask_shift_left(&xlp_ites[node][5], &xlp_ites[node][5], NLM_MAX_CPU_PER_NODE * node); /* directs to specified cpus of node `node` */

        bm = 0x000000f0;
        u32_to_cpumask(&xlp_ites[node][6], bm);
        cpumask_shift_left(&xlp_ites[node][6], &xlp_ites[node][6], NLM_MAX_CPU_PER_NODE * node); /* directs to specified cpus of node `node` */

        bm = 0x0000000f;
        u32_to_cpumask(&xlp_ites[node][7], bm);
        cpumask_shift_left(&xlp_ites[node][7], &xlp_ites[node][7], NLM_MAX_CPU_PER_NODE * node); /* directs to specified cpus of node `node` */

    }
    for_each_online_node(node) {
        cpumask_copy(&xlp_ites[node][4], &m);
    }
//	dump_all_ites();
}
Developer: akennedy-adtran, Project: linux_mmc_2.6.32.9, Lines: 68, Source: ite.c
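
Each block in the loop above repeats one pattern: load a 32-bit per-node bitmap into a cpumask, then shift it to the node's CPU range. A condensed sketch of that step, reusing the identifiers from the code (the helper name set_group_mask is ours):

/* Sketch: place a 32-bit per-node pattern at node `node`'s CPU range. */
static void set_group_mask(u8 node, int grp, u32 bm)
{
	u32_to_cpumask(&xlp_ites[node][grp], bm);
	cpumask_shift_left(&xlp_ites[node][grp], &xlp_ites[node][grp],
			   NLM_MAX_CPU_PER_NODE * node);
}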


Example 11: disable_nonboot_cpus

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);
	arch_disable_nonboot_cpus_begin();

#ifdef CONFIG_DEBUG_PRINTK
	printk("Disabling non-boot CPUs ...\n");
#else
	;
#endif
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	arch_disable_nonboot_cpus_end();

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
Developer: nos1609, Project: Chrono_Kernel-1, Lines: 43, Source: cpu.c


Example 12: irq_move_masked_irq

void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = desc->irq_data.chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	irqd_clr_move_pending(&desc->irq_data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting the rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}
Developer: 020gzh, Project: linux, Lines: 43, Source: migration.c


Example 13: arch_init_irq

void __init arch_init_irq(void)
{
	int irq;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
					 handle_percpu_irq);
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
					 handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif
	set_c0_status(0x300 << 2);
}
Developer: jakev, Project: CobraDroidBeta, Lines: 42, Source: octeon-irq.c


Example 14: run_parallel_many_CPUs_bulk

static void run_parallel_many_CPUs_bulk(enum queue_behavior_type type,
					uint32_t loops, int q_size, int prefill,
					int CPUs, int bulk)
{
	struct alf_queue *queue = NULL;
	cpumask_t cpumask;
	int i;

	if (CPUs == 0)
		return;

	if (!(queue = alloc_and_init_queue(q_size, prefill)))
		return; /* fail */

	/* Restrict the CPUs to run on
	 */
	if (verbose)
		pr_info("Limit to %d parallel CPUs (bulk:%d)\n", CPUs, bulk);
	cpumask_clear(&cpumask);
	for (i = 0; i < CPUs ; i++) {
		cpumask_set_cpu(i, &cpumask);
	}

	if (type & SPSC) {
		if (CPUs > 2) {
			pr_err("%s() ERR SPSC does not support CPUs > 2\n",
			       __func__);
			goto out;
		}
		run_parallel("alf_queue_BULK_SPSC_parallel_many_CPUs",
			     loops, &cpumask, bulk, queue,
			     time_bench_CPU_BULK_enq_or_deq_spsc);
	} else if (type & MPMC) {
		run_parallel("alf_queue_BULK_MPMC_parallel_many_CPUs",
			     loops, &cpumask, bulk, queue,
			     time_bench_CPU_BULK_enq_or_deq_mpmc);
	} else {
		pr_err("%s() WRONG TYPE!!! FIX\n", __func__);
	}
out:
	alf_queue_free(queue);
}
Developer: netoptimizer, Project: prototype-kernel, Lines: 42, Source: alf_queue_parallel01.c


Example 15: disable_nonboot_cpus

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
Developer: andy-shev, Project: linux, Lines: 42, Source: cpu.c


Example 16: mode_store

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	if (!gbcl)
		return -EPERM;

	if (!strcmp(buf, "enable")) {
		bcl_mode_set(BCL_DEVICE_ENABLED);
		bcl_update_online_mask();
		pr_info("bcl enabled\n");
	} else if (!strcmp(buf, "disable")) {
		bcl_mode_set(BCL_DEVICE_DISABLED);
		cpumask_clear(bcl_cpu_online_mask);
		pr_info("bcl disabled\n");
	} else {
		return -EINVAL;
	}

	return count;
}
Developer: TeamRegular, Project: android_kernel_lge_msm8992, Lines: 21, Source: battery_current_limit.c


Example 17: dbg_set_cpu_affinity

static int dbg_set_cpu_affinity(const char *val, struct kernel_param *kp)
{
	char *endptr;
	pid_t pid;
	int cpu;
	struct cpumask mask;
	long ret;
	pid = (pid_t)memparse(val, &endptr);
	if (*endptr != '@') {
		pr_info("%s: invalid input strin: %s\n", __func__, val);
		return -EINVAL;
	}
	cpu = memparse(++endptr, &endptr);
	cpumask_clear(&mask);
	cpumask_set_cpu(cpu, &mask);
	pr_info("%s: Setting %d cpu affinity to cpu%d\n",
		__func__, pid, cpu);
	ret = sched_setaffinity(pid, &mask);
	pr_info("%s: sched_setaffinity returned %ld\n", __func__, ret);
	return 0;
}
Developer: DaMadOne, Project: android_kernel_samsung_msm8976, Lines: 21, Source: sec_debug.c


Example 18: round_robin_cpu

static void round_robin_cpu(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_var_t tmp;
	int cpu;
	unsigned long min_weight = -1;
	unsigned long uninitialized_var(preferred_cpu);

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	mutex_lock(&round_robin_lock);
	cpumask_clear(tmp);
	for_each_cpu(cpu, pad_busy_cpus)
		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
	cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&round_robin_lock);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}
Developer: 47fortyseven, Project: custom_kernel_hermes, Lines: 39, Source: acpi_pad.c


Example 19: bench_outstanding_parallel_cpus

void noinline bench_outstanding_parallel_cpus(uint32_t loops, int nr_cpus,
					      int outstanding_pages)
{
	const char *desc = "parallel_cpus";
	struct time_bench_sync sync;
	struct time_bench_cpu *cpu_tasks;
	struct cpumask my_cpumask;
	int i;

	/* Allocate records for CPUs */
	cpu_tasks = kzalloc(sizeof(*cpu_tasks) * nr_cpus, GFP_KERNEL);

	/* Reduce number of CPUs to run on */
	cpumask_clear(&my_cpumask);
	for (i = 0; i < nr_cpus ; i++) {
		cpumask_set_cpu(i, &my_cpumask);
	}
	pr_info("Limit to %d parallel CPUs\n", nr_cpus);
	time_bench_run_concurrent(loops, outstanding_pages, NULL,
				  &my_cpumask, &sync, cpu_tasks,
				  time_alloc_pages_outstanding);
	time_bench_print_stats_cpumask(desc, cpu_tasks, &my_cpumask);
	kfree(cpu_tasks);
}
Developer: netoptimizer, Project: prototype-kernel, Lines: 24, Source: page_bench02.c


Example 20: homecache_mask

/* Return a mask of the cpus whose caches currently own these pages. */
static void homecache_mask(struct page *page, int pages,
			   struct cpumask *home_mask)
{
	int i;
	cpumask_clear(home_mask);
	for (i = 0; i < pages; ++i) {
		int home = page_home(&page[i]);
		if (home == PAGE_HOME_IMMUTABLE ||
		    home == PAGE_HOME_INCOHERENT) {
			cpumask_copy(home_mask, cpu_possible_mask);
			return;
		}
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH) {
			cpumask_or(home_mask, home_mask, &hash_for_home_map);
			continue;
		}
#endif
		if (home == PAGE_HOME_UNCACHED)
			continue;
		BUG_ON(home < 0 || home >= NR_CPUS);
		cpumask_set_cpu(home, home_mask);
	}
}
Developer: ANFS, Project: ANFS-kernel, Lines: 25, Source: homecache.c



Note: The cpumask_clear examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects and copyright remains with the original authors; consult each project's license before using or redistributing the code, and do not republish without permission.

