This article collects typical usage examples of the per_cpu_ptr function in C (all of the examples come from Linux kernel code). If you are unsure what per_cpu_ptr does, how to call it, or what real-world uses of it look like, the curated code examples below should help.
A total of 20 code examples of per_cpu_ptr are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better code examples.
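Most of the examples below follow the same shape: per-CPU storage is created with alloc_percpu(), the current CPU's copy is updated through this_cpu_ptr() (or per_cpu_ptr() with an explicit CPU id), and a reader folds every CPU's copy together with for_each_possible_cpu() and per_cpu_ptr(). Here is a minimal, self-contained sketch of that pattern; the struct and function names are illustrative only and do not come from any of the examples below.
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>
#include <linux/errno.h>

struct pkt_counter {
	u64 packets;
	u64 bytes;
};

static struct pkt_counter __percpu *counters;

static int counters_init(void)
{
	counters = alloc_percpu(struct pkt_counter);
	return counters ? 0 : -ENOMEM;
}

/* Writer side: runs with preemption already disabled (e.g. softirq context). */
static void counters_add(unsigned int len)
{
	struct pkt_counter *c = this_cpu_ptr(counters);

	c->packets++;
	c->bytes += len;
}

/* Reader side: fold every CPU's copy into one total. */
static u64 counters_total_packets(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(counters, cpu)->packets;
	return sum;
}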
Example 1: nicvf_get_ethtool_stats
static void nicvf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct nicvf *nic = netdev_priv(netdev);
int stat, tmp_stats;
int sqs, cpu;
nicvf_update_stats(nic);
/* Update LMAC stats */
nicvf_update_lmac_stats(nic);
for (stat = 0; stat < nicvf_n_hw_stats; stat++)
*(data++) = ((u64 *)&nic->hw_stats)
[nicvf_hw_stats[stat].index];
for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
tmp_stats = 0;
for_each_possible_cpu(cpu)
tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
[nicvf_drv_stats[stat].index];
*(data++) = tmp_stats;
}
nicvf_get_qset_stats(nic, stats, &data);
for (sqs = 0; sqs < nic->sqs_count; sqs++) {
if (!nic->snicvf[sqs])
continue;
nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
}
for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
*(data++) = nic->bgx_stats.rx_stats[stat];
for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
*(data++) = nic->bgx_stats.tx_stats[stat];
}
Developer: forgivemyheart, project: linux, lines of code: 36, source file: nicvf_ethtool.c
Example 2: ovs_dp_upcall
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info)
{
struct dp_stats_percpu *stats;
int dp_ifindex;
int err;
if (upcall_info->pid == 0) {
err = -ENOTCONN;
goto err;
}
dp_ifindex = get_dpifindex(dp);
if (!dp_ifindex) {
err = -ENODEV;
goto err;
}
if (!skb_is_gso(skb))
err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
else
err = queue_gso_packets(dp_ifindex, skb, upcall_info);
if (err)
goto err;
return 0;
err:
stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
u64_stats_update_begin(&stats->sync);
stats->n_lost++;
u64_stats_update_end(&stats->sync);
return err;
}
Developer: daveti, project: prov-kernel, lines of code: 36, source file: datapath.c
Example 3: probe_mt65xx_mon_tracepoint
static void
probe_mt65xx_mon_tracepoint(void *ignore, struct task_struct *prev,
struct task_struct *next)
{
struct trace_array_cpu *data;
unsigned long flags;
int cpu;
int pc;
if (unlikely(!mt65xx_mon_ref))
return;
if (!mt65xx_mon_enabled || mt65xx_mon_stopped)
return;
if(prev)
tracing_record_cmdline(prev);
if(next)
tracing_record_cmdline(next);
tracing_record_cmdline(current);
pc = preempt_count();
//local_irq_save(flags);
spin_lock_irqsave(&mt65xx_mon_spinlock, flags);
cpu = raw_smp_processor_id();
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
data = mt65xx_mon_trace->data[cpu];
#else
data = per_cpu_ptr(mt65xx_mon_trace->trace_buffer.data, cpu);
#endif
if (likely(!atomic_read(&data->disabled)))
tracing_mt65xx_mon_function(mt65xx_mon_trace, prev, next, flags, pc);
spin_unlock_irqrestore(&mt65xx_mon_spinlock, flags);
//local_irq_restore(flags);
}
Developer: Scorpio92, project: mediatek, lines of code: 36, source file: trace_mt65xx_mon.c
Example 4: thread_group_cputime
/**
* thread_group_cputime - Sum the thread group time fields across all CPUs.
*
* @tsk: The task we use to identify the thread group.
* @times: task_cputime structure in which we return the summed fields.
*
* Walk the list of CPUs to sum the per-CPU time fields in the thread group
* time structure.
*/
void thread_group_cputime(
struct task_struct *tsk,
struct task_cputime *times)
{
struct signal_struct *sig;
int i;
struct task_cputime *tot;
sig = tsk->signal;
if (unlikely(!sig) || !sig->cputime.totals) {
times->utime = tsk->utime;
times->stime = tsk->stime;
times->sum_exec_runtime = tsk->se.sum_exec_runtime;
return;
}
times->stime = times->utime = cputime_zero;
times->sum_exec_runtime = 0;
for_each_possible_cpu(i) {
tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
times->utime = cputime_add(times->utime, tot->utime);
times->stime = cputime_add(times->stime, tot->stime);
times->sum_exec_runtime += tot->sum_exec_runtime;
}
}
Developer: traveller42, project: linux-2.6.28.mx233-falconwing, lines of code: 33, source file: posix-cpu-timers.c
Example 5: cpumask_weight
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
int cpu, num_cpus;
unsigned int next_nr, next_index;
struct padata_parallel_queue *queue, *next_queue;
struct padata_priv *padata;
struct padata_list *reorder;
num_cpus = cpumask_weight(pd->cpumask.pcpu);
/*
*/
next_nr = pd->processed;
next_index = next_nr % num_cpus;
cpu = padata_index_to_cpu(pd, next_index);
next_queue = per_cpu_ptr(pd->pqueue, cpu);
padata = NULL;
reorder = &next_queue->reorder;
if (!list_empty(&reorder->list)) {
padata = list_entry(reorder->list.next,
struct padata_priv, list);
spin_lock(&reorder->lock);
list_del_init(&padata->list);
atomic_dec(&pd->reorder_objects);
spin_unlock(&reorder->lock);
pd->processed++;
goto out;
}
Developer: romanbb, project: android_kernel_lge_d851, lines of code: 36, source file: padata.c
Example 6: minit
static int minit(void)
{
int cpu;
unsigned long *this;
unsigned long file_size = (10UL * 1024 * 1024 * 1024); /* UL suffix avoids 32-bit int overflow in the constant */
unsigned long chunk_num = file_size / CHUNKSIZE;
unsigned long bytes_num = chunk_num / sizeof(int);
printk("Start %s.\n", THIS_MODULE->name);
percpu_ptr = alloc_percpu(unsigned long);
for_each_online_cpu(cpu) {
/* allocate a bitmap buffer for this CPU and record its address in
 * this CPU's slot of the per-CPU variable */
this = vmalloc(bytes_num);
if (!this) {
printk(KERN_ERR "alloc bitmap failed.\n");
return -ENOMEM;
}
*per_cpu_ptr(percpu_ptr, cpu) = (unsigned long)this;
}
return 0;
}
Developer: imflyfish, project: kernel_test, lines of code: 24, source file: testper.c
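Example 6 stores a vmalloc()'d buffer address in each CPU's slot of an alloc_percpu(unsigned long) variable. A matching cleanup path might look like the following sketch; the function name mexit() is assumed, and it presumes no CPU goes offline between module init and exit.
static void mexit(void)
{
	int cpu;

	/* free each CPU's bitmap buffer, then the per-CPU variable itself */
	for_each_online_cpu(cpu)
		vfree((void *)*per_cpu_ptr(percpu_ptr, cpu));
	free_percpu(percpu_ptr);
	printk("Stop %s.\n", THIS_MODULE->name);
}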
Example 7: dev_txq_stats_fold
static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
dev_txq_stats_fold(dev, stats);
if (vlan_dev_info(dev)->vlan_rx_stats) {
struct vlan_rx_stats *p, rx = {0};
int i;
for_each_possible_cpu(i) {
p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
rx.rx_packets += p->rx_packets;
rx.rx_bytes += p->rx_bytes;
rx.rx_errors += p->rx_errors;
rx.multicast += p->multicast;
}
stats->rx_packets = rx.rx_packets;
stats->rx_bytes = rx.rx_bytes;
stats->rx_errors = rx.rx_errors;
stats->multicast = rx.multicast;
}
return stats;
}
Developer: mfleming, project: linux-2.6, lines of code: 24, source file: vlan_dev.c
Example 8: for_each_possible_cpu
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
struct vlan_pcpu_stats *p;
u32 rx_errors = 0, tx_dropped = 0;
int i;
for_each_possible_cpu(i) {
u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
unsigned int start;
p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
do {
start = u64_stats_fetch_begin_bh(&p->syncp);
rxpackets = p->rx_packets;
rxbytes = p->rx_bytes;
rxmulticast = p->rx_multicast;
txpackets = p->tx_packets;
txbytes = p->tx_bytes;
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
stats->rx_packets += rxpackets;
stats->rx_bytes += rxbytes;
stats->multicast += rxmulticast;
stats->tx_packets += txpackets;
stats->tx_bytes += txbytes;
/* rx_errors & tx_dropped are u32 */
rx_errors += p->rx_errors;
tx_dropped += p->tx_dropped;
}
stats->rx_errors = rx_errors;
stats->tx_dropped = tx_dropped;
}
return stats;
}
Developer: AmesianX, project: netlink-mmap, lines of code: 36, source file: vlan_dev.c
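Example 8 is the reader side of the u64_stats_sync protocol: it retries its snapshot of each CPU's counters until the per-CPU sequence count is stable. The writer side, which each CPU runs on its own copy, would look roughly like this sketch; the struct layout and field names follow the vlan_pcpu_stats usage in the example, and the helper name is illustrative.
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* layout as used in the example above (abridged) */
struct vlan_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;
	u32			tx_dropped;
};

static void vlan_count_rx(struct vlan_pcpu_stats __percpu *pcpu_stats,
			  unsigned int len, bool multicast)
{
	/* runs in the receive path, so preemption is already disabled */
	struct vlan_pcpu_stats *stats = this_cpu_ptr(pcpu_stats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	if (multicast)
		stats->rx_multicast++;
	u64_stats_update_end(&stats->syncp);
}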
Example 9: caam_qi_shutdown
void caam_qi_shutdown(struct device *qidev)
{
int i;
struct caam_qi_priv *priv = dev_get_drvdata(qidev);
const cpumask_t *cpus = qman_affine_cpus();
for_each_cpu(i, cpus) {
struct napi_struct *irqtask;
irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
napi_disable(irqtask);
netif_napi_del(irqtask);
if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
}
qman_delete_cgr_safe(&priv->cgr);
qman_release_cgrid(priv->cgr.cgrid);
kmem_cache_destroy(qi_cache);
platform_device_unregister(priv->qi_pdev);
}
Developer: AlexShiLucky, project: linux, lines of code: 24, source file: qi.c
Example 10: desc_set_defaults
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
const struct cpumask *affinity, struct module *owner)
{
int cpu;
desc->irq_common_data.handler_data = NULL;
desc->irq_common_data.msi_desc = NULL;
desc->irq_data.common = &desc->irq_common_data;
desc->irq_data.irq = irq;
desc->irq_data.chip = &no_irq_chip;
desc->irq_data.chip_data = NULL;
irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
desc->handle_irq = handle_bad_irq;
desc->depth = 1;
desc->irq_count = 0;
desc->irqs_unhandled = 0;
desc->name = NULL;
desc->owner = owner;
for_each_possible_cpu(cpu)
*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
desc_smp_init(desc, node, affinity);
}
Developer: AshishNamdev, project: linux, lines of code: 24, source file: irqdesc.c
Example 11: ixgbe_fcoe_ddp_setup
/**
* ixgbe_fcoe_ddp_setup - called to set up ddp context
* @netdev: the corresponding net_device
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
*
* Returns : 1 for success and 0 for no ddp
*/
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc,
int target_mode)
{
struct ixgbe_adapter *adapter;
struct ixgbe_hw *hw;
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
struct ixgbe_fcoe_ddp_pool *ddp_pool;
struct scatterlist *sg;
unsigned int i, j, dmacount;
unsigned int len;
static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
unsigned int firstoff = 0;
unsigned int lastsize;
unsigned int thisoff = 0;
unsigned int thislen = 0;
u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
dma_addr_t addr = 0;
if (!netdev || !sgl)
return 0;
adapter = netdev_priv(netdev);
if (xid >= IXGBE_FCOE_DDP_MAX) {
e_warn(drv, "xid=0x%x out-of-range\n", xid);
return 0;
}
/* no DDP if we are already down or resetting */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return 0;
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (ddp->sgl) {
e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
xid, ddp->sgl, ddp->sgc);
return 0;
}
ixgbe_fcoe_clear_ddp(ddp);
if (!fcoe->ddp_pool) {
e_warn(drv, "No ddp_pool resources allocated\n");
return 0;
}
ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
if (!ddp_pool->pool) {
e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
goto out_noddp;
}
/* setup dma from scsi command sgl */
dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
if (dmacount == 0) {
e_err(drv, "xid 0x%x DMA map error\n", xid);
goto out_noddp;
}
/* alloc the udl from per cpu ddp pool */
ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
}
ddp->pool = ddp_pool->pool;
ddp->sgl = sgl;
ddp->sgc = sgc;
j = 0;
for_each_sg(sgl, sg, dmacount, i) {
addr = sg_dma_address(sg);
len = sg_dma_len(sg);
while (len) {
/* max number of buffers allowed in one DDP context */
if (j >= IXGBE_BUFFCNT_MAX) {
ddp_pool->noddp++;
goto out_noddp_free;
}
/* get the offset of length of current buffer */
thisoff = addr & ((dma_addr_t)bufflen - 1);
thislen = min((bufflen - thisoff), len);
/*
* all but the 1st buffer (j == 0)
* must be aligned on bufflen
*/
if ((j != 0) && (thisoff))
//......... rest of the code omitted .........
Developer: insop, project: linux, lines of code: 101, source file: ixgbe_fcoe.c
Example 12: tegra_init_timer
static int __init tegra_init_timer(struct device_node *np, bool tegra20)
{
struct timer_of *to;
int cpu, ret;
to = this_cpu_ptr(&tegra_to);
ret = timer_of_init(np, to);
if (ret)
goto out;
timer_reg_base = timer_of_base(to);
/*
* Configure microsecond timers to have 1MHz clock
* Config register is 0xqqww, where qq is "dividend", ww is "divisor"
* Uses n+1 scheme
*/
switch (timer_of_rate(to)) {
case 12000000:
usec_config = 0x000b; /* (11+1)/(0+1) */
break;
case 12800000:
usec_config = 0x043f; /* (63+1)/(4+1) */
break;
case 13000000:
usec_config = 0x000c; /* (12+1)/(0+1) */
break;
case 16800000:
usec_config = 0x0453; /* (83+1)/(4+1) */
break;
case 19200000:
usec_config = 0x045f; /* (95+1)/(4+1) */
break;
case 26000000:
usec_config = 0x0019; /* (25+1)/(0+1) */
break;
case 38400000:
usec_config = 0x04bf; /* (191+1)/(4+1) */
break;
case 48000000:
usec_config = 0x002f; /* (47+1)/(0+1) */
break;
default:
ret = -EINVAL;
goto out;
}
writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
for_each_possible_cpu(cpu) {
struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
unsigned int base = tegra_base_for_cpu(cpu, tegra20);
unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);
/*
* TIMER1-9 are fixed to 1MHz, TIMER10-13 are running off the
* parent clock.
*/
if (tegra20)
cpu_to->of_clk.rate = 1000000;
cpu_to = per_cpu_ptr(&tegra_to, cpu);
cpu_to->of_base.base = timer_reg_base + base;
cpu_to->clkevt.cpumask = cpumask_of(cpu);
cpu_to->clkevt.irq = irq_of_parse_and_map(np, idx);
if (!cpu_to->clkevt.irq) {
pr_err("failed to map irq for cpu%d\n", cpu);
ret = -EINVAL;
goto out_irq;
}
irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr,
IRQF_TIMER | IRQF_NOBALANCING,
cpu_to->clkevt.name, &cpu_to->clkevt);
if (ret) {
pr_err("failed to set up irq for cpu%d: %d\n",
cpu, ret);
irq_dispose_mapping(cpu_to->clkevt.irq);
cpu_to->clkevt.irq = 0;
goto out_irq;
}
}
sched_clock_register(tegra_read_sched_clock, 32, 1000000);
ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", 1000000,
300, 32, clocksource_mmio_readl_up);
if (ret)
pr_err("failed to register clocksource: %d\n", ret);
#ifdef CONFIG_ARM
register_current_timer_delay(&tegra_delay_timer);
#endif
ret = cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
"AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
tegra_timer_stop);
if (ret)
//......... rest of the code omitted .........
Developer: grate-driver, project: linux, lines of code: 101, source file: timer-tegra20.c
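Note that per_cpu_ptr() in the Tegra example is applied to &tegra_to, a statically defined per-CPU variable, rather than to memory obtained from alloc_percpu(). A hedged sketch of that style of use follows; the variable and field names here are made up.
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct my_timer_state {
	unsigned int ticks;
};

static DEFINE_PER_CPU(struct my_timer_state, timer_state);

static void reset_all_timer_state(void)
{
	int cpu;

	/* per_cpu_ptr() accepts &static_per_cpu_var just like
	 * alloc_percpu() memory */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(&timer_state, cpu)->ticks = 0;
}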
Example 13: per_cpu_ptr
static struct cgroup_cpu_stat *cgroup_cpu_stat(struct cgroup *cgrp, int cpu)
{
return per_cpu_ptr(cgrp->cpu_stat, cpu);
}
Developer: ReneNyffenegger, project: linux, lines of code: 4, source file: stat.c
Example 14: ipcomp_decompress
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
struct ipcomp_data *ipcd = x->data;
const int plen = skb->len;
int dlen = IPCOMP_SCRATCH_SIZE;
const u8 *start = skb->data;
const int cpu = get_cpu();
u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
int len;
if (err)
goto out;
if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
err = -EINVAL;
goto out;
}
len = dlen - plen;
if (len > skb_tailroom(skb))
len = skb_tailroom(skb);
__skb_put(skb, len);
len += plen;
skb_copy_to_linear_data(skb, scratch, len);
while ((scratch += len, dlen -= len) > 0) {
skb_frag_t *frag;
struct page *page;
err = -EMSGSIZE;
if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
goto out;
frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
page = alloc_page(GFP_ATOMIC);
err = -ENOMEM;
if (!page)
goto out;
__skb_frag_set_page(frag, page);
len = PAGE_SIZE;
if (dlen < len)
len = dlen;
frag->page_offset = 0;
skb_frag_size_set(frag, len);
memcpy(skb_frag_address(frag), scratch, len);
skb->truesize += len;
skb->data_len += len;
skb->len += len;
skb_shinfo(skb)->nr_frags++;
}
err = 0;
out:
put_cpu();
return err;
}
Developer: Albinoman887, project: pyramid-3.4.10, lines of code: 67, source file: xfrm_ipcomp.c
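Example 14 pins the task to the current CPU with get_cpu() before dereferencing the per-CPU scratch and tfm pointers, and drops the pin with put_cpu() when it is done. A stripped-down sketch of that idiom; the function and parameter names are illustrative.
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>

static void with_scratch(u8 * __percpu *scratches, size_t len)
{
	const int cpu = get_cpu();	/* pins the task: disables preemption */
	u8 *scratch = *per_cpu_ptr(scratches, cpu);

	memset(scratch, 0, len);	/* stand-in for the real per-CPU work */
	put_cpu();			/* re-enables preemption */
}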
Example 15: dpaa2_eth_get_ethtool_stats
/** Fill in hardware counters, as returned by MC.
*/
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
{
int i = 0;
int j, k, err;
int num_cnt;
union dpni_statistics dpni_stats;
u32 fcnt, bcnt;
u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
u32 buf_cnt;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_drv_stats *extras;
struct dpaa2_eth_ch_stats *ch_stats;
memset(data, 0,
sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
/* Print standard counters, from DPNI statistics */
for (j = 0; j <= 2; j++) {
err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
j, &dpni_stats);
if (err != 0)
netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
switch (j) {
case 0:
num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
break;
case 1:
num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64);
break;
case 2:
num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
break;
}
for (k = 0; k < num_cnt; k++)
*(data + i++) = dpni_stats.raw.counter[k];
}
/* Print per-cpu extra stats */
for_each_online_cpu(k) {
extras = per_cpu_ptr(priv->percpu_extras, k);
for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
*((__u64 *)data + i + j) += *((__u64 *)extras + j);
}
i += j;
/* Per-channel stats */
for (k = 0; k < priv->num_channels; k++) {
ch_stats = &priv->channel[k]->stats;
for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
i += j;
for (j = 0; j < priv->num_fqs; j++) {
/* Print FQ instantaneous counts */
err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
&fcnt, &bcnt);
if (err) {
netdev_warn(net_dev, "FQ query error %d", err);
return;
}
if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
fcnt_tx_total += fcnt;
bcnt_tx_total += bcnt;
} else {
fcnt_rx_total += fcnt;
bcnt_rx_total += bcnt;
}
}
*(data + i++) = fcnt_rx_total;
*(data + i++) = bcnt_rx_total;
*(data + i++) = fcnt_tx_total;
*(data + i++) = bcnt_tx_total;
err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
if (err) {
netdev_warn(net_dev, "Buffer count query error %d\n", err);
return;
}
*(data + i++) = buf_cnt;
}
Developer: AlexShiLucky, project: linux, lines of code: 88, source file: dpaa2-ethtool.c
Example 16: clamp_thread
//......... earlier part of this function omitted .........
unsigned long ecx = 1;
unsigned long eax = target_mwait;
/*
* REVISIT: may call enter_idle() to notify drivers who
* can save power during cpu idle. same for exit_idle()
*/
local_touch_nmi();
stop_critical_timings();
mwait_idle_with_hints(eax, ecx);
start_critical_timings();
atomic_inc(&idle_wakeup_counter);
}
preempt_enable();
}
del_timer_sync(&wakeup_timer);
clear_bit(cpunr, cpu_clamping_mask);
return 0;
}
/*
* 1 HZ polling while clamping is active, useful for userspace
* to monitor actual idle ratio.
*/
static void poll_pkg_cstate(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate);
static void poll_pkg_cstate(struct work_struct *dummy)
{
static u64 msr_last;
static u64 tsc_last;
static unsigned long jiffies_last;
u64 msr_now;
unsigned long jiffies_now;
u64 tsc_now;
u64 val64;
msr_now = pkg_state_counter();
tsc_now = rdtsc();
jiffies_now = jiffies;
/* calculate pkg cstate vs tsc ratio */
if (!msr_last || !tsc_last)
pkg_cstate_ratio_cur = 1;
else {
if (tsc_now - tsc_last) {
val64 = 100 * (msr_now - msr_last);
do_div(val64, (tsc_now - tsc_last));
pkg_cstate_ratio_cur = val64;
}
}
/* update record */
msr_last = msr_now;
jiffies_last = jiffies_now;
tsc_last = tsc_now;
if (true == clamping)
schedule_delayed_work(&poll_pkg_cstate_work, HZ);
}
static int start_power_clamp(void)
{
unsigned long cpu;
struct task_struct *thread;
set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
/* prevent cpu hotplug */
get_online_cpus();
/* prefer BSP */
control_cpu = 0;
if (!cpu_online(control_cpu))
control_cpu = smp_processor_id();
clamping = true;
schedule_delayed_work(&poll_pkg_cstate_work, 0);
/* start one thread per online cpu */
for_each_online_cpu(cpu) {
struct task_struct **p =
per_cpu_ptr(powerclamp_thread, cpu);
thread = kthread_create_on_node(clamp_thread,
(void *) cpu,
cpu_to_node(cpu),
"kidle_inject/%ld", cpu);
/* bind to cpu here */
if (likely(!IS_ERR(thread))) {
kthread_bind(thread, cpu);
wake_up_process(thread);
*p = thread;
}
}
put_online_cpus();
return 0;
}
Developer: mansr, project: linux-tangox, lines of code: 101, source file: intel_powerclamp.c
Example 17: per_cpu_ptr
static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}
Developer: 343829084, project: linux-study, lines of code: 4, source file: acpi-cpufreq.c
Example 18: prepare_elf64_headers
static int prepare_elf64_headers(struct crash_elf_data *ced,
void **addr, unsigned long *sz)
{
Elf64_Ehdr *ehdr;
Elf64_Phdr *phdr;
unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
unsigned char *buf, *bufp;
unsigned int cpu;
unsigned long long notes_addr;
int ret;
/* extra phdr for vmcoreinfo elf note */
nr_phdr = nr_cpus + 1;
nr_phdr += ced->max_nr_ranges;
/*
* kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
* area on x86_64 (ffffffff80000000 - ffffffffa0000000).
* I think this is required by tools like gdb. So same physical
* memory will be mapped in two elf headers. One will contain kernel
* text virtual addresses and other will have __va(physical) addresses.
*/
nr_phdr++;
elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
buf = vzalloc(elf_sz);
if (!buf)
return -ENOMEM;
bufp = buf;
ehdr = (Elf64_Ehdr *)bufp;
bufp += sizeof(Elf64_Ehdr);
memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
ehdr->e_ident[EI_CLASS] = ELFCLASS64;
ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
ehdr->e_ident[EI_VERSION] = EV_CURRENT;
ehdr->e_ident[EI_OSABI] = ELF_OSABI;
memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
ehdr->e_type = ET_CORE;
ehdr->e_machine = ELF_ARCH;
ehdr->e_version = EV_CURRENT;
ehdr->e_phoff = sizeof(Elf64_Ehdr);
ehdr->e_ehsize = sizeof(Elf64_Ehdr);
ehdr->e_phentsize = sizeof(Elf64_Phdr);
/* Prepare one phdr of type PT_NOTE for each present cpu */
for_each_present_cpu(cpu) {
phdr = (Elf64_Phdr *)bufp;
bufp += sizeof(Elf64_Phdr);
phdr->p_type = PT_NOTE;
notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
phdr->p_offset = phdr->p_paddr = notes_addr;
phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
(ehdr->e_phnum)++;
}
/* Prepare one PT_NOTE header for vmcoreinfo */
phdr = (Elf64_Phdr *)bufp;
bufp += sizeof(Elf64_Phdr);
phdr->p_type = PT_NOTE;
phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
(ehdr->e_phnum)++;
#ifdef CONFIG_X86_64
/* Prepare PT_LOAD type program header for kernel text region */
phdr = (Elf64_Phdr *)bufp;
bufp += sizeof(Elf64_Phdr);
phdr->p_type = PT_LOAD;
phdr->p_flags = PF_R|PF_W|PF_X;
phdr->p_vaddr = (Elf64_Addr)_text;
phdr->p_filesz = phdr->p_memsz = _end - _text;
phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
(ehdr->e_phnum)++;
#endif
/* Prepare PT_LOAD headers for system ram chunks. */
ced->ehdr = ehdr;
ced->bufp = bufp;
ret = walk_system_ram_res(0, -1, ced,
prepare_elf64_ram_headers_callback);
if (ret < 0)
return ret;
*addr = buf;
*sz = elf_sz;
return 0;
}
Developer: AkyZero, project: wrapfs-latest, lines of code: 90, source file: crash.c
Example 19: __blk_add_trace
/*
* The worker for the various blk_add_trace*() types. Fills out a
* blk_io_trace structure and places it in a per-cpu subbuffer.
*/
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
struct task_struct *tsk = current;
struct ring_buffer_event *event = NULL;
struct ring_buffer *buffer = NULL;
struct blk_io_trace *t;
unsigned long flags = 0;
unsigned long *sequence;
pid_t pid;
int cpu, pc = 0;
bool blk_tracer = blk_tracer_enabled;
if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
return;
what |= ddir_act[rw & WRITE];
what |= MASK_TC_BIT(rw, SYNCIO);
what |= MASK_TC_BIT(rw, AHEAD);
what |= MASK_TC_BIT(rw, META);
what |= MASK_TC_BIT(rw, DISCARD);
what |= MASK_TC_BIT(rw, FLUSH);
what |= MASK_TC_BIT(rw, FUA);
pid = tsk->pid;
if (act_log_check(bt, what, sector, pid))
return;
cpu = raw_smp_processor_id();
if (blk_tracer) {
tracing_record_cmdline(current);
buffer = blk_tr->buffer;
pc = preempt_count();
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + pdu_len,
0, pc);
if (!event)
return;
t = ring_buffer_event_data(event);
goto record_it;
}
/*
* A word about the locking here - we disable interrupts to reserve
* some space in the relay per-cpu buffer, to prevent an irq
* from coming in and stepping on our toes.
*/
local_irq_save(flags);
if (unlikely(tsk->btrace_seq != blktrace_seq))
trace_note_tsk(bt, tsk);
t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
if (t) {
sequence = per_cpu_ptr(bt->sequence, cpu);
t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
t->sequence = ++(*sequence);
t->time = ktime_to_ns(ktime_get());
record_it:
/*
* These two are not needed in ftrace as they are in the
* generic trace_entry, filled by tracing_generic_entry_update,
* but for the trace_event->bin() synthesizer benefit we do it
* here too.
*/
t->cpu = cpu;
t->pid = pid;
t->sector = sector;
t->bytes = bytes;
t->action = what;
t->device = bt->dev;
t->error = error;
t->pdu_len = pdu_len;
if (pdu_len)
memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
if (blk_tracer) {
trace_buffer_unlock_commit(buffer, event, 0, pc);
return;
}
}
local_irq_restore(flags);
}
Developer: swrite, project: ali_kernel, lines of code: 92, source file: blktrace.c
Example 20: dump_traces
static void dump_traces(void)
{
int *index;
struct trace_log_entry *ptr;
int i, cpu, max_entries, idx;
struct trace_log_header *header;
header =
(struct trace_log_header *)trace_log_data.start_vaddr;
pr_info("\n-------TRACE LOG---->8---------\n");
max_entries = header->entries;
pr_info("max entries:%d\n", max_entries);
if (trace_log_data.entries != max_entries)
pr_err("max entries mismatch - %d, %d\n", max_entries,
trace_log_data.entries);
for_each_possible_cpu(cpu) {
idx = header->index[IRQ_TRACE][cpu];
pr_info("\nIRQ_TRACE, cpu:%d, current index:%d", cpu, idx - 1);
index =
per_cpu_ptr(trace_log_data.index[IRQ_TRACE], cpu);
if (idx != *index)
pr_err("IRQ_TRACE indices mismatch. %d, %d\n",
idx, *index);
for (i = 0; i < max_entries; i++) {
ptr = trace_log_data.start[cpu] +
(i * SIZEOF_ENTRIES);
pr_info("%d:%s: irq:%d, timestamp:%lu\n", i,
(ptr->itl.irq & 0x40000000)
? "ENTRY" : "EXIT",
ptr->itl.irq & ~0x40000000,
ptr->itl.timestamp);
}
idx = header->index[SOFTIRQ_TRACE][cpu];
pr_info("\nSOFTIRQ_TRACE, cpu:%d, current index:%d", cpu,
idx - 1);
index =
per_cpu_ptr(trace_log_data.index[SOFTIRQ_TRACE], cpu);
if (idx != *index)
pr_err("SOFTIRQ_TRACE indices mismatch. %d, %d\n",
idx, *index);
for (i = 0; i < max_entries; i++) {
ptr = trace_log_data.start[cpu] +
(i * SIZEOF_ENTRIES);
pr_info("%d:%s: vec_nr:%d, timestamp:%lu\n", i,
(ptr->stl.vec_nr &
0x80000000) ? "ENTRY" : "EXIT",
ptr->stl.vec_nr &
~0x80000000,
ptr->stl.timestamp);
}
idx = header->index[SCHED_TRACE][cpu];
pr_info("\nSCHED_TRACE, cpu:%d, current index:%d", cpu,
idx - 1);
index =
per_cpu_ptr(trace_log_data.index[SCHED_TRACE], cpu);
if (idx != *index)
pr_err("SCHED_TRACE indices mismatch. %d, %d\n",
idx, *index);
for (i = 0; i < max_entries; i++) {
ptr = trace_log_data.start[cpu] +
(i * SIZEOF_ENTRIES);
pr_info("%d:pid:%d, timestamp:%lu\n", i,
ptr->sstl.pid,
ptr->sstl.timestamp);
}
idx = header->index[WORKQUEUE_TRACE][cpu];
pr_info("\nWORKQUEUE_TRACE, cpu:%d, current index:%d", cpu,
idx - 1);
index =
per_cpu_ptr(trace_log_data.index[WORKQUEUE_TRACE], cpu);
if (idx != *index)
pr_err("WORKQUEUE_TRACE indices mismatch. %d, %d\n",
idx, *index);
for (i = 0; i < max_entries; i++) {
ptr = trace_log_data.start[cpu] +
(i * SIZEOF_ENTRIES);
pr_info("%d:%s: %p, timestamp:%lu\n", i,
ptr->wtl.entry ? "ENTRY" : "EXIT",
ptr->wtl.func,
ptr->wtl.timestamp);
}
//......... rest of the code omitted .........
Developer: TheNikiz, project: android_kernel_samsung_hawaii, lines of code: 101, source file: trace_log.c
Note: the per_cpu_ptr examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub/MSDocs. The snippets are taken from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use are subject to the corresponding projects' licenses. Do not republish without permission.