This article collects typical usage examples of the rt_task function in C++. If you have been asking yourself what exactly rt_task does, how to use it, or where to find examples of it in practice, the hand-picked code samples here should help.
Thirteen code examples of the rt_task function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better C++ code examples.
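Before diving into the examples, it helps to know what rt_task() actually tests: in the Linux kernel it is a small predicate on a task's scheduling priority. The sketch below follows its definition in include/linux/sched/rt.h, with task_struct reduced to the single field the predicate reads:

/* Reduced stand-in for the kernel's task_struct; only the priority
 * field matters here. */
struct task_struct {
    int prio;
    /* ... many other fields ... */
};

#define MAX_RT_PRIO 100

/* Priorities 0..MAX_RT_PRIO-1 form the real-time range (SCHED_FIFO and
 * SCHED_RR); anything at or above MAX_RT_PRIO is a normal-policy task. */
static inline int rt_prio(int prio)
{
    if (prio < MAX_RT_PRIO)
        return 1;
    return 0;
}

static inline int rt_task(struct task_struct *p)
{
    return rt_prio(p->prio);
}

In other words, every rt_task(p) call in the examples below simply asks: is p running under a real-time scheduling policy?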
Example 1: probe_wakeup
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
    struct trace_array_cpu *data;
    int cpu = smp_processor_id();
    unsigned long flags;
    long disabled;
    int pc;

    if (likely(!tracer_enabled))
        return;

    tracing_record_cmdline(p);
    tracing_record_cmdline(current);

    if ((wakeup_rt && !rt_task(p)) ||
        p->prio >= wakeup_prio ||
        p->prio >= current->prio)
        return;

    pc = preempt_count();
    disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
    if (unlikely(disabled != 1))
        goto out;

    /* interrupts should be off from try_to_wake_up */
    __raw_spin_lock(&wakeup_lock);

    /* check for races. */
    if (!tracer_enabled || p->prio >= wakeup_prio)
        goto out_locked;

    /* reset the trace */
    __wakeup_reset(wakeup_trace);

    wakeup_cpu = task_cpu(p);
    wakeup_current_cpu = wakeup_cpu;
    wakeup_prio = p->prio;
    wakeup_task = p;
    get_task_struct(wakeup_task);

    local_save_flags(flags);

    data = wakeup_trace->data[wakeup_cpu];
    data->preempt_timestamp = ftrace_now(cpu);
    tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

    /*
     * We must be careful in using CALLER_ADDR2. But since wake_up
     * is not called by an assembly function (where as schedule is)
     * it should be safe to use it here.
     */
    trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
    __raw_spin_unlock(&wakeup_lock);
out:
    atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Contributor: chunyenho, Project: RTS-hw2, Lines: 60, Source: trace_sched_wakeup.c
Example 2: global_dirty_limits
/*
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 * - vm.dirty_ratio             or  vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
    unsigned long background;
    unsigned long dirty;
    unsigned long uninitialized_var(available_memory);
    struct task_struct *tsk;

    if (!vm_dirty_bytes || !dirty_background_bytes)
        available_memory = determine_dirtyable_memory();

    if (vm_dirty_bytes)
        dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
    else
        dirty = (vm_dirty_ratio * available_memory) / 100;

    if (dirty_background_bytes)
        background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
    else
        background = (dirty_background_ratio * available_memory) / 100;

    if (background >= dirty)
        background = dirty / 2;
    tsk = current;
    if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
        background += background / 4;
        dirty += dirty / 4;
    }
    *pbackground = background;
    *pdirty = dirty;
}
Contributor: printusrzero, Project: hwp6s-kernel, Lines: 39, Source: page-writeback.c
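To make the effect of the final branch concrete, here is a small standalone program mirroring the threshold arithmetic above; the sysctl values and page count are invented for the illustration:

#include <stdio.h>

int main(void)
{
    unsigned long available_memory = 100000;                  /* dirtyable pages */
    unsigned long dirty = (20 * available_memory) / 100;      /* vm.dirty_ratio = 20 */
    unsigned long background = (10 * available_memory) / 100; /* vm.dirty_background_ratio = 10 */
    int rt_or_less_throttle = 1;  /* pretend rt_task(current) is true */

    if (background >= dirty)
        background = dirty / 2;
    if (rt_or_less_throttle) {
        background += background / 4;  /* 10000 -> 12500 */
        dirty += dirty / 4;            /* 20000 -> 25000 */
    }
    printf("background=%lu dirty=%lu\n", background, dirty);
    return 0;
}

A real-time writer is thus throttled a quarter later than a normal task with the same settings.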
Example 3: hrtimer_nanosleep
long hrtimer_nanosleep(const struct timespec64 *rqtp,
                       const enum hrtimer_mode mode, const clockid_t clockid)
{
    struct restart_block *restart;
    struct hrtimer_sleeper t;
    int ret = 0;
    u64 slack;

    slack = current->timer_slack_ns;
    if (dl_task(current) || rt_task(current))
        slack = 0;

    hrtimer_init_on_stack(&t.timer, clockid, mode);
    hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
    ret = do_nanosleep(&t, mode);
    if (ret != -ERESTART_RESTARTBLOCK)
        goto out;

    /* Absolute timers do not update the rmtp value and restart: */
    if (mode == HRTIMER_MODE_ABS) {
        ret = -ERESTARTNOHAND;
        goto out;
    }

    restart = &current->restart_block;
    restart->fn = hrtimer_nanosleep_restart;
    restart->nanosleep.clockid = t.timer.base->clockid;
    restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
out:
    destroy_hrtimer_on_stack(&t.timer);
    return ret;
}
Contributor: mdamt, Project: linux, Lines: 32, Source: hrtimer.c
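Seen from user space, the dl_task()/rt_task() check means that a real-time thread sleeping through nanosleep() gets zero timer slack instead of current->timer_slack_ns. A minimal userspace sketch (standard POSIX calls; switching to SCHED_FIFO normally requires elevated privileges):

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct sched_param sp = { .sched_priority = 10 };

    /* Once this succeeds, rt_task(current) is true in the kernel. */
    if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0) {
        perror("sched_setscheduler");  /* typically needs CAP_SYS_NICE */
        return 1;
    }

    /* This sleep reaches hrtimer_nanosleep() with slack = 0, so the
     * wakeup is not coalesced with nearby timers. */
    struct timespec req = { .tv_sec = 0, .tv_nsec = 500000 };  /* 500 us */
    nanosleep(&req, NULL);
    return 0;
}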
Example 4: probe_wakeup
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
    int cpu = smp_processor_id();
    unsigned long flags;
    long disabled;
    int pc;

    if (likely(!tracer_enabled))
        return;

    tracing_record_cmdline(p);
    tracing_record_cmdline(current);

    if (likely(!rt_task(p)) ||
        p->prio >= wakeup_prio ||
        p->prio >= current->prio)
        return;

    pc = preempt_count();
    disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
    if (unlikely(disabled != 1))
        goto out;

    /* interrupts should be off from try_to_wake_up */
    __raw_spin_lock(&wakeup_lock);

    /* check for races. */
    if (!tracer_enabled || p->prio >= wakeup_prio)
        goto out_locked;

    /* reset the trace */
    __wakeup_reset(wakeup_trace);

    wakeup_cpu = task_cpu(p);
    wakeup_prio = p->prio;
    wakeup_task = p;
    get_task_struct(wakeup_task);

    local_save_flags(flags);

    wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
    trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
                   CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
    __raw_spin_unlock(&wakeup_lock);
out:
    atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Contributor: percy-g2, Project: rowboat-kernel, Lines: 51, Source: trace_sched_wakeup.c
Example 5: get_dirty_limits
void
get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
                 struct backing_dev_info *bdi)
{
    int background_ratio;        /* Percentages */
    int dirty_ratio;
    long background;
    long dirty;
    unsigned long available_memory = determine_dirtyable_memory();
    struct task_struct *tsk;

    dirty_ratio = vm_dirty_ratio;
    if (dirty_ratio < 5)
        dirty_ratio = 5;

    background_ratio = dirty_background_ratio;
    if (background_ratio >= dirty_ratio)
        background_ratio = dirty_ratio / 2;

    background = (background_ratio * available_memory) / 100;
    dirty = (dirty_ratio * available_memory) / 100;
    tsk = current;
    if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
        background += background / 4;
        dirty += dirty / 4;
    }
    *pbackground = background;
    *pdirty = dirty;

    if (bdi) {
        u64 bdi_dirty;
        long numerator, denominator;

        /*
         * Calculate this BDI's share of the dirty ratio.
         */
        bdi_writeout_fraction(bdi, &numerator, &denominator);

        bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
        bdi_dirty *= numerator;
        do_div(bdi_dirty, denominator);
        bdi_dirty += (dirty * bdi->min_ratio) / 100;
        if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
            bdi_dirty = dirty * bdi->max_ratio / 100;

        *pbdi_dirty = bdi_dirty;
        clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
        task_dirty_limit(current, pbdi_dirty);
    }
}
Contributor: khenam, Project: ardrone-kernel, Lines: 50, Source: page-writeback.c
Example 6: get_dirty_limits
/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around. To avoid stressing page reclaim with lots of unreclaimable
 * pages. It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(long *pbackground, long *pdirty,
                 struct address_space *mapping)
{
    int background_ratio;        /* Percentages */
    int dirty_ratio;
    int unmapped_ratio;
    long background;
    long dirty;
    unsigned long available_memory = vm_total_pages;
    struct task_struct *tsk;

#ifdef CONFIG_HIGHMEM
    /*
     * We always exclude high memory from our count.
     */
    available_memory -= totalhigh_pages;
#endif

    unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
                             global_page_state(NR_ANON_PAGES)) * 100) /
                            vm_total_pages;

    dirty_ratio = vm_dirty_ratio;
    if (dirty_ratio > unmapped_ratio / 2)
        dirty_ratio = unmapped_ratio / 2;

    if (dirty_ratio < 5)
        dirty_ratio = 5;

    background_ratio = dirty_background_ratio;
    if (background_ratio >= dirty_ratio)
        background_ratio = dirty_ratio / 2;

    background = (background_ratio * available_memory) / 100;
    dirty = (dirty_ratio * available_memory) / 100;
    tsk = current;
    if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
        background += background / 4;
        dirty += dirty / 4;
    }
    *pbackground = background;
    *pdirty = dirty;
}
Contributor: qwerty1023, Project: wive-rtnl-firmware, Lines: 62, Source: page-writeback.c
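A quick worked example of the unmapped-ratio clamp described in the comment block above, with invented page counts: if 70% of memory is mapped, unmapped_ratio is 30, so a configured vm.dirty_ratio of 40 is clamped down to 15.

#include <stdio.h>

int main(void)
{
    unsigned long total = 100000, mapped = 70000;  /* invented page counts */
    int unmapped_ratio = 100 - (int)((mapped * 100) / total);  /* 30 */
    int dirty_ratio = 40;                                      /* vm.dirty_ratio */

    if (dirty_ratio > unmapped_ratio / 2)
        dirty_ratio = unmapped_ratio / 2;  /* clamped to 15 */
    if (dirty_ratio < 5)
        dirty_ratio = 5;                   /* 5% floor */

    printf("effective dirty_ratio = %d%%\n", dirty_ratio);
    return 0;
}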
Example 7: estimate_accuracy
static long estimate_accuracy(struct timespec *tv)
{
    unsigned long ret;
    struct timespec now;

    /*
     * Realtime tasks get a slack of 0 for obvious reasons.
     */
    if (rt_task(current))
        return 0;

    ktime_get_ts(&now);
    now = timespec_sub(*tv, now);
    ret = __estimate_accuracy(&now);
    if (ret < current->timer_slack_ns)
        return current->timer_slack_ns;
    return ret;
}
Contributor: raddirad, Project: xeon_phi_kernel_integration, Lines: 19, Source: micscif_select.c
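__estimate_accuracy() is not shown in this snippet. For reference, a hedged reconstruction of the heuristic it implements in fs/select.c of that kernel era is sketched below, renamed and parameterized so it stands alone; treat the exact constants as an assumption. The idea: slack is roughly 0.1% of the remaining timeout (0.5% for positively-niced tasks), capped at 100 ms.

#include <time.h>

#define SLACK_NSEC_PER_SEC  1000000000L
#define SLACK_NSEC_PER_MSEC 1000000L

static long estimate_slack(const struct timespec *remaining, int nice)
{
    long divfactor = 1000;  /* 0.1% of the timeout */
    long slack;

    if (remaining->tv_sec < 0)
        return 0;           /* timeout already expired */

    if (nice > 0)
        divfactor /= 5;     /* 0.5% for niced tasks */

    slack = remaining->tv_nsec / divfactor;
    slack += remaining->tv_sec * (SLACK_NSEC_PER_SEC / divfactor);

    if (slack > 100 * SLACK_NSEC_PER_MSEC)
        slack = 100 * SLACK_NSEC_PER_MSEC;  /* hard cap at 100 ms */

    return slack;
}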
Example 8: hrtimer_nanosleep
long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
                       const enum hrtimer_mode mode, const clockid_t clockid)
{
    struct restart_block *restart;
    struct hrtimer_sleeper t;
    int ret = 0;
    unsigned long slack;

    slack = current->timer_slack_ns;
    if (rt_task(current))
        slack = 0;

    hrtimer_init_on_stack(&t.timer, clockid, mode);
    hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
    if (do_nanosleep(&t, mode))
        goto out;

    /* Absolute timers do not update the rmtp value and restart: */
    if (mode == HRTIMER_MODE_ABS) {
        ret = -ERESTARTNOHAND;
        goto out;
    }

    if (rmtp) {
        ret = update_rmtp(&t.timer, rmtp);
        if (ret <= 0)
            goto out;
    }

    restart = &current_thread_info()->restart_block;
    restart->fn = hrtimer_nanosleep_restart;
    restart->nanosleep.clockid = t.timer.base->clockid;
    restart->nanosleep.rmtp = rmtp;
    restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

    ret = -ERESTART_RESTARTBLOCK;
out:
    destroy_hrtimer_on_stack(&t.timer);
    return ret;
}
Contributor: RolanDroid, Project: lge_MonsterKernel-lproj, Lines: 40, Source: hrtimer.c
Example 9: get_dirty_limits
static void
get_dirty_limits(long *pbackground, long *pdirty,
                 struct address_space *mapping)
{
    int background_ratio;        /* Percentages */
    int dirty_ratio;
    int unmapped_ratio;
    long background;
    long dirty;
    unsigned long available_memory = determine_dirtyable_memory();
    struct task_struct *tsk;

    unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
                             global_page_state(NR_ANON_PAGES)) * 100) /
                            available_memory;

    dirty_ratio = vm_dirty_ratio;
    if (dirty_ratio > unmapped_ratio / 2)
        dirty_ratio = unmapped_ratio / 2;

    if (dirty_ratio < 5)
        dirty_ratio = 5;

    background_ratio = dirty_background_ratio;
    if (background_ratio >= dirty_ratio)
        background_ratio = dirty_ratio / 2;

    background = (background_ratio * available_memory) / 100;
    dirty = (dirty_ratio * available_memory) / 100;
    tsk = current;
    if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
        background += background / 4;
        dirty += dirty / 4;
    }
    *pbackground = background;
    *pdirty = dirty;
}
Contributor: cilynx, Project: dd-wrt, Lines: 37, Source: page-writeback.c
Example 10: __mutex_lock_common
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip)
{
    struct task_struct *task = current;
    struct mutex_waiter waiter;
    unsigned long flags;

    preempt_disable();
    mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    /*
     * Optimistic spinning.
     *
     * We try to spin for acquisition when we find that there are no
     * pending waiters and the lock owner is currently running on a
     * (different) CPU.
     *
     * The rationale is that if the lock owner is running, it is likely to
     * release the lock soon.
     *
     * Since this needs the lock owner, and this mutex implementation
     * doesn't track the owner atomically in the lock field, we need to
     * track it non-atomically.
     *
     * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
     * to serialize everything.
     *
     * The mutex spinners are queued up using MCS lock so that only one
     * spinner can compete for the mutex. However, if mutex spinning isn't
     * going to happen, there is no point in going through the lock/unlock
     * overhead.
     */
    if (!mutex_can_spin_on_owner(lock))
        goto slowpath;

    for (;;) {
        struct task_struct *owner;
        struct mspin_node node;

        /*
         * If there's an owner, wait for it to either
         * release the lock or go to sleep.
         */
        mspin_lock(MLOCK(lock), &node);
        owner = ACCESS_ONCE(lock->owner);
        if (owner && !mutex_spin_on_owner(lock, owner)) {
            mspin_unlock(MLOCK(lock), &node);
            break;
        }

        if ((atomic_read(&lock->count) == 1) &&
            (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
            lock_acquired(&lock->dep_map, ip);
            mutex_set_owner(lock);
            mspin_unlock(MLOCK(lock), &node);
            preempt_enable();
            return 0;
        }
        mspin_unlock(MLOCK(lock), &node);

        /*
         * When there's no owner, we might have preempted between the
         * owner acquiring the lock and setting the owner field. If
         * we're an RT task that will live-lock because we won't let
         * the owner complete.
         */
        if (!owner && (need_resched() || rt_task(task)))
            break;

        /*
         * The cpu_relax() call is a compiler barrier which forces
         * everything in this loop to be re-loaded. We don't need
         * memory barriers as we'll eventually observe the right
         * values at the cost of a few extra spins.
         */
        arch_mutex_cpu_relax();
    }
slowpath:
#endif
    spin_lock_mutex(&lock->wait_lock, flags);

    debug_mutex_lock_common(lock, &waiter);
    debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

    /* add waiting tasks to the end of the waitqueue (FIFO): */
    list_add_tail(&waiter.list, &lock->wait_list);
    waiter.task = task;

    if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
        goto done;

    lock_contended(&lock->dep_map, ip);

    for (;;) {
        /*
//......... remainder of this function omitted .........
Contributor: FrozenCow, Project: FIRE-ICE, Lines: 101, Source: mutex.c
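The optimistic-spinning idea documented above, spin while the owner looks likely to release soon, then fall back to blocking, can be illustrated outside the kernel. The toy lock below is only a sketch of that shape (invented names, C11 atomics, no MCS queue, no owner tracking, and sched_yield() standing in for sleeping on a wait list):

#include <sched.h>
#include <stdatomic.h>

typedef struct { atomic_int locked; } toy_mutex;  /* 0 = free, 1 = held */

static void toy_lock(toy_mutex *m)
{
    /* Optimistic phase: spin a bounded number of times, hoping the
     * holder releases soon (the kernel spins only while the owner is
     * actually running on another CPU). */
    for (int spins = 0; spins < 1000; spins++) {
        int expected = 0;
        if (atomic_compare_exchange_weak(&m->locked, &expected, 1))
            return;  /* acquired on the fast path */
    }

    /* Slowpath: stop burning CPU; the kernel queues the task and
     * sleeps, here we merely yield between attempts. */
    for (;;) {
        int expected = 0;
        if (atomic_compare_exchange_weak(&m->locked, &expected, 1))
            return;
        sched_yield();
    }
}

static void toy_unlock(toy_mutex *m)
{
    atomic_store(&m->locked, 0);
}

The rt_task(task) escape in the kernel version matters precisely because an RT spinner that never yields could prevent the lock owner from ever finishing; the toy version side-steps the issue by always yielding in the slowpath.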
Example 11: main
//......... beginning of this function omitted .........
            timestamp_rel = 1;
        case 'T':
            with_timestamp = 1;
            break;

        default:
            fprintf(stderr, "Unknown option %c\n", opt);
            break;
        }
    }

    ret = rt_dev_socket(PF_CAN, SOCK_RAW, CAN_RAW);
    if (ret < 0) {
        fprintf(stderr, "rt_dev_socket: %s\n", strerror(-ret));
        return -1;
    }
    s = ret;

    if (argv[optind] == NULL) {
        if (verbose)
            printf("interface all\n");
        ifr.ifr_ifindex = 0;
    } else {
        if (verbose)
            printf("interface %s\n", argv[optind]);
        strncpy(ifr.ifr_name, argv[optind], IFNAMSIZ);
        if (verbose)
            printf("s=%d, ifr_name=%s\n", s, ifr.ifr_name);
        ret = rt_dev_ioctl(s, SIOCGIFINDEX, &ifr);
        if (ret < 0) {
            fprintf(stderr, "rt_dev_ioctl GET_IFINDEX: %s\n", strerror(-ret));
            goto failure;
        }
    }

    if (err_mask) {
        ret = rt_dev_setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
                                &err_mask, sizeof(err_mask));
        if (ret < 0) {
            fprintf(stderr, "rt_dev_setsockopt: %s\n", strerror(-ret));
            goto failure;
        }
        if (verbose)
            printf("Using err_mask=%#x\n", err_mask);
    }

    if (filter_count) {
        ret = rt_dev_setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER,
                                &recv_filter, filter_count *
                                sizeof(struct can_filter));
        if (ret < 0) {
            fprintf(stderr, "rt_dev_setsockopt: %s\n", strerror(-ret));
            goto failure;
        }
    }

    recv_addr.can_family = AF_CAN;
    recv_addr.can_ifindex = ifr.ifr_ifindex;
    ret = rt_dev_bind(s, (struct sockaddr *)&recv_addr,
                      sizeof(struct sockaddr_can));
    if (ret < 0) {
        fprintf(stderr, "rt_dev_bind: %s\n", strerror(-ret));
        goto failure;
    }

    if (timeout) {
        if (verbose)
            printf("Timeout: %lld ns\n", (long long)timeout);
        ret = rt_dev_ioctl(s, RTCAN_RTIOC_RCV_TIMEOUT, &timeout);
        if (ret) {
            fprintf(stderr, "rt_dev_ioctl RCV_TIMEOUT: %s\n", strerror(-ret));
            goto failure;
        }
    }

    if (with_timestamp) {
        ret = rt_dev_ioctl(s, RTCAN_RTIOC_TAKE_TIMESTAMP, RTCAN_TAKE_TIMESTAMPS);
        if (ret) {
            fprintf(stderr, "rt_dev_ioctl TAKE_TIMESTAMP: %s\n", strerror(-ret));
            goto failure;
        }
    }

    snprintf(name, sizeof(name), "rtcanrecv-%d", getpid());
    ret = rt_task_shadow(&rt_task_desc, name, 0, 0);
    if (ret) {
        fprintf(stderr, "rt_task_shadow: %s\n", strerror(-ret));
        goto failure;
    }

    rt_task();
    /* never returns */

failure:
    cleanup();
    return -1;
}
Contributor: meeusr, Project: xenomai-forge, Lines: 101, Source: rtcanrecv.c
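Note that the rt_task() called at the end of main() here is a local helper defined in the omitted part of rtcanrecv.c, not the kernel priority predicate of the earlier examples. A hypothetical sketch of such a receive loop with the RTDM API follows, assuming the usual rt_dev_recv() signature and header; the real program also handles timestamps, verbosity, and print formats:

#include <stdio.h>
#include <string.h>
#include <linux/can.h>  /* struct can_frame */
#include <rtdm/rtdm.h>  /* rt_dev_recv() */

/* Hypothetical receive loop; fd is the bound RTDM CAN socket. */
static void receive_loop(int fd)
{
    struct can_frame frame;
    ssize_t ret;

    for (;;) {
        ret = rt_dev_recv(fd, &frame, sizeof(frame), 0);
        if (ret < 0) {
            fprintf(stderr, "rt_dev_recv: %s\n", strerror(-ret));
            break;
        }
        printf("<0x%x> [%d]\n", frame.can_id, frame.can_dlc);
    }
}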
Example 12: main
//......... beginning of this function omitted .........
    }

    if (argv[optind] == NULL) {
        fprintf(stderr, "No Interface supplied\n");
        exit(-1);
    }

    if (verbose)
        printf("interface %s\n", argv[optind]);

    ret = socket(PF_CAN, SOCK_RAW, CAN_RAW);
    if (ret < 0) {
        fprintf(stderr, "socket: %s\n", strerror(-ret));
        return -1;
    }
    s = ret;

    if (loopback >= 0) {
        ret = setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK,
                         &loopback, sizeof(loopback));
        if (ret < 0) {
            fprintf(stderr, "setsockopt: %s\n", strerror(-ret));
            goto failure;
        }
        if (verbose)
            printf("Using loopback=%d\n", loopback);
    }

    strncpy(ifr.ifr_name, argv[optind], IFNAMSIZ);
    if (verbose)
        printf("s=%d, ifr_name=%s\n", s, ifr.ifr_name);

    ret = ioctl(s, SIOCGIFINDEX, &ifr);
    if (ret < 0) {
        fprintf(stderr, "ioctl: %s\n", strerror(-ret));
        goto failure;
    }

    memset(&to_addr, 0, sizeof(to_addr));
    to_addr.can_ifindex = ifr.ifr_ifindex;
    to_addr.can_family = AF_CAN;

    if (use_send) {
        /* Suppress definiton of a default receive filter list */
        ret = setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, 0);
        if (ret < 0) {
            fprintf(stderr, "setsockopt: %s\n", strerror(-ret));
            goto failure;
        }
        ret = bind(s, (struct sockaddr *)&to_addr, sizeof(to_addr));
        if (ret < 0) {
            fprintf(stderr, "bind: %s\n", strerror(-ret));
            goto failure;
        }
    }

    if (count)
        frame.can_dlc = sizeof(int);
    else {
        for (i = optind + 1; i < argc; i++) {
            frame.data[dlc] = strtoul(argv[i], NULL, 0);
            dlc++;
            if (dlc == 8)
                break;
        }
        frame.can_dlc = dlc;
    }

    if (rtr)
        frame.can_id |= CAN_RTR_FLAG;
    if (extended)
        frame.can_id |= CAN_EFF_FLAG;

    if (timeout) {
        if (verbose)
            printf("Timeout: %lld ns\n", (long long)timeout);
        ret = ioctl(s, RTCAN_RTIOC_SND_TIMEOUT, &timeout);
        if (ret) {
            fprintf(stderr, "ioctl SND_TIMEOUT: %s\n", strerror(-ret));
            goto failure;
        }
    }

    snprintf(name, sizeof(name), "rtcansend-%d", getpid());
    ret = rt_task_shadow(&rt_task_desc, name, 1, 0);
    if (ret) {
        fprintf(stderr, "rt_task_shadow: %s\n", strerror(-ret));
        goto failure;
    }

    rt_task();

    cleanup();
    return 0;

failure:
    cleanup();
    return -1;
}
Contributor: rcn-ee, Project: xenomai-3, Lines: 101, Source: rtcansend.c
Example 13: boost_dying_task_prio
/*
 * If this is a system OOM (not a memcg OOM) and the task selected to be
 * killed is not already running at high (RT) priorities, speed up the
 * recovery by boosting the dying task to the lowest FIFO priority.
 * That helps with the recovery and avoids interfering with RT tasks.
 */
static void boost_dying_task_prio(struct task_struct *p,
                                  struct mem_cgroup *mem)
{
    struct sched_param param = { .sched_priority = 1 };

    if (mem)
        return;

    if (!rt_task(p))
        sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
}

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer. Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
    struct task_struct *t = p;

    do {
        task_lock(t);
        if (likely(t->mm))
            return t;
        task_unlock(t);
    } while_each_thread(p, t);

    return NULL;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
                                const struct mem_cgroup *mem,
                                const nodemask_t *nodemask)
{
    if (is_global_init(p))
        return true;
    if (p->flags & PF_KTHREAD)
        return true;

    /* When mem_cgroup_out_of_memory() and p is not member of the group */
    if (mem && !task_in_mem_cgroup(p, mem))
        return true;

    /* p may not have freeable memory in nodemask */
    if (!has_intersects_mems_allowed(p, nodemask))
        return true;

    return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible. The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
                         const nodemask_t *nodemask, unsigned long totalpages)
{
    long points;

    if (oom_unkillable_task(p, mem, nodemask))
        return 0;

    p = find_lock_task_mm(p);
    if (!p)
        return 0;

    /*
     * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
     * so the entire heuristic doesn't need to be executed for something
     * that cannot be killed.
     */
    if (atomic_read(&p->mm->oom_disable_count)) {
        task_unlock(p);
        return 0;
    }

    /*
     * The memory controller may have a limit of 0 bytes, so avoid a divide
     * by zero, if necessary.
     */
    if (!totalpages)
        totalpages = 1;

    /*
     * The baseline for the badness score is the proportion of RAM that each
     * task's rss, pagetable and swap space use.
     */
//......... remainder of this function omitted .........
Contributor: 285452612, Project: ali_kernel, Lines: 101, Source: oom_kill.c
Note: the rt_task function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many programmers; copyright in each snippet remains with its original author, and distribution and use are governed by the corresponding project's license. Do not reproduce without permission.