
C++ rt_mutex_owner Function Code Examples


This article collects typical usage examples of the C++ rt_mutex_owner function. If you are wondering what rt_mutex_owner does, how it is called, or what real usage looks like, the hand-picked code examples below should help.



The following presents 19 code examples of the rt_mutex_owner function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code examples.
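Most of the snippets below come from the Linux kernel's kernel/rtmutex.c and kernel/rtmutex-debug.c. For orientation, here is a minimal sketch of what the rt_mutex_owner() accessor itself typically looks like in rtmutex_common.h. The state-bit layout packed into lock->owner differs between kernel versions (older trees also carry a pending-owner bit, see Example 7), so the mask below is an assumption for illustration, not the definitive definition.

/*
 * Sketch of the rt_mutex_owner() accessor (modeled on rtmutex_common.h;
 * the exact bit layout is version dependent and assumed here).
 *
 * The low bit(s) of lock->owner carry state flags, so the real
 * task_struct pointer is recovered by masking them off.
 */
#define RT_MUTEX_HAS_WAITERS	1UL
#define RT_MUTEX_OWNER_MASKALL	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
	return (struct task_struct *)
		((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
}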

Example 1: printk_lock

static void printk_lock(struct rt_mutex *lock, int print_owner)
{
	if (lock->name)
		/* printk(" [%p] {%s}\n", lock, lock->name); */
		;
	else
		/* printk(" [%p] {%s:%d}\n", lock, lock->file, lock->line); */
		;

	if (print_owner && rt_mutex_owner(lock)) {
		/* printk(".. ->owner: %p\n", lock->owner); */
		/* printk(".. held by:  "); */
		printk_task(rt_mutex_owner(lock));
		/* printk("\n"); */
	}
}
Developer ID: rrowicki, Project: Chrono_Kernel-1, Lines: 16, Source file: rtmutex-debug.c


Example 2: try_to_take_rt_mutex

/*
 * Try to take an rt-mutex
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note, that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
		return 0;

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, current, 0);

	rt_mutex_deadlock_account_lock(lock, current);

	return 1;
}
Developer ID: mrtos, Project: Logitech-Revue, Lines: 44, Source file: rtmutex.c


Example 3: rt_mutex_slowtrylock

/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret;

	/*
	 * If the lock already has an owner we fail to get the lock.
	 * This can be done without taking the @lock->wait_lock as
	 * it is only being read, and this is a trylock anyway.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The mutex has currently no owner. Lock the wait lock and
	 * try to acquire the lock.
	 */
	raw_spin_lock(&lock->wait_lock);

	ret = try_to_take_rt_mutex(lock, current, NULL);

	/*
	 * try_to_take_rt_mutex() sets the lock waiters bit
	 * unconditionally. Clean this up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}
Developer ID: Neves4, Project: DatKernel, Lines: 33, Source file: rtmutex.c
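Example 3's slow path is only reached when the lock-free fast path fails. A hedged sketch of the usual fastpath wrapper is shown below, modeled on the mainline rt_mutex_fasttrylock()/rt_mutex_trylock() pair; the exact signatures vary across kernel versions and are given here as an assumption.

/*
 * Sketch of the trylock fast path that falls back to the slow path above
 * (modeled on mainline kernel/rtmutex.c; version-dependent, assumed here).
 */
static inline int rt_mutex_fasttrylock(struct rt_mutex *lock,
				       int (*slowfn)(struct rt_mutex *lock))
{
	/* Uncontended case: cmpxchg NULL -> current takes the lock. */
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	/* Otherwise fall back to the slow path, e.g. rt_mutex_slowtrylock(). */
	return slowfn(lock);
}

int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}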


Example 4: printk_lock

static void printk_lock(struct rt_mutex *lock, int print_owner)
{
	if (lock->name)
		printk(" [%p] {%s}\n",
			lock, lock->name);
	else
		printk(" [%p] {%s:%d}\n",
			lock, lock->file, lock->line);

	if (print_owner && rt_mutex_owner(lock)) {
		printk(".. ->owner: %p\n", lock->owner);
		printk(".. held by:  ");
		printk_task(rt_mutex_owner(lock));
		printk("\n");
	}
}
Developer ID: 3sOx, Project: asuswrt-merlin, Lines: 16, Source file: rtmutex-debug.c


Example 5: rt_mutex_start_proxy_lock

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}
Developer ID: CSCLOG, Project: beaglebone, Lines: 48, Source file: rtmutex.c


Example 6: task_blocks_on_rt_mutex

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   int detect_deadlock)
{
    struct task_struct *owner = rt_mutex_owner(lock);
    struct rt_mutex_waiter *top_waiter = waiter;
    unsigned long flags;
    int boost = 0, res;

    spin_lock_irqsave(&current->pi_lock, flags);
    __rt_mutex_adjust_prio(current);
    waiter->task = current;
    waiter->lock = lock;
    plist_node_init(&waiter->list_entry, current->prio);
    plist_node_init(&waiter->pi_list_entry, current->prio);

    /* Get the top priority waiter on the lock */
    if (rt_mutex_has_waiters(lock))
        top_waiter = rt_mutex_top_waiter(lock);
    plist_add(&waiter->list_entry, &lock->wait_list);

    current->pi_blocked_on = waiter;

    spin_unlock_irqrestore(&current->pi_lock, flags);

    if (waiter == rt_mutex_top_waiter(lock)) {
        spin_lock_irqsave(&owner->pi_lock, flags);
        plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
        plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

        __rt_mutex_adjust_prio(owner);
        if (owner->pi_blocked_on) {
            boost = 1;
            /* gets dropped in rt_mutex_adjust_prio_chain()! */
            get_task_struct(owner);
        }
        spin_unlock_irqrestore(&owner->pi_lock, flags);
    }
    else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
        spin_lock_irqsave(&owner->pi_lock, flags);
        if (owner->pi_blocked_on) {
            boost = 1;
            /* gets dropped in rt_mutex_adjust_prio_chain()! */
            get_task_struct(owner);
        }
        spin_unlock_irqrestore(&owner->pi_lock, flags);
    }
    if (!boost)
        return 0;

    spin_unlock(&lock->wait_lock);

    res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                     current);

    spin_lock(&lock->wait_lock);

    return res;
}
Developer ID: FatSunHYS, Project: OSCourseDesign, Lines: 66, Source file: rtmutex.c
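Example 6 repeatedly calls __rt_mutex_adjust_prio() to (de)boost a task once the waiter lists have changed. A minimal sketch of that helper and its priority calculation, assumed from the same generation of kernel/rtmutex.c (it is not part of this article's snippets), looks roughly like this:

/*
 * Sketch of the priority (de)boosting helpers used in Example 6
 * (assumed from the plist-era kernel/rtmutex.c; call with
 * task->pi_lock held).
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	/* Boost to the top PI waiter's priority if that is higher. */
	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}

static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}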


Example 7: try_to_steal_lock

/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock,
				    struct task_struct *task)
{
	struct task_struct *pendowner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *next;
	unsigned long flags;

	if (!rt_mutex_owner_pending(lock))
		return 0;

	if (pendowner == task)
		return 1;

	raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
	if (task->prio >= pendowner->prio) {
		raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 0;
	}

	/*
	 * Check if a waiter is enqueued on the pending owners
	 * pi_waiters list. Remove it and readjust pending owners
	 * priority.
	 */
	if (likely(!rt_mutex_has_waiters(lock))) {
		raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 1;
	}

	/* No chain handling, pending owner is not blocked on anything: */
	next = rt_mutex_top_waiter(lock);
	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
	__rt_mutex_adjust_prio(pendowner);
	raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	/*
	 * We are going to steal the lock and a waiter was
	 * enqueued on the pending owners pi_waiters queue. So
	 * we have to enqueue this waiter into
	 * task->pi_waiters list. This covers the case,
	 * where task is boosted because it holds another
	 * lock and gets unboosted because the booster is
	 * interrupted, so we would delay a waiter with higher
	 * priority as task->normal_prio.
	 *
	 * Note: in the rare case of a SCHED_OTHER task changing
	 * its priority and thus stealing the lock, next->task
	 * might be task:
	 */
	if (likely(next->task != task)) {
		raw_spin_lock_irqsave(&task->pi_lock, flags);
		plist_add(&next->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}
	return 1;
}
Developer ID: 12rafael, Project: jellytimekernel, Lines: 62, Source file: rtmutex.c
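Example 7 relies on the old "pending owner" encoding, where rt_mutex_owner() and rt_mutex_owner_pending() decode state bits stored in lock->owner. A sketch of those helpers, assumed from pre-2.6.38 rtmutex_common.h (the exact bit values are an assumption for illustration), is:

/*
 * Sketch of the pending-owner helpers used by Example 7
 * (pre-2.6.38 rtmutex_common.h; bit values assumed).
 */
#define RT_MUTEX_OWNER_PENDING	1UL
#define RT_MUTEX_HAS_WAITERS	2UL
#define RT_MUTEX_OWNER_MASKALL	3UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
	return (struct task_struct *)
		((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
}

static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
{
	return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
}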


Example 8: debug_rt_mutex_deadlock

/*
 * We fill out the fields in the waiter to store the information about
 * the deadlock. We print when we return. act_waiter can be NULL in
 * case of a remove waiter operation.
 */
void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
			     struct rt_mutex *lock)
{
	struct task_struct *task;

	if (!rt_trace_on || detect || !act_waiter)
		return;

	task = rt_mutex_owner(act_waiter->lock);
	if (task && task != current) {
		act_waiter->deadlock_task_pid = task->pid;
		act_waiter->deadlock_lock = lock;
	}
}
Developer ID: 3sOx, Project: asuswrt-merlin, Lines: 19, Source file: rtmutex-debug.c


Example 9: debug_rt_mutex_deadlock

/*
 * We fill out the fields in the waiter to store the information about
 * the deadlock. We print when we return. act_waiter can be NULL in
 * case of a remove waiter operation.
 */
void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
			     struct rt_mutex_waiter *act_waiter,
			     struct rt_mutex *lock)
{
	struct task_struct *task;

	if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter)
		return;

	task = rt_mutex_owner(act_waiter->lock);
	if (task && task != current) {
		act_waiter->deadlock_task_pid = get_pid(task_pid(task));
		act_waiter->deadlock_lock = lock;
	}
}
Developer ID: mkrufky, Project: linux, Lines: 20, Source file: rtmutex-debug.c


Example 10: remove_waiter

/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
    int first = (waiter == rt_mutex_top_waiter(lock));
    struct task_struct *owner = rt_mutex_owner(lock);
    unsigned long flags;
    int boost = 0;

    spin_lock_irqsave(&current->pi_lock, flags);
    plist_del(&waiter->list_entry, &lock->wait_list);
    waiter->task = NULL;
    current->pi_blocked_on = NULL;
    spin_unlock_irqrestore(&current->pi_lock, flags);

    if (first && owner != current) {

        spin_lock_irqsave(&owner->pi_lock, flags);

        plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

        if (rt_mutex_has_waiters(lock)) {
            struct rt_mutex_waiter *next;

            next = rt_mutex_top_waiter(lock);
            plist_add(&next->pi_list_entry, &owner->pi_waiters);
        }
        __rt_mutex_adjust_prio(owner);

        if (owner->pi_blocked_on) {
            boost = 1;
            /* gets dropped in rt_mutex_adjust_prio_chain()! */
            get_task_struct(owner);
        }
        spin_unlock_irqrestore(&owner->pi_lock, flags);
    }

    WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

    if (!boost)
        return;

    spin_unlock(&lock->wait_lock);

    rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

    spin_lock(&lock->wait_lock);
}
Developer ID: FatSunHYS, Project: OSCourseDesign, Lines: 52, Source file: rtmutex.c


Example 11: rt_mutex_slowtrylock

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	raw_spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock, current, NULL);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}
Developer ID: CSCLOG, Project: beaglebone, Lines: 24, Source file: rtmutex.c


Example 12: rt_mutex_start_proxy_lock

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	mark_rt_mutex_waiters(lock);

	if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
		/* We got the lock for task. */
		debug_rt_mutex_lock(lock);
		rt_mutex_set_owner(lock, task, 0);
		raw_spin_unlock(&lock->wait_lock);
		rt_mutex_deadlock_account_lock(lock, task);
		return 1;
	}

	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

	if (ret && !waiter->task) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}
	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}
Developer ID: 12rafael, Project: jellytimekernel, Lines: 50, Source file: rtmutex.c


Example 13: rt_mutex_slowtrylock

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&lock->wait_lock, flags);

	if (likely(rt_mutex_owner(lock) != current)) {

		init_lists(lock);

		ret = try_to_take_rt_mutex(lock);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}
Developer ID: mrtos, Project: Logitech-Revue, Lines: 27, Source file: rtmutex.c


Example 14: debug_rt_mutex_proxy_unlock

void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
	TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
}
Developer ID: 3sOx, Project: asuswrt-merlin, Lines: 4, Source file: rtmutex-debug.c


Example 15: debug_rt_mutex_unlock

void debug_rt_mutex_unlock(struct rt_mutex *lock)
{
	TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
}
Developer ID: 3sOx, Project: asuswrt-merlin, Lines: 4, Source file: rtmutex-debug.c


Example 16: try_to_take_rt_mutex

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
		struct rt_mutex_waiter *waiter)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note, that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * It will get the lock because of one of these conditions:
	 * 1) there is no waiter
	 * 2) higher priority than waiters
	 * 3) it is top waiter
	 */
	if (rt_mutex_has_waiters(lock)) {
		if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
			if (!waiter || waiter != rt_mutex_top_waiter(lock))
				return 0;
		}
	}

	if (waiter || rt_mutex_has_waiters(lock)) {
		unsigned long flags;
		struct rt_mutex_waiter *top;

		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/* remove the queued waiter. */
		if (waiter) {
			plist_del(&waiter->list_entry, &lock->wait_list);
			task->pi_blocked_on = NULL;
		}

		/*
		 * We have to enqueue the top waiter(if it exists) into
		 * task->pi_waiters list.
		 */
		if (rt_mutex_has_waiters(lock)) {
			top = rt_mutex_top_waiter(lock);
			top->pi_list_entry.prio = top->list_entry.prio;
			plist_add(&top->pi_list_entry, &task->pi_waiters);
		}
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}
Developer ID: CSCLOG, Project: beaglebone, Lines: 82, Source file: rtmutex.c
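Example 16 ends with rt_mutex_set_owner(lock, task), which is where the transitional RT_MUTEX_HAS_WAITERS state mentioned in the comment is folded back into the owner field. A minimal sketch of that setter, assumed from the matching generation of kernel/rtmutex.c, is:

/*
 * Sketch of rt_mutex_set_owner() from the same kernel generation as
 * Example 16 (assumed; not one of this article's snippets). The waiters
 * bit is re-encoded into the owner pointer when ownership changes.
 */
static void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}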


Example 17: rt_mutex_adjust_prio_chain

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want hold a
	 * maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task() before !
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off then we check, if further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
//......... part of the code omitted .........
Developer ID: CSCLOG, Project: beaglebone, Lines: 101, Source file: rtmutex.c


Example 18: rt_spin_lock_slowlock

/*
 * Slow path lock function spin_lock style: this variant is very
 * careful not to miss any non-lock wakeups.
 *
 * The wakeup side uses wake_up_process_mutex, which, combined with
 * the xchg code of this function is a transparent sleep/wakeup
 * mechanism nested within any existing sleep/wakeup mechanism. This
 * enables the seemless use of arbitrary (blocking) spinlocks within
 * sleep/wakeup event loops.
 */
static void fastcall noinline __sched
rt_spin_lock_slowlock(struct rt_mutex *lock)
{
	struct rt_mutex_waiter waiter;
	unsigned long saved_state, state, flags;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock_irqsave(&lock->wait_lock, flags);
	init_lists(lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		spin_unlock_irqrestore(&lock->wait_lock, flags);
		return;
	}

	BUG_ON(rt_mutex_owner(lock) == current);

	/*
	 * Here we save whatever state the task was in originally,
	 * we'll restore it at the end of the function and we'll take
	 * any intermediate wakeup into account as well, independently
	 * of the lock sleep/wakeup mechanism. When we get a real
	 * wakeup the task->state is TASK_RUNNING and we change
	 * saved_state accordingly. If we did not get a real wakeup
	 * then we return with the saved state.
	 */
	saved_state = xchg(&current->state, TASK_UNINTERRUPTIBLE);

	for (;;) {
		unsigned long saved_flags;
		int saved_lock_depth = current->lock_depth;

		/* Try to acquire the lock */
		if (try_to_take_rt_mutex(lock))
			break;
		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by an higher prio task.
		 */
		if (!waiter.task) {
			task_blocks_on_rt_mutex(lock, &waiter, 0, flags);
			/* Wakeup during boost ? */
			if (unlikely(!waiter.task))
				continue;
		}

		/*
		 * Prevent schedule() to drop BKL, while waiting for
		 * the lock ! We restore lock_depth when we come back.
		 */
		saved_flags = current->flags & PF_NOSCHED;
		current->lock_depth = -1;
		current->flags &= ~PF_NOSCHED;
		spin_unlock_irqrestore(&lock->wait_lock, flags);

		debug_rt_mutex_print_deadlock(&waiter);

		schedule_rt_mutex(lock);

		spin_lock_irqsave(&lock->wait_lock, flags);
		current->flags |= saved_flags;
		current->lock_depth = saved_lock_depth;
		state = xchg(&current->state, TASK_UNINTERRUPTIBLE);
		if (unlikely(state == TASK_RUNNING))
			saved_state = TASK_RUNNING;
	}

	state = xchg(&current->state, saved_state);
	if (unlikely(state == TASK_RUNNING))
		current->state = TASK_RUNNING;

	/*
	 * Extremely rare case, if we got woken up by a non-mutex wakeup,
	 * and we managed to steal the lock despite us not being the
	 * highest-prio waiter (due to SCHED_OTHER changing prio), then we
	 * can end up with a non-NULL waiter.task:
	 */
	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter, flags);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up:
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock_irqrestore(&lock->wait_lock, flags);
//......... part of the code omitted .........
Developer ID: mrtos, Project: Logitech-Revue, Lines: 101, Source file: rtmutex.c


Example 19: mark_rt_mutex_waiters

static void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock(&lock->wait_lock);
	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
//......... remainder of the code omitted .........
Developer ID: mikuhatsune001, Project: linux2.6.32, Lines: 30, Source file: rtmutex.c
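The race described in the truncated comment is resolved by the caller: when the cmpxchg loses against a newly arrived waiter, unlock_rt_mutex_safe() returns false and the slow unlock path retakes wait_lock and retries. A hedged sketch of that caller-side loop, modeled on the mainline rt_mutex_slowunlock() and given here as an assumption, looks like this:

/*
 * Sketch of how the unlock slow path uses unlock_rt_mutex_safe()
 * (modeled on mainline rt_mutex_slowunlock(); assumed, version dependent).
 */
static void __sched rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);
	rt_mutex_deadlock_account_unlock(current);

	/*
	 * No waiter queued: try the fastpath-aware unlock. It drops
	 * wait_lock; if a new waiter sneaks in and the cmpxchg fails,
	 * relock and re-check.
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock) == true)
			return;
		/* Relock the rtmutex and try again */
		raw_spin_lock(&lock->wait_lock);
	}

	/* Otherwise hand the lock off by waking the top waiter. */
	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo PI boosting if necessary. */
	rt_mutex_adjust_prio(current);
}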



Note: The rt_mutex_owner function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce this article without permission.

