Diffstat (limited to 'kernel/rtmutex.c')
 -rw-r--r--  kernel/rtmutex.c  252
 1 file changed, 29 insertions(+), 223 deletions(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index d9ca207cec0..1e09308bf2a 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -82,47 +82,6 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
owner = *p;
} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
-
-/*
- * Safe fastpath aware unlock:
- * 1) Clear the waiters bit
- * 2) Drop lock->wait_lock
- * 3) Try to unlock the lock with cmpxchg
- */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
- __releases(lock->wait_lock)
-{
- struct task_struct *owner = rt_mutex_owner(lock);
-
- clear_rt_mutex_waiters(lock);
- raw_spin_unlock(&lock->wait_lock);
- /*
- * If a new waiter comes in between the unlock and the cmpxchg
- * we have two situations:
- *
- * unlock(wait_lock);
- * lock(wait_lock);
- * cmpxchg(p, owner, 0) == owner
- * mark_rt_mutex_waiters(lock);
- * acquire(lock);
- * or:
- *
- * unlock(wait_lock);
- * lock(wait_lock);
- * mark_rt_mutex_waiters(lock);
- *
- * cmpxchg(p, owner, 0) != owner
- * enqueue_waiter();
- * unlock(wait_lock);
- * lock(wait_lock);
- * wake waiter();
- * unlock(wait_lock);
- * lock(wait_lock);
- * acquire(lock);
- */
- return rt_mutex_cmpxchg(lock, owner, NULL);
-}
-
#else
# define rt_mutex_cmpxchg(l,c,n) (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
@@ -130,17 +89,6 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
lock->owner = (struct task_struct *)
((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
-
-/*
- * Simple slow path only version: lock->owner is protected by lock->wait_lock.
- */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
- __releases(lock->wait_lock)
-{
- lock->owner = NULL;
- raw_spin_unlock(&lock->wait_lock);
- return true;
-}
#endif
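
For readers tracing the two versions of mark_rt_mutex_waiters() above, here is a minimal userspace model of the cmpxchg loop from the fastpath variant; struct model_rt_mutex and the GCC __sync builtin are illustrative stand-ins for the kernel's rt_mutex and cmpxchg(), not the real API:

#define RT_MUTEX_HAS_WAITERS	1UL	/* bit 0 of the owner word */

struct model_rt_mutex {
	unsigned long owner;		/* task pointer | flag bits */
};

/* Retry until the waiters bit is set atomically, mirroring the
 * do/while cmpxchg loop in mark_rt_mutex_waiters() above. */
static void model_mark_waiters(struct model_rt_mutex *lock)
{
	unsigned long owner, *p = &lock->owner;

	do {
		owner = *p;
	} while (__sync_val_compare_and_swap(p, owner,
			owner | RT_MUTEX_HAS_WAITERS) != owner);
}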
/*
@@ -194,11 +142,6 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
*/
int max_lock_depth = 1024;
-static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
-{
- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
-}
-
/*
* Adjust the priority chain. Also used for deadlock detection.
* Decreases task's usage by one - may thus free the task.
@@ -207,7 +150,6 @@ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
int deadlock_detect,
struct rt_mutex *orig_lock,
- struct rt_mutex *next_lock,
struct rt_mutex_waiter *orig_waiter,
struct task_struct *top_task)
{
@@ -241,7 +183,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
}
put_task_struct(task);
- return -EDEADLK;
+ return deadlock_detect ? -EDEADLK : 0;
}
retry:
/*
@@ -266,32 +208,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto out_unlock_pi;
/*
- * We dropped all locks after taking a refcount on @task, so
- * the task might have moved on in the lock chain or even left
- * the chain completely and blocks now on an unrelated lock or
- * on @orig_lock.
- *
- * We stored the lock on which @task was blocked in @next_lock,
- * so we can detect the chain change.
- */
- if (next_lock != waiter->lock)
- goto out_unlock_pi;
-
- /*
* Drop out, when the task has no waiters. Note,
* top_waiter can be NULL, when we are in the deboosting
* mode!
*/
- if (top_waiter) {
- if (!task_has_pi_waiters(task))
- goto out_unlock_pi;
- /*
- * If deadlock detection is off, we stop here if we
- * are not the top pi waiter of the task.
- */
- if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
- goto out_unlock_pi;
- }
+ if (top_waiter && (!task_has_pi_waiters(task) ||
+ top_waiter != task_top_pi_waiter(task)))
+ goto out_unlock_pi;
/*
* When deadlock detection is off then we check, if further
@@ -307,16 +230,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto retry;
}
- /*
- * Deadlock detection. If the lock is the same as the original
- * lock which caused us to walk the lock chain or if the
- * current lock is owned by the task which initiated the chain
- * walk, we detected a deadlock.
- */
+ /* Deadlock detection */
if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
raw_spin_unlock(&lock->wait_lock);
- ret = -EDEADLK;
+ ret = deadlock_detect ? -EDEADLK : 0;
goto out_unlock_pi;
}
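
A hypothetical caller-side view of the convention this hunk restores; ret, owner, lock, waiter and task are the names used elsewhere in this diff, and the call matches the post-patch five-argument signature:

/* With detection off the chain walk now reports success even when it
 * runs into a cycle, so -EDEADLK only reaches callers that passed a
 * non-zero deadlock_detect. */
ret = rt_mutex_adjust_prio_chain(owner, 0, lock, waiter, task);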
@@ -363,26 +281,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
__rt_mutex_adjust_prio(task);
}
- /*
- * Check whether the task which owns the current lock is pi
- * blocked itself. If yes we store a pointer to the lock for
- * the lock chain change detection above. After we dropped
- * task->pi_lock next_lock cannot be dereferenced anymore.
- */
- next_lock = task_blocked_on_lock(task);
-
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
top_waiter = rt_mutex_top_waiter(lock);
raw_spin_unlock(&lock->wait_lock);
- /*
- * We reached the end of the lock chain. Stop right here. No
- * point to go back just to figure that out.
- */
- if (!next_lock)
- goto out_put_task;
-
if (!detect_deadlock && waiter != top_waiter)
goto out_put_task;
@@ -493,21 +396,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
{
struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex_waiter *top_waiter = waiter;
- struct rt_mutex *next_lock;
- int chain_walk = 0, res;
unsigned long flags;
-
- /*
- * Early deadlock detection. We really don't want the task to
- * enqueue on itself just to untangle the mess later. It's not
- * only an optimization. We drop the locks, so another waiter
- * can come in before the chain walk detects the deadlock. So
- * the other will detect the deadlock and return -EDEADLOCK,
- * which is wrong, as the other waiter is not in a deadlock
- * situation.
- */
- if (owner == task)
- return -EDEADLK;
+ int chain_walk = 0, res;
raw_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
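
The block removed above was the early self-deadlock check; a minimal kernel-side sketch of the case it caught, using the public rt_mutex API (DEFINE_RT_MUTEX/rt_mutex_lock), purely for illustration:

static DEFINE_RT_MUTEX(m);

rt_mutex_lock(&m);
/* Second acquisition by the same task: owner == task inside
 * task_blocks_on_rt_mutex().  The removed check returned -EDEADLK
 * right here; without it the task enqueues behind itself and only
 * the chain walk (if detection is enabled) can spot the cycle. */
rt_mutex_lock(&m);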
@@ -528,28 +418,20 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
if (!owner)
return 0;
- raw_spin_lock_irqsave(&owner->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
+ raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
__rt_mutex_adjust_prio(owner);
if (owner->pi_blocked_on)
chain_walk = 1;
- } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
- chain_walk = 1;
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
+ else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
+ chain_walk = 1;
- /* Store the lock on which owner is blocked or NULL */
- next_lock = task_blocked_on_lock(owner);
-
- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
- /*
- * Even if full deadlock detection is on, if the owner is not
- * blocked itself, we can avoid finding this out in the chain
- * walk.
- */
- if (!chain_walk || !next_lock)
+ if (!chain_walk)
return 0;
/*
@@ -561,8 +443,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
raw_spin_unlock(&lock->wait_lock);
- res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
- next_lock, waiter, task);
+ res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
+ task);
raw_spin_lock(&lock->wait_lock);
@@ -572,8 +454,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
/*
* Wake up the next waiter on the lock.
*
- * Remove the top waiter from the current task's pi waiter list and
- * wake it up.
+ * Remove the top waiter from the current task's waiter list and wake it up.
*
* Called with lock->wait_lock held.
*/
@@ -594,23 +475,10 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
*/
plist_del(&waiter->pi_list_entry, &current->pi_waiters);
- /*
- * As we are waking up the top waiter, and the waiter stays
- * queued on the lock until it gets the lock, this lock
- * obviously has waiters. Just set the bit here and this has
- * the added benefit of forcing all new tasks into the
- * slow path making sure no task of lower priority than
- * the top waiter can steal this lock.
- */
- lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
+ rt_mutex_set_owner(lock, NULL);
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
- /*
- * It's safe to dereference waiter as it cannot go away as
- * long as we hold lock->wait_lock. The waiter task needs to
- * acquire it in order to dequeue the waiter.
- */
wake_up_process(waiter->task);
}
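
The comment removed in this hunk explains why the waiters bit alone used to keep new lockers out of the fastpath; a hypothetical model of the technique being removed here, reusing struct model_rt_mutex from the sketch near the top of this page:

/* The fastpath acquires by cmpxchg()ing the owner word from 0 (free)
 * to the new task.  A word of bare RT_MUTEX_HAS_WAITERS (no owner,
 * bit 0 set) never compares equal to 0, so every new locker drops
 * into the slowpath and cannot steal the lock from the woken waiter. */
static int model_fastpath_trylock(struct model_rt_mutex *lock,
				  unsigned long new_owner)
{
	return __sync_bool_compare_and_swap(&lock->owner, 0UL, new_owner);
}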
@@ -625,8 +493,8 @@ static void remove_waiter(struct rt_mutex *lock,
{
int first = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
- struct rt_mutex *next_lock = NULL;
unsigned long flags;
+ int chain_walk = 0;
raw_spin_lock_irqsave(&current->pi_lock, flags);
plist_del(&waiter->list_entry, &lock->wait_list);
@@ -650,15 +518,15 @@ static void remove_waiter(struct rt_mutex *lock,
}
__rt_mutex_adjust_prio(owner);
- /* Store the lock on which owner is blocked or NULL */
- next_lock = task_blocked_on_lock(owner);
+ if (owner->pi_blocked_on)
+ chain_walk = 1;
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
- if (!next_lock)
+ if (!chain_walk)
return;
/* gets dropped in rt_mutex_adjust_prio_chain()! */
@@ -666,7 +534,7 @@ static void remove_waiter(struct rt_mutex *lock,
raw_spin_unlock(&lock->wait_lock);
- rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
+ rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
raw_spin_lock(&lock->wait_lock);
}
@@ -679,7 +547,6 @@ static void remove_waiter(struct rt_mutex *lock,
void rt_mutex_adjust_pi(struct task_struct *task)
{
struct rt_mutex_waiter *waiter;
- struct rt_mutex *next_lock;
unsigned long flags;
raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -689,13 +556,12 @@ void rt_mutex_adjust_pi(struct task_struct *task)
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
- next_lock = waiter->lock;
+
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
-
- rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
+ rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
/**
@@ -747,26 +613,6 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
return ret;
}
-static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
- struct rt_mutex_waiter *w)
-{
- /*
- * If the result is not -EDEADLOCK or the caller requested
- * deadlock detection, nothing to do here.
- */
- if (res != -EDEADLOCK || detect_deadlock)
- return;
-
- /*
- * Yell loudly and stop the task right here.
- */
- rt_mutex_print_deadlock(w);
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- }
-}
-
/*
* Slow path lock function:
*/
@@ -804,10 +650,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
set_current_state(TASK_RUNNING);
- if (unlikely(ret)) {
+ if (unlikely(ret))
remove_waiter(lock, &waiter);
- rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
- }
/*
* try_to_take_rt_mutex() sets the waiter bit
@@ -863,49 +707,12 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
rt_mutex_deadlock_account_unlock(current);
- /*
- * We must be careful here if the fast path is enabled. If we
- * have no waiters queued we cannot set owner to NULL here
- * because of:
- *
- * foo->lock->owner = NULL;
- * rtmutex_lock(foo->lock); <- fast path
- * free = atomic_dec_and_test(foo->refcnt);
- * rtmutex_unlock(foo->lock); <- fast path
- * if (free)
- * kfree(foo);
- * raw_spin_unlock(foo->lock->wait_lock);
- *
- * So for the fastpath enabled kernel:
- *
- * Nothing can set the waiters bit as long as we hold
- * lock->wait_lock. So we do the following sequence:
- *
- * owner = rt_mutex_owner(lock);
- * clear_rt_mutex_waiters(lock);
- * raw_spin_unlock(&lock->wait_lock);
- * if (cmpxchg(&lock->owner, owner, 0) == owner)
- * return;
- * goto retry;
- *
- * The fastpath disabled variant is simple as all access to
- * lock->owner is serialized by lock->wait_lock:
- *
- * lock->owner = NULL;
- * raw_spin_unlock(&lock->wait_lock);
- */
- while (!rt_mutex_has_waiters(lock)) {
- /* Drops lock->wait_lock ! */
- if (unlock_rt_mutex_safe(lock) == true)
- return;
- /* Relock the rtmutex and try again */
- raw_spin_lock(&lock->wait_lock);
+ if (!rt_mutex_has_waiters(lock)) {
+ lock->owner = NULL;
+ raw_spin_unlock(&lock->wait_lock);
+ return;
}
- /*
- * The wakeup next waiter path does not suffer from the above
- * race. See the comments there.
- */
wakeup_next_waiter(lock);
raw_spin_unlock(&lock->wait_lock);
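
The long comment and retry loop removed in this hunk implemented the fastpath-safe unlock; a hypothetical userspace model of that removed scheme, building on struct model_rt_mutex above and using a pthread mutex in place of lock->wait_lock:

#include <pthread.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

static int model_has_waiters(struct model_rt_mutex *lock)
{
	return lock->owner & RT_MUTEX_HAS_WAITERS;
}

/* Clear the waiters bit, drop wait_lock, then try to cmpxchg the
 * owner word to 0.  If a new waiter queued itself in that window and
 * set the bit again, the compare fails and we relock and retry --
 * the exact sequence the removed comment spells out. */
static void model_unlock_safe(struct model_rt_mutex *lock,
			      unsigned long owner)
{
	pthread_mutex_lock(&wait_lock);
	while (!model_has_waiters(lock)) {
		lock->owner = owner;		/* waiters bit now clear */
		pthread_mutex_unlock(&wait_lock);
		if (__sync_bool_compare_and_swap(&lock->owner, owner, 0UL))
			return;			/* really released */
		pthread_mutex_lock(&wait_lock);	/* a waiter raced in */
	}
	/* Waiters are queued: the kernel wakes the top waiter here
	 * (wakeup_next_waiter()) before dropping wait_lock. */
	pthread_mutex_unlock(&wait_lock);
}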
@@ -1152,8 +959,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
return 1;
}
- /* We enforce deadlock detection for futexes */
- ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
+ ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
if (ret && !rt_mutex_owner(lock)) {
/*