locking/mutex: Rework task_struct::blocked_on
Track the blocked-on relation for mutexes, to allow following this
relation at schedule time.
    task
      | blocked-on
      v
    mutex
      | owner
      v
    task
This will all be used for tracking blocked-task/mutex chains
with the proxy-execution patches, in a similar fashion to how
priority inheritance is done with rt_mutexes.
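For illustration only (not part of this patch): a minimal user-space
sketch of walking that chain, with toy struct stand-ins and a
hypothetical find_proxy_owner() helper, assuming the chain is acyclic:

#include <stdio.h>

/* Toy stand-ins for task_struct and mutex -- not the kernel's types. */
struct task;

struct mutex_t {
	struct task *owner;		/* task currently holding the lock */
};

struct task {
	const char *comm;
	struct mutex_t *blocked_on;	/* mutex this task waits on, or NULL */
};

/*
 * Follow the task -> blocked_on -> owner links until reaching a task
 * that is not itself blocked. Proxy execution would run that task on
 * behalf of the blocked one, much as PI boosts rt_mutex owners.
 * Assumes no cycles (the patch clears blocked_on on wakeup to avoid
 * unresolvable circular chains).
 */
static struct task *find_proxy_owner(struct task *p)
{
	while (p->blocked_on && p->blocked_on->owner)
		p = p->blocked_on->owner;
	return p;
}

int main(void)
{
	struct task a = { .comm = "A" }, b = { .comm = "B" }, c = { .comm = "C" };
	struct mutex_t m1 = { .owner = &b }, m2 = { .owner = &c };

	a.blocked_on = &m1;	/* A waits on m1, which B holds */
	b.blocked_on = &m2;	/* B waits on m2, which C holds */

	printf("%s runs on behalf of %s\n", find_proxy_owner(&a)->comm, a.comm);
	return 0;
}

The kernel would do this walk at schedule time under the appropriate
locks; the sketch only shows the shape of the data.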
For serialization, blocked_on is only set by the task itself
(current), and both setting and clearing (the latter potentially
by other tasks) are done while holding mutex::wait_lock.
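A rough pthread analogue of that rule (set_blocked_on() and
clear_blocked_on() are hypothetical helpers invented here to make the
rule concrete; the patch itself open-codes the assignments):

#include <assert.h>
#include <pthread.h>
#include <stddef.h>

/* Toy stand-ins; field names mirror the patch, nothing else does. */
struct mutex_t {
	pthread_mutex_t wait_lock;	/* stands in for mutex::wait_lock */
};

struct task {
	struct mutex_t *blocked_on;
};

/* Only the blocking task itself ("current") ever sets blocked_on ... */
static void set_blocked_on(struct task *current_task, struct mutex_t *m)
{
	pthread_mutex_lock(&m->wait_lock);
	assert(current_task->blocked_on == NULL);	/* not already blocked */
	current_task->blocked_on = m;
	pthread_mutex_unlock(&m->wait_lock);
}

/* ... while clearing may also be done by others (e.g. the waker at
 * unlock time), but again only while holding the same wait_lock. */
static void clear_blocked_on(struct task *p, struct mutex_t *m)
{
	pthread_mutex_lock(&m->wait_lock);
	assert(p->blocked_on == NULL || p->blocked_on == m);
	p->blocked_on = NULL;
	pthread_mutex_unlock(&m->wait_lock);
}

int main(void)
{
	struct mutex_t m = { PTHREAD_MUTEX_INITIALIZER };
	struct task t = { NULL };

	set_blocked_on(&t, &m);		/* t is about to wait on m */
	clear_blocked_on(&t, &m);	/* t's waker severs the link */
	return 0;
}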
[minor changes while rebasing]
[jstultz: Fix blocked_on tracking in __mutex_lock_common in error paths]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
Signed-off-by: Connor O'Brien <connoro@google.com>
Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lkml.kernel.org/r/20250712033407.2383110-3-jstultz@google.com
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1230,10 +1230,7 @@ struct task_struct {
 	struct rt_mutex_waiter		*pi_blocked_on;
 #endif
 
-#ifdef CONFIG_DEBUG_MUTEXES
-	/* Mutex deadlock detection: */
-	struct mutex_waiter		*blocked_on;
-#endif
+	struct mutex			*blocked_on;	/* lock we're blocked on */
 
 #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
 	/*
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2123,9 +2123,8 @@ __latent_entropy struct task_struct *copy_process(
 	lockdep_init_task(p);
 #endif
 
-#ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
-#endif
+
 #ifdef CONFIG_BCACHE
 	p->sequential_io	= 0;
 	p->sequential_io_avg	= 0;
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -53,17 +53,18 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 {
 	lockdep_assert_held(&lock->wait_lock);
 
-	/* Mark the current thread as blocked on the lock: */
-	task->blocked_on = waiter;
+	/* Current thread can't be already blocked (since it's executing!) */
+	DEBUG_LOCKS_WARN_ON(task->blocked_on);
 }
 
 void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 			       struct task_struct *task)
 {
+	struct mutex *blocked_on = READ_ONCE(task->blocked_on);
+
 	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
 	DEBUG_LOCKS_WARN_ON(waiter->task != task);
-	DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
-	task->blocked_on = NULL;
+	DEBUG_LOCKS_WARN_ON(blocked_on && blocked_on != lock);
 
 	INIT_LIST_HEAD(&waiter->list);
 	waiter->task = NULL;
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -644,6 +644,8 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 			goto err_early_kill;
 	}
 
+	WARN_ON(current->blocked_on);
+	current->blocked_on = lock;
 	set_current_state(state);
 	trace_contention_begin(lock, LCB_F_MUTEX);
 	for (;;) {
@@ -680,6 +682,12 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 
 		first = __mutex_waiter_is_first(lock, &waiter);
 
+		/*
+		 * As we likely have been woken up by task
+		 * that has cleared our blocked_on state, re-set
+		 * it to the lock we are trying to acquire.
+		 */
+		current->blocked_on = lock;
 		set_current_state(state);
 		/*
 		 * Here we order against unlock; we must either see it change
@@ -691,8 +699,11 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 
 		if (first) {
 			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
+			/* clear blocked_on as mutex_optimistic_spin may schedule() */
+			current->blocked_on = NULL;
 			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
 				break;
+			current->blocked_on = lock;
 			trace_contention_begin(lock, LCB_F_MUTEX);
 		}
 
@@ -700,6 +711,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 	}
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 acquired:
+	current->blocked_on = NULL;
 	__set_current_state(TASK_RUNNING);
 
 	if (ww_ctx) {
@@ -729,9 +741,11 @@ skip_wait:
 	return 0;
 
 err:
+	current->blocked_on = NULL;
 	__set_current_state(TASK_RUNNING);
 	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
+	WARN_ON(current->blocked_on);
 	trace_contention_end(lock, ret);
 	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	debug_mutex_free_waiter(&waiter);
@@ -942,6 +956,14 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 		next = waiter->task;
 
 		debug_mutex_wake_waiter(lock, waiter);
+		/*
+		 * Unlock wakeups can be happening in parallel
+		 * (when optimistic spinners steal and release
+		 * the lock), so blocked_on may already be
+		 * cleared here.
+		 */
+		WARN_ON(next->blocked_on && next->blocked_on != lock);
+		next->blocked_on = NULL;
 		wake_q_add(&wake_q, next);
 	}
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -283,7 +283,15 @@ __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
 	if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
 #ifndef WW_RT
 		debug_mutex_wake_waiter(lock, waiter);
+		/*
+		 * When waking up the task to die, be sure to clear the
+		 * blocked_on pointer. Otherwise we can see circular
+		 * blocked_on relationships that can't resolve.
+		 */
+		WARN_ON(waiter->task->blocked_on &&
+			waiter->task->blocked_on != lock);
 #endif
+		waiter->task->blocked_on = NULL;
 		wake_q_add(wake_q, waiter->task);
 	}
 
@@ -331,9 +339,15 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
 	 * it's wounded in __ww_mutex_check_kill() or has a
 	 * wakeup pending to re-read the wounded state.
 	 */
-	if (owner != current)
+	if (owner != current) {
+		/*
+		 * When waking up the task to wound, be sure to clear the
+		 * blocked_on pointer. Otherwise we can see circular
+		 * blocked_on relationships that can't resolve.
+		 */
+		owner->blocked_on = NULL;
 		wake_q_add(wake_q, owner);
+	}
 
 	return true;
 }