From c797384edc967d42ee00e3cafaf2f20a826cbe45 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Tue, 8 Dec 2020 11:44:29 -0800 Subject: [PATCH] ANDROID: kthread: break dependency between worker->lock and task_struct->pi_lock A number of kthread-related functions indirectly take task_struct->pi_lock while holding worker->lock in the call chain like this: spin_lock(&worker->lock) kthread_insert_work wake_up_process try_to_wake_up raw_spin_lock_irqsave(&p->pi_lock, flags) This lock dependency exists whenever kthread_insert_work is called either directly or indirectly via __kthread_queue_delayed_work in the following functions: kthread_queue_work kthread_delayed_work_timer_fn kthread_queue_delayed_work kthread_flush_work kthread_mod_delayed_work This creates possibilities for circular dependencies like the one reported at [1]. Break this lock dependency by moving task wakeup after worker->lock has been released. [1]: https://lore.kernel.org/lkml/CAJuCfpG4NkhpQvZjgXZ_3gm6Hf1QgN_eUOQ8iX9Cv1k9whLwSQ@mail.gmail.com Reported-by: Ke Wang Reported-by: Shakeel Butt Suggested-by: Peter Zijlstra Tested-by: Shakeel Butt Signed-off-by: Suren Baghdasaryan Link: https://lkml.org/lkml/2020/5/4/1148 Cherry picked instead of commit 461daba06bdcb9c7a3f92b9bbd110e1f7d093ffc as an alternative solution for the lockdep error that does not break KMI. 
Bug: 174875976 Signed-off-by: Suren Baghdasaryan Change-Id: I2478847f66c85dca953846cd77955928d690c97c --- kernel/kthread.c | 44 +++++++++++++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/kernel/kthread.c b/kernel/kthread.c index 1e9bf63a30de..ec0bdcc79ef9 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -9,6 +9,7 @@ #include <uapi/linux/sched/types.h> #include <linux/sched.h> #include <linux/sched/task.h> +#include <linux/sched/wake_q.h> #include <linux/kthread.h> #include <linux/completion.h> #include <linux/err.h> @@ -806,14 +807,15 @@ static void kthread_insert_work_sanity_check(struct kthread_worker *worker, /* insert @work before @pos in @worker */ static void kthread_insert_work(struct kthread_worker *worker, struct kthread_work *work, - struct list_head *pos) + struct list_head *pos, + struct wake_q_head *wake_q) { kthread_insert_work_sanity_check(worker, work); list_add_tail(&work->node, pos); work->worker = worker; if (!worker->current_work && likely(worker->task)) - wake_up_process(worker->task); + wake_q_add(wake_q, worker->task); } /** @@ -831,15 +833,19 @@ static void kthread_insert_work(struct kthread_worker *worker, bool kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work) { - bool ret = false; + DEFINE_WAKE_Q(wake_q); unsigned long flags; + bool ret = false; raw_spin_lock_irqsave(&worker->lock, flags); if (!queuing_blocked(worker, work)) { - kthread_insert_work(worker, work, &worker->work_list); + kthread_insert_work(worker, work, &worker->work_list, &wake_q); ret = true; } raw_spin_unlock_irqrestore(&worker->lock, flags); + + wake_up_q(&wake_q); + return ret; } EXPORT_SYMBOL_GPL(kthread_queue_work); /** @@ -857,6 +863,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t) struct kthread_delayed_work *dwork = from_timer(dwork, t, timer); struct kthread_work *work = &dwork->work; struct kthread_worker *worker = work->worker; + DEFINE_WAKE_Q(wake_q); unsigned long flags; /* @@ -873,15 +880,18 @@ void kthread_delayed_work_timer_fn(struct timer_list *t) /* Move the work from worker->delayed_work_list.
*/ WARN_ON_ONCE(list_empty(&work->node)); list_del_init(&work->node); - kthread_insert_work(worker, work, &worker->work_list); + kthread_insert_work(worker, work, &worker->work_list, &wake_q); raw_spin_unlock_irqrestore(&worker->lock, flags); + + wake_up_q(&wake_q); } EXPORT_SYMBOL(kthread_delayed_work_timer_fn); static void __kthread_queue_delayed_work(struct kthread_worker *worker, struct kthread_delayed_work *dwork, - unsigned long delay) + unsigned long delay, + struct wake_q_head *wake_q) { struct timer_list *timer = &dwork->timer; struct kthread_work *work = &dwork->work; @@ -897,7 +907,7 @@ static void __kthread_queue_delayed_work(struct kthread_worker *worker, * on that there's no such delay when @delay is 0. */ if (!delay) { - kthread_insert_work(worker, work, &worker->work_list); + kthread_insert_work(worker, work, &worker->work_list, wake_q); return; } @@ -930,17 +940,21 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker, unsigned long delay) { struct kthread_work *work = &dwork->work; + DEFINE_WAKE_Q(wake_q); unsigned long flags; bool ret = false; raw_spin_lock_irqsave(&worker->lock, flags); if (!queuing_blocked(worker, work)) { - __kthread_queue_delayed_work(worker, dwork, delay); + __kthread_queue_delayed_work(worker, dwork, delay, &wake_q); ret = true; } raw_spin_unlock_irqrestore(&worker->lock, flags); + + wake_up_q(&wake_q); + return ret; } EXPORT_SYMBOL_GPL(kthread_queue_delayed_work); @@ -969,6 +983,7 @@ void kthread_flush_work(struct kthread_work *work) KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), COMPLETION_INITIALIZER_ONSTACK(fwork.done), }; + DEFINE_WAKE_Q(wake_q); struct kthread_worker *worker; bool noop = false; @@ -981,15 +996,18 @@ void kthread_flush_work(struct kthread_work *work) WARN_ON_ONCE(work->worker != worker); if (!list_empty(&work->node)) - kthread_insert_work(worker, &fwork.work, work->node.next); + kthread_insert_work(worker, &fwork.work, + work->node.next, &wake_q); else if (worker->current_work == 
work) kthread_insert_work(worker, &fwork.work, - worker->work_list.next); + worker->work_list.next, &wake_q); else noop = true; raw_spin_unlock_irq(&worker->lock); + wake_up_q(&wake_q); + if (!noop) wait_for_completion(&fwork.done); } @@ -1067,6 +1085,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker, unsigned long delay) { struct kthread_work *work = &dwork->work; + DEFINE_WAKE_Q(wake_q); unsigned long flags; int ret = false; @@ -1085,9 +1104,12 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker, ret = __kthread_cancel_work(work, true, &flags); fast_queue: - __kthread_queue_delayed_work(worker, dwork, delay); + __kthread_queue_delayed_work(worker, dwork, delay, &wake_q); out: raw_spin_unlock_irqrestore(&worker->lock, flags); + + wake_up_q(&wake_q); + return ret; } EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);