summaryrefslogtreecommitdiffstats
path: root/kernel/hung_task.c
diff options
context:
space:
mode:
authorTetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>2019-01-03 18:26:31 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2019-01-04 16:13:45 -0500
commit304ae42739b108305f8d7b3eb3c1aec7c2b643a9 (patch)
tree6deeb37924ba3177857df47f67507823b57e65d5 /kernel/hung_task.c
parent168e06f7937d96c7222037d8a05565e8a6eb00fe (diff)
kernel/hung_task.c: break RCU locks based on jiffies
check_hung_uninterruptible_tasks() is currently calling rcu_lock_break() for every 1024 threads. But check_hung_task() is very slow if printk() was called, and is very fast otherwise. If many threads within some 1024 threads called printk(), the RCU grace period might be extended enough to trigger RCU stall warnings. Therefore, calling rcu_lock_break() for every some fixed jiffies will be safer. Link: http://lkml.kernel.org/r/1544800658-11423-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> Acked-by: Paul E. McKenney <paulmck@linux.ibm.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com> Cc: Vitaly Kuznetsov <vkuznets@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/hung_task.c')
-rw-r--r--kernel/hung_task.c8
1 files changed, 4 insertions, 4 deletions
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 85af0cde7f46..4a9191617076 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -34,7 +34,7 @@ int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
  * is disabled during the critical section. It also controls the size of
  * the RCU grace period. So it needs to be upper-bound.
  */
-#define HUNG_TASK_BATCHING 1024
+#define HUNG_TASK_LOCK_BREAK (HZ / 10)
 
 /*
  * Zero means infinite timeout - no checking done:
@@ -171,7 +171,7 @@ static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 static void check_hung_uninterruptible_tasks(unsigned long timeout)
 {
 	int max_count = sysctl_hung_task_check_count;
-	int batch_count = HUNG_TASK_BATCHING;
+	unsigned long last_break = jiffies;
 	struct task_struct *g, *t;
 
 	/*
@@ -186,10 +186,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 	for_each_process_thread(g, t) {
 		if (!max_count--)
 			goto unlock;
-		if (!--batch_count) {
-			batch_count = HUNG_TASK_BATCHING;
+		if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) {
 			if (!rcu_lock_break(g, t))
 				goto unlock;
+			last_break = jiffies;
 		}
 		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
 		if (t->state == TASK_UNINTERRUPTIBLE)