author:    Kirill Tkhai <ktkhai@parallels.com>        2014-06-25 04:19:55 -0400
committer: Ingo Molnar <mingo@kernel.org>             2014-07-05 05:17:45 -0400
commit:    b728ca06029d085a1585c1926610f26de93b9146
tree:      3fd55fef326f355af0a61785ff92c6f11aa5b2e5   /kernel/cpu.c
parent:    99b625670f1447ecf0739161efbe7f2f43c0e0b6
sched: Rework check_for_tasks()
1) Iterate through all threads in the system: check every thread, not
   only the group leaders (a short sketch of the difference follows
   after this list).
2) Check p->on_rq instead of p->state and cputime: a preempted task in
   a !TASK_RUNNING state, or a freshly created task, may still be
   queued, and we want those reported too.
3) Use read_lock() instead of write_lock(): this function does not
   modify any structures, so read_lock() is enough.
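For context, the scope difference between the two iterators can be shown
with a small kernel-style sketch. The function names below are hypothetical
and only illustrate the walk; they are not part of the patch, and callers
are assumed to hold read_lock(&tasklist_lock), as the reworked function does:

#include <linux/sched.h>

/* Hypothetical example: count what each iterator actually visits. */

static int count_group_leaders(void)
{
        struct task_struct *p;
        int n = 0;

        /* One task_struct per process: the thread-group leader only, so a
         * non-leader thread still queued on the dying CPU would be missed. */
        for_each_process(p)
                n++;

        return n;
}

static int count_all_threads(void)
{
        struct task_struct *g, *p;
        int n = 0;

        /* Every thread of every thread group, which is what the reworked
         * check_for_tasks() needs to inspect. */
        do_each_thread(g, p) {
                n++;
        } while_each_thread(g, p);

        return n;
}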
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Fabian Frederick <fabf@skynet.be>
Cc: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Cc: Konstantin Khorenko <khorenko@parallels.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael wang <wangyun@linux.vnet.ibm.com>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Turner <pjt@google.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Todd E Brandt <todd.e.brandt@linux.intel.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1403684395.3462.44.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--   kernel/cpu.c   33
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a343bde710b1..81e2a388a0f6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -274,21 +274,28 @@ void clear_tasks_mm_cpumask(int cpu)
         rcu_read_unlock();
 }
 
-static inline void check_for_tasks(int cpu)
+static inline void check_for_tasks(int dead_cpu)
 {
-        struct task_struct *p;
-        cputime_t utime, stime;
+        struct task_struct *g, *p;
 
-        write_lock_irq(&tasklist_lock);
-        for_each_process(p) {
-                task_cputime(p, &utime, &stime);
-                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
-                    (utime || stime))
-                        pr_warn("Task %s (pid = %d) is on cpu %d (state = %ld, flags = %x)\n",
-                                p->comm, task_pid_nr(p), cpu,
-                                p->state, p->flags);
-        }
-        write_unlock_irq(&tasklist_lock);
+        read_lock_irq(&tasklist_lock);
+        do_each_thread(g, p) {
+                if (!p->on_rq)
+                        continue;
+                /*
+                 * We do the check with unlocked task_rq(p)->lock.
+                 * Order the reading to do not warn about a task,
+                 * which was running on this cpu in the past, and
+                 * it's just been woken on another cpu.
+                 */
+                rmb();
+                if (task_cpu(p) != dead_cpu)
+                        continue;
+
+                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
+                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
+        } while_each_thread(g, p);
+        read_unlock_irq(&tasklist_lock);
 }
 
 struct take_cpu_down_param {
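A brief note on the rmb() in the new loop: the in-diff comment says the check
runs without task_rq(p)->lock held. The wakeup path sets the task's new CPU
before the task is marked queued, so the reader must sample the two fields in
the opposite order: p->on_rq first, task_cpu(p) second. A minimal reader-side
sketch, with a hypothetical helper name that is not part of the patch:

#include <linux/types.h>
#include <linux/sched.h>
#include <asm/barrier.h>

/*
 * Hypothetical helper, for illustration only: should @p be reported as
 * still queued on @dead_cpu?  p->on_rq is read first and task_cpu(p)
 * second; the rmb() keeps that order, so a task that was dequeued from
 * @dead_cpu and has just been woken on another CPU is observed with its
 * new CPU and skipped, which is what the comment in the patch describes.
 */
static bool stuck_on_dead_cpu(struct task_struct *p, int dead_cpu)
{
        if (!p->on_rq)          /* not queued anywhere */
                return false;

        rmb();                  /* read on_rq before task_cpu(p) */

        return task_cpu(p) == dead_cpu;
}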