about summary refs log tree commit diff stats
path: root/kernel/slow-work.c
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2009-06-11 08:12:55 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-06-11 14:26:38 -0400
commitb415c49a864dab8ee90713833d642dd461eccae9 (patch)
treef5e763afdab9a43ff632993d7be49e73d8a6df6c /kernel/slow-work.c
parent6adc74b7d03c06a8e15d51fe33c3d924ada9271a (diff)
slow_work_thread() should do the exclusive wait
slow_work_thread() sleeps on slow_work_thread_wq without WQ_FLAG_EXCLUSIVE, this means that slow_work_enqueue()->__wake_up(nr_exclusive => 1) wakes up all kslowd threads. This is not what we want, so we change slow_work_thread() to use prepare_to_wait_exclusive() instead. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: David Howells <dhowells@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/slow-work.c')
-rw-r--r--kernel/slow-work.c4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index b28d19135f43..521ed2004d63 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -372,8 +372,8 @@ static int slow_work_thread(void *_data)
 372		vsmax *= atomic_read(&slow_work_thread_count);
 373		vsmax /= 100;
 374
-375		prepare_to_wait(&slow_work_thread_wq, &wait,
-376				TASK_INTERRUPTIBLE);
+375		prepare_to_wait_exclusive(&slow_work_thread_wq, &wait,
+376					  TASK_INTERRUPTIBLE);
 377		if (!freezing(current) &&
 378		    !slow_work_threads_should_exit &&
 379		    !slow_work_available(vsmax) &&