path: root/kernel/slow-work.c
author    David Howells <dhowells@redhat.com>  2009-04-03 11:42:35 -0400
committer David Howells <dhowells@redhat.com>  2009-04-03 11:42:35 -0400
commit    109d9272c423f46604d45fedfe87e21ee0b25180 (patch)
tree      b6bd1d94fd3e018ee97dedb577aa6757787db1a9  /kernel/slow-work.c
parent    07fe7cb7c7c179f473fd9c823348fd3eb5dad369 (diff)
Make slow-work thread pool actually dynamic
Make the slow-work thread pool actually dynamic in the number of threads it
contains.  With this patch, it will both create additional threads when it has
extra work to do, and cull excess threads that aren't doing anything.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Serge Hallyn <serue@us.ibm.com>
Acked-by: Steve Dickson <steved@redhat.com>
Acked-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Tested-by: Daire Byrne <Daire.Byrne@framestore.com>
Diffstat (limited to 'kernel/slow-work.c')
-rw-r--r--  kernel/slow-work.c  138
1 file changed, 137 insertions(+), 1 deletion(-)
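For context before the diff itself, here is a minimal sketch of how a user of the facility drives this pool.  It is illustrative only: the my_work* names and the module wrapper are hypothetical, and the only API elements assumed are those visible in this patch and this file (slow_work_register_user(), slow_work_init(), slow_work_enqueue(), slow_work_unregister_user(), and the slow_work_ops get_ref/put_ref/execute callbacks), plus the <linux/slow-work.h> header introduced by this series.

/* Illustrative sketch of a slow-work user (hypothetical names). */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slow-work.h>

static struct slow_work my_work;	/* hypothetical work item */

static int my_work_get_ref(struct slow_work *work)
{
	return 0;	/* statically allocated; nothing extra to pin */
}

static void my_work_put_ref(struct slow_work *work)
{
}

static void my_work_execute(struct slow_work *work)
{
	/* long-running, sleepable work runs here on a kslowd thread */
}

static const struct slow_work_ops my_work_ops = {
	.get_ref	= my_work_get_ref,
	.put_ref	= my_work_put_ref,
	.execute	= my_work_execute,
};

static int __init my_work_module_init(void)
{
	int ret;

	ret = slow_work_register_user();	/* brings up the minimum pool */
	if (ret < 0)
		return ret;

	slow_work_init(&my_work, &my_work_ops);
	return slow_work_enqueue(&my_work);	/* pool may now grow to keep up */
}

static void __exit my_work_module_exit(void)
{
	slow_work_unregister_user();
}

module_init(my_work_module_init);
module_exit(my_work_module_exit);
MODULE_LICENSE("GPL");

With this patch applied, enqueueing more items than the current kslowd threads can service makes the pool grow itself, and it shrinks again once the queues drain, as the diff below shows.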
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 5a7392734c82..454abb21c8bd 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,6 +16,14 @@
 #include <linux/wait.h>
 #include <asm/system.h>
 
+#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
+					 * things to do */
+#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
+					 * OOM */
+
+static void slow_work_cull_timeout(unsigned long);
+static void slow_work_oom_timeout(unsigned long);
+
 /*
  * The pool of threads has at least min threads in it as long as someone is
  * using the facility, and may have as many as max.
@@ -29,6 +37,12 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
 static atomic_t slow_work_thread_count;
 static atomic_t vslow_work_executing_count;
 
+static bool slow_work_may_not_start_new_thread;
+static bool slow_work_cull; /* cull a thread due to lack of activity */
+static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
+static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
+static struct slow_work slow_work_new_thread; /* new thread starter */
+
 /*
  * The queues of work items and the lock governing access to them.  These are
  * shared between all the CPUs.  It doesn't make sense to have per-CPU queues
@@ -89,6 +103,14 @@ static bool slow_work_execute(void)
 
 	vsmax = slow_work_calc_vsmax();
 
+	/* see if we can schedule a new thread to be started if we're not
+	 * keeping up with the work */
+	if (!waitqueue_active(&slow_work_thread_wq) &&
+	    (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
+	    atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
+	    !slow_work_may_not_start_new_thread)
+		slow_work_enqueue(&slow_work_new_thread);
+
 	/* find something to execute */
 	spin_lock_irq(&slow_work_queue_lock);
 	if (!list_empty(&vslow_work_queue) &&
@@ -243,6 +265,33 @@ cant_get_ref:
 EXPORT_SYMBOL(slow_work_enqueue);
 
 /*
+ * Worker thread culling algorithm
+ */
+static bool slow_work_cull_thread(void)
+{
+	unsigned long flags;
+	bool do_cull = false;
+
+	spin_lock_irqsave(&slow_work_queue_lock, flags);
+
+	if (slow_work_cull) {
+		slow_work_cull = false;
+
+		if (list_empty(&slow_work_queue) &&
+		    list_empty(&vslow_work_queue) &&
+		    atomic_read(&slow_work_thread_count) >
+		    slow_work_min_threads) {
+			mod_timer(&slow_work_cull_timer,
+				  jiffies + SLOW_WORK_CULL_TIMEOUT);
+			do_cull = true;
+		}
+	}
+
+	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+	return do_cull;
+}
+
+/*
  * Determine if there is slow work available for dispatch
  */
 static inline bool slow_work_available(int vsmax)
@@ -273,7 +322,8 @@ static int slow_work_thread(void *_data)
 					  TASK_INTERRUPTIBLE);
 		if (!freezing(current) &&
 		    !slow_work_threads_should_exit &&
-		    !slow_work_available(vsmax))
+		    !slow_work_available(vsmax) &&
+		    !slow_work_cull)
 			schedule();
 		finish_wait(&slow_work_thread_wq, &wait);
 
@@ -285,11 +335,20 @@ static int slow_work_thread(void *_data)
 
 		if (slow_work_available(vsmax) && slow_work_execute()) {
 			cond_resched();
+			if (list_empty(&slow_work_queue) &&
+			    list_empty(&vslow_work_queue) &&
+			    atomic_read(&slow_work_thread_count) >
+			    slow_work_min_threads)
+				mod_timer(&slow_work_cull_timer,
+					  jiffies + SLOW_WORK_CULL_TIMEOUT);
 			continue;
 		}
 
 		if (slow_work_threads_should_exit)
 			break;
+
+		if (slow_work_cull && slow_work_cull_thread())
+			break;
 	}
 
 	if (atomic_dec_and_test(&slow_work_thread_count))
@@ -297,6 +356,77 @@ static int slow_work_thread(void *_data)
 	return 0;
 }
 
+/*
+ * Handle thread cull timer expiration
+ */
+static void slow_work_cull_timeout(unsigned long data)
+{
+	slow_work_cull = true;
+	wake_up(&slow_work_thread_wq);
+}
+
+/*
+ * Get a reference on slow work thread starter
+ */
+static int slow_work_new_thread_get_ref(struct slow_work *work)
+{
+	return 0;
+}
+
+/*
+ * Drop a reference on slow work thread starter
+ */
+static void slow_work_new_thread_put_ref(struct slow_work *work)
+{
+}
+
+/*
+ * Start a new slow work thread
+ */
+static void slow_work_new_thread_execute(struct slow_work *work)
+{
+	struct task_struct *p;
+
+	if (slow_work_threads_should_exit)
+		return;
+
+	if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
+		return;
+
+	if (!mutex_trylock(&slow_work_user_lock))
+		return;
+
+	slow_work_may_not_start_new_thread = true;
+	atomic_inc(&slow_work_thread_count);
+	p = kthread_run(slow_work_thread, NULL, "kslowd");
+	if (IS_ERR(p)) {
+		printk(KERN_DEBUG "Slow work thread pool: OOM\n");
+		if (atomic_dec_and_test(&slow_work_thread_count))
+			BUG(); /* we're running on a slow work thread... */
+		mod_timer(&slow_work_oom_timer,
+			  jiffies + SLOW_WORK_OOM_TIMEOUT);
+	} else {
+		/* ratelimit the starting of new threads */
+		mod_timer(&slow_work_oom_timer, jiffies + 1);
+	}
+
+	mutex_unlock(&slow_work_user_lock);
+}
+
+static const struct slow_work_ops slow_work_new_thread_ops = {
+	.get_ref	= slow_work_new_thread_get_ref,
+	.put_ref	= slow_work_new_thread_put_ref,
+	.execute	= slow_work_new_thread_execute,
+};
+
+/*
+ * post-OOM new thread start suppression expiration
+ */
+static void slow_work_oom_timeout(unsigned long data)
+{
+	slow_work_may_not_start_new_thread = false;
+}
+
 /**
  * slow_work_register_user - Register a user of the facility
  *
@@ -316,6 +446,10 @@ int slow_work_register_user(void)
 	init_completion(&slow_work_last_thread_exited);
 
 	slow_work_threads_should_exit = false;
+	slow_work_init(&slow_work_new_thread,
+		       &slow_work_new_thread_ops);
+	slow_work_may_not_start_new_thread = false;
+	slow_work_cull = false;
 
 	/* start the minimum number of threads */
 	for (loop = 0; loop < slow_work_min_threads; loop++) {
@@ -369,6 +503,8 @@ void slow_work_unregister_user(void)
369 " Shut down complete\n"); 503 " Shut down complete\n");
370 } 504 }
371 505
506 del_timer_sync(&slow_work_cull_timer);
507
372 mutex_unlock(&slow_work_user_lock); 508 mutex_unlock(&slow_work_user_lock);
373} 509}
374EXPORT_SYMBOL(slow_work_unregister_user); 510EXPORT_SYMBOL(slow_work_unregister_user);
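In summary, both the growth and the shrink paths added here are driven from inside the pool itself.  A busy worker enqueues the special slow_work_new_thread item when work is queued, nobody is waiting on slow_work_thread_wq and the thread count is below slow_work_max_threads; that item's execute callback kthread_run()s another kslowd thread, with slow_work_oom_timer rate-limiting successful starts and suppressing new starts for SLOW_WORK_OOM_TIMEOUT after a failed one.  Shrinking is timer-driven: a worker that runs out of queued work arms slow_work_cull_timer, and when it fires, slow_work_cull is set and one woken thread above slow_work_min_threads exits via slow_work_cull_thread(), which re-arms the timer so the pool can keep shrinking while it stays idle.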