author		David Howells <dhowells@redhat.com>	2009-04-03 11:42:35 -0400
committer	David Howells <dhowells@redhat.com>	2009-04-03 11:42:35 -0400
commit		12e22c5e4bc08ab4b05ac079fe40d9891c5e81a0 (patch)
tree		e3d34a8f21d4c00dff311dfef564c59a76e1ae70 /kernel/slow-work.c
parent		109d9272c423f46604d45fedfe87e21ee0b25180 (diff)
Make the slow work pool configurable
Make the slow work pool configurable through /proc/sys/kernel/slow-work.
(*) /proc/sys/kernel/slow-work/min-threads
The minimum number of threads that should be in the pool as long as it is
in use. This may be anywhere between 2 and max-threads.
(*) /proc/sys/kernel/slow-work/max-threads
The maximum number of threads that should be in the pool. This may be
anywhere between min-threads and the greater of 255 and NR_CPUS * 2.
(*) /proc/sys/kernel/slow-work/vslow-percentage
The percentage of active threads in the pool that may be used to execute
very slow work items. This may be between 1 and 99. The resulting count is
clamped to between 1 and one fewer than the number of active threads (see the
sketch after this list).
This ensures there is always at least one thread that can process very
slow work items, and always at least one thread that won't.
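As an illustration of that clamping, here is a minimal userspace sketch (not part of the patch; vslow_limit() is an invented name, and the real calculation lives inside kernel/slow-work.c):

#include <stdio.h>

/*
 * Clamp the very-slow-work executor limit as described above: a percentage
 * of the active threads, but never fewer than 1 and never all of them.
 * Assumes at least two active threads (min-threads is at least 2).
 */
static unsigned vslow_limit(unsigned active_threads, unsigned vslow_percentage)
{
	unsigned limit = active_threads * vslow_percentage / 100;

	if (limit < 1)
		limit = 1;			/* at least one thread may run vslow items */
	if (limit > active_threads - 1)
		limit = active_threads - 1;	/* at least one thread never does */
	return limit;
}

int main(void)
{
	/* e.g. 4 active threads at the default 50% => at most 2 vslow executors */
	printf("%u\n", vslow_limit(4, 50));
	return 0;
}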
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Serge Hallyn <serue@us.ibm.com>
Acked-by: Steve Dickson <steved@redhat.com>
Acked-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Tested-by: Daire Byrne <Daire.Byrne@framestore.com>
Diffstat (limited to 'kernel/slow-work.c')
-rw-r--r--	kernel/slow-work.c	118
1 file changed, 116 insertions(+), 2 deletions(-)
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 454abb21c8bd..3f65900aa3cb 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -14,7 +14,6 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/wait.h>
-#include <asm/system.h>
 
 #define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
					 * things to do */
@@ -24,6 +23,14 @@
 static void slow_work_cull_timeout(unsigned long);
 static void slow_work_oom_timeout(unsigned long);
 
+#ifdef CONFIG_SYSCTL
+static int slow_work_min_threads_sysctl(struct ctl_table *, int, struct file *,
+					void __user *, size_t *, loff_t *);
+
+static int slow_work_max_threads_sysctl(struct ctl_table *, int , struct file *,
+					void __user *, size_t *, loff_t *);
+#endif
+
 /*
  * The pool of threads has at least min threads in it as long as someone is
  * using the facility, and may have as many as max.
@@ -34,6 +41,51 @@ static unsigned slow_work_min_threads = 2;
 static unsigned slow_work_max_threads = 4;
 static unsigned vslow_work_proportion = 50; /* % of threads that may process
					      * very slow work */
+
+#ifdef CONFIG_SYSCTL
+static const int slow_work_min_min_threads = 2;
+static int slow_work_max_max_threads = 255;
+static const int slow_work_min_vslow = 1;
+static const int slow_work_max_vslow = 99;
+
+ctl_table slow_work_sysctls[] = {
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "min-threads",
+		.data		= &slow_work_min_threads,
+		.maxlen		= sizeof(unsigned),
+		.mode		= 0644,
+		.proc_handler	= slow_work_min_threads_sysctl,
+		.extra1		= (void *) &slow_work_min_min_threads,
+		.extra2		= &slow_work_max_threads,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "max-threads",
+		.data		= &slow_work_max_threads,
+		.maxlen		= sizeof(unsigned),
+		.mode		= 0644,
+		.proc_handler	= slow_work_max_threads_sysctl,
+		.extra1		= &slow_work_min_threads,
+		.extra2		= (void *) &slow_work_max_max_threads,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "vslow-percentage",
+		.data		= &vslow_work_proportion,
+		.maxlen		= sizeof(unsigned),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= (void *) &slow_work_min_vslow,
+		.extra2		= (void *) &slow_work_max_vslow,
+	},
+	{ .ctl_name = 0 }
+};
+#endif
+
+/*
+ * The active state of the thread pool
+ */
 static atomic_t slow_work_thread_count;
 static atomic_t vslow_work_executing_count;
 
@@ -427,6 +479,64 @@ static void slow_work_oom_timeout(unsigned long data)
 	slow_work_may_not_start_new_thread = false;
 }
 
+#ifdef CONFIG_SYSCTL
+/*
+ * Handle adjustment of the minimum number of threads
+ */
+static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
+					struct file *filp, void __user *buffer,
+					size_t *lenp, loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+	int n;
+
+	if (ret == 0) {
+		mutex_lock(&slow_work_user_lock);
+		if (slow_work_user_count > 0) {
+			/* see if we need to start or stop threads */
+			n = atomic_read(&slow_work_thread_count) -
+				slow_work_min_threads;
+
+			if (n < 0 && !slow_work_may_not_start_new_thread)
+				slow_work_enqueue(&slow_work_new_thread);
+			else if (n > 0)
+				mod_timer(&slow_work_cull_timer,
+					  jiffies + SLOW_WORK_CULL_TIMEOUT);
+		}
+		mutex_unlock(&slow_work_user_lock);
+	}
+
+	return ret;
+}
+
+/*
+ * Handle adjustment of the maximum number of threads
+ */
+static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
+					struct file *filp, void __user *buffer,
+					size_t *lenp, loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+	int n;
+
+	if (ret == 0) {
+		mutex_lock(&slow_work_user_lock);
+		if (slow_work_user_count > 0) {
+			/* see if we need to stop threads */
+			n = slow_work_max_threads -
+				atomic_read(&slow_work_thread_count);
+
+			if (n < 0)
+				mod_timer(&slow_work_cull_timer,
+					  jiffies + SLOW_WORK_CULL_TIMEOUT);
+		}
+		mutex_unlock(&slow_work_user_lock);
+	}
+
+	return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
 /**
  * slow_work_register_user - Register a user of the facility
  *
@@ -516,8 +626,12 @@ static int __init init_slow_work(void)
 {
 	unsigned nr_cpus = num_possible_cpus();
 
-	if (nr_cpus > slow_work_max_threads)
+	if (slow_work_max_threads < nr_cpus)
 		slow_work_max_threads = nr_cpus;
+#ifdef CONFIG_SYSCTL
+	if (slow_work_max_max_threads < nr_cpus * 2)
+		slow_work_max_max_threads = nr_cpus * 2;
+#endif
 	return 0;
 }
 
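For completeness, a userspace sketch of driving the new tunables (illustrative only: write_tunable() and the chosen values are invented for the example, the writes require root, and echoing values into the files from a shell works just as well):

#include <stdio.h>

/* Write a single unsigned value to a sysctl file; returns 0 on success. */
static int write_tunable(const char *path, unsigned value)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fprintf(f, "%u\n", value) < 0) {
		fclose(f);
		return -1;
	}
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	/* Raise the pool ceiling and confine very slow work to a quarter
	 * of the active threads. */
	if (write_tunable("/proc/sys/kernel/slow-work/max-threads", 8) ||
	    write_tunable("/proc/sys/kernel/slow-work/vslow-percentage", 25))
		perror("slow-work tunables");
	return 0;
}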