Diffstat (limited to 'kernel/slow-work.c')
-rw-r--r--  kernel/slow-work.c | 118
1 file changed, 116 insertions(+), 2 deletions(-)
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 454abb21c8bd..3f65900aa3cb 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -14,7 +14,6 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/wait.h>
-#include <asm/system.h>
 
 #define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
 					 * things to do */
@@ -24,6 +23,14 @@
 static void slow_work_cull_timeout(unsigned long);
 static void slow_work_oom_timeout(unsigned long);
 
+#ifdef CONFIG_SYSCTL
+static int slow_work_min_threads_sysctl(struct ctl_table *, int, struct file *,
+					void __user *, size_t *, loff_t *);
+
+static int slow_work_max_threads_sysctl(struct ctl_table *, int, struct file *,
+					void __user *, size_t *, loff_t *);
+#endif
+
 /*
  * The pool of threads has at least min threads in it as long as someone is
  * using the facility, and may have as many as max.
@@ -34,6 +41,51 @@ static unsigned slow_work_min_threads = 2;
 static unsigned slow_work_max_threads = 4;
 static unsigned vslow_work_proportion = 50; /* % of threads that may process
					      * very slow work */
+
+#ifdef CONFIG_SYSCTL
+static const int slow_work_min_min_threads = 2;
+static int slow_work_max_max_threads = 255;
+static const int slow_work_min_vslow = 1;
+static const int slow_work_max_vslow = 99;
+
+ctl_table slow_work_sysctls[] = {
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "min-threads",
+		.data		= &slow_work_min_threads,
+		.maxlen		= sizeof(unsigned),
+		.mode		= 0644,
+		.proc_handler	= slow_work_min_threads_sysctl,
+		.extra1		= (void *) &slow_work_min_min_threads,
+		.extra2		= &slow_work_max_threads,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "max-threads",
+		.data		= &slow_work_max_threads,
+		.maxlen		= sizeof(unsigned),
+		.mode		= 0644,
+		.proc_handler	= slow_work_max_threads_sysctl,
+		.extra1		= &slow_work_min_threads,
+		.extra2		= (void *) &slow_work_max_max_threads,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "vslow-percentage",
+		.data		= &vslow_work_proportion,
+		.maxlen		= sizeof(unsigned),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= (void *) &slow_work_min_vslow,
+		.extra2		= (void *) &slow_work_max_vslow,
+	},
+	{ .ctl_name = 0 }
+};
+#endif
+
+/*
+ * The active state of the thread pool
+ */
 static atomic_t slow_work_thread_count;
 static atomic_t vslow_work_executing_count;
 
@@ -427,6 +479,64 @@ static void slow_work_oom_timeout(unsigned long data)
 	slow_work_may_not_start_new_thread = false;
 }
 
+#ifdef CONFIG_SYSCTL
+/*
+ * Handle adjustment of the minimum number of threads
+ */
+static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
+					struct file *filp, void __user *buffer,
+					size_t *lenp, loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+	int n;
+
+	if (ret == 0) {
+		mutex_lock(&slow_work_user_lock);
+		if (slow_work_user_count > 0) {
+			/* see if we need to start or stop threads */
+			n = atomic_read(&slow_work_thread_count) -
+				slow_work_min_threads;
+
+			if (n < 0 && !slow_work_may_not_start_new_thread)
+				slow_work_enqueue(&slow_work_new_thread);
+			else if (n > 0)
+				mod_timer(&slow_work_cull_timer,
+					  jiffies + SLOW_WORK_CULL_TIMEOUT);
+		}
+		mutex_unlock(&slow_work_user_lock);
+	}
+
+	return ret;
+}
+
+/*
+ * Handle adjustment of the maximum number of threads
+ */
+static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
+					struct file *filp, void __user *buffer,
+					size_t *lenp, loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+	int n;
+
+	if (ret == 0) {
+		mutex_lock(&slow_work_user_lock);
+		if (slow_work_user_count > 0) {
+			/* see if we need to stop threads */
+			n = slow_work_max_threads -
+				atomic_read(&slow_work_thread_count);
+
+			if (n < 0)
+				mod_timer(&slow_work_cull_timer,
+					  jiffies + SLOW_WORK_CULL_TIMEOUT);
+		}
+		mutex_unlock(&slow_work_user_lock);
+	}
+
+	return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
 /**
  * slow_work_register_user - Register a user of the facility
  *
@@ -516,8 +626,12 @@ static int __init init_slow_work(void)
 {
 	unsigned nr_cpus = num_possible_cpus();
 
-	if (nr_cpus > slow_work_max_threads)
+	if (slow_work_max_threads < nr_cpus)
 		slow_work_max_threads = nr_cpus;
+#ifdef CONFIG_SYSCTL
+	if (slow_work_max_max_threads < nr_cpus * 2)
+		slow_work_max_max_threads = nr_cpus * 2;
+#endif
 	return 0;
 }
 
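The slow_work_sysctls[] table is deliberately non-static so it can be hung off the top-level sysctl tree; this diff view is limited to kernel/slow-work.c, so the hookup itself is not shown here. A minimal sketch of what an entry in the "kernel" directory table of kernel/sysctl.c could look like on this era's sysctl API (the exact entry is an assumption, not part of this diff), exposing the knobs as /proc/sys/kernel/slow-work/{min-threads,max-threads,vslow-percentage}:

	/*
	 * Sketch only: plausible hookup in kernel/sysctl.c, not shown in
	 * this diff.  Mounts slow_work_sysctls[] as the directory
	 * /proc/sys/kernel/slow-work/.
	 */
	#ifdef CONFIG_SLOW_WORK
		{
			.ctl_name	= CTL_UNNUMBERED,
			.procname	= "slow-work",		/* directory name under /proc/sys/kernel/ */
			.mode		= 0555,			/* read/search-only directory */
			.child		= slow_work_sysctls,	/* table defined in kernel/slow-work.c */
		},
	#endif

Note how the table cross-links its limits: min-threads uses &slow_work_max_threads as extra2 and max-threads uses &slow_work_min_threads as extra1, so proc_dointvec_minmax() rejects any write that would push the minimum above the maximum, while vslow-percentage is clamped to the 1..99 range.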