Diffstat (limited to 'kernel/slow-work.c')

 kernel/slow-work.c | 44 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 33 insertions(+), 11 deletions(-)
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index f67e1daae93d..b763bc2d2670 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,13 +16,8 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/wait.h>
-
-#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
-					 * things to do */
-#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
-					 * OOM */
-
-#define SLOW_WORK_THREAD_LIMIT	255	/* abs maximum number of slow-work threads */
+#include <linux/proc_fs.h>
+#include "slow-work.h"
 
 static void slow_work_cull_timeout(unsigned long);
 static void slow_work_oom_timeout(unsigned long);
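The timeout and thread-limit constants are deleted here, yet SLOW_WORK_THREAD_LIMIT is still used further down (for the thread-ID bitmap and the new exec arrays), so they have evidently moved into the new private header "slow-work.h" included above. A plausible sketch of that part of the header, reusing the removed lines verbatim:

/* Assumed contents of kernel/slow-work.h (the header itself is not
 * shown in this diff); the constants are the ones deleted above. */
#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
					 * things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
					 * OOM */
#define SLOW_WORK_THREAD_LIMIT	255	/* abs maximum number of slow-work threads */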
@@ -117,6 +112,15 @@ static DEFINE_MUTEX(slow_work_unreg_sync_lock);
 #endif
 
 /*
+ * Data for tracking currently executing items for indication through /proc
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
+pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
+DEFINE_RWLOCK(slow_work_execs_lock);
+#endif
+
+/*
  * The queues of work items and the lock governing access to them. These are
  * shared between all the CPUs. It doesn't make sense to have per-CPU queues
  * as the number of threads bears no relation to the number of CPUs.
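The new arrays are indexed by thread ID: slow_work_execs[] records which item each kslowd thread is currently running and slow_work_pids[] that thread's pid, while slow_work_execs_lock lets the /proc read side take a consistent snapshot. A minimal sketch of such a reader, assuming it lives in the proc code this diff does not cover (the function name is hypothetical):

/* Hedged sketch: how the /proc side might walk the exec table.
 * slow_work_dump_execs() is an illustrative name only. */
static void slow_work_dump_execs(struct seq_file *m)
{
	int id;

	read_lock(&slow_work_execs_lock);
	for (id = 0; id < SLOW_WORK_THREAD_LIMIT; id++)
		if (slow_work_execs[id])
			seq_printf(m, "kslowd%03u: pid %d busy\n",
				   id, slow_work_pids[id]);
	read_unlock(&slow_work_execs_lock);
}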
@@ -124,9 +128,9 @@ static DEFINE_MUTEX(slow_work_unreg_sync_lock);
  * There are two queues of work items: one for slow work items, and one for
  * very slow work items.
  */
-static LIST_HEAD(slow_work_queue);
-static LIST_HEAD(vslow_work_queue);
-static DEFINE_SPINLOCK(slow_work_queue_lock);
+LIST_HEAD(slow_work_queue);
+LIST_HEAD(vslow_work_queue);
+DEFINE_SPINLOCK(slow_work_queue_lock);
 
 /*
  * The thread controls. A variable used to signal to the threads that they
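Dropping static from the two queues and their spinlock gives them external linkage, so the /proc code in a sibling file can walk them under the lock. The matching declarations one would expect in "slow-work.h" (an assumption; the header is not part of this hunk):

extern struct list_head slow_work_queue;
extern struct list_head vslow_work_queue;
extern spinlock_t slow_work_queue_lock;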
@@ -182,7 +186,7 @@ static unsigned slow_work_calc_vsmax(void)
  * Attempt to execute stuff queued on a slow thread. Return true if we managed
  * it, false if there was nothing to do.
  */
-static bool slow_work_execute(int id)
+static noinline bool slow_work_execute(int id)
 {
 #ifdef CONFIG_MODULES
 	struct module *module;
@@ -227,6 +231,10 @@ static bool slow_work_execute(int id)
 	if (work)
 		slow_work_thread_processing[id] = work->owner;
 #endif
+	if (work) {
+		slow_work_mark_time(work);
+		slow_work_begin_exec(id, work);
+	}
 
 	spin_unlock_irq(&slow_work_queue_lock);
 
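slow_work_mark_time() and slow_work_begin_exec() come from the new header and presumably compile to nothing when CONFIG_SLOW_WORK_PROC is off. Both calls here are made under slow_work_queue_lock, so plain assignments suffice; a hedged sketch (the timestamp field name 'mark' is an assumption):

/* Sketch only; the real definitions live in kernel/slow-work.h. */
static inline void slow_work_mark_time(struct slow_work *work)
{
#ifdef CONFIG_SLOW_WORK_PROC
	work->mark = CURRENT_TIME;	/* assumed field: time of last queue/exec event */
#endif
}

static inline void slow_work_begin_exec(int id, struct slow_work *work)
{
#ifdef CONFIG_SLOW_WORK_PROC
	slow_work_execs[id] = work;	/* caller holds slow_work_queue_lock */
#endif
}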
@@ -247,6 +255,8 @@ static bool slow_work_execute(int id)
 		/* wake up anyone waiting for this work to be complete */
 		wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
 
+		slow_work_end_exec(id, work);
+
 		/* if someone tried to enqueue the item whilst we were executing it,
 		 * then it'll be left unenqueued to avoid multiple threads trying to
 		 * execute it simultaneously
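slow_work_end_exec() clears the slot once waiters have been woken. Taking the write side of slow_work_execs_lock only here (and not in begin_exec) would be enough to guarantee a /proc reader that an item it sees in the table cannot go away while the read lock is held; a sketch under that assumption:

/* Sketch only; the real definition lives in kernel/slow-work.h. */
static inline void slow_work_end_exec(int id, struct slow_work *work)
{
#ifdef CONFIG_SLOW_WORK_PROC
	write_lock(&slow_work_execs_lock);
	slow_work_execs[id] = NULL;
	write_unlock(&slow_work_execs_lock);
#endif
}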
@@ -285,6 +295,7 @@ auto_requeue:
 	 * - we transfer our ref on the item back to the appropriate queue
 	 * - don't wake another thread up as we're awake already
 	 */
+	slow_work_mark_time(work);
 	if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
 		list_add_tail(&work->link, &vslow_work_queue);
 	else
@@ -368,6 +379,7 @@ int slow_work_enqueue(struct slow_work *work)
 		ret = slow_work_get_ref(work);
 		if (ret < 0)
 			goto failed;
+		slow_work_mark_time(work);
 		if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
 			list_add_tail(&work->link, &vslow_work_queue);
 		else
@@ -489,6 +501,7 @@ static void delayed_slow_work_timer(unsigned long data)
 			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
 			put = true;
 		} else {
+			slow_work_mark_time(work);
 			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
 				list_add_tail(&work->link, &vslow_work_queue);
 			else
@@ -627,6 +640,7 @@ static int slow_work_thread(void *_data)
 	id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
 	BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
 	__set_bit(id, slow_work_ids);
+	slow_work_set_thread_pid(id, current->pid);
 	spin_unlock_irq(&slow_work_queue_lock);
 
 	sprintf(current->comm, "kslowd%03u", id);
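The pid is recorded under slow_work_queue_lock when the thread claims its ID and, in the later hunk, reset to 0 when the thread exits, so /proc can map each kslowdNNN slot back to a task. The helper is presumably as trivial as:

/* Sketch only; the real definition lives in kernel/slow-work.h. */
static inline void slow_work_set_thread_pid(int id, pid_t pid)
{
#ifdef CONFIG_SLOW_WORK_PROC
	slow_work_pids[id] = pid;
#endif
}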
@@ -669,6 +683,7 @@ static int slow_work_thread(void *_data)
 	}
 
 	spin_lock_irq(&slow_work_queue_lock);
+	slow_work_set_thread_pid(id, 0);
 	__clear_bit(id, slow_work_ids);
 	spin_unlock_irq(&slow_work_queue_lock);
 
@@ -722,6 +737,9 @@ static void slow_work_new_thread_execute(struct slow_work *work)
 static const struct slow_work_ops slow_work_new_thread_ops = {
 	.owner = THIS_MODULE,
 	.execute = slow_work_new_thread_execute,
+#ifdef CONFIG_SLOW_WORK_PROC
+	.desc = slow_work_new_thread_desc,
+#endif
 };
 
 /*
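The .desc operation is evidently a new, CONFIG_SLOW_WORK_PROC-only member of struct slow_work_ops that lets an item print a one-line description of itself into the /proc dump. Its signature is not visible in this diff; a seq_file-based one is the natural shape:

/* Assumed signature and body; neither appears in this hunk. */
static void slow_work_new_thread_desc(struct slow_work *work,
				      struct seq_file *m)
{
	seq_puts(m, "Slow-work: new thread starter");
}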
@@ -949,6 +967,10 @@ static int __init init_slow_work(void)
 	if (slow_work_max_max_threads < nr_cpus * 2)
 		slow_work_max_max_threads = nr_cpus * 2;
 #endif
+#ifdef CONFIG_SLOW_WORK_PROC
+	proc_create("slow_work_rq", S_IFREG | 0400, NULL,
+		    &slow_work_runqueue_fops);
+#endif
 	return 0;
 }
 
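Initialisation now publishes /proc/slow_work_rq, readable only by root (S_IFREG | 0400). slow_work_runqueue_fops is defined in proc code that this diff does not cover; a minimal seq_file skeleton in the style of 2.6.3x kernels, with everything except the fops name an assumption:

/* Hedged sketch of the read side.  A real implementation would dump the
 * executing items and both queues under the locks exported above instead
 * of returning an empty file. */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int slow_work_runqueue_show(struct seq_file *m, void *v)
{
	return 0;
}

static int slow_work_runqueue_open(struct inode *inode, struct file *file)
{
	return single_open(file, slow_work_runqueue_show, NULL);
}

const struct file_operations slow_work_runqueue_fops = {
	.owner		= THIS_MODULE,
	.open		= slow_work_runqueue_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

With that wired up, reading /proc/slow_work_rq as root shows the current slow-work state.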