aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/slow-work.c
diff options
context:
space:
mode:
authorDavid Howells <dhowells@redhat.com>2009-11-19 13:10:51 -0500
committerDavid Howells <dhowells@redhat.com>2009-11-19 13:10:51 -0500
commit8fba10a42d191de612e60e7009c8f0313f90a9b3 (patch)
tree1e772fcc7ad3eb5bb3ca1c6cd156171295e6db25 /kernel/slow-work.c
parent6b8268b17a1ffc942bc72d7d00274e433d6b6719 (diff)
SLOW_WORK: Allow the work items to be viewed through a /proc file
Allow the executing and queued work items to be viewed through a /proc file for debugging purposes. The contents look something like the following:

    THR PID   ITEM ADDR        FL MARK  DESC
    === ===== ================ == ===== ==========
      0  3005 ffff880023f52348  a 952ms FSC: OBJ17d3: LOOK
      1  3006 ffff880024e33668  2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2
      2  3165 ffff8800296dd180  a 424ms FSC: OBJ17e4: LOOK
      3  4089 ffff8800262c8d78  a 212ms FSC: OBJ17ea: CRTN
      4  4090 ffff88002792bed8  2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2
      5  4092 ffff88002a0ef308  2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2
      6  4094 ffff88002abaf4b8  2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2
      7  4095 ffff88002bb188e0  a 388ms FSC: OBJ17e9: CRTN
    vsq     - ffff880023d99668  1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2
    vsq     - ffff8800295d1740  1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2
    vsq     - ffff880025ba3308  1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2
    vsq     - ffff880024ec83e0  1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2
    vsq     - ffff880026618e00  1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2
    vsq     - ffff880025a2a4b8  1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2
    vsq     - ffff880023cbe6d8  9 212ms FSC: OBJ17eb: LOOK
    vsq     - ffff880024d37590  9 212ms FSC: OBJ17ec: LOOK
    vsq     - ffff880027746cb0  9 212ms FSC: OBJ17ed: LOOK
    vsq     - ffff880024d37ae8  9 212ms FSC: OBJ17ee: LOOK
    vsq     - ffff880024d37cb0  9 212ms FSC: OBJ17ef: LOOK
    vsq     - ffff880025036550  9 212ms FSC: OBJ17f0: LOOK
    vsq     - ffff8800250368e0  9 212ms FSC: OBJ17f1: LOOK
    vsq     - ffff880025036aa8  9 212ms FSC: OBJ17f2: LOOK

In the 'THR' column, executing items show the thread they're occupying and queued threads indicate which queue they're on. 'PID' shows the process ID of a slow-work thread that's executing something. 'FL' shows the work item flags. 'MARK' indicates how long since an item was queued or began executing. Lastly, the 'DESC' column permits the owner of an item to give some information.

Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'kernel/slow-work.c')
-rw-r--r--kernel/slow-work.c44
1 file changed, 33 insertions, 11 deletions
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index f67e1daae93d..b763bc2d2670 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,13 +16,8 @@
16#include <linux/kthread.h> 16#include <linux/kthread.h>
17#include <linux/freezer.h> 17#include <linux/freezer.h>
18#include <linux/wait.h> 18#include <linux/wait.h>
19 19#include <linux/proc_fs.h>
20#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of 20#include "slow-work.h"
21 * things to do */
22#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
23 * OOM */
24
25#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */
26 21
27static void slow_work_cull_timeout(unsigned long); 22static void slow_work_cull_timeout(unsigned long);
28static void slow_work_oom_timeout(unsigned long); 23static void slow_work_oom_timeout(unsigned long);
@@ -117,6 +112,15 @@ static DEFINE_MUTEX(slow_work_unreg_sync_lock);
117#endif 112#endif
118 113
119/* 114/*
115 * Data for tracking currently executing items for indication through /proc
116 */
117#ifdef CONFIG_SLOW_WORK_PROC
118struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
119pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
120DEFINE_RWLOCK(slow_work_execs_lock);
121#endif
122
123/*
120 * The queues of work items and the lock governing access to them. These are 124 * The queues of work items and the lock governing access to them. These are
121 * shared between all the CPUs. It doesn't make sense to have per-CPU queues 125 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
122 * as the number of threads bears no relation to the number of CPUs. 126 * as the number of threads bears no relation to the number of CPUs.
@@ -124,9 +128,9 @@ static DEFINE_MUTEX(slow_work_unreg_sync_lock);
124 * There are two queues of work items: one for slow work items, and one for 128 * There are two queues of work items: one for slow work items, and one for
125 * very slow work items. 129 * very slow work items.
126 */ 130 */
127static LIST_HEAD(slow_work_queue); 131LIST_HEAD(slow_work_queue);
128static LIST_HEAD(vslow_work_queue); 132LIST_HEAD(vslow_work_queue);
129static DEFINE_SPINLOCK(slow_work_queue_lock); 133DEFINE_SPINLOCK(slow_work_queue_lock);
130 134
131/* 135/*
132 * The thread controls. A variable used to signal to the threads that they 136 * The thread controls. A variable used to signal to the threads that they
@@ -182,7 +186,7 @@ static unsigned slow_work_calc_vsmax(void)
182 * Attempt to execute stuff queued on a slow thread. Return true if we managed 186 * Attempt to execute stuff queued on a slow thread. Return true if we managed
183 * it, false if there was nothing to do. 187 * it, false if there was nothing to do.
184 */ 188 */
185static bool slow_work_execute(int id) 189static noinline bool slow_work_execute(int id)
186{ 190{
187#ifdef CONFIG_MODULES 191#ifdef CONFIG_MODULES
188 struct module *module; 192 struct module *module;
@@ -227,6 +231,10 @@ static bool slow_work_execute(int id)
227 if (work) 231 if (work)
228 slow_work_thread_processing[id] = work->owner; 232 slow_work_thread_processing[id] = work->owner;
229#endif 233#endif
234 if (work) {
235 slow_work_mark_time(work);
236 slow_work_begin_exec(id, work);
237 }
230 238
231 spin_unlock_irq(&slow_work_queue_lock); 239 spin_unlock_irq(&slow_work_queue_lock);
232 240
@@ -247,6 +255,8 @@ static bool slow_work_execute(int id)
247 /* wake up anyone waiting for this work to be complete */ 255 /* wake up anyone waiting for this work to be complete */
248 wake_up_bit(&work->flags, SLOW_WORK_EXECUTING); 256 wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
249 257
258 slow_work_end_exec(id, work);
259
250 /* if someone tried to enqueue the item whilst we were executing it, 260 /* if someone tried to enqueue the item whilst we were executing it,
251 * then it'll be left unenqueued to avoid multiple threads trying to 261 * then it'll be left unenqueued to avoid multiple threads trying to
252 * execute it simultaneously 262 * execute it simultaneously
@@ -285,6 +295,7 @@ auto_requeue:
285 * - we transfer our ref on the item back to the appropriate queue 295 * - we transfer our ref on the item back to the appropriate queue
286 * - don't wake another thread up as we're awake already 296 * - don't wake another thread up as we're awake already
287 */ 297 */
298 slow_work_mark_time(work);
288 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 299 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
289 list_add_tail(&work->link, &vslow_work_queue); 300 list_add_tail(&work->link, &vslow_work_queue);
290 else 301 else
@@ -368,6 +379,7 @@ int slow_work_enqueue(struct slow_work *work)
368 ret = slow_work_get_ref(work); 379 ret = slow_work_get_ref(work);
369 if (ret < 0) 380 if (ret < 0)
370 goto failed; 381 goto failed;
382 slow_work_mark_time(work);
371 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 383 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
372 list_add_tail(&work->link, &vslow_work_queue); 384 list_add_tail(&work->link, &vslow_work_queue);
373 else 385 else
@@ -489,6 +501,7 @@ static void delayed_slow_work_timer(unsigned long data)
489 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); 501 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
490 put = true; 502 put = true;
491 } else { 503 } else {
504 slow_work_mark_time(work);
492 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 505 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
493 list_add_tail(&work->link, &vslow_work_queue); 506 list_add_tail(&work->link, &vslow_work_queue);
494 else 507 else
@@ -627,6 +640,7 @@ static int slow_work_thread(void *_data)
627 id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT); 640 id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
628 BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT); 641 BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
629 __set_bit(id, slow_work_ids); 642 __set_bit(id, slow_work_ids);
643 slow_work_set_thread_pid(id, current->pid);
630 spin_unlock_irq(&slow_work_queue_lock); 644 spin_unlock_irq(&slow_work_queue_lock);
631 645
632 sprintf(current->comm, "kslowd%03u", id); 646 sprintf(current->comm, "kslowd%03u", id);
@@ -669,6 +683,7 @@ static int slow_work_thread(void *_data)
669 } 683 }
670 684
671 spin_lock_irq(&slow_work_queue_lock); 685 spin_lock_irq(&slow_work_queue_lock);
686 slow_work_set_thread_pid(id, 0);
672 __clear_bit(id, slow_work_ids); 687 __clear_bit(id, slow_work_ids);
673 spin_unlock_irq(&slow_work_queue_lock); 688 spin_unlock_irq(&slow_work_queue_lock);
674 689
@@ -722,6 +737,9 @@ static void slow_work_new_thread_execute(struct slow_work *work)
722static const struct slow_work_ops slow_work_new_thread_ops = { 737static const struct slow_work_ops slow_work_new_thread_ops = {
723 .owner = THIS_MODULE, 738 .owner = THIS_MODULE,
724 .execute = slow_work_new_thread_execute, 739 .execute = slow_work_new_thread_execute,
740#ifdef CONFIG_SLOW_WORK_PROC
741 .desc = slow_work_new_thread_desc,
742#endif
725}; 743};
726 744
727/* 745/*
@@ -949,6 +967,10 @@ static int __init init_slow_work(void)
949 if (slow_work_max_max_threads < nr_cpus * 2) 967 if (slow_work_max_max_threads < nr_cpus * 2)
950 slow_work_max_max_threads = nr_cpus * 2; 968 slow_work_max_max_threads = nr_cpus * 2;
951#endif 969#endif
970#ifdef CONFIG_SLOW_WORK_PROC
971 proc_create("slow_work_rq", S_IFREG | 0400, NULL,
972 &slow_work_runqueue_fops);
973#endif
952 return 0; 974 return 0;
953} 975}
954 976