aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/slow-work.txt60
-rw-r--r--include/linux/slow-work.h11
-rw-r--r--init/Kconfig10
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/slow-work-proc.c227
-rw-r--r--kernel/slow-work.c44
-rw-r--r--kernel/slow-work.h72
7 files changed, 413 insertions, 12 deletions
diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt
index a9d1b0ffdded..f120238e70fe 100644
--- a/Documentation/slow-work.txt
+++ b/Documentation/slow-work.txt
@@ -149,7 +149,8 @@ ITEM OPERATIONS
149=============== 149===============
150 150
151Each work item requires a table of operations of type struct slow_work_ops. 151Each work item requires a table of operations of type struct slow_work_ops.
152Only ->execute() is required, getting and putting of a reference are optional. 152Only ->execute() is required; the getting and putting of a reference and the
153describing of an item are all optional.
153 154
154 (*) Get a reference on an item: 155 (*) Get a reference on an item:
155 156
@@ -179,6 +180,16 @@ Only ->execute() is required, getting and putting of a reference are optional.
179 This should perform the work required of the item. It may sleep, it may 180 This should perform the work required of the item. It may sleep, it may
180 perform disk I/O and it may wait for locks. 181 perform disk I/O and it may wait for locks.
181 182
183 (*) View an item through /proc:
184
185 void (*desc)(struct slow_work *work, struct seq_file *m);
186
187 If supplied, this should print to 'm' a small string describing the work
188 the item is to do. This should be no more than about 40 characters, and
189 shouldn't include a newline character.
190
191 See the 'Viewing executing and queued items' section below.
192
182 193
183================== 194==================
184POOL CONFIGURATION 195POOL CONFIGURATION
@@ -203,3 +214,50 @@ The slow-work thread pool has a number of configurables:
203 is bounded to between 1 and one fewer than the number of active threads. 214 is bounded to between 1 and one fewer than the number of active threads.
204 This ensures there is always at least one thread that can process very 215 This ensures there is always at least one thread that can process very
205 slow work items, and always at least one thread that won't. 216 slow work items, and always at least one thread that won't.
217
218
219==================================
220VIEWING EXECUTING AND QUEUED ITEMS
221==================================
222
223If CONFIG_SLOW_WORK_PROC is enabled, a proc file is made available:
224
225 /proc/slow_work_rq
226
227through which the list of work items being executed and the queues of items to
228be executed may be viewed. The owner of a work item is given the chance to
229add some information of its own.
230
231The contents look something like the following:
232
233 THR PID ITEM ADDR FL MARK DESC
234 === ===== ================ == ===== ==========
235 0 3005 ffff880023f52348 a 952ms FSC: OBJ17d3: LOOK
236 1 3006 ffff880024e33668 2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2
237 2 3165 ffff8800296dd180 a 424ms FSC: OBJ17e4: LOOK
238 3 4089 ffff8800262c8d78 a 212ms FSC: OBJ17ea: CRTN
239 4 4090 ffff88002792bed8 2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2
240 5 4092 ffff88002a0ef308 2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2
241 6 4094 ffff88002abaf4b8 2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2
242 7 4095 ffff88002bb188e0 a 388ms FSC: OBJ17e9: CRTN
243 vsq - ffff880023d99668 1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2
244 vsq - ffff8800295d1740 1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2
245 vsq - ffff880025ba3308 1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2
246 vsq - ffff880024ec83e0 1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2
247 vsq - ffff880026618e00 1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2
248 vsq - ffff880025a2a4b8 1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2
249 vsq - ffff880023cbe6d8 9 212ms FSC: OBJ17eb: LOOK
250 vsq - ffff880024d37590 9 212ms FSC: OBJ17ec: LOOK
251 vsq - ffff880027746cb0 9 212ms FSC: OBJ17ed: LOOK
252 vsq - ffff880024d37ae8 9 212ms FSC: OBJ17ee: LOOK
253 vsq - ffff880024d37cb0 9 212ms FSC: OBJ17ef: LOOK
254 vsq - ffff880025036550 9 212ms FSC: OBJ17f0: LOOK
255 vsq - ffff8800250368e0 9 212ms FSC: OBJ17f1: LOOK
256 vsq - ffff880025036aa8 9 212ms FSC: OBJ17f2: LOOK
257
258In the 'THR' column, executing items show the thread they're occupying and
259queued items indicate which queue they're on. 'PID' shows the process ID of
260a slow-work thread that's executing something. 'FL' shows the work item flags.
261'MARK' indicates how long since an item was queued or began executing. Lastly,
262the 'DESC' column permits the owner of an item to give some information.
263
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
index b245b9a9cc0b..f41485145ed1 100644
--- a/include/linux/slow-work.h
+++ b/include/linux/slow-work.h
@@ -20,6 +20,9 @@
20#include <linux/timer.h> 20#include <linux/timer.h>
21 21
22struct slow_work; 22struct slow_work;
23#ifdef CONFIG_SLOW_WORK_PROC
24struct seq_file;
25#endif
23 26
24/* 27/*
25 * The operations used to support slow work items 28 * The operations used to support slow work items
@@ -38,6 +41,11 @@ struct slow_work_ops {
38 41
39 /* execute a work item */ 42 /* execute a work item */
40 void (*execute)(struct slow_work *work); 43 void (*execute)(struct slow_work *work);
44
45#ifdef CONFIG_SLOW_WORK_PROC
46 /* describe a work item for /proc */
47 void (*desc)(struct slow_work *work, struct seq_file *m);
48#endif
41}; 49};
42 50
43/* 51/*
@@ -56,6 +64,9 @@ struct slow_work {
56#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ 64#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */
57 const struct slow_work_ops *ops; /* operations table for this item */ 65 const struct slow_work_ops *ops; /* operations table for this item */
58 struct list_head link; /* link in queue */ 66 struct list_head link; /* link in queue */
67#ifdef CONFIG_SLOW_WORK_PROC
68 struct timespec mark; /* jiffies at which queued or exec begun */
69#endif
59}; 70};
60 71
61struct delayed_slow_work { 72struct delayed_slow_work {
diff --git a/init/Kconfig b/init/Kconfig
index 9e03ef8b311e..ab5c64801fe5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1098,6 +1098,16 @@ config SLOW_WORK
1098 1098
1099 See Documentation/slow-work.txt. 1099 See Documentation/slow-work.txt.
1100 1100
1101config SLOW_WORK_PROC
1102 bool "Slow work debugging through /proc"
1103 default n
1104 depends on SLOW_WORK && PROC_FS
1105 help
1106 Display the contents of the slow work run queue through /proc,
1107 including items currently executing.
1108
1109 See Documentation/slow-work.txt.
1110
1101endmenu # General setup 1111endmenu # General setup
1102 1112
1103config HAVE_GENERIC_DMA_COHERENT 1113config HAVE_GENERIC_DMA_COHERENT
diff --git a/kernel/Makefile b/kernel/Makefile
index b8d4cd8ac0b9..776ffed1556d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/
94obj-$(CONFIG_RING_BUFFER) += trace/ 94obj-$(CONFIG_RING_BUFFER) += trace/
95obj-$(CONFIG_SMP) += sched_cpupri.o 95obj-$(CONFIG_SMP) += sched_cpupri.o
96obj-$(CONFIG_SLOW_WORK) += slow-work.o 96obj-$(CONFIG_SLOW_WORK) += slow-work.o
97obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o
97obj-$(CONFIG_PERF_EVENTS) += perf_event.o 98obj-$(CONFIG_PERF_EVENTS) += perf_event.o
98 99
99ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) 100ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
diff --git a/kernel/slow-work-proc.c b/kernel/slow-work-proc.c
new file mode 100644
index 000000000000..3988032571f5
--- /dev/null
+++ b/kernel/slow-work-proc.c
@@ -0,0 +1,227 @@
1/* Slow work debugging
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/slow-work.h>
14#include <linux/fs.h>
15#include <linux/time.h>
16#include <linux/seq_file.h>
17#include "slow-work.h"
18
19#define ITERATOR_SHIFT (BITS_PER_LONG - 4)
20#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT)
21#define ITERATOR_COUNTER (~ITERATOR_SELECTOR)
22
23void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m)
24{
25 seq_puts(m, "Slow-work: New thread");
26}
27
28/*
29 * Render the time mark field on a work item into a 5-char time with units plus
30 * a space
31 */
32static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
33{
34 struct timespec now, diff;
35
36 now = CURRENT_TIME;
37 diff = timespec_sub(now, work->mark);
38
39 if (diff.tv_sec < 0)
40 seq_puts(m, " -ve ");
41 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000)
42 seq_printf(m, "%3luns ", diff.tv_nsec);
43 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000)
44 seq_printf(m, "%3luus ", diff.tv_nsec / 1000);
45 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000)
46 seq_printf(m, "%3lums ", diff.tv_nsec / 1000000);
47 else if (diff.tv_sec <= 1)
48 seq_puts(m, " 1s ");
49 else if (diff.tv_sec < 60)
50 seq_printf(m, "%4lus ", diff.tv_sec);
51 else if (diff.tv_sec < 60 * 60)
52 seq_printf(m, "%4lum ", diff.tv_sec / 60);
53 else if (diff.tv_sec < 60 * 60 * 24)
54 seq_printf(m, "%4luh ", diff.tv_sec / 3600);
55 else
56 seq_puts(m, "exces ");
57}
58
59/*
60 * Describe a slow work item for /proc
61 */
62static int slow_work_runqueue_show(struct seq_file *m, void *v)
63{
64 struct slow_work *work;
65 struct list_head *p = v;
66 unsigned long id;
67
68 switch ((unsigned long) v) {
69 case 1:
70 seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n");
71 return 0;
72 case 2:
73 seq_puts(m, "=== ===== ================ == ===== ==========\n");
74 return 0;
75
76 case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1:
77 id = (unsigned long) v - 3;
78
79 read_lock(&slow_work_execs_lock);
80 work = slow_work_execs[id];
81 if (work) {
82 smp_read_barrier_depends();
83
84 seq_printf(m, "%3lu %5d %16p %2lx ",
85 id, slow_work_pids[id], work, work->flags);
86 slow_work_print_mark(m, work);
87
88 if (work->ops->desc)
89 work->ops->desc(work, m);
90 seq_putc(m, '\n');
91 }
92 read_unlock(&slow_work_execs_lock);
93 return 0;
94
95 default:
96 work = list_entry(p, struct slow_work, link);
97 seq_printf(m, "%3s - %16p %2lx ",
98 work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq",
99 work, work->flags);
100 slow_work_print_mark(m, work);
101
102 if (work->ops->desc)
103 work->ops->desc(work, m);
104 seq_putc(m, '\n');
105 return 0;
106 }
107}
108
109/*
110 * map the iterator to a work item
111 */
112static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos)
113{
114 struct list_head *p;
115 unsigned long count, id;
116
117 switch (*_pos >> ITERATOR_SHIFT) {
118 case 0x0:
119 if (*_pos == 0)
120 *_pos = 1;
121 if (*_pos < 3)
122 return (void *)(unsigned long) *_pos;
123 if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT)
124 for (id = *_pos - 3;
125 id < SLOW_WORK_THREAD_LIMIT;
126 id++, (*_pos)++)
127 if (slow_work_execs[id])
128 return (void *)(unsigned long) *_pos;
129 *_pos = 0x1UL << ITERATOR_SHIFT;
130
131 case 0x1:
132 count = *_pos & ITERATOR_COUNTER;
133 list_for_each(p, &slow_work_queue) {
134 if (count == 0)
135 return p;
136 count--;
137 }
138 *_pos = 0x2UL << ITERATOR_SHIFT;
139
140 case 0x2:
141 count = *_pos & ITERATOR_COUNTER;
142 list_for_each(p, &vslow_work_queue) {
143 if (count == 0)
144 return p;
145 count--;
146 }
147 *_pos = 0x3UL << ITERATOR_SHIFT;
148
149 default:
150 return NULL;
151 }
152}
153
154/*
155 * set up the iterator to start reading from the first line
156 */
157static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos)
158{
159 spin_lock_irq(&slow_work_queue_lock);
160 return slow_work_runqueue_index(m, _pos);
161}
162
163/*
164 * move to the next line
165 */
166static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos)
167{
168 struct list_head *p = v;
169 unsigned long selector = *_pos >> ITERATOR_SHIFT;
170
171 (*_pos)++;
172 switch (selector) {
173 case 0x0:
174 return slow_work_runqueue_index(m, _pos);
175
176 case 0x1:
177 if (*_pos >> ITERATOR_SHIFT == 0x1) {
178 p = p->next;
179 if (p != &slow_work_queue)
180 return p;
181 }
182 *_pos = 0x2UL << ITERATOR_SHIFT;
183 p = &vslow_work_queue;
184
185 case 0x2:
186 if (*_pos >> ITERATOR_SHIFT == 0x2) {
187 p = p->next;
188 if (p != &vslow_work_queue)
189 return p;
190 }
191 *_pos = 0x3UL << ITERATOR_SHIFT;
192
193 default:
194 return NULL;
195 }
196}
197
198/*
199 * clean up after reading
200 */
201static void slow_work_runqueue_stop(struct seq_file *m, void *v)
202{
203 spin_unlock_irq(&slow_work_queue_lock);
204}
205
206static const struct seq_operations slow_work_runqueue_ops = {
207 .start = slow_work_runqueue_start,
208 .stop = slow_work_runqueue_stop,
209 .next = slow_work_runqueue_next,
210 .show = slow_work_runqueue_show,
211};
212
213/*
214 * open "/proc/slow_work_rq" to list queue contents
215 */
216static int slow_work_runqueue_open(struct inode *inode, struct file *file)
217{
218 return seq_open(file, &slow_work_runqueue_ops);
219}
220
221const struct file_operations slow_work_runqueue_fops = {
222 .owner = THIS_MODULE,
223 .open = slow_work_runqueue_open,
224 .read = seq_read,
225 .llseek = seq_lseek,
226 .release = seq_release,
227};
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index f67e1daae93d..b763bc2d2670 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,13 +16,8 @@
16#include <linux/kthread.h> 16#include <linux/kthread.h>
17#include <linux/freezer.h> 17#include <linux/freezer.h>
18#include <linux/wait.h> 18#include <linux/wait.h>
19 19#include <linux/proc_fs.h>
20#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of 20#include "slow-work.h"
21 * things to do */
22#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
23 * OOM */
24
25#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */
26 21
27static void slow_work_cull_timeout(unsigned long); 22static void slow_work_cull_timeout(unsigned long);
28static void slow_work_oom_timeout(unsigned long); 23static void slow_work_oom_timeout(unsigned long);
@@ -117,6 +112,15 @@ static DEFINE_MUTEX(slow_work_unreg_sync_lock);
117#endif 112#endif
118 113
119/* 114/*
115 * Data for tracking currently executing items for indication through /proc
116 */
117#ifdef CONFIG_SLOW_WORK_PROC
118struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
119pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
120DEFINE_RWLOCK(slow_work_execs_lock);
121#endif
122
123/*
120 * The queues of work items and the lock governing access to them. These are 124 * The queues of work items and the lock governing access to them. These are
121 * shared between all the CPUs. It doesn't make sense to have per-CPU queues 125 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
122 * as the number of threads bears no relation to the number of CPUs. 126 * as the number of threads bears no relation to the number of CPUs.
@@ -124,9 +128,9 @@ static DEFINE_MUTEX(slow_work_unreg_sync_lock);
124 * There are two queues of work items: one for slow work items, and one for 128 * There are two queues of work items: one for slow work items, and one for
125 * very slow work items. 129 * very slow work items.
126 */ 130 */
127static LIST_HEAD(slow_work_queue); 131LIST_HEAD(slow_work_queue);
128static LIST_HEAD(vslow_work_queue); 132LIST_HEAD(vslow_work_queue);
129static DEFINE_SPINLOCK(slow_work_queue_lock); 133DEFINE_SPINLOCK(slow_work_queue_lock);
130 134
131/* 135/*
132 * The thread controls. A variable used to signal to the threads that they 136 * The thread controls. A variable used to signal to the threads that they
@@ -182,7 +186,7 @@ static unsigned slow_work_calc_vsmax(void)
182 * Attempt to execute stuff queued on a slow thread. Return true if we managed 186 * Attempt to execute stuff queued on a slow thread. Return true if we managed
183 * it, false if there was nothing to do. 187 * it, false if there was nothing to do.
184 */ 188 */
185static bool slow_work_execute(int id) 189static noinline bool slow_work_execute(int id)
186{ 190{
187#ifdef CONFIG_MODULES 191#ifdef CONFIG_MODULES
188 struct module *module; 192 struct module *module;
@@ -227,6 +231,10 @@ static bool slow_work_execute(int id)
227 if (work) 231 if (work)
228 slow_work_thread_processing[id] = work->owner; 232 slow_work_thread_processing[id] = work->owner;
229#endif 233#endif
234 if (work) {
235 slow_work_mark_time(work);
236 slow_work_begin_exec(id, work);
237 }
230 238
231 spin_unlock_irq(&slow_work_queue_lock); 239 spin_unlock_irq(&slow_work_queue_lock);
232 240
@@ -247,6 +255,8 @@ static bool slow_work_execute(int id)
247 /* wake up anyone waiting for this work to be complete */ 255 /* wake up anyone waiting for this work to be complete */
248 wake_up_bit(&work->flags, SLOW_WORK_EXECUTING); 256 wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
249 257
258 slow_work_end_exec(id, work);
259
250 /* if someone tried to enqueue the item whilst we were executing it, 260 /* if someone tried to enqueue the item whilst we were executing it,
251 * then it'll be left unenqueued to avoid multiple threads trying to 261 * then it'll be left unenqueued to avoid multiple threads trying to
252 * execute it simultaneously 262 * execute it simultaneously
@@ -285,6 +295,7 @@ auto_requeue:
285 * - we transfer our ref on the item back to the appropriate queue 295 * - we transfer our ref on the item back to the appropriate queue
286 * - don't wake another thread up as we're awake already 296 * - don't wake another thread up as we're awake already
287 */ 297 */
298 slow_work_mark_time(work);
288 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 299 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
289 list_add_tail(&work->link, &vslow_work_queue); 300 list_add_tail(&work->link, &vslow_work_queue);
290 else 301 else
@@ -368,6 +379,7 @@ int slow_work_enqueue(struct slow_work *work)
368 ret = slow_work_get_ref(work); 379 ret = slow_work_get_ref(work);
369 if (ret < 0) 380 if (ret < 0)
370 goto failed; 381 goto failed;
382 slow_work_mark_time(work);
371 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 383 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
372 list_add_tail(&work->link, &vslow_work_queue); 384 list_add_tail(&work->link, &vslow_work_queue);
373 else 385 else
@@ -489,6 +501,7 @@ static void delayed_slow_work_timer(unsigned long data)
489 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); 501 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
490 put = true; 502 put = true;
491 } else { 503 } else {
504 slow_work_mark_time(work);
492 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 505 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
493 list_add_tail(&work->link, &vslow_work_queue); 506 list_add_tail(&work->link, &vslow_work_queue);
494 else 507 else
@@ -627,6 +640,7 @@ static int slow_work_thread(void *_data)
627 id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT); 640 id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
628 BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT); 641 BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
629 __set_bit(id, slow_work_ids); 642 __set_bit(id, slow_work_ids);
643 slow_work_set_thread_pid(id, current->pid);
630 spin_unlock_irq(&slow_work_queue_lock); 644 spin_unlock_irq(&slow_work_queue_lock);
631 645
632 sprintf(current->comm, "kslowd%03u", id); 646 sprintf(current->comm, "kslowd%03u", id);
@@ -669,6 +683,7 @@ static int slow_work_thread(void *_data)
669 } 683 }
670 684
671 spin_lock_irq(&slow_work_queue_lock); 685 spin_lock_irq(&slow_work_queue_lock);
686 slow_work_set_thread_pid(id, 0);
672 __clear_bit(id, slow_work_ids); 687 __clear_bit(id, slow_work_ids);
673 spin_unlock_irq(&slow_work_queue_lock); 688 spin_unlock_irq(&slow_work_queue_lock);
674 689
@@ -722,6 +737,9 @@ static void slow_work_new_thread_execute(struct slow_work *work)
722static const struct slow_work_ops slow_work_new_thread_ops = { 737static const struct slow_work_ops slow_work_new_thread_ops = {
723 .owner = THIS_MODULE, 738 .owner = THIS_MODULE,
724 .execute = slow_work_new_thread_execute, 739 .execute = slow_work_new_thread_execute,
740#ifdef CONFIG_SLOW_WORK_PROC
741 .desc = slow_work_new_thread_desc,
742#endif
725}; 743};
726 744
727/* 745/*
@@ -949,6 +967,10 @@ static int __init init_slow_work(void)
949 if (slow_work_max_max_threads < nr_cpus * 2) 967 if (slow_work_max_max_threads < nr_cpus * 2)
950 slow_work_max_max_threads = nr_cpus * 2; 968 slow_work_max_max_threads = nr_cpus * 2;
951#endif 969#endif
970#ifdef CONFIG_SLOW_WORK_PROC
971 proc_create("slow_work_rq", S_IFREG | 0400, NULL,
972 &slow_work_runqueue_fops);
973#endif
952 return 0; 974 return 0;
953} 975}
954 976
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
new file mode 100644
index 000000000000..3c2f007f3ad6
--- /dev/null
+++ b/kernel/slow-work.h
@@ -0,0 +1,72 @@
1/* Slow work private definitions
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
13 * things to do */
14#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
15 * OOM */
16
17#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */
18
19/*
20 * slow-work.c
21 */
22#ifdef CONFIG_SLOW_WORK_PROC
23extern struct slow_work *slow_work_execs[];
24extern pid_t slow_work_pids[];
25extern rwlock_t slow_work_execs_lock;
26#endif
27
28extern struct list_head slow_work_queue;
29extern struct list_head vslow_work_queue;
30extern spinlock_t slow_work_queue_lock;
31
32/*
33 * slow-work-proc.c
34 */
35#ifdef CONFIG_SLOW_WORK_PROC
36extern const struct file_operations slow_work_runqueue_fops;
37
38extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
39#endif
40
41/*
42 * Helper functions
43 */
44static inline void slow_work_set_thread_pid(int id, pid_t pid)
45{
46#ifdef CONFIG_SLOW_WORK_PROC
47 slow_work_pids[id] = pid;
48#endif
49}
50
51static inline void slow_work_mark_time(struct slow_work *work)
52{
53#ifdef CONFIG_SLOW_WORK_PROC
54 work->mark = CURRENT_TIME;
55#endif
56}
57
58static inline void slow_work_begin_exec(int id, struct slow_work *work)
59{
60#ifdef CONFIG_SLOW_WORK_PROC
61 slow_work_execs[id] = work;
62#endif
63}
64
65static inline void slow_work_end_exec(int id, struct slow_work *work)
66{
67#ifdef CONFIG_SLOW_WORK_PROC
68 write_lock(&slow_work_execs_lock);
69 slow_work_execs[id] = NULL;
70 write_unlock(&slow_work_execs_lock);
71#endif
72}