author		Ingo Molnar <mingo@elte.hu>	2009-04-08 11:02:50 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-08 11:02:57 -0400
commit		ff96e612cba32510e263e17b213235fe5746397e (patch)
tree		a8df57d76b10e0901a4fb76cd2987eb9826a560a /kernel/slow-work.c
parent		cd84a42f315e50edd454c27a3da3951ccd3d735a (diff)
parent		577c9c456f0e1371cbade38eaf91ae8e8a308555 (diff)
Merge commit 'v2.6.30-rc1' into core/urgent
Merge reason: need latest upstream to queue up dependent fix
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/slow-work.c')
 -rw-r--r--	kernel/slow-work.c	640
1 files changed, 640 insertions, 0 deletions
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
new file mode 100644
index 000000000000..cf2bc01186ef
--- /dev/null
+++ b/kernel/slow-work.c
@@ -0,0 +1,640 @@
/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 * See Documentation/slow-work.txt
 */

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
					 * things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
					 * OOM */

static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);

#ifdef CONFIG_SYSCTL
static int slow_work_min_threads_sysctl(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);

static int slow_work_max_threads_sysctl(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
#endif

/*
 * The pool of threads has at least min threads in it as long as someone is
 * using the facility, and may have as many as max.
 *
 * A portion of the pool may be processing very slow operations.
 */
static unsigned slow_work_min_threads = 2;
static unsigned slow_work_max_threads = 4;
static unsigned vslow_work_proportion = 50; /* % of threads that may process
					     * very slow work */

#ifdef CONFIG_SYSCTL
static const int slow_work_min_min_threads = 2;
static int slow_work_max_max_threads = 255;
static const int slow_work_min_vslow = 1;
static const int slow_work_max_vslow = 99;

ctl_table slow_work_sysctls[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "min-threads",
		.data		= &slow_work_min_threads,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= slow_work_min_threads_sysctl,
		.extra1		= (void *) &slow_work_min_min_threads,
		.extra2		= &slow_work_max_threads,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "max-threads",
		.data		= &slow_work_max_threads,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= slow_work_max_threads_sysctl,
		.extra1		= &slow_work_min_threads,
		.extra2		= (void *) &slow_work_max_max_threads,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "vslow-percentage",
		.data		= &vslow_work_proportion,
		.maxlen		= sizeof(unsigned),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= (void *) &slow_work_min_vslow,
		.extra2		= (void *) &slow_work_max_vslow,
	},
	{ .ctl_name = 0 }
};
#endif
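
/*
 * Note: slow_work_sysctls is not registered in this file; assuming the usual
 * hookup under the "kernel" table in kernel/sysctl.c, the knobs above would
 * surface as /proc/sys/kernel/slow-work/{min-threads,max-threads,
 * vslow-percentage}.
 */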

/*
 * The active state of the thread pool
 */
static atomic_t slow_work_thread_count;
static atomic_t vslow_work_executing_count;

static bool slow_work_may_not_start_new_thread;
static bool slow_work_cull;	/* cull a thread due to lack of activity */
static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
static struct slow_work slow_work_new_thread; /* new thread starter */

/*
 * The queues of work items and the lock governing access to them. These are
 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
 * as the number of threads bears no relation to the number of CPUs.
 *
 * There are two queues of work items: one for slow work items, and one for
 * very slow work items.
 */
static LIST_HEAD(slow_work_queue);
static LIST_HEAD(vslow_work_queue);
static DEFINE_SPINLOCK(slow_work_queue_lock);

/*
 * The thread controls. A variable used to signal to the threads that they
 * should exit when the queue is empty, a waitqueue used by the threads to wait
 * for signals, and a completion set by the last thread to exit.
 */
static bool slow_work_threads_should_exit;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
static DECLARE_COMPLETION(slow_work_last_thread_exited);

/*
 * The number of users of the thread pool and its lock. Whilst this is zero we
 * have no threads hanging around, and when this reaches zero, we wait for all
 * active or queued work items to complete and kill all the threads we do have.
 */
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);

/*
 * Calculate the maximum number of active threads in the pool that are
 * permitted to process very slow work items.
 *
 * The answer is rounded up to at least 1, but may not equal or exceed the
 * maximum number of the threads in the pool. This means we always have at
 * least one thread that can process slow work items, and we always have at
 * least one thread that won't get tied up doing so.
 */
static unsigned slow_work_calc_vsmax(void)
{
	unsigned vsmax;

	vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
	vsmax /= 100;
	vsmax = max(vsmax, 1U);
	return min(vsmax, slow_work_max_threads - 1);
}
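
/*
 * Worked example: with the default vslow_work_proportion of 50 and four
 * running threads, vsmax = 4 * 50 / 100 = 2; the clamps then hold the result
 * within [1, slow_work_max_threads - 1], so at least one thread is always
 * left free for ordinary slow work.
 */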

/*
 * Attempt to execute stuff queued on a slow thread. Return true if we managed
 * it, false if there was nothing to do.
 */
static bool slow_work_execute(void)
{
	struct slow_work *work = NULL;
	unsigned vsmax;
	bool very_slow;

	vsmax = slow_work_calc_vsmax();

	/* see if we can schedule a new thread to be started if we're not
	 * keeping up with the work */
	if (!waitqueue_active(&slow_work_thread_wq) &&
	    (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
	    atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
	    !slow_work_may_not_start_new_thread)
		slow_work_enqueue(&slow_work_new_thread);

	/* find something to execute */
	spin_lock_irq(&slow_work_queue_lock);
	if (!list_empty(&vslow_work_queue) &&
	    atomic_read(&vslow_work_executing_count) < vsmax) {
		work = list_entry(vslow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		atomic_inc(&vslow_work_executing_count);
		very_slow = true;
	} else if (!list_empty(&slow_work_queue)) {
		work = list_entry(slow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		very_slow = false;
	} else {
		very_slow = false; /* avoid the compiler warning */
	}
	spin_unlock_irq(&slow_work_queue_lock);

	if (!work)
		return false;

	if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
		BUG();

	work->ops->execute(work);

	if (very_slow)
		atomic_dec(&vslow_work_executing_count);
	clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);

	/* if someone tried to enqueue the item whilst we were executing it,
	 * then it'll be left unenqueued to avoid multiple threads trying to
	 * execute it simultaneously
	 *
	 * there is, however, a race between us testing the pending flag and
	 * getting the spinlock, and between the enqueuer setting the pending
	 * flag and getting the spinlock, so we use a deferral bit to tell us
	 * if the enqueuer got there first
	 */
	if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irq(&slow_work_queue_lock);

		if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
		    test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
			goto auto_requeue;

		spin_unlock_irq(&slow_work_queue_lock);
	}

	work->ops->put_ref(work);
	return true;

auto_requeue:
	/* we must complete the enqueue operation
	 * - we transfer our ref on the item back to the appropriate queue
	 * - don't wake another thread up as we're awake already
	 */
	if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
		list_add_tail(&work->link, &vslow_work_queue);
	else
		list_add_tail(&work->link, &slow_work_queue);
	spin_unlock_irq(&slow_work_queue_lock);
	return true;
}
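
/*
 * To summarise the flag protocol used by slow_work_execute() above and
 * slow_work_enqueue() below: PENDING means "will run at least once more",
 * EXECUTING means "an execution is in flight", and ENQ_DEFERRED records an
 * enqueue that arrived mid-execution, telling the executing thread to
 * requeue the item itself once it has finished.
 */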

/**
 * slow_work_enqueue - Schedule a slow work item for processing
 * @work: The work item to queue
 *
 * Schedule a slow work item for processing. If the item is already undergoing
 * execution, this guarantees not to re-enter the execution routine until the
 * first execution finishes.
 *
 * The item is pinned by this function as it retains a reference to it, managed
 * through the item operations. The item is unpinned once it has been
 * executed.
 *
 * An item may hog the thread that is running it for a relatively large amount
 * of time, sufficient, for example, to perform several lookup, mkdir, create
 * and setxattr operations. It may sleep on I/O and may sleep to obtain locks.
 *
 * Conversely, if a number of items are awaiting processing, it may take some
 * time before any given item is given attention. The number of threads in the
 * pool may be increased to deal with demand, but only up to a limit.
 *
 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
 * the very slow queue, from which only a portion of the threads will be
 * allowed to pick items to execute. This ensures that very slow items won't
 * overly block ones that are just ordinarily slow.
 *
 * Returns 0 if successful, -EAGAIN if not.
 */
int slow_work_enqueue(struct slow_work *work)
{
	unsigned long flags;

	BUG_ON(slow_work_user_count <= 0);
	BUG_ON(!work);
	BUG_ON(!work->ops);
	BUG_ON(!work->ops->get_ref);

	/* when honouring an enqueue request, we only promise that we will run
	 * the work function in the future; we do not promise to run it once
	 * per enqueue request
	 *
	 * we use the PENDING bit to merge together repeat requests without
	 * having to disable IRQs and take the spinlock, whilst still
	 * maintaining our promise
	 */
	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irqsave(&slow_work_queue_lock, flags);

		/* we promise that we will not attempt to execute the work
		 * function in more than one thread simultaneously
		 *
		 * this, however, leaves us with a problem if we're asked to
		 * enqueue the work whilst someone is executing the work
		 * function as simply queueing the work immediately means that
		 * another thread may try executing it whilst it is already
		 * under execution
		 *
		 * to deal with this, we set the ENQ_DEFERRED bit instead of
		 * enqueueing, and the thread currently executing the work
		 * function will enqueue the work item when the work function
		 * returns and it has cleared the EXECUTING bit
		 */
		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
		} else {
			if (work->ops->get_ref(work) < 0)
				goto cant_get_ref;
			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
				list_add_tail(&work->link, &vslow_work_queue);
			else
				list_add_tail(&work->link, &slow_work_queue);
			wake_up(&slow_work_thread_wq);
		}

		spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	}
	return 0;

cant_get_ref:
	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(slow_work_enqueue);
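
/*
 * Illustrative usage (a sketch, not part of this file; my_object, my_exec
 * and friends are hypothetical names). A user embeds a struct slow_work in
 * its own object, supplies the three ops, and brackets everything with
 * slow_work_register_user()/slow_work_unregister_user():
 *
 *	struct my_object {
 *		struct slow_work work;	// embedded slow work item
 *		// ... user data ...
 *	};
 *
 *	static int my_get_ref(struct slow_work *work)
 *	{
 *		// pin the containing object; return 0 on success, <0 to
 *		// make slow_work_enqueue() fail with -EAGAIN
 *		return 0;
 *	}
 *
 *	static void my_put_ref(struct slow_work *work)
 *	{
 *		// release the reference taken by my_get_ref()
 *	}
 *
 *	static void my_exec(struct slow_work *work)
 *	{
 *		struct my_object *obj =
 *			container_of(work, struct my_object, work);
 *		// do the slow operation on obj; may sleep
 *	}
 *
 *	static const struct slow_work_ops my_ops = {
 *		.get_ref = my_get_ref,
 *		.put_ref = my_put_ref,
 *		.execute = my_exec,
 *	};
 *
 * Setup then looks like: slow_work_register_user();
 * slow_work_init(&obj->work, &my_ops); and slow_work_enqueue(&obj->work)
 * as often as needed - repeat enqueues merge via the PENDING bit.
 */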

/*
 * Worker thread culling algorithm
 */
static bool slow_work_cull_thread(void)
{
	unsigned long flags;
	bool do_cull = false;

	spin_lock_irqsave(&slow_work_queue_lock, flags);

	if (slow_work_cull) {
		slow_work_cull = false;

		if (list_empty(&slow_work_queue) &&
		    list_empty(&vslow_work_queue) &&
		    atomic_read(&slow_work_thread_count) >
		    slow_work_min_threads) {
			mod_timer(&slow_work_cull_timer,
				  jiffies + SLOW_WORK_CULL_TIMEOUT);
			do_cull = true;
		}
	}

	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return do_cull;
}

/*
 * Determine if there is slow work available for dispatch
 */
static inline bool slow_work_available(int vsmax)
{
	return !list_empty(&slow_work_queue) ||
		(!list_empty(&vslow_work_queue) &&
		 atomic_read(&vslow_work_executing_count) < vsmax);
}

/*
 * Worker thread dispatcher
 */
static int slow_work_thread(void *_data)
{
	int vsmax;

	DEFINE_WAIT(wait);

	set_freezable();
	set_user_nice(current, -5);

	for (;;) {
		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		prepare_to_wait(&slow_work_thread_wq, &wait,
				TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !slow_work_threads_should_exit &&
		    !slow_work_available(vsmax) &&
		    !slow_work_cull)
			schedule();
		finish_wait(&slow_work_thread_wq, &wait);

		try_to_freeze();

		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		if (slow_work_available(vsmax) && slow_work_execute()) {
			cond_resched();
			if (list_empty(&slow_work_queue) &&
			    list_empty(&vslow_work_queue) &&
			    atomic_read(&slow_work_thread_count) >
			    slow_work_min_threads)
				mod_timer(&slow_work_cull_timer,
					  jiffies + SLOW_WORK_CULL_TIMEOUT);
			continue;
		}

		if (slow_work_threads_should_exit)
			break;

		if (slow_work_cull && slow_work_cull_thread())
			break;
	}

	if (atomic_dec_and_test(&slow_work_thread_count))
		complete_and_exit(&slow_work_last_thread_exited, 0);
	return 0;
}
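
/*
 * In outline, each kslowd thread loops: recompute its very-slow quota, sleep
 * until there is work (or a freeze, exit or cull request), execute one item,
 * and arm the cull timer whenever both queues are empty while the pool
 * exceeds slow_work_min_threads. Note that the open-coded vsmax calculation
 * here lacks the clamping applied by slow_work_calc_vsmax().
 */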

/*
 * Handle thread cull timer expiration
 */
static void slow_work_cull_timeout(unsigned long data)
{
	slow_work_cull = true;
	wake_up(&slow_work_thread_wq);
}

/*
 * Get a reference on slow work thread starter
 */
static int slow_work_new_thread_get_ref(struct slow_work *work)
{
	return 0;
}

/*
 * Drop a reference on slow work thread starter
 */
static void slow_work_new_thread_put_ref(struct slow_work *work)
{
}

/*
 * Start a new slow work thread
 */
static void slow_work_new_thread_execute(struct slow_work *work)
{
	struct task_struct *p;

	if (slow_work_threads_should_exit)
		return;

	if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
		return;

	if (!mutex_trylock(&slow_work_user_lock))
		return;

	slow_work_may_not_start_new_thread = true;
	atomic_inc(&slow_work_thread_count);
	p = kthread_run(slow_work_thread, NULL, "kslowd");
	if (IS_ERR(p)) {
		printk(KERN_DEBUG "Slow work thread pool: OOM\n");
		if (atomic_dec_and_test(&slow_work_thread_count))
			BUG(); /* we're running on a slow work thread... */
		mod_timer(&slow_work_oom_timer,
			  jiffies + SLOW_WORK_OOM_TIMEOUT);
	} else {
		/* ratelimit the starting of new threads */
		mod_timer(&slow_work_oom_timer, jiffies + 1);
	}

	mutex_unlock(&slow_work_user_lock);
}

static const struct slow_work_ops slow_work_new_thread_ops = {
	.get_ref	= slow_work_new_thread_get_ref,
	.put_ref	= slow_work_new_thread_put_ref,
	.execute	= slow_work_new_thread_execute,
};
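
/*
 * Pool growth thus reuses the facility's own machinery: slow_work_execute()
 * enqueues the static slow_work_new_thread item when the queues back up, and
 * its execute op above spawns one more kslowd thread, with
 * slow_work_oom_timer rate-limiting further attempts.
 */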

/*
 * post-OOM new thread start suppression expiration
 */
static void slow_work_oom_timeout(unsigned long data)
{
	slow_work_may_not_start_new_thread = false;
}

#ifdef CONFIG_SYSCTL
/*
 * Handle adjustment of the minimum number of threads
 */
static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
					struct file *filp, void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	int n;

	if (ret == 0) {
		mutex_lock(&slow_work_user_lock);
		if (slow_work_user_count > 0) {
			/* see if we need to start or stop threads */
			n = atomic_read(&slow_work_thread_count) -
				slow_work_min_threads;

			if (n < 0 && !slow_work_may_not_start_new_thread)
				slow_work_enqueue(&slow_work_new_thread);
			else if (n > 0)
				mod_timer(&slow_work_cull_timer,
					  jiffies + SLOW_WORK_CULL_TIMEOUT);
		}
		mutex_unlock(&slow_work_user_lock);
	}

	return ret;
}

/*
 * Handle adjustment of the maximum number of threads
 */
static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
					struct file *filp, void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	int n;

	if (ret == 0) {
		mutex_lock(&slow_work_user_lock);
		if (slow_work_user_count > 0) {
			/* see if we need to stop threads */
			n = slow_work_max_threads -
				atomic_read(&slow_work_thread_count);

			if (n < 0)
				mod_timer(&slow_work_cull_timer,
					  jiffies + SLOW_WORK_CULL_TIMEOUT);
		}
		mutex_unlock(&slow_work_user_lock);
	}

	return ret;
}
#endif /* CONFIG_SYSCTL */

/**
 * slow_work_register_user - Register a user of the facility
 *
 * Register a user of the facility, starting up the initial threads if there
 * aren't any other users at this point. This will return 0 if successful, or
 * an error if not.
 */
int slow_work_register_user(void)
{
	struct task_struct *p;
	int loop;

	mutex_lock(&slow_work_user_lock);

	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
		init_completion(&slow_work_last_thread_exited);

		slow_work_threads_should_exit = false;
		slow_work_init(&slow_work_new_thread,
			       &slow_work_new_thread_ops);
		slow_work_may_not_start_new_thread = false;
		slow_work_cull = false;

		/* start the minimum number of threads */
		for (loop = 0; loop < slow_work_min_threads; loop++) {
			atomic_inc(&slow_work_thread_count);
			p = kthread_run(slow_work_thread, NULL, "kslowd");
			if (IS_ERR(p))
				goto error;
		}
		printk(KERN_NOTICE "Slow work thread pool: Ready\n");
	}

	slow_work_user_count++;
	mutex_unlock(&slow_work_user_lock);
	return 0;

error:
	if (atomic_dec_and_test(&slow_work_thread_count))
		complete(&slow_work_last_thread_exited);
	if (loop > 0) {
		printk(KERN_ERR "Slow work thread pool:"
		       " Aborting startup on ENOMEM\n");
		slow_work_threads_should_exit = true;
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_ERR "Slow work thread pool: Aborted\n");
	}
	mutex_unlock(&slow_work_user_lock);
	return PTR_ERR(p);
}
EXPORT_SYMBOL(slow_work_register_user);

/**
 * slow_work_unregister_user - Unregister a user of the facility
 *
 * Unregister a user of the facility, killing all the threads if this was the
 * last one.
 */
void slow_work_unregister_user(void)
{
	mutex_lock(&slow_work_user_lock);

	BUG_ON(slow_work_user_count <= 0);

	slow_work_user_count--;
	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
		slow_work_threads_should_exit = true;
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_NOTICE "Slow work thread pool:"
		       " Shut down complete\n");
	}

	del_timer_sync(&slow_work_cull_timer);

	mutex_unlock(&slow_work_user_lock);
}
EXPORT_SYMBOL(slow_work_unregister_user);
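
/*
 * slow_work_unregister_user() pairs with slow_work_register_user(): when the
 * last user goes away, the threads are told to exit and the caller blocks on
 * slow_work_last_thread_exited until the pool has wound down.
 */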

/*
 * Initialise the slow work facility
 */
static int __init init_slow_work(void)
{
	unsigned nr_cpus = num_possible_cpus();

	if (slow_work_max_threads < nr_cpus)
		slow_work_max_threads = nr_cpus;
#ifdef CONFIG_SYSCTL
	if (slow_work_max_max_threads < nr_cpus * 2)
		slow_work_max_max_threads = nr_cpus * 2;
#endif
	return 0;
}

subsys_initcall(init_slow_work);