Diffstat (limited to 'kernel/slow-work.c')
 -rw-r--r--  kernel/slow-work.c | 519
 1 file changed, 471 insertions, 48 deletions
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 0d31135efbf4..7494bbf5a270 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,11 +16,8 @@
16#include <linux/kthread.h> 16#include <linux/kthread.h>
17#include <linux/freezer.h> 17#include <linux/freezer.h>
18#include <linux/wait.h> 18#include <linux/wait.h>
19 19#include <linux/debugfs.h>
20#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of 20#include "slow-work.h"
21 * things to do */
22#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
23 * OOM */
24 21
25static void slow_work_cull_timeout(unsigned long); 22static void slow_work_cull_timeout(unsigned long);
26static void slow_work_oom_timeout(unsigned long); 23static void slow_work_oom_timeout(unsigned long);
@@ -46,13 +43,12 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
46 43
47#ifdef CONFIG_SYSCTL 44#ifdef CONFIG_SYSCTL
48static const int slow_work_min_min_threads = 2; 45static const int slow_work_min_min_threads = 2;
49static int slow_work_max_max_threads = 255; 46static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
50static const int slow_work_min_vslow = 1; 47static const int slow_work_min_vslow = 1;
51static const int slow_work_max_vslow = 99; 48static const int slow_work_max_vslow = 99;
52 49
53ctl_table slow_work_sysctls[] = { 50ctl_table slow_work_sysctls[] = {
54 { 51 {
55 .ctl_name = CTL_UNNUMBERED,
56 .procname = "min-threads", 52 .procname = "min-threads",
57 .data = &slow_work_min_threads, 53 .data = &slow_work_min_threads,
58 .maxlen = sizeof(unsigned), 54 .maxlen = sizeof(unsigned),
@@ -62,7 +58,6 @@ ctl_table slow_work_sysctls[] = {
62 .extra2 = &slow_work_max_threads, 58 .extra2 = &slow_work_max_threads,
63 }, 59 },
64 { 60 {
65 .ctl_name = CTL_UNNUMBERED,
66 .procname = "max-threads", 61 .procname = "max-threads",
67 .data = &slow_work_max_threads, 62 .data = &slow_work_max_threads,
68 .maxlen = sizeof(unsigned), 63 .maxlen = sizeof(unsigned),
@@ -72,16 +67,15 @@ ctl_table slow_work_sysctls[] = {
72 .extra2 = (void *) &slow_work_max_max_threads, 67 .extra2 = (void *) &slow_work_max_max_threads,
73 }, 68 },
74 { 69 {
75 .ctl_name = CTL_UNNUMBERED,
76 .procname = "vslow-percentage", 70 .procname = "vslow-percentage",
77 .data = &vslow_work_proportion, 71 .data = &vslow_work_proportion,
78 .maxlen = sizeof(unsigned), 72 .maxlen = sizeof(unsigned),
79 .mode = 0644, 73 .mode = 0644,
80 .proc_handler = &proc_dointvec_minmax, 74 .proc_handler = proc_dointvec_minmax,
81 .extra1 = (void *) &slow_work_min_vslow, 75 .extra1 = (void *) &slow_work_min_vslow,
82 .extra2 = (void *) &slow_work_max_vslow, 76 .extra2 = (void *) &slow_work_max_vslow,
83 }, 77 },
84 { .ctl_name = 0 } 78 {}
85}; 79};
86#endif 80#endif
87 81
@@ -98,6 +92,56 @@ static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
98static struct slow_work slow_work_new_thread; /* new thread starter */ 92static struct slow_work slow_work_new_thread; /* new thread starter */
99 93
100/* 94/*
95 * slow work ID allocation (use slow_work_queue_lock)
96 */
97static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
98
99/*
100 * Unregistration tracking to prevent put_ref() from disappearing during module
101 * unload
102 */
103#ifdef CONFIG_MODULES
104static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
105static struct module *slow_work_unreg_module;
106static struct slow_work *slow_work_unreg_work_item;
107static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
108static DEFINE_MUTEX(slow_work_unreg_sync_lock);
109
110static void slow_work_set_thread_processing(int id, struct slow_work *work)
111{
112 if (work)
113 slow_work_thread_processing[id] = work->owner;
114}
115static void slow_work_done_thread_processing(int id, struct slow_work *work)
116{
117 struct module *module = slow_work_thread_processing[id];
118
119 slow_work_thread_processing[id] = NULL;
120 smp_mb();
121 if (slow_work_unreg_work_item == work ||
122 slow_work_unreg_module == module)
123 wake_up_all(&slow_work_unreg_wq);
124}
125static void slow_work_clear_thread_processing(int id)
126{
127 slow_work_thread_processing[id] = NULL;
128}
129#else
130static void slow_work_set_thread_processing(int id, struct slow_work *work) {}
131static void slow_work_done_thread_processing(int id, struct slow_work *work) {}
132static void slow_work_clear_thread_processing(int id) {}
133#endif
134
135/*
136 * Data for tracking currently executing items for indication through /proc
137 */
138#ifdef CONFIG_SLOW_WORK_DEBUG
139struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
140pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
141DEFINE_RWLOCK(slow_work_execs_lock);
142#endif
143
144/*
101 * The queues of work items and the lock governing access to them. These are 145 * The queues of work items and the lock governing access to them. These are
102 * shared between all the CPUs. It doesn't make sense to have per-CPU queues 146 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
103 * as the number of threads bears no relation to the number of CPUs. 147 * as the number of threads bears no relation to the number of CPUs.
@@ -105,9 +149,18 @@ static struct slow_work slow_work_new_thread; /* new thread starter */
105 * There are two queues of work items: one for slow work items, and one for 149 * There are two queues of work items: one for slow work items, and one for
106 * very slow work items. 150 * very slow work items.
107 */ 151 */
108static LIST_HEAD(slow_work_queue); 152LIST_HEAD(slow_work_queue);
109static LIST_HEAD(vslow_work_queue); 153LIST_HEAD(vslow_work_queue);
110static DEFINE_SPINLOCK(slow_work_queue_lock); 154DEFINE_SPINLOCK(slow_work_queue_lock);
155
156/*
157 * The following are two wait queues that get pinged when a work item is placed
158 * on an empty queue. These allow work items that are hogging a thread by
159 * sleeping in a way that could be deferred to yield their thread and enqueue
160 * themselves.
161 */
162static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation);
163static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation);
111 164
112/* 165/*
113 * The thread controls. A variable used to signal to the threads that they 166 * The thread controls. A variable used to signal to the threads that they
@@ -126,6 +179,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
126static int slow_work_user_count; 179static int slow_work_user_count;
127static DEFINE_MUTEX(slow_work_user_lock); 180static DEFINE_MUTEX(slow_work_user_lock);
128 181
182static inline int slow_work_get_ref(struct slow_work *work)
183{
184 if (work->ops->get_ref)
185 return work->ops->get_ref(work);
186
187 return 0;
188}
189
190static inline void slow_work_put_ref(struct slow_work *work)
191{
192 if (work->ops->put_ref)
193 work->ops->put_ref(work);
194}
195
129/* 196/*
130 * Calculate the maximum number of active threads in the pool that are 197 * Calculate the maximum number of active threads in the pool that are
131 * permitted to process very slow work items. 198 * permitted to process very slow work items.
@@ -149,7 +216,7 @@ static unsigned slow_work_calc_vsmax(void)
149 * Attempt to execute stuff queued on a slow thread. Return true if we managed 216 * Attempt to execute stuff queued on a slow thread. Return true if we managed
150 * it, false if there was nothing to do. 217 * it, false if there was nothing to do.
151 */ 218 */
152static bool slow_work_execute(void) 219static noinline bool slow_work_execute(int id)
153{ 220{
154 struct slow_work *work = NULL; 221 struct slow_work *work = NULL;
155 unsigned vsmax; 222 unsigned vsmax;
@@ -186,6 +253,13 @@ static bool slow_work_execute(void)
186 } else { 253 } else {
187 very_slow = false; /* avoid the compiler warning */ 254 very_slow = false; /* avoid the compiler warning */
188 } 255 }
256
257 slow_work_set_thread_processing(id, work);
258 if (work) {
259 slow_work_mark_time(work);
260 slow_work_begin_exec(id, work);
261 }
262
189 spin_unlock_irq(&slow_work_queue_lock); 263 spin_unlock_irq(&slow_work_queue_lock);
190 264
191 if (!work) 265 if (!work)
@@ -194,12 +268,19 @@ static bool slow_work_execute(void)
194 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) 268 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
195 BUG(); 269 BUG();
196 270
197 work->ops->execute(work); 271 /* don't execute if the work is in the process of being cancelled */
272 if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
273 work->ops->execute(work);
198 274
199 if (very_slow) 275 if (very_slow)
200 atomic_dec(&vslow_work_executing_count); 276 atomic_dec(&vslow_work_executing_count);
201 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); 277 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
202 278
279 /* wake up anyone waiting for this work to be complete */
280 wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
281
282 slow_work_end_exec(id, work);
283
203 /* if someone tried to enqueue the item whilst we were executing it, 284 /* if someone tried to enqueue the item whilst we were executing it,
204 * then it'll be left unenqueued to avoid multiple threads trying to 285 * then it'll be left unenqueued to avoid multiple threads trying to
205 * execute it simultaneously 286 * execute it simultaneously
@@ -219,7 +300,10 @@ static bool slow_work_execute(void)
219 spin_unlock_irq(&slow_work_queue_lock); 300 spin_unlock_irq(&slow_work_queue_lock);
220 } 301 }
221 302
222 work->ops->put_ref(work); 303 /* sort out the race between module unloading and put_ref() */
304 slow_work_put_ref(work);
305 slow_work_done_thread_processing(id, work);
306
223 return true; 307 return true;
224 308
225auto_requeue: 309auto_requeue:
@@ -227,15 +311,61 @@ auto_requeue:
227 * - we transfer our ref on the item back to the appropriate queue 311 * - we transfer our ref on the item back to the appropriate queue
228 * - don't wake another thread up as we're awake already 312 * - don't wake another thread up as we're awake already
229 */ 313 */
314 slow_work_mark_time(work);
230 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 315 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
231 list_add_tail(&work->link, &vslow_work_queue); 316 list_add_tail(&work->link, &vslow_work_queue);
232 else 317 else
233 list_add_tail(&work->link, &slow_work_queue); 318 list_add_tail(&work->link, &slow_work_queue);
234 spin_unlock_irq(&slow_work_queue_lock); 319 spin_unlock_irq(&slow_work_queue_lock);
320 slow_work_clear_thread_processing(id);
235 return true; 321 return true;
236} 322}
237 323
238/** 324/**
325 * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work
326 * work: The work item under execution that wants to sleep
327 * _timeout: Scheduler sleep timeout
328 *
329 * Allow a requeueable work item to sleep on a slow-work processor thread until
330 * that thread is needed to do some other work or the sleep is interrupted by
331 * some other event.
332 *
333 * The caller must set up a wake up event before calling this and must have set
334 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
335 * condition before calling this function as no test is made here.
336 *
337 * False is returned if there is nothing on the queue; true is returned if the
338 * work item should be requeued
339 */
340bool slow_work_sleep_till_thread_needed(struct slow_work *work,
341 signed long *_timeout)
342{
343 wait_queue_head_t *wfo_wq;
344 struct list_head *queue;
345
346 DEFINE_WAIT(wait);
347
348 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
349 wfo_wq = &vslow_work_queue_waits_for_occupation;
350 queue = &vslow_work_queue;
351 } else {
352 wfo_wq = &slow_work_queue_waits_for_occupation;
353 queue = &slow_work_queue;
354 }
355
356 if (!list_empty(queue))
357 return true;
358
359 add_wait_queue_exclusive(wfo_wq, &wait);
360 if (list_empty(queue))
361 *_timeout = schedule_timeout(*_timeout);
362 finish_wait(wfo_wq, &wait);
363
364 return !list_empty(queue);
365}
366EXPORT_SYMBOL(slow_work_sleep_till_thread_needed);
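For illustration only (not part of this patch), here is a minimal sketch of how an executor might use slow_work_sleep_till_thread_needed(). The struct my_object wrapper, the my_object_wq wait queue and the ->ready flag are hypothetical; as the comment above requires, the caller sets its sleep state and tests its own condition before calling, and requeues itself if the helper reports that the thread is wanted elsewhere.

struct my_object {
	struct slow_work	work;
	bool			ready;		/* hypothetical completion flag */
};

static DECLARE_WAIT_QUEUE_HEAD(my_object_wq);	/* woken when ->ready becomes true */

static void my_object_execute(struct slow_work *work)
{
	struct my_object *obj = container_of(work, struct my_object, work);
	signed long timeout = 5 * HZ;
	DEFINE_WAIT(wait);

	for (;;) {
		/* set the sleep mode and test our own condition first - the
		 * helper makes no test of its own */
		prepare_to_wait(&my_object_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (obj->ready || timeout <= 0)
			break;
		if (slow_work_sleep_till_thread_needed(&obj->work, &timeout)) {
			/* another item is queued behind us: yield the thread
			 * and ask to be run again later */
			finish_wait(&my_object_wq, &wait);
			slow_work_enqueue(&obj->work);
			return;
		}
	}
	finish_wait(&my_object_wq, &wait);
	/* ... do the actual work here ... */
}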
367
368/**
239 * slow_work_enqueue - Schedule a slow work item for processing 369 * slow_work_enqueue - Schedule a slow work item for processing
240 * @work: The work item to queue 370 * @work: The work item to queue
241 * 371 *
@@ -260,16 +390,22 @@ auto_requeue:
260 * allowed to pick items to execute. This ensures that very slow items won't 390 * allowed to pick items to execute. This ensures that very slow items won't
261 * overly block ones that are just ordinarily slow. 391 * overly block ones that are just ordinarily slow.
262 * 392 *
263 * Returns 0 if successful, -EAGAIN if not. 393 * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is
394 * attempted queued)
264 */ 395 */
265int slow_work_enqueue(struct slow_work *work) 396int slow_work_enqueue(struct slow_work *work)
266{ 397{
398 wait_queue_head_t *wfo_wq;
399 struct list_head *queue;
267 unsigned long flags; 400 unsigned long flags;
401 int ret;
402
403 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
404 return -ECANCELED;
268 405
269 BUG_ON(slow_work_user_count <= 0); 406 BUG_ON(slow_work_user_count <= 0);
270 BUG_ON(!work); 407 BUG_ON(!work);
271 BUG_ON(!work->ops); 408 BUG_ON(!work->ops);
272 BUG_ON(!work->ops->get_ref);
273 409
274 /* when honouring an enqueue request, we only promise that we will run 410 /* when honouring an enqueue request, we only promise that we will run
275 * the work function in the future; we do not promise to run it once 411 * the work function in the future; we do not promise to run it once
@@ -280,8 +416,19 @@ int slow_work_enqueue(struct slow_work *work)
280 * maintaining our promise 416 * maintaining our promise
281 */ 417 */
282 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { 418 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
419 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
420 wfo_wq = &vslow_work_queue_waits_for_occupation;
421 queue = &vslow_work_queue;
422 } else {
423 wfo_wq = &slow_work_queue_waits_for_occupation;
424 queue = &slow_work_queue;
425 }
426
283 spin_lock_irqsave(&slow_work_queue_lock, flags); 427 spin_lock_irqsave(&slow_work_queue_lock, flags);
284 428
429 if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
430 goto cancelled;
431
285 /* we promise that we will not attempt to execute the work 432 /* we promise that we will not attempt to execute the work
286 * function in more than one thread simultaneously 433 * function in more than one thread simultaneously
287 * 434 *
@@ -299,25 +446,221 @@ int slow_work_enqueue(struct slow_work *work)
299 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { 446 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
300 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); 447 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
301 } else { 448 } else {
302 if (work->ops->get_ref(work) < 0) 449 ret = slow_work_get_ref(work);
303 goto cant_get_ref; 450 if (ret < 0)
304 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 451 goto failed;
305 list_add_tail(&work->link, &vslow_work_queue); 452 slow_work_mark_time(work);
306 else 453 list_add_tail(&work->link, queue);
307 list_add_tail(&work->link, &slow_work_queue);
308 wake_up(&slow_work_thread_wq); 454 wake_up(&slow_work_thread_wq);
455
456 /* if someone who could be requeued is sleeping on a
457 * thread, then ask them to yield their thread */
458 if (work->link.prev == queue)
459 wake_up(wfo_wq);
309 } 460 }
310 461
311 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 462 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
312 } 463 }
313 return 0; 464 return 0;
314 465
315cant_get_ref: 466cancelled:
467 ret = -ECANCELED;
468failed:
316 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 469 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
317 return -EAGAIN; 470 return ret;
318} 471}
319EXPORT_SYMBOL(slow_work_enqueue); 472EXPORT_SYMBOL(slow_work_enqueue);
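As a usage sketch (not part of the patch): a caller now only has to supply ->owner and ->execute, since the new slow_work_get_ref()/slow_work_put_ref() wrappers make ->get_ref/->put_ref optional. struct my_item, my_item_execute() and my_item_kick() are hypothetical, and slow_work_init() is assumed to be provided by linux/slow-work.h.

#include <linux/module.h>
#include <linux/slow-work.h>

struct my_item {
	struct slow_work	work;
	int			payload;
};

static void my_item_execute(struct slow_work *work)
{
	struct my_item *item = container_of(work, struct my_item, work);

	pr_info("slow-work: processing payload %d\n", item->payload);
}

static const struct slow_work_ops my_item_ops = {
	.owner		= THIS_MODULE,		/* lets unregistration wait for us */
	.execute	= my_item_execute,
	/* .get_ref/.put_ref omitted: the item is statically allocated */
};

static struct my_item my_item;

static int my_item_kick(int payload)
{
	my_item.payload = payload;
	slow_work_init(&my_item.work, &my_item_ops);
	return slow_work_enqueue(&my_item.work);  /* 0, -EAGAIN or -ECANCELED */
}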
320 473
474static int slow_work_wait(void *word)
475{
476 schedule();
477 return 0;
478}
479
480/**
481 * slow_work_cancel - Cancel a slow work item
482 * @work: The work item to cancel
483 *
484 * This function will cancel a previously enqueued work item. If we cannot
 485 * cancel the work item, it is guaranteed to have run when this function
486 * returns.
487 */
488void slow_work_cancel(struct slow_work *work)
489{
490 bool wait = true, put = false;
491
492 set_bit(SLOW_WORK_CANCELLING, &work->flags);
493 smp_mb();
494
495 /* if the work item is a delayed work item with an active timer, we
496 * need to wait for the timer to finish _before_ getting the spinlock,
497 * lest we deadlock against the timer routine
498 *
499 * the timer routine will leave DELAYED set if it notices the
500 * CANCELLING flag in time
501 */
502 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
503 struct delayed_slow_work *dwork =
504 container_of(work, struct delayed_slow_work, work);
505 del_timer_sync(&dwork->timer);
506 }
507
508 spin_lock_irq(&slow_work_queue_lock);
509
510 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
511 /* the timer routine aborted or never happened, so we are left
512 * holding the timer's reference on the item and should just
513 * drop the pending flag and wait for any ongoing execution to
514 * finish */
515 struct delayed_slow_work *dwork =
516 container_of(work, struct delayed_slow_work, work);
517
518 BUG_ON(timer_pending(&dwork->timer));
519 BUG_ON(!list_empty(&work->link));
520
521 clear_bit(SLOW_WORK_DELAYED, &work->flags);
522 put = true;
523 clear_bit(SLOW_WORK_PENDING, &work->flags);
524
525 } else if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
526 !list_empty(&work->link)) {
527 /* the link in the pending queue holds a reference on the item
528 * that we will need to release */
529 list_del_init(&work->link);
530 wait = false;
531 put = true;
532 clear_bit(SLOW_WORK_PENDING, &work->flags);
533
534 } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
535 /* the executor is holding our only reference on the item, so
536 * we merely need to wait for it to finish executing */
537 clear_bit(SLOW_WORK_PENDING, &work->flags);
538 }
539
540 spin_unlock_irq(&slow_work_queue_lock);
541
542 /* the EXECUTING flag is set by the executor whilst the spinlock is set
543 * and before the item is dequeued - so assuming the above doesn't
544 * actually dequeue it, simply waiting for the EXECUTING flag to be
545 * released here should be sufficient */
546 if (wait)
547 wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
548 TASK_UNINTERRUPTIBLE);
549
550 clear_bit(SLOW_WORK_CANCELLING, &work->flags);
551 if (put)
552 slow_work_put_ref(work);
553}
554EXPORT_SYMBOL(slow_work_cancel);
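Teardown for the hypothetical my_item above is then a single call (sketch, not part of the patch): when slow_work_cancel() returns, the item is neither queued nor executing, so its container may safely be freed.

static void my_item_teardown(void)
{
	/* blocks until the item is off the queues and has finished running */
	slow_work_cancel(&my_item.work);
}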
555
556/*
557 * Handle expiry of the delay timer, indicating that a delayed slow work item
558 * should now be queued if not cancelled
559 */
560static void delayed_slow_work_timer(unsigned long data)
561{
562 wait_queue_head_t *wfo_wq;
563 struct list_head *queue;
564 struct slow_work *work = (struct slow_work *) data;
565 unsigned long flags;
566 bool queued = false, put = false, first = false;
567
568 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
569 wfo_wq = &vslow_work_queue_waits_for_occupation;
570 queue = &vslow_work_queue;
571 } else {
572 wfo_wq = &slow_work_queue_waits_for_occupation;
573 queue = &slow_work_queue;
574 }
575
576 spin_lock_irqsave(&slow_work_queue_lock, flags);
577 if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
578 clear_bit(SLOW_WORK_DELAYED, &work->flags);
579
580 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
581 /* we discard the reference the timer was holding in
582 * favour of the one the executor holds */
583 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
584 put = true;
585 } else {
586 slow_work_mark_time(work);
587 list_add_tail(&work->link, queue);
588 queued = true;
589 if (work->link.prev == queue)
590 first = true;
591 }
592 }
593
594 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
595 if (put)
596 slow_work_put_ref(work);
597 if (first)
598 wake_up(wfo_wq);
599 if (queued)
600 wake_up(&slow_work_thread_wq);
601}
602
603/**
604 * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing
605 * @dwork: The delayed work item to queue
606 * @delay: When to start executing the work, in jiffies from now
607 *
608 * This is similar to slow_work_enqueue(), but it adds a delay before the work
609 * is actually queued for processing.
610 *
611 * The item can have delayed processing requested on it whilst it is being
612 * executed. The delay will begin immediately, and if it expires before the
613 * item finishes executing, the item will be placed back on the queue when it
614 * has done executing.
615 */
616int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
617 unsigned long delay)
618{
619 struct slow_work *work = &dwork->work;
620 unsigned long flags;
621 int ret;
622
623 if (delay == 0)
624 return slow_work_enqueue(&dwork->work);
625
626 BUG_ON(slow_work_user_count <= 0);
627 BUG_ON(!work);
628 BUG_ON(!work->ops);
629
630 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
631 return -ECANCELED;
632
633 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
634 spin_lock_irqsave(&slow_work_queue_lock, flags);
635
636 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
637 goto cancelled;
638
639 /* the timer holds a reference whilst it is pending */
640 ret = work->ops->get_ref(work);
641 if (ret < 0)
642 goto cant_get_ref;
643
644 if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags))
645 BUG();
646 dwork->timer.expires = jiffies + delay;
647 dwork->timer.data = (unsigned long) work;
648 dwork->timer.function = delayed_slow_work_timer;
649 add_timer(&dwork->timer);
650
651 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
652 }
653
654 return 0;
655
656cancelled:
657 ret = -ECANCELED;
658cant_get_ref:
659 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
660 return ret;
661}
662EXPORT_SYMBOL(delayed_slow_work_enqueue);
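A sketch of the delayed variant (again, not part of the patch); delayed_slow_work_init() is assumed to come from linux/slow-work.h alongside the enqueue function above.

static void my_tick_execute(struct slow_work *work)
{
	pr_info("slow-work: delayed tick fired\n");
}

static const struct slow_work_ops my_tick_ops = {
	.owner		= THIS_MODULE,
	.execute	= my_tick_execute,
};

static struct delayed_slow_work my_tick;

static int my_tick_kick(void)
{
	delayed_slow_work_init(&my_tick, &my_tick_ops);

	/* my_tick_execute() runs roughly two seconds from now */
	return delayed_slow_work_enqueue(&my_tick, 2 * HZ);
}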
663
321/* 664/*
322 * Schedule a cull of the thread pool at some time in the near future 665 * Schedule a cull of the thread pool at some time in the near future
323 */ 666 */
@@ -368,13 +711,23 @@ static inline bool slow_work_available(int vsmax)
368 */ 711 */
369static int slow_work_thread(void *_data) 712static int slow_work_thread(void *_data)
370{ 713{
371 int vsmax; 714 int vsmax, id;
372 715
373 DEFINE_WAIT(wait); 716 DEFINE_WAIT(wait);
374 717
375 set_freezable(); 718 set_freezable();
376 set_user_nice(current, -5); 719 set_user_nice(current, -5);
377 720
721 /* allocate ourselves an ID */
722 spin_lock_irq(&slow_work_queue_lock);
723 id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
724 BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
725 __set_bit(id, slow_work_ids);
726 slow_work_set_thread_pid(id, current->pid);
727 spin_unlock_irq(&slow_work_queue_lock);
728
729 sprintf(current->comm, "kslowd%03u", id);
730
378 for (;;) { 731 for (;;) {
379 vsmax = vslow_work_proportion; 732 vsmax = vslow_work_proportion;
380 vsmax *= atomic_read(&slow_work_thread_count); 733 vsmax *= atomic_read(&slow_work_thread_count);
@@ -395,7 +748,7 @@ static int slow_work_thread(void *_data)
395 vsmax *= atomic_read(&slow_work_thread_count); 748 vsmax *= atomic_read(&slow_work_thread_count);
396 vsmax /= 100; 749 vsmax /= 100;
397 750
398 if (slow_work_available(vsmax) && slow_work_execute()) { 751 if (slow_work_available(vsmax) && slow_work_execute(id)) {
399 cond_resched(); 752 cond_resched();
400 if (list_empty(&slow_work_queue) && 753 if (list_empty(&slow_work_queue) &&
401 list_empty(&vslow_work_queue) && 754 list_empty(&vslow_work_queue) &&
@@ -412,6 +765,11 @@ static int slow_work_thread(void *_data)
412 break; 765 break;
413 } 766 }
414 767
768 spin_lock_irq(&slow_work_queue_lock);
769 slow_work_set_thread_pid(id, 0);
770 __clear_bit(id, slow_work_ids);
771 spin_unlock_irq(&slow_work_queue_lock);
772
415 if (atomic_dec_and_test(&slow_work_thread_count)) 773 if (atomic_dec_and_test(&slow_work_thread_count))
416 complete_and_exit(&slow_work_last_thread_exited, 0); 774 complete_and_exit(&slow_work_last_thread_exited, 0);
417 return 0; 775 return 0;
@@ -427,21 +785,6 @@ static void slow_work_cull_timeout(unsigned long data)
427} 785}
428 786
429/* 787/*
430 * Get a reference on slow work thread starter
431 */
432static int slow_work_new_thread_get_ref(struct slow_work *work)
433{
434 return 0;
435}
436
437/*
438 * Drop a reference on slow work thread starter
439 */
440static void slow_work_new_thread_put_ref(struct slow_work *work)
441{
442}
443
444/*
445 * Start a new slow work thread 788 * Start a new slow work thread
446 */ 789 */
447static void slow_work_new_thread_execute(struct slow_work *work) 790static void slow_work_new_thread_execute(struct slow_work *work)
@@ -475,9 +818,11 @@ static void slow_work_new_thread_execute(struct slow_work *work)
475} 818}
476 819
477static const struct slow_work_ops slow_work_new_thread_ops = { 820static const struct slow_work_ops slow_work_new_thread_ops = {
478 .get_ref = slow_work_new_thread_get_ref, 821 .owner = THIS_MODULE,
479 .put_ref = slow_work_new_thread_put_ref,
480 .execute = slow_work_new_thread_execute, 822 .execute = slow_work_new_thread_execute,
823#ifdef CONFIG_SLOW_WORK_DEBUG
824 .desc = slow_work_new_thread_desc,
825#endif
481}; 826};
482 827
483/* 828/*
@@ -546,12 +891,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
546 891
547/** 892/**
548 * slow_work_register_user - Register a user of the facility 893 * slow_work_register_user - Register a user of the facility
894 * @module: The module about to make use of the facility
549 * 895 *
550 * Register a user of the facility, starting up the initial threads if there 896 * Register a user of the facility, starting up the initial threads if there
551 * aren't any other users at this point. This will return 0 if successful, or 897 * aren't any other users at this point. This will return 0 if successful, or
552 * an error if not. 898 * an error if not.
553 */ 899 */
554int slow_work_register_user(void) 900int slow_work_register_user(struct module *module)
555{ 901{
556 struct task_struct *p; 902 struct task_struct *p;
557 int loop; 903 int loop;
@@ -598,14 +944,81 @@ error:
598} 944}
599EXPORT_SYMBOL(slow_work_register_user); 945EXPORT_SYMBOL(slow_work_register_user);
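With the new signature, a module registers itself before queueing anything; a sketch (not part of the patch), reusing the hypothetical my_item_kick() from earlier:

static int __init my_module_init(void)
{
	int ret;

	/* ensure the thread pool exists before we queue our first item */
	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		return ret;

	ret = my_item_kick(1);
	if (ret < 0)
		slow_work_unregister_user(THIS_MODULE);
	return ret;
}
module_init(my_module_init);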
600 946
947/*
948 * wait for all outstanding items from the calling module to complete
949 * - note that more items may be queued whilst we're waiting
950 */
951static void slow_work_wait_for_items(struct module *module)
952{
953#ifdef CONFIG_MODULES
954 DECLARE_WAITQUEUE(myself, current);
955 struct slow_work *work;
956 int loop;
957
958 mutex_lock(&slow_work_unreg_sync_lock);
959 add_wait_queue(&slow_work_unreg_wq, &myself);
960
961 for (;;) {
962 spin_lock_irq(&slow_work_queue_lock);
963
964 /* first of all, we wait for the last queued item in each list
965 * to be processed */
966 list_for_each_entry_reverse(work, &vslow_work_queue, link) {
967 if (work->owner == module) {
968 set_current_state(TASK_UNINTERRUPTIBLE);
969 slow_work_unreg_work_item = work;
970 goto do_wait;
971 }
972 }
973 list_for_each_entry_reverse(work, &slow_work_queue, link) {
974 if (work->owner == module) {
975 set_current_state(TASK_UNINTERRUPTIBLE);
976 slow_work_unreg_work_item = work;
977 goto do_wait;
978 }
979 }
980
981 /* then we wait for the items being processed to finish */
982 slow_work_unreg_module = module;
983 smp_mb();
984 for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
985 if (slow_work_thread_processing[loop] == module)
986 goto do_wait;
987 }
988 spin_unlock_irq(&slow_work_queue_lock);
989 break; /* okay, we're done */
990
991 do_wait:
992 spin_unlock_irq(&slow_work_queue_lock);
993 schedule();
994 slow_work_unreg_work_item = NULL;
995 slow_work_unreg_module = NULL;
996 }
997
998 remove_wait_queue(&slow_work_unreg_wq, &myself);
999 mutex_unlock(&slow_work_unreg_sync_lock);
1000#endif /* CONFIG_MODULES */
1001}
1002
601/** 1003/**
602 * slow_work_unregister_user - Unregister a user of the facility 1004 * slow_work_unregister_user - Unregister a user of the facility
1005 * @module: The module whose items should be cleared
603 * 1006 *
604 * Unregister a user of the facility, killing all the threads if this was the 1007 * Unregister a user of the facility, killing all the threads if this was the
605 * last one. 1008 * last one.
1009 *
1010 * This waits for all the work items belonging to the nominated module to go
1011 * away before proceeding.
606 */ 1012 */
607void slow_work_unregister_user(void) 1013void slow_work_unregister_user(struct module *module)
608{ 1014{
1015 /* first of all, wait for all outstanding items from the calling module
1016 * to complete */
1017 if (module)
1018 slow_work_wait_for_items(module);
1019
1020 /* then we can actually go about shutting down the facility if need
1021 * be */
609 mutex_lock(&slow_work_user_lock); 1022 mutex_lock(&slow_work_user_lock);
610 1023
611 BUG_ON(slow_work_user_count <= 0); 1024 BUG_ON(slow_work_user_count <= 0);
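And the matching exit path (sketch, not part of the patch): passing the module pointer makes unregistration wait for this module's outstanding items via slow_work_wait_for_items(), which is what closes the put_ref()-during-unload race.

static void __exit my_module_exit(void)
{
	slow_work_cancel(&my_item.work);	/* hypothetical item from earlier */
	slow_work_unregister_user(THIS_MODULE);	/* waits for our remaining items */
}
module_exit(my_module_exit);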
@@ -639,6 +1052,16 @@ static int __init init_slow_work(void)
639 if (slow_work_max_max_threads < nr_cpus * 2) 1052 if (slow_work_max_max_threads < nr_cpus * 2)
640 slow_work_max_max_threads = nr_cpus * 2; 1053 slow_work_max_max_threads = nr_cpus * 2;
641#endif 1054#endif
1055#ifdef CONFIG_SLOW_WORK_DEBUG
1056 {
1057 struct dentry *dbdir;
1058
1059 dbdir = debugfs_create_dir("slow_work", NULL);
1060 if (dbdir && !IS_ERR(dbdir))
1061 debugfs_create_file("runqueue", S_IFREG | 0400, dbdir,
1062 NULL, &slow_work_runqueue_fops);
1063 }
1064#endif
642 return 0; 1065 return 0;
643} 1066}
644 1067