author		David Howells <dhowells@redhat.com>	2009-11-19 13:10:23 -0500
committer	David Howells <dhowells@redhat.com>	2009-11-19 13:10:23 -0500
commit		3d7a641e544e428191667e8b1f83f96fa46dbd65 (patch)
tree		172aa672eca96b94f5531885b82abb82b43c7d8a /kernel/slow-work.c
parent		66b00a7c93ec782d118d2c03bd599cfd041e80a1 (diff)
SLOW_WORK: Wait for outstanding work items belonging to a module to clear
Wait for outstanding slow work items belonging to a module to clear when
unregistering that module as a user of the facility.  This prevents the
put_ref code of a work item from being taken away before it returns.

Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'kernel/slow-work.c')
-rw-r--r--	kernel/slow-work.c	132
1 file changed, 126 insertions, 6 deletions
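For orientation, here is a minimal usage sketch (not part of this patch) of how a client module might drive the revised interface, in which register/unregister now take the module pointer and the ops carry an .owner field. The names my_work, my_ops, my_get_ref, my_put_ref, my_execute and the init/exit functions are hypothetical placeholders:

/* Hedged usage sketch, not part of this patch: a hypothetical client module
 * of the slow-work facility after this change. */
#include <linux/module.h>
#include <linux/slow-work.h>

static struct slow_work my_work;

static int my_get_ref(struct slow_work *work)
{
	return 0;			/* pin whatever backs the work item */
}

static void my_put_ref(struct slow_work *work)
{
}

static void my_execute(struct slow_work *work)
{
	/* do the slow operation here */
}

static const struct slow_work_ops my_ops = {
	.owner	 = THIS_MODULE,		/* lets slow-work track the owning module */
	.get_ref = my_get_ref,
	.put_ref = my_put_ref,
	.execute = my_execute,
};

static int __init my_init(void)
{
	int ret;

	/* register as a user, passing the module so its items can be tracked */
	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		return ret;

	slow_work_init(&my_work, &my_ops);
	return slow_work_enqueue(&my_work);
}

static void __exit my_exit(void)
{
	/* waits for this module's outstanding items before tearing down */
	slow_work_unregister_user(THIS_MODULE);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

The exit path relies on the slow_work_wait_for_items() behaviour added in the diff below, which blocks until both queued and in-flight items belonging to the module have cleared.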
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 0d31135efbf4..dd08f376e406 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -22,6 +22,8 @@
 #define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
					 * OOM */
 
+#define SLOW_WORK_THREAD_LIMIT	255	/* abs maximum number of slow-work threads */
+
 static void slow_work_cull_timeout(unsigned long);
 static void slow_work_oom_timeout(unsigned long);
 
@@ -46,7 +48,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
 
 #ifdef CONFIG_SYSCTL
 static const int slow_work_min_min_threads = 2;
-static int slow_work_max_max_threads = 255;
+static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
 static const int slow_work_min_vslow = 1;
 static const int slow_work_max_vslow = 99;
 
@@ -98,6 +100,23 @@ static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
 static struct slow_work slow_work_new_thread; /* new thread starter */
 
 /*
+ * slow work ID allocation (use slow_work_queue_lock)
+ */
+static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
+
+/*
+ * Unregistration tracking to prevent put_ref() from disappearing during module
+ * unload
+ */
+#ifdef CONFIG_MODULES
+static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
+static struct module *slow_work_unreg_module;
+static struct slow_work *slow_work_unreg_work_item;
+static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
+static DEFINE_MUTEX(slow_work_unreg_sync_lock);
+#endif
+
+/*
  * The queues of work items and the lock governing access to them.  These are
  * shared between all the CPUs.  It doesn't make sense to have per-CPU queues
  * as the number of threads bears no relation to the number of CPUs.
@@ -149,8 +168,11 @@ static unsigned slow_work_calc_vsmax(void)
  * Attempt to execute stuff queued on a slow thread.  Return true if we managed
  * it, false if there was nothing to do.
  */
-static bool slow_work_execute(void)
+static bool slow_work_execute(int id)
 {
+#ifdef CONFIG_MODULES
+	struct module *module;
+#endif
 	struct slow_work *work = NULL;
 	unsigned vsmax;
 	bool very_slow;
@@ -186,6 +208,12 @@ static bool slow_work_execute(void)
 	} else {
 		very_slow = false; /* avoid the compiler warning */
 	}
+
+#ifdef CONFIG_MODULES
+	if (work)
+		slow_work_thread_processing[id] = work->owner;
+#endif
+
 	spin_unlock_irq(&slow_work_queue_lock);
 
 	if (!work)
@@ -219,7 +247,18 @@ static bool slow_work_execute(void)
 		spin_unlock_irq(&slow_work_queue_lock);
 	}
 
+	/* sort out the race between module unloading and put_ref() */
 	work->ops->put_ref(work);
+
+#ifdef CONFIG_MODULES
+	module = slow_work_thread_processing[id];
+	slow_work_thread_processing[id] = NULL;
+	smp_mb();
+	if (slow_work_unreg_work_item == work ||
+	    slow_work_unreg_module == module)
+		wake_up_all(&slow_work_unreg_wq);
+#endif
+
 	return true;
 
 auto_requeue:
@@ -232,6 +271,7 @@ auto_requeue:
 	else
 		list_add_tail(&work->link, &slow_work_queue);
 	spin_unlock_irq(&slow_work_queue_lock);
+	slow_work_thread_processing[id] = NULL;
 	return true;
 }
 
@@ -368,13 +408,22 @@ static inline bool slow_work_available(int vsmax)
  */
 static int slow_work_thread(void *_data)
 {
-	int vsmax;
+	int vsmax, id;
 
 	DEFINE_WAIT(wait);
 
 	set_freezable();
 	set_user_nice(current, -5);
 
+	/* allocate ourselves an ID */
+	spin_lock_irq(&slow_work_queue_lock);
+	id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
+	BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
+	__set_bit(id, slow_work_ids);
+	spin_unlock_irq(&slow_work_queue_lock);
+
+	sprintf(current->comm, "kslowd%03u", id);
+
 	for (;;) {
 		vsmax = vslow_work_proportion;
 		vsmax *= atomic_read(&slow_work_thread_count);
@@ -395,7 +444,7 @@ static int slow_work_thread(void *_data)
 		vsmax *= atomic_read(&slow_work_thread_count);
 		vsmax /= 100;
 
-		if (slow_work_available(vsmax) && slow_work_execute()) {
+		if (slow_work_available(vsmax) && slow_work_execute(id)) {
 			cond_resched();
 			if (list_empty(&slow_work_queue) &&
 			    list_empty(&vslow_work_queue) &&
@@ -412,6 +461,10 @@ static int slow_work_thread(void *_data)
 			break;
 	}
 
+	spin_lock_irq(&slow_work_queue_lock);
+	__clear_bit(id, slow_work_ids);
+	spin_unlock_irq(&slow_work_queue_lock);
+
 	if (atomic_dec_and_test(&slow_work_thread_count))
 		complete_and_exit(&slow_work_last_thread_exited, 0);
 	return 0;
@@ -475,6 +528,7 @@ static void slow_work_new_thread_execute(struct slow_work *work)
 }
 
 static const struct slow_work_ops slow_work_new_thread_ops = {
+	.owner = THIS_MODULE,
 	.get_ref = slow_work_new_thread_get_ref,
 	.put_ref = slow_work_new_thread_put_ref,
 	.execute = slow_work_new_thread_execute,
@@ -546,12 +600,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
 
 /**
  * slow_work_register_user - Register a user of the facility
+ * @module: The module about to make use of the facility
  *
  * Register a user of the facility, starting up the initial threads if there
  * aren't any other users at this point.  This will return 0 if successful, or
  * an error if not.
  */
-int slow_work_register_user(void)
+int slow_work_register_user(struct module *module)
 {
 	struct task_struct *p;
 	int loop;
@@ -598,14 +653,79 @@ error:
 }
 EXPORT_SYMBOL(slow_work_register_user);
 
+/*
+ * wait for all outstanding items from the calling module to complete
+ * - note that more items may be queued whilst we're waiting
+ */
+static void slow_work_wait_for_items(struct module *module)
+{
+	DECLARE_WAITQUEUE(myself, current);
+	struct slow_work *work;
+	int loop;
+
+	mutex_lock(&slow_work_unreg_sync_lock);
+	add_wait_queue(&slow_work_unreg_wq, &myself);
+
+	for (;;) {
+		spin_lock_irq(&slow_work_queue_lock);
+
+		/* first of all, we wait for the last queued item in each list
+		 * to be processed */
+		list_for_each_entry_reverse(work, &vslow_work_queue, link) {
+			if (work->owner == module) {
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				slow_work_unreg_work_item = work;
+				goto do_wait;
+			}
+		}
+		list_for_each_entry_reverse(work, &slow_work_queue, link) {
+			if (work->owner == module) {
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				slow_work_unreg_work_item = work;
+				goto do_wait;
+			}
+		}
+
+		/* then we wait for the items being processed to finish */
+		slow_work_unreg_module = module;
+		smp_mb();
+		for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
+			if (slow_work_thread_processing[loop] == module)
+				goto do_wait;
+		}
+		spin_unlock_irq(&slow_work_queue_lock);
+		break; /* okay, we're done */
+
+	do_wait:
+		spin_unlock_irq(&slow_work_queue_lock);
+		schedule();
+		slow_work_unreg_work_item = NULL;
+		slow_work_unreg_module = NULL;
+	}
+
+	remove_wait_queue(&slow_work_unreg_wq, &myself);
+	mutex_unlock(&slow_work_unreg_sync_lock);
+}
+
 /**
  * slow_work_unregister_user - Unregister a user of the facility
+ * @module: The module whose items should be cleared
  *
  * Unregister a user of the facility, killing all the threads if this was the
  * last one.
+ *
+ * This waits for all the work items belonging to the nominated module to go
+ * away before proceeding.
  */
-void slow_work_unregister_user(void)
+void slow_work_unregister_user(struct module *module)
 {
+	/* first of all, wait for all outstanding items from the calling module
+	 * to complete */
+	if (module)
+		slow_work_wait_for_items(module);
+
+	/* then we can actually go about shutting down the facility if need
+	 * be */
 	mutex_lock(&slow_work_user_lock);
 
 	BUG_ON(slow_work_user_count <= 0);