 Documentation/slow-work.txt |  13
 fs/fscache/main.c           |   6
 fs/fscache/object.c         |   1
 fs/fscache/operation.c      |   1
 fs/gfs2/main.c              |   4
 fs/gfs2/recovery.c          |   1
 include/linux/slow-work.h   |   8
 kernel/slow-work.c          | 132
 8 files changed, 150 insertions(+), 16 deletions(-)
diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt
index ebc50f808ea4..f12fda31dcdc 100644
--- a/Documentation/slow-work.txt
+++ b/Documentation/slow-work.txt
@@ -64,9 +64,11 @@ USING SLOW WORK ITEMS
 Firstly, a module or subsystem wanting to make use of slow work items must
 register its interest:
 
-	 int ret = slow_work_register_user();
+	 int ret = slow_work_register_user(struct module *module);
 
-This will return 0 if successful, or a -ve error upon failure.
+This will return 0 if successful, or a -ve error upon failure. The module
+pointer should be the module interested in using this facility (almost
+certainly THIS_MODULE).
 
 
 Slow work items may then be set up by:
@@ -110,7 +112,12 @@ operation. When all a module's slow work items have been processed, and the
 module has no further interest in the facility, it should unregister its
 interest:
 
-	 slow_work_unregister_user();
+	 slow_work_unregister_user(struct module *module);
+
+The module pointer is used to wait for all outstanding work items for that
+module before completing the unregistration. This prevents the put_ref() code
+from being taken away before it completes. module should almost certainly be
+THIS_MODULE.
 
 
 ===============
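
Taken together, the two documented calls above give a caller-side pattern along
the following lines. This is a minimal sketch for illustration only, not part of
the patch; the example module, work item and callbacks (example_*) are
hypothetical, and error handling is reduced to the bare minimum:

	#include <linux/module.h>
	#include <linux/slow-work.h>

	static int example_get_ref(struct slow_work *work)
	{
		return 0;	/* e.g. pin the object containing the work item */
	}

	static void example_put_ref(struct slow_work *work)
	{
		/* release whatever example_get_ref() pinned */
	}

	static void example_execute(struct slow_work *work)
	{
		/* the slow, sleepable operation goes here */
	}

	static const struct slow_work_ops example_slow_work_ops = {
		.owner		= THIS_MODULE,
		.get_ref	= example_get_ref,
		.put_ref	= example_put_ref,
		.execute	= example_execute,
	};

	static struct slow_work example_work;

	static int __init example_init(void)
	{
		int ret;

		ret = slow_work_register_user(THIS_MODULE);
		if (ret < 0)
			return ret;

		slow_work_init(&example_work, &example_slow_work_ops);
		return slow_work_enqueue(&example_work);
	}

	static void __exit example_exit(void)
	{
		/* with this patch, the call below waits for this module's
		 * outstanding items, so put_ref() cannot be invoked after
		 * the module text has been unloaded */
		slow_work_unregister_user(THIS_MODULE);
	}

	module_init(example_init);
	module_exit(example_exit);

The waiting in slow_work_unregister_user() is what makes it safe for put_ref()
and the other callbacks to live in the calling module's text.
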
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 4de41b597499..add6bdb53f04 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -48,7 +48,7 @@ static int __init fscache_init(void)
 {
 	int ret;
 
-	ret = slow_work_register_user();
+	ret = slow_work_register_user(THIS_MODULE);
 	if (ret < 0)
 		goto error_slow_work;
 
@@ -80,7 +80,7 @@ error_kobj:
 error_cookie_jar:
 	fscache_proc_cleanup();
 error_proc:
-	slow_work_unregister_user();
+	slow_work_unregister_user(THIS_MODULE);
 error_slow_work:
 	return ret;
 }
@@ -97,7 +97,7 @@ static void __exit fscache_exit(void)
 	kobject_put(fscache_root);
 	kmem_cache_destroy(fscache_cookie_jar);
 	fscache_proc_cleanup();
-	slow_work_unregister_user();
+	slow_work_unregister_user(THIS_MODULE);
 	printk(KERN_NOTICE "FS-Cache: Unloaded\n");
 }
 
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 392a41b1b79d..d236eb1d6f37 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -45,6 +45,7 @@ static void fscache_enqueue_dependents(struct fscache_object *);
 static void fscache_dequeue_object(struct fscache_object *);
 
 const struct slow_work_ops fscache_object_slow_work_ops = {
+	.owner		= THIS_MODULE,
 	.get_ref	= fscache_object_slow_work_get_ref,
 	.put_ref	= fscache_object_slow_work_put_ref,
 	.execute	= fscache_object_slow_work_execute,
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index e7f8d53b8b6b..f1a2857b2ff5 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -453,6 +453,7 @@ static void fscache_op_execute(struct slow_work *work)
 }
 
 const struct slow_work_ops fscache_op_slow_work_ops = {
+	.owner		= THIS_MODULE,
 	.get_ref	= fscache_op_get_ref,
 	.put_ref	= fscache_op_put_ref,
 	.execute	= fscache_op_execute,
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index eacd78a5d082..5b31f7741a8f 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -114,7 +114,7 @@ static int __init init_gfs2_fs(void)
 	if (error)
 		goto fail_unregister;
 
-	error = slow_work_register_user();
+	error = slow_work_register_user(THIS_MODULE);
 	if (error)
 		goto fail_slow;
 
@@ -163,7 +163,7 @@ static void __exit exit_gfs2_fs(void)
 	gfs2_unregister_debugfs();
 	unregister_filesystem(&gfs2_fs_type);
 	unregister_filesystem(&gfs2meta_fs_type);
-	slow_work_unregister_user();
+	slow_work_unregister_user(THIS_MODULE);
 
 	kmem_cache_destroy(gfs2_quotad_cachep);
 	kmem_cache_destroy(gfs2_rgrpd_cachep);
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 59d2695509d3..b2bb779f09ed 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -593,6 +593,7 @@ fail:
 }
 
 struct slow_work_ops gfs2_recover_ops = {
+	.owner	 = THIS_MODULE,
 	.get_ref = gfs2_recover_get_ref,
 	.put_ref = gfs2_recover_put_ref,
 	.execute = gfs2_recover_work,
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
index b65c8881f07a..9adb2b30754f 100644
--- a/include/linux/slow-work.h
+++ b/include/linux/slow-work.h
@@ -24,6 +24,9 @@ struct slow_work;
  * The operations used to support slow work items
  */
 struct slow_work_ops {
+	/* owner */
+	struct module *owner;
+
 	/* get a ref on a work item
 	 * - return 0 if successful, -ve if not
 	 */
@@ -42,6 +45,7 @@ struct slow_work_ops {
  *   queued
  */
 struct slow_work {
+	struct module		*owner;	/* the owning module */
 	unsigned long		flags;
 #define SLOW_WORK_PENDING	0	/* item pending (further) execution */
 #define SLOW_WORK_EXECUTING	1	/* item currently executing */
@@ -84,8 +88,8 @@ static inline void vslow_work_init(struct slow_work *work,
 }
 
 extern int slow_work_enqueue(struct slow_work *work);
-extern int slow_work_register_user(void);
-extern void slow_work_unregister_user(void);
+extern int slow_work_register_user(struct module *owner);
+extern void slow_work_unregister_user(struct module *owner);
 
 #ifdef CONFIG_SYSCTL
 extern ctl_table slow_work_sysctls[];
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 0d31135efbf4..dd08f376e406 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -22,6 +22,8 @@
 #define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
 					 * OOM */
 
+#define SLOW_WORK_THREAD_LIMIT	255	/* abs maximum number of slow-work threads */
+
 static void slow_work_cull_timeout(unsigned long);
 static void slow_work_oom_timeout(unsigned long);
 
@@ -46,7 +48,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
 
 #ifdef CONFIG_SYSCTL
 static const int slow_work_min_min_threads = 2;
-static int slow_work_max_max_threads = 255;
+static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
 static const int slow_work_min_vslow = 1;
 static const int slow_work_max_vslow = 99;
 
@@ -98,6 +100,23 @@ static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
 static struct slow_work slow_work_new_thread; /* new thread starter */
 
 /*
+ * slow work ID allocation (use slow_work_queue_lock)
+ */
+static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
+
+/*
+ * Unregistration tracking to prevent put_ref() from disappearing during module
+ * unload
+ */
+#ifdef CONFIG_MODULES
+static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
+static struct module *slow_work_unreg_module;
+static struct slow_work *slow_work_unreg_work_item;
+static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
+static DEFINE_MUTEX(slow_work_unreg_sync_lock);
+#endif
+
+/*
  * The queues of work items and the lock governing access to them. These are
  * shared between all the CPUs. It doesn't make sense to have per-CPU queues
  * as the number of threads bears no relation to the number of CPUs.
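
The globals added in this hunk are two halves of one handshake, which the hunks
below implement: each kslowd thread claims an ID from slow_work_ids and publishes
the owner of the item it is currently running in slow_work_thread_processing[id];
unregistration scans that array (and the queues) and sleeps on slow_work_unreg_wq
until nothing belonging to its module remains. A condensed sketch of the two
sides, paraphrasing the code in the later hunks rather than adding to it:

	/* executing thread, in slow_work_execute(id) */
	slow_work_thread_processing[id] = work->owner;	/* under slow_work_queue_lock */
	/* ... run the item ... */
	work->ops->put_ref(work);
	module = slow_work_thread_processing[id];
	slow_work_thread_processing[id] = NULL;
	smp_mb();
	if (slow_work_unreg_work_item == work || slow_work_unreg_module == module)
		wake_up_all(&slow_work_unreg_wq);

	/* unregistering thread, in slow_work_wait_for_items(module) */
	slow_work_unreg_module = module;
	smp_mb();
	for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++)
		if (slow_work_thread_processing[loop] == module)
			goto do_wait;	/* sleep on slow_work_unreg_wq, then rescan */
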
@@ -149,8 +168,11 @@ static unsigned slow_work_calc_vsmax(void)
  * Attempt to execute stuff queued on a slow thread. Return true if we managed
  * it, false if there was nothing to do.
  */
-static bool slow_work_execute(void)
+static bool slow_work_execute(int id)
 {
+#ifdef CONFIG_MODULES
+	struct module *module;
+#endif
 	struct slow_work *work = NULL;
 	unsigned vsmax;
 	bool very_slow;
@@ -186,6 +208,12 @@ static bool slow_work_execute(void)
 	} else {
 		very_slow = false; /* avoid the compiler warning */
 	}
+
+#ifdef CONFIG_MODULES
+	if (work)
+		slow_work_thread_processing[id] = work->owner;
+#endif
+
 	spin_unlock_irq(&slow_work_queue_lock);
 
 	if (!work)
@@ -219,7 +247,18 @@ static bool slow_work_execute(void)
 		spin_unlock_irq(&slow_work_queue_lock);
 	}
 
+	/* sort out the race between module unloading and put_ref() */
 	work->ops->put_ref(work);
+
+#ifdef CONFIG_MODULES
+	module = slow_work_thread_processing[id];
+	slow_work_thread_processing[id] = NULL;
+	smp_mb();
+	if (slow_work_unreg_work_item == work ||
+	    slow_work_unreg_module == module)
+		wake_up_all(&slow_work_unreg_wq);
+#endif
+
 	return true;
 
 auto_requeue:
@@ -232,6 +271,7 @@ auto_requeue:
 	else
 		list_add_tail(&work->link, &slow_work_queue);
 	spin_unlock_irq(&slow_work_queue_lock);
+	slow_work_thread_processing[id] = NULL;
 	return true;
 }
 
@@ -368,13 +408,22 @@ static inline bool slow_work_available(int vsmax)
  */
 static int slow_work_thread(void *_data)
 {
-	int vsmax;
+	int vsmax, id;
 
 	DEFINE_WAIT(wait);
 
 	set_freezable();
 	set_user_nice(current, -5);
 
+	/* allocate ourselves an ID */
+	spin_lock_irq(&slow_work_queue_lock);
+	id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
+	BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
+	__set_bit(id, slow_work_ids);
+	spin_unlock_irq(&slow_work_queue_lock);
+
+	sprintf(current->comm, "kslowd%03u", id);
+
 	for (;;) {
 		vsmax = vslow_work_proportion;
 		vsmax *= atomic_read(&slow_work_thread_count);
@@ -395,7 +444,7 @@ static int slow_work_thread(void *_data)
 		vsmax *= atomic_read(&slow_work_thread_count);
 		vsmax /= 100;
 
-		if (slow_work_available(vsmax) && slow_work_execute()) {
+		if (slow_work_available(vsmax) && slow_work_execute(id)) {
 			cond_resched();
 			if (list_empty(&slow_work_queue) &&
 			    list_empty(&vslow_work_queue) &&
@@ -412,6 +461,10 @@ static int slow_work_thread(void *_data)
 			break;
 	}
 
+	spin_lock_irq(&slow_work_queue_lock);
+	__clear_bit(id, slow_work_ids);
+	spin_unlock_irq(&slow_work_queue_lock);
+
 	if (atomic_dec_and_test(&slow_work_thread_count))
 		complete_and_exit(&slow_work_last_thread_exited, 0);
 	return 0;
@@ -475,6 +528,7 @@ static void slow_work_new_thread_execute(struct slow_work *work)
 }
 
 static const struct slow_work_ops slow_work_new_thread_ops = {
+	.owner		= THIS_MODULE,
 	.get_ref	= slow_work_new_thread_get_ref,
 	.put_ref	= slow_work_new_thread_put_ref,
 	.execute	= slow_work_new_thread_execute,
@@ -546,12 +600,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
 
 /**
  * slow_work_register_user - Register a user of the facility
+ * @module: The module about to make use of the facility
  *
  * Register a user of the facility, starting up the initial threads if there
  * aren't any other users at this point. This will return 0 if successful, or
  * an error if not.
  */
-int slow_work_register_user(void)
+int slow_work_register_user(struct module *module)
 {
 	struct task_struct *p;
 	int loop;
@@ -598,14 +653,79 @@ error:
 }
 EXPORT_SYMBOL(slow_work_register_user);
 
+/*
+ * wait for all outstanding items from the calling module to complete
+ * - note that more items may be queued whilst we're waiting
+ */
+static void slow_work_wait_for_items(struct module *module)
+{
+	DECLARE_WAITQUEUE(myself, current);
+	struct slow_work *work;
+	int loop;
+
+	mutex_lock(&slow_work_unreg_sync_lock);
+	add_wait_queue(&slow_work_unreg_wq, &myself);
+
+	for (;;) {
+		spin_lock_irq(&slow_work_queue_lock);
+
+		/* first of all, we wait for the last queued item in each list
+		 * to be processed */
+		list_for_each_entry_reverse(work, &vslow_work_queue, link) {
+			if (work->owner == module) {
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				slow_work_unreg_work_item = work;
+				goto do_wait;
+			}
+		}
+		list_for_each_entry_reverse(work, &slow_work_queue, link) {
+			if (work->owner == module) {
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				slow_work_unreg_work_item = work;
+				goto do_wait;
+			}
+		}
+
+		/* then we wait for the items being processed to finish */
+		slow_work_unreg_module = module;
+		smp_mb();
+		for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
+			if (slow_work_thread_processing[loop] == module)
+				goto do_wait;
+		}
+		spin_unlock_irq(&slow_work_queue_lock);
+		break; /* okay, we're done */
+
+	do_wait:
+		spin_unlock_irq(&slow_work_queue_lock);
+		schedule();
+		slow_work_unreg_work_item = NULL;
+		slow_work_unreg_module = NULL;
+	}
+
+	remove_wait_queue(&slow_work_unreg_wq, &myself);
+	mutex_unlock(&slow_work_unreg_sync_lock);
+}
+
 /**
  * slow_work_unregister_user - Unregister a user of the facility
+ * @module: The module whose items should be cleared
  *
  * Unregister a user of the facility, killing all the threads if this was the
  * last one.
+ *
+ * This waits for all the work items belonging to the nominated module to go
+ * away before proceeding.
  */
-void slow_work_unregister_user(void)
+void slow_work_unregister_user(struct module *module)
 {
+	/* first of all, wait for all outstanding items from the calling module
+	 * to complete */
+	if (module)
+		slow_work_wait_for_items(module);
+
+	/* then we can actually go about shutting down the facility if need
+	 * be */
 	mutex_lock(&slow_work_user_lock);
 
 	BUG_ON(slow_work_user_count <= 0);
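
One way to read the smp_mb() pairing between slow_work_execute() and
slow_work_wait_for_items() is as the usual store-then-load (Dekker-style)
pattern; this is an interpretation of the patch, not text from it:

	/* executing thread				unregistering thread
	 *
	 *  slow_work_thread_processing[id] = NULL;	 slow_work_unreg_module = module;
	 *  smp_mb();					 smp_mb();
	 *  if (slow_work_unreg_module == module)	 if (slow_work_thread_processing[i] == module)
	 *	wake_up_all(&slow_work_unreg_wq);	     sleep on slow_work_unreg_wq and rescan;
	 */

With full barriers between the store and the load on both sides, it cannot
happen that both loads miss the other side's store: either the unregistering
thread's scan still sees the module's item in flight and it sleeps (to be woken
later), or the executing thread sees the pending unregistration and issues the
wake-up, so the waiter is not left sleeping indefinitely.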