Diffstat (limited to 'kernel/slow-work.c')
-rw-r--r--   kernel/slow-work.c | 64
1 file changed, 41 insertions(+), 23 deletions(-)
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index da94f3c101af..00889bd3c590 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,7 +16,7 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/wait.h>
-#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
 #include "slow-work.h"
 
 static void slow_work_cull_timeout(unsigned long);
@@ -109,12 +109,36 @@ static struct module *slow_work_unreg_module;
 static struct slow_work *slow_work_unreg_work_item;
 static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
 static DEFINE_MUTEX(slow_work_unreg_sync_lock);
+
+static void slow_work_set_thread_processing(int id, struct slow_work *work)
+{
+	if (work)
+		slow_work_thread_processing[id] = work->owner;
+}
+static void slow_work_done_thread_processing(int id, struct slow_work *work)
+{
+	struct module *module = slow_work_thread_processing[id];
+
+	slow_work_thread_processing[id] = NULL;
+	smp_mb();
+	if (slow_work_unreg_work_item == work ||
+	    slow_work_unreg_module == module)
+		wake_up_all(&slow_work_unreg_wq);
+}
+static void slow_work_clear_thread_processing(int id)
+{
+	slow_work_thread_processing[id] = NULL;
+}
+#else
+static void slow_work_set_thread_processing(int id, struct slow_work *work) {}
+static void slow_work_done_thread_processing(int id, struct slow_work *work) {}
+static void slow_work_clear_thread_processing(int id) {}
 #endif
 
 /*
  * Data for tracking currently executing items for indication through /proc
  */
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
 pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
 DEFINE_RWLOCK(slow_work_execs_lock);
@@ -197,9 +221,6 @@ static unsigned slow_work_calc_vsmax(void)
  */
 static noinline bool slow_work_execute(int id)
 {
-#ifdef CONFIG_MODULES
-	struct module *module;
-#endif
 	struct slow_work *work = NULL;
 	unsigned vsmax;
 	bool very_slow;
@@ -236,10 +257,7 @@ static noinline bool slow_work_execute(int id)
 		very_slow = false; /* avoid the compiler warning */
 	}
 
-#ifdef CONFIG_MODULES
-	if (work)
-		slow_work_thread_processing[id] = work->owner;
-#endif
+	slow_work_set_thread_processing(id, work);
 	if (work) {
 		slow_work_mark_time(work);
 		slow_work_begin_exec(id, work);
@@ -287,15 +305,7 @@ static noinline bool slow_work_execute(int id)
 
 	/* sort out the race between module unloading and put_ref() */
 	slow_work_put_ref(work);
-
-#ifdef CONFIG_MODULES
-	module = slow_work_thread_processing[id];
-	slow_work_thread_processing[id] = NULL;
-	smp_mb();
-	if (slow_work_unreg_work_item == work ||
-	    slow_work_unreg_module == module)
-		wake_up_all(&slow_work_unreg_wq);
-#endif
+	slow_work_done_thread_processing(id, work);
 
 	return true;
 
@@ -310,7 +320,7 @@ auto_requeue:
 	else
 		list_add_tail(&work->link, &slow_work_queue);
 	spin_unlock_irq(&slow_work_queue_lock);
-	slow_work_thread_processing[id] = NULL;
+	slow_work_clear_thread_processing(id);
 	return true;
 }
 
@@ -813,7 +823,7 @@ static void slow_work_new_thread_execute(struct slow_work *work)
 static const struct slow_work_ops slow_work_new_thread_ops = {
 	.owner = THIS_MODULE,
 	.execute = slow_work_new_thread_execute,
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	.desc = slow_work_new_thread_desc,
 #endif
 };
@@ -943,6 +953,7 @@ EXPORT_SYMBOL(slow_work_register_user);
  */
 static void slow_work_wait_for_items(struct module *module)
 {
+#ifdef CONFIG_MODULES
 	DECLARE_WAITQUEUE(myself, current);
 	struct slow_work *work;
 	int loop;
@@ -989,6 +1000,7 @@ static void slow_work_wait_for_items(struct module *module)
 
 	remove_wait_queue(&slow_work_unreg_wq, &myself);
 	mutex_unlock(&slow_work_unreg_sync_lock);
+#endif /* CONFIG_MODULES */
 }
 
 /**
@@ -1043,9 +1055,15 @@ static int __init init_slow_work(void)
 	if (slow_work_max_max_threads < nr_cpus * 2)
 		slow_work_max_max_threads = nr_cpus * 2;
 #endif
-#ifdef CONFIG_SLOW_WORK_PROC
-	proc_create("slow_work_rq", S_IFREG | 0400, NULL,
-		    &slow_work_runqueue_fops);
+#ifdef CONFIG_SLOW_WORK_DEBUG
+	{
+		struct dentry *dbdir;
+
+		dbdir = debugfs_create_dir("slow_work", NULL);
+		if (dbdir && !IS_ERR(dbdir))
+			debugfs_create_file("runqueue", S_IFREG | 0400, dbdir,
+					    NULL, &slow_work_runqueue_fops);
+	}
 #endif
 	return 0;
 }
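
Note on the final hunk: the patch drops the proc entry ("slow_work_rq") and instead creates a "slow_work" directory in debugfs containing a read-only "runqueue" file, so the dump appears under the debugfs mount point (conventionally /sys/kernel/debug/slow_work/runqueue). As a rough, self-contained sketch of that debugfs pattern (not the slow-work code itself; the example_* names and the "example"/"state" paths are invented for illustration), a module might register a seq_file-backed read-only file like this:

/* Minimal sketch of the debugfs pattern used above; all example_* names and
 * the "example"/"state" paths are illustrative, not part of the patch. */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *example_dir;

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "nothing queued\n");	/* stand-in for the runqueue dump */
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	/* debugfs_create_dir() may fail or return an error pointer, hence the
	 * same style of check as the patch's "dbdir && !IS_ERR(dbdir)" */
	example_dir = debugfs_create_dir("example", NULL);
	if (!example_dir || IS_ERR(example_dir)) {
		example_dir = NULL;	/* debugfs is optional; carry on without it */
		return 0;
	}

	debugfs_create_file("state", S_IFREG | 0400, example_dir, NULL,
			    &example_fops);
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);	/* also removes "state" */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");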
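
Note on the earlier hunks: the CONFIG_MODULES-only bookkeeping is pulled out of slow_work_execute() into slow_work_set/done/clear_thread_processing(), with empty stubs compiled when CONFIG_MODULES is off, so the call sites carry no #ifdefs. A minimal sketch of that stub pattern (the example_* identifiers are invented, not the slow-work ones):

/* Sketch of the "real helper vs. empty stub" pattern; example_* identifiers
 * are illustrative only. */
#include <linux/module.h>

#define EXAMPLE_THREAD_LIMIT 8

#ifdef CONFIG_MODULES
static struct module *example_owner[EXAMPLE_THREAD_LIMIT];

static void example_set_owner(int id, struct module *owner)
{
	example_owner[id] = owner;	/* record whose code thread 'id' runs */
}

static void example_clear_owner(int id)
{
	example_owner[id] = NULL;
}
#else
/* with CONFIG_MODULES=n the calls compile away to nothing */
static void example_set_owner(int id, struct module *owner) {}
static void example_clear_owner(int id) {}
#endif

static void example_run(int id, struct module *owner)
{
	example_set_owner(id, owner);	/* no #ifdef needed at the call site */
	/* ... execute the work item here ... */
	example_clear_owner(id);
}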