-rw-r--r--	include/linux/workqueue.h	40
-rw-r--r--	kernel/workqueue.c	42
2 files changed, 54 insertions(+), 28 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 33e24e734d50..48b7422f25ae 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -232,12 +232,31 @@ enum {
 	WQ_DFL_ACTIVE	= WQ_MAX_ACTIVE / 2,
 };
 
+/*
+ * System-wide workqueues which are always present.
+ *
+ * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * Multi-CPU multi-threaded.  There are users which expect relatively
+ * short queue flush time.  Don't queue works which can run for too
+ * long.
+ *
+ * system_long_wq is similar to system_wq but may host long running
+ * works.  Queue flushing might take relatively long.
+ *
+ * system_nrt_wq is non-reentrant and guarantees that any given work
+ * item is never executed in parallel by multiple CPUs.  Queue
+ * flushing might take relatively long.
+ */
+extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_long_wq;
+extern struct workqueue_struct *system_nrt_wq;
+
 extern struct workqueue_struct *
-__create_workqueue_key(const char *name, unsigned int flags, int max_active,
+__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
 		       struct lock_class_key *key, const char *lock_name);
 
 #ifdef CONFIG_LOCKDEP
-#define __create_workqueue(name, flags, max_active)		\
+#define alloc_workqueue(name, flags, max_active)		\
 ({								\
 	static struct lock_class_key __key;			\
 	const char *__lock_name;				\
@@ -247,21 +266,20 @@ __create_workqueue_key(const char *name, unsigned int flags, int max_active,
 	else							\
 		__lock_name = #name;				\
 								\
-	__create_workqueue_key((name), (flags), (max_active),	\
+	__alloc_workqueue_key((name), (flags), (max_active),	\
 			       &__key, __lock_name);		\
 })
 #else
-#define __create_workqueue(name, flags, max_active)		\
-	__create_workqueue_key((name), (flags), (max_active), NULL, NULL)
+#define alloc_workqueue(name, flags, max_active)		\
+	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
 #endif
 
 #define create_workqueue(name)					\
-	__create_workqueue((name), WQ_RESCUER, 1)
+	alloc_workqueue((name), WQ_RESCUER, 1)
 #define create_freezeable_workqueue(name)			\
-	__create_workqueue((name),				\
-			   WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1)
+	alloc_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1)
 #define create_singlethread_workqueue(name)			\
-	__create_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1)
+	alloc_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
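With these declarations exported, code outside workqueue.c can queue directly on the shared pools instead of allocating a private workqueue. A minimal sketch of that pattern, assuming hypothetical work functions my_quick_fn() and my_slow_fn() that are not part of this patch:

#include <linux/module.h>
#include <linux/workqueue.h>

static void my_quick_fn(struct work_struct *work)
{
	/* short-running item: system_wq expects quick turnaround */
}

static void my_slow_fn(struct work_struct *work)
{
	/* may run for a long time: better suited to system_long_wq */
}

static DECLARE_WORK(my_quick_work, my_quick_fn);
static DECLARE_WORK(my_slow_work, my_slow_fn);

static int __init my_module_init(void)
{
	queue_work(system_wq, &my_quick_work);		/* same pool schedule_work() uses */
	queue_work(system_long_wq, &my_slow_work);	/* won't hold up system_wq flushers */
	return 0;
}
module_init(my_module_init);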
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4190e84cf995..16ce617974d2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -210,6 +210,13 @@ struct workqueue_struct {
 #endif
 };
 
+struct workqueue_struct *system_wq __read_mostly;
+struct workqueue_struct *system_long_wq __read_mostly;
+struct workqueue_struct *system_nrt_wq __read_mostly;
+EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL_GPL(system_long_wq);
+EXPORT_SYMBOL_GPL(system_nrt_wq);
+
 #define for_each_busy_worker(worker, i, pos, gcwq)		\
 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)		\
 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
@@ -2306,8 +2313,6 @@ int cancel_delayed_work_sync(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);
 
-static struct workqueue_struct *keventd_wq __read_mostly;
-
 /**
  * schedule_work - put work task in global workqueue
  * @work: job to be done
@@ -2321,7 +2326,7 @@ static struct workqueue_struct *keventd_wq __read_mostly;
  */
 int schedule_work(struct work_struct *work)
 {
-	return queue_work(keventd_wq, work);
+	return queue_work(system_wq, work);
 }
 EXPORT_SYMBOL(schedule_work);
 
@@ -2334,7 +2339,7 @@ EXPORT_SYMBOL(schedule_work);
  */
 int schedule_work_on(int cpu, struct work_struct *work)
 {
-	return queue_work_on(cpu, keventd_wq, work);
+	return queue_work_on(cpu, system_wq, work);
 }
 EXPORT_SYMBOL(schedule_work_on);
 
@@ -2349,7 +2354,7 @@ EXPORT_SYMBOL(schedule_work_on);
 int schedule_delayed_work(struct delayed_work *dwork,
 					unsigned long delay)
 {
-	return queue_delayed_work(keventd_wq, dwork, delay);
+	return queue_delayed_work(system_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
@@ -2382,7 +2387,7 @@ EXPORT_SYMBOL(flush_delayed_work);
 int schedule_delayed_work_on(int cpu,
 			struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
+	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
@@ -2447,7 +2452,7 @@ int schedule_on_each_cpu(work_func_t func)
  */
 void flush_scheduled_work(void)
 {
-	flush_workqueue(keventd_wq);
+	flush_workqueue(system_wq);
 }
 EXPORT_SYMBOL(flush_scheduled_work);
 
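With keventd_wq removed, the schedule_work() family above is now a thin convenience layer over system_wq, so queueing on system_wq by hand is interchangeable with the old wrappers. A small sketch under that assumption, using an illustrative work item my_work:

#include <linux/workqueue.h>

static void my_fn(struct work_struct *work)
{
	/* executes on the "events" (system_wq) pool either way */
}
static DECLARE_WORK(my_work, my_fn);

static void submit_my_work(void)
{
	/* the two calls below are equivalent after this change; use one */
	schedule_work(&my_work);
	/* queue_work(system_wq, &my_work); */
}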
@@ -2479,7 +2484,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
 
 int keventd_up(void)
 {
-	return keventd_wq != NULL;
+	return system_wq != NULL;
 }
 
 static struct cpu_workqueue_struct *alloc_cwqs(void)
@@ -2539,15 +2544,16 @@ static int wq_clamp_max_active(int max_active, const char *name)
 	return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
 }
 
-struct workqueue_struct *__create_workqueue_key(const char *name,
+struct workqueue_struct *__alloc_workqueue_key(const char *name,
 						unsigned int flags,
 						int max_active,
 						struct lock_class_key *key,
 						const char *lock_name)
 {
 	struct workqueue_struct *wq;
 	unsigned int cpu;
 
+	max_active = max_active ?: WQ_DFL_ACTIVE;
 	max_active = wq_clamp_max_active(max_active, name);
 
 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
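The new "max_active = max_active ?: WQ_DFL_ACTIVE;" line lets callers pass 0 to request the default concurrency limit before clamping. A sketch of a caller relying on that default, using a hypothetical queue name:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int my_setup(void)
{
	/* flags == 0, max_active == 0: the 0 falls back to WQ_DFL_ACTIVE */
	my_wq = alloc_workqueue("my_wq", 0, 0);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}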
@@ -2626,7 +2632,7 @@ err:
 	}
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(__create_workqueue_key);
+EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
 
 /**
  * destroy_workqueue - safely terminate a workqueue
@@ -2910,7 +2916,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 			continue;
 
 		debug_work_activate(rebind_work);
-		insert_work(get_cwq(gcwq->cpu, keventd_wq), rebind_work,
+		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
 			    worker->scheduled.next,
 			    work_color_to_flags(WORK_NO_COLOR));
 	}
@@ -3287,6 +3293,8 @@ void __init init_workqueues(void)
 		spin_unlock_irq(&gcwq->lock);
 	}
 
-	keventd_wq = __create_workqueue("events", 0, WQ_DFL_ACTIVE);
-	BUG_ON(!keventd_wq);
+	system_wq = alloc_workqueue("events", 0, 0);
+	system_long_wq = alloc_workqueue("events_long", 0, 0);
+	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
+	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq);
 }
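init_workqueues() brings the three system workqueues up with max_active of 0 (the new default), and system_nrt_wq additionally carries WQ_NON_REENTRANT. A sketch of how a user might lean on that non-reentrancy guarantee, with an illustrative work item:

#include <linux/module.h>
#include <linux/workqueue.h>

static void my_nrt_fn(struct work_struct *work)
{
	/*
	 * Queued on system_nrt_wq, this function is never run by two
	 * CPUs at the same time, so it needs no locking against itself.
	 */
}
static DECLARE_WORK(my_nrt_work, my_nrt_fn);

static int __init my_nrt_init(void)
{
	queue_work(system_nrt_wq, &my_nrt_work);
	return 0;
}
module_init(my_nrt_init);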