author    Tejun Heo <tj@kernel.org>    2010-06-29 04:07:14 -0400
committer Tejun Heo <tj@kernel.org>    2010-06-29 04:07:14 -0400
commit    d320c03830b17af64e4547075003b1eeb274bc6c (patch)
tree      9917a69dc2efa6f37c54097c4651408faf4b343b /kernel/workqueue.c
parent    b71ab8c2025caef8db719aa41af0ed735dc543cd (diff)
workqueue: s/__create_workqueue()/alloc_workqueue()/, and add system workqueues
This patch makes changes to make new workqueue features available to
its users.

* Now that workqueue is more featureful, there should be a public
  workqueue creation function which takes parameters to control those
  features.  Rename __create_workqueue() to alloc_workqueue() and make
  a max_active of 0 mean WQ_DFL_ACTIVE.  In the long run, all
  create_workqueue_*() will be converted over to alloc_workqueue().

* To further unify the access interface, rename keventd_wq to
  system_wq and export it.

* Add system_long_wq and system_nrt_wq.  The former hosts long running
  works separately (so that flush_scheduled_work() doesn't take so
  long) and the latter guarantees that any queued work item is never
  executed in parallel by multiple CPUs.  These will be used by future
  patches to update workqueue users.

Signed-off-by: Tejun Heo <tj@kernel.org>
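For illustration (not part of the patch), a minimal sketch of what the renamed interface looks like to a caller after this change; the "example" workqueue name, example_wq variable and init function are hypothetical:

	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int __init example_init(void)
	{
		/* passing 0 for max_active now selects WQ_DFL_ACTIVE */
		example_wq = alloc_workqueue("example", 0, 0);
		if (!example_wq)
			return -ENOMEM;
		return 0;
	}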
Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 42 +++++++++++++++++++++++++-----------------
 1 file changed, 25 insertions(+), 17 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4190e84cf995..16ce617974d2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -210,6 +210,13 @@ struct workqueue_struct {
 #endif
 };
 
+struct workqueue_struct *system_wq __read_mostly;
+struct workqueue_struct *system_long_wq __read_mostly;
+struct workqueue_struct *system_nrt_wq __read_mostly;
+EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL_GPL(system_long_wq);
+EXPORT_SYMBOL_GPL(system_nrt_wq);
+
 #define for_each_busy_worker(worker, i, pos, gcwq)			\
 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
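Since the three system workqueues are exported with EXPORT_SYMBOL_GPL, GPL modules can queue onto them directly. A hedged sketch, where example_fn, example_work and the init function are hypothetical:

	#include <linux/init.h>
	#include <linux/workqueue.h>

	static void example_fn(struct work_struct *work)
	{
		/* runs in process context off the global workqueue */
	}
	static DECLARE_WORK(example_work, example_fn);

	static int __init example_init(void)
	{
		/* replaces the old schedule_work()/keventd_wq pairing */
		queue_work(system_wq, &example_work);
		return 0;
	}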
@@ -2306,8 +2313,6 @@ int cancel_delayed_work_sync(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);
 
-static struct workqueue_struct *keventd_wq __read_mostly;
-
 /**
  * schedule_work - put work task in global workqueue
  * @work: job to be done
@@ -2321,7 +2326,7 @@ static struct workqueue_struct *keventd_wq __read_mostly;
  */
 int schedule_work(struct work_struct *work)
 {
-	return queue_work(keventd_wq, work);
+	return queue_work(system_wq, work);
 }
 EXPORT_SYMBOL(schedule_work);
 
@@ -2334,7 +2339,7 @@ EXPORT_SYMBOL(schedule_work);
  */
 int schedule_work_on(int cpu, struct work_struct *work)
 {
-	return queue_work_on(cpu, keventd_wq, work);
+	return queue_work_on(cpu, system_wq, work);
 }
 EXPORT_SYMBOL(schedule_work_on);
 
@@ -2349,7 +2354,7 @@ EXPORT_SYMBOL(schedule_work_on);
 int schedule_delayed_work(struct delayed_work *dwork,
 			  unsigned long delay)
 {
-	return queue_delayed_work(keventd_wq, dwork, delay);
+	return queue_delayed_work(system_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
@@ -2382,7 +2387,7 @@ EXPORT_SYMBOL(flush_delayed_work);
 int schedule_delayed_work_on(int cpu,
 			     struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
+	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
@@ -2447,7 +2452,7 @@ int schedule_on_each_cpu(work_func_t func)
  */
 void flush_scheduled_work(void)
 {
-	flush_workqueue(keventd_wq);
+	flush_workqueue(system_wq);
 }
 EXPORT_SYMBOL(flush_scheduled_work);
 
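Per the commit message, deliberately long-running works should move to system_long_wq so that flush_scheduled_work(), which flushes only system_wq, stays quick. A hedged sketch, with hypothetical slow_fn/slow_work:

	#include <linux/delay.h>
	#include <linux/workqueue.h>

	static void slow_fn(struct work_struct *work)
	{
		msleep(10000);	/* deliberately long-running work */
	}
	static DECLARE_WORK(slow_work, slow_fn);

	/* later, from process context: queueing here no longer
	 * delays flush_scheduled_work() */
	queue_work(system_long_wq, &slow_work);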
@@ -2479,7 +2484,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
 
 int keventd_up(void)
 {
-	return keventd_wq != NULL;
+	return system_wq != NULL;
 }
 
 static struct cpu_workqueue_struct *alloc_cwqs(void)
@@ -2539,15 +2544,16 @@ static int wq_clamp_max_active(int max_active, const char *name)
 	return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
 }
 
-struct workqueue_struct *__create_workqueue_key(const char *name,
-						unsigned int flags,
-						int max_active,
-						struct lock_class_key *key,
-						const char *lock_name)
+struct workqueue_struct *__alloc_workqueue_key(const char *name,
+					       unsigned int flags,
+					       int max_active,
+					       struct lock_class_key *key,
+					       const char *lock_name)
 {
 	struct workqueue_struct *wq;
 	unsigned int cpu;
 
+	max_active = max_active ?: WQ_DFL_ACTIVE;
 	max_active = wq_clamp_max_active(max_active, name);
 
 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
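The added line uses the GNU C binary conditional: a ?: b yields a when a is non-zero, otherwise b, so a caller-supplied max_active of 0 becomes the default. It is shorthand for:

	if (!max_active)
		max_active = WQ_DFL_ACTIVE;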
@@ -2626,7 +2632,7 @@ err:
 	}
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(__create_workqueue_key);
+EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
 
 /**
  * destroy_workqueue - safely terminate a workqueue
@@ -2910,7 +2916,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 			continue;
 
 		debug_work_activate(rebind_work);
-		insert_work(get_cwq(gcwq->cpu, keventd_wq), rebind_work,
+		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
 			    worker->scheduled.next,
 			    work_color_to_flags(WORK_NO_COLOR));
 	}
@@ -3287,6 +3293,8 @@ void __init init_workqueues(void)
 		spin_unlock_irq(&gcwq->lock);
 	}
 
-	keventd_wq = __create_workqueue("events", 0, WQ_DFL_ACTIVE);
-	BUG_ON(!keventd_wq);
+	system_wq = alloc_workqueue("events", 0, 0);
+	system_long_wq = alloc_workqueue("events_long", 0, 0);
+	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
+	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq);
 }
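For context, WQ_NON_REENTRANT means a work item queued on system_nrt_wq is never executed on multiple CPUs at the same time, so its handler needs no lock against a second instance of itself. A hedged sketch, with hypothetical nrt_fn/nrt_work:

	#include <linux/workqueue.h>

	static void nrt_fn(struct work_struct *work)
	{
		/* guaranteed never to run on two CPUs at once when
		 * queued on system_nrt_wq */
	}
	static DECLARE_WORK(nrt_work, nrt_fn);

	/* from process context */
	queue_work(system_nrt_wq, &nrt_work);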