author     Tejun Heo <tj@kernel.org>    2010-06-29 04:07:10 -0400
committer  Tejun Heo <tj@kernel.org>    2010-06-29 04:07:10 -0400
commit     4690c4ab56c71919893ca25252f2dd65b58188c7 (patch)
tree       dcdb5b7dd2104db2dc8babe66064dd6f5022247f /kernel
parent     c790bce0481857412c964c5e9d46d56e41c4b051 (diff)
workqueue: misc/cosmetic updates
Make the following updates in preparation for concurrency managed
workqueue. None of these changes causes any visible behavior
difference.
* Add comments and adjust indentation in data structures and several
  functions.
* Rename wq_per_cpu() to get_cwq() and swap the positions of its two
  parameters for consistency. Convert a direct per_cpu_ptr() access
  to wq->cpu_wq into a get_cwq() call.
* Add work_static() and update set_wq_data() so that it sets the
  flags part of work->data to WORK_STRUCT_PENDING | @extra_flags,
  plus WORK_STRUCT_STATIC if the work is statically initialized.
* Move the sanity check on work->entry emptiness from queue_work_on()
  to __queue_work(), which all queueing paths share.
* Make __queue_work() take @cpu and @wq instead of @cwq.
* Restructure flush_work() and __create_workqueue_key() to make them
easier to modify.
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/workqueue.c   131
1 files changed, 84 insertions, 47 deletions
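
The queueing changes described above all funnel through one packing scheme: work->data carries the cwq pointer in its high bits and the WORK_STRUCT_* flags in its low bits, and set_wq_data() now receives the extra flag bits from its caller instead of re-reading them from the work item. The stand-alone C sketch below models only that packing, under stated simplifications: invented placeholder types, a plain unsigned long instead of the kernel's atomic_long_t, and the two-bit flag layout assumed; it is an illustration of the idea, not the in-tree code.

#include <stdio.h>

#define WORK_STRUCT_PENDING     0       /* bit 0: work item is pending execution */
#define WORK_STRUCT_STATIC      1       /* bit 1: statically initialized work (assumed layout) */
#define WORK_STRUCT_FLAG_MASK   3UL
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)

struct cpu_workqueue_struct {
        int cpu;                        /* placeholder member for the sketch */
};

struct work_struct {
        unsigned long data;             /* atomic_long_t in the kernel */
};

/* the static bit is preserved across set_wq_data()/clear_wq_data() */
static unsigned long work_static(struct work_struct *work)
{
        return work->data & (1UL << WORK_STRUCT_STATIC);
}

/* post-patch shape: extra flag bits are passed in by the caller (insert_work() etc.) */
static void set_wq_data(struct work_struct *work,
                        struct cpu_workqueue_struct *cwq,
                        unsigned long extra_flags)
{
        work->data = (unsigned long)cwq | work_static(work) |
                     (1UL << WORK_STRUCT_PENDING) | extra_flags;
}

static struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *)(work->data & WORK_STRUCT_WQ_DATA_MASK);
}

int main(void)
{
        /* static object, so its address has the low flag bits clear */
        static struct cpu_workqueue_struct cwq = { .cpu = 0 };
        struct work_struct work = { .data = 1UL << WORK_STRUCT_STATIC };

        set_wq_data(&work, &cwq, 0);
        printf("cwq recovered: %d, flags: %#lx\n",
               get_wq_data(&work) == &cwq, work.data & WORK_STRUCT_FLAG_MASK);
        return 0;
}

With this shape, insert_work() can pass 0 today while later patches OR in additional WORK_STRUCT_* bits through @extra_flags without touching the signature again, which appears to be the point of threading the parameter through now.
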
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1a47fbf92fae..c56146a755e5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -37,6 +37,16 @@
 #include <trace/events/workqueue.h>
 
 /*
+ * Structure fields follow one of the following exclusion rules.
+ *
+ * I: Set during initialization and read-only afterwards.
+ *
+ * L: cwq->lock protected. Access with cwq->lock held.
+ *
+ * W: workqueue_lock protected.
+ */
+
+/*
  * The per-CPU workqueue (if single thread, we always use the first
  * possible cpu).
  */
@@ -48,8 +58,8 @@ struct cpu_workqueue_struct {
         wait_queue_head_t more_work;
         struct work_struct *current_work;
 
-        struct workqueue_struct *wq;
+        struct workqueue_struct *wq;            /* I: the owning workqueue */
         struct task_struct *thread;
 } ____cacheline_aligned;
 
 /*
@@ -57,13 +67,13 @@ struct cpu_workqueue_struct {
  * per-CPU workqueues:
  */
 struct workqueue_struct {
-        struct cpu_workqueue_struct *cpu_wq;
-        struct list_head list;
-        const char *name;
+        struct cpu_workqueue_struct *cpu_wq;    /* I: cwq's */
+        struct list_head list;                  /* W: list of all workqueues */
+        const char *name;                       /* I: workqueue name */
         int singlethread;
         int freezeable;         /* Freeze threads during suspend */
 #ifdef CONFIG_LOCKDEP
         struct lockdep_map lockdep_map;
 #endif
 };
 
@@ -204,8 +214,8 @@ static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
                 ? cpu_singlethread_map : cpu_populated_map;
 }
 
-static
-struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
+static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
+                                            struct workqueue_struct *wq)
 {
         if (unlikely(is_wq_single_threaded(wq)))
                 cpu = singlethread_cpu;
@@ -217,15 +227,13 @@ struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
  * - Must *only* be called if the pending flag is set
  */
 static inline void set_wq_data(struct work_struct *work,
-                               struct cpu_workqueue_struct *cwq)
+                               struct cpu_workqueue_struct *cwq,
+                               unsigned long extra_flags)
 {
-        unsigned long new;
-
         BUG_ON(!work_pending(work));
 
-        new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
-        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
-        atomic_long_set(&work->data, new);
+        atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
+                        (1UL << WORK_STRUCT_PENDING) | extra_flags);
 }
 
 /*
@@ -233,9 +241,7 @@ static inline void set_wq_data(struct work_struct *work,
  */
 static inline void clear_wq_data(struct work_struct *work)
 {
-        unsigned long flags = *work_data_bits(work) &
-                                (1UL << WORK_STRUCT_STATIC);
-        atomic_long_set(&work->data, flags);
+        atomic_long_set(&work->data, work_static(work));
 }
 
 static inline
@@ -244,29 +250,47 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
         return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
+/**
+ * insert_work - insert a work into cwq
+ * @cwq: cwq @work belongs to
+ * @work: work to insert
+ * @head: insertion point
+ * @extra_flags: extra WORK_STRUCT_* flags to set
+ *
+ * Insert @work into @cwq after @head.
+ *
+ * CONTEXT:
+ * spin_lock_irq(cwq->lock).
+ */
 static void insert_work(struct cpu_workqueue_struct *cwq,
-                        struct work_struct *work, struct list_head *head)
+                        struct work_struct *work, struct list_head *head,
+                        unsigned int extra_flags)
 {
         trace_workqueue_insertion(cwq->thread, work);
 
-        set_wq_data(work, cwq);
+        /* we own @work, set data and link */
+        set_wq_data(work, cwq, extra_flags);
+
         /*
          * Ensure that we get the right work->data if we see the
          * result of list_add() below, see try_to_grab_pending().
          */
         smp_wmb();
+
         list_add_tail(&work->entry, head);
         wake_up(&cwq->more_work);
 }
 
-static void __queue_work(struct cpu_workqueue_struct *cwq,
+static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                          struct work_struct *work)
 {
+        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
         unsigned long flags;
 
         debug_work_activate(work);
         spin_lock_irqsave(&cwq->lock, flags);
-        insert_work(cwq, work, &cwq->worklist);
+        BUG_ON(!list_empty(&work->entry));
+        insert_work(cwq, work, &cwq->worklist, 0);
         spin_unlock_irqrestore(&cwq->lock, flags);
 }
 
@@ -308,8 +332,7 @@ queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
         int ret = 0;
 
         if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
-                BUG_ON(!list_empty(&work->entry));
-                __queue_work(wq_per_cpu(wq, cpu), work);
+                __queue_work(cpu, wq, work);
                 ret = 1;
         }
         return ret;
@@ -320,9 +343,8 @@ static void delayed_work_timer_fn(unsigned long __data)
 {
         struct delayed_work *dwork = (struct delayed_work *)__data;
         struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
-        struct workqueue_struct *wq = cwq->wq;
 
-        __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
+        __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
 }
 
 /**
@@ -366,7 +388,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                 timer_stats_timer_set_start_info(&dwork->timer);
 
                 /* This stores cwq for the moment, for the timer_fn */
-                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
+                set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0);
                 timer->expires = jiffies + delay;
                 timer->data = (unsigned long)dwork;
                 timer->function = delayed_work_timer_fn;
@@ -430,6 +452,12 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
         spin_unlock_irq(&cwq->lock);
 }
 
+/**
+ * worker_thread - the worker thread function
+ * @__cwq: cwq to serve
+ *
+ * The cwq worker thread function.
+ */
 static int worker_thread(void *__cwq)
 {
         struct cpu_workqueue_struct *cwq = __cwq;
@@ -468,6 +496,17 @@ static void wq_barrier_func(struct work_struct *work)
         complete(&barr->done);
 }
 
+/**
+ * insert_wq_barrier - insert a barrier work
+ * @cwq: cwq to insert barrier into
+ * @barr: wq_barrier to insert
+ * @head: insertion point
+ *
+ * Insert barrier @barr into @cwq before @head.
+ *
+ * CONTEXT:
+ * spin_lock_irq(cwq->lock).
+ */
 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                               struct wq_barrier *barr, struct list_head *head)
 {
@@ -479,11 +518,10 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
          */
         INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
         __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
-
         init_completion(&barr->done);
 
         debug_work_activate(&barr->work);
-        insert_work(cwq, &barr->work, head);
+        insert_work(cwq, &barr->work, head, 0);
 }
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
@@ -517,9 +555,6 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  *
  * We sleep until all works which were queued on entry have been handled,
  * but we are not livelocked by new incoming ones.
- *
- * This function used to run the workqueues itself. Now we just wait for the
- * helper threads to do it.
  */
 void flush_workqueue(struct workqueue_struct *wq)
 {
@@ -558,7 +593,6 @@ int flush_work(struct work_struct *work)
         lock_map_acquire(&cwq->wq->lockdep_map);
         lock_map_release(&cwq->wq->lockdep_map);
 
-        prev = NULL;
         spin_lock_irq(&cwq->lock);
         if (!list_empty(&work->entry)) {
                 /*
@@ -567,22 +601,22 @@ int flush_work(struct work_struct *work)
                  */
                 smp_rmb();
                 if (unlikely(cwq != get_wq_data(work)))
-                        goto out;
+                        goto already_gone;
                 prev = &work->entry;
         } else {
                 if (cwq->current_work != work)
-                        goto out;
+                        goto already_gone;
                 prev = &cwq->worklist;
         }
         insert_wq_barrier(cwq, &barr, prev->next);
-out:
-        spin_unlock_irq(&cwq->lock);
-        if (!prev)
-                return 0;
 
+        spin_unlock_irq(&cwq->lock);
         wait_for_completion(&barr.done);
         destroy_work_on_stack(&barr.work);
         return 1;
+already_gone:
+        spin_unlock_irq(&cwq->lock);
+        return 0;
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -665,7 +699,7 @@ static void wait_on_work(struct work_struct *work)
         cpu_map = wq_cpu_map(wq);
 
         for_each_cpu(cpu, cpu_map)
-                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+                wait_on_cpu_work(get_cwq(cpu, wq), work);
 }
 
 static int __cancel_work_timer(struct work_struct *work,
@@ -782,9 +816,8 @@ EXPORT_SYMBOL(schedule_delayed_work);
 void flush_delayed_work(struct delayed_work *dwork)
 {
         if (del_timer_sync(&dwork->timer)) {
-                struct cpu_workqueue_struct *cwq;
-                cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
-                __queue_work(cwq, &dwork->work);
+                __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
+                             &dwork->work);
                 put_cpu();
         }
         flush_work(&dwork->work);
@@ -991,13 +1024,11 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 
         wq = kzalloc(sizeof(*wq), GFP_KERNEL);
         if (!wq)
-                return NULL;
+                goto err;
 
         wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
-        if (!wq->cpu_wq) {
-                kfree(wq);
-                return NULL;
-        }
+        if (!wq->cpu_wq)
+                goto err;
 
         wq->name = name;
         lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
@@ -1041,6 +1072,12 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
                 wq = NULL;
         }
         return wq;
+err:
+        if (wq) {
+                free_percpu(wq->cpu_wq);
+                kfree(wq);
+        }
+        return NULL;
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
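
The __create_workqueue_key() hunks above replace two hand-rolled failure returns with a single err: label that copes with partially constructed state; the flush_work() restructuring is the same idea in the other direction, giving the "work already gone" case its own already_gone: exit instead of sharing a prev flag with the success path. Below is a small user-space sketch of the consolidated-error-path shape only, with invented names and calloc()/free() standing in for kzalloc()/alloc_percpu() and their frees; it mirrors the pattern, not the kernel function.

#include <stdio.h>
#include <stdlib.h>

struct fake_wq {
        void *cpu_wq;
        const char *name;
};

static struct fake_wq *create_fake_wq(const char *name, size_t percpu_bytes)
{
        struct fake_wq *wq;

        wq = calloc(1, sizeof(*wq));
        if (!wq)
                goto err;

        wq->cpu_wq = calloc(1, percpu_bytes);   /* stand-in for alloc_percpu() */
        if (!wq->cpu_wq)
                goto err;

        wq->name = name;
        return wq;
err:
        if (wq) {                               /* mirrors the kernel's guard */
                free(wq->cpu_wq);               /* free(NULL) is a no-op */
                free(wq);
        }
        return NULL;
}

int main(void)
{
        struct fake_wq *wq = create_fake_wq("events", 128);

        printf("created: %s\n", wq ? wq->name : "(failed)");
        if (wq) {
                free(wq->cpu_wq);
                free(wq);
        }
        return 0;
}

The payoff of the single exit label shows up when later patches add more allocations to the constructor: each new step only needs one goto err, and the cleanup stays in one place.
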