aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/workqueue.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2010-06-29 04:07:12 -0400
committerTejun Heo <tj@kernel.org>2010-06-29 04:07:12 -0400
commit8b03ae3cde59af9facab7c831b4141515d5dbcc8 (patch)
tree900829ac17fa941e9819208489081e4de1218ac4 /kernel/workqueue.c
parenta0a1a5fd4fb15ec61117c759fe9f5c16c53d9e9c (diff)
workqueue: introduce global cwq and unify cwq locks
There is one gcwq (global cwq) per cpu and all cwqs on a cpu point to it. A gcwq contains a lock to be used by all cwqs on the cpu and an ida to give IDs to workers belonging to the cpu. This patch introduces gcwq, moves worker_ida into gcwq and makes all cwqs on the same cpu use the cpu's gcwq->lock instead of separate locks. gcwq->ida is now protected by gcwq->lock too. Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--kernel/workqueue.c160
1 file changed, 98 insertions, 62 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4d059c532792..b043f57516bd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -40,38 +40,45 @@
40 * 40 *
41 * I: Set during initialization and read-only afterwards. 41 * I: Set during initialization and read-only afterwards.
42 * 42 *
43 * L: cwq->lock protected. Access with cwq->lock held. 43 * L: gcwq->lock protected. Access with gcwq->lock held.
44 * 44 *
45 * F: wq->flush_mutex protected. 45 * F: wq->flush_mutex protected.
46 * 46 *
47 * W: workqueue_lock protected. 47 * W: workqueue_lock protected.
48 */ 48 */
49 49
50struct global_cwq;
50struct cpu_workqueue_struct; 51struct cpu_workqueue_struct;
51 52
52struct worker { 53struct worker {
53 struct work_struct *current_work; /* L: work being processed */ 54 struct work_struct *current_work; /* L: work being processed */
54 struct list_head scheduled; /* L: scheduled works */ 55 struct list_head scheduled; /* L: scheduled works */
55 struct task_struct *task; /* I: worker task */ 56 struct task_struct *task; /* I: worker task */
57 struct global_cwq *gcwq; /* I: the associated gcwq */
56 struct cpu_workqueue_struct *cwq; /* I: the associated cwq */ 58 struct cpu_workqueue_struct *cwq; /* I: the associated cwq */
57 int id; /* I: worker id */ 59 int id; /* I: worker id */
58}; 60};
59 61
60/* 62/*
63 * Global per-cpu workqueue.
64 */
65struct global_cwq {
66 spinlock_t lock; /* the gcwq lock */
67 unsigned int cpu; /* I: the associated cpu */
68 struct ida worker_ida; /* L: for worker IDs */
69} ____cacheline_aligned_in_smp;
70
71/*
61 * The per-CPU workqueue (if single thread, we always use the first 72 * The per-CPU workqueue (if single thread, we always use the first
62 * possible cpu). The lower WORK_STRUCT_FLAG_BITS of 73 * possible cpu). The lower WORK_STRUCT_FLAG_BITS of
63 * work_struct->data are used for flags and thus cwqs need to be 74 * work_struct->data are used for flags and thus cwqs need to be
64 * aligned at two's power of the number of flag bits. 75 * aligned at two's power of the number of flag bits.
65 */ 76 */
66struct cpu_workqueue_struct { 77struct cpu_workqueue_struct {
67 78 struct global_cwq *gcwq; /* I: the associated gcwq */
68 spinlock_t lock;
69
70 struct list_head worklist; 79 struct list_head worklist;
71 wait_queue_head_t more_work; 80 wait_queue_head_t more_work;
72 unsigned int cpu;
73 struct worker *worker; 81 struct worker *worker;
74
75 struct workqueue_struct *wq; /* I: the owning workqueue */ 82 struct workqueue_struct *wq; /* I: the owning workqueue */
76 int work_color; /* L: current color */ 83 int work_color; /* L: current color */
77 int flush_color; /* L: flushing color */ 84 int flush_color; /* L: flushing color */
@@ -228,13 +235,19 @@ static inline void debug_work_deactivate(struct work_struct *work) { }
228/* Serializes the accesses to the list of workqueues. */ 235/* Serializes the accesses to the list of workqueues. */
229static DEFINE_SPINLOCK(workqueue_lock); 236static DEFINE_SPINLOCK(workqueue_lock);
230static LIST_HEAD(workqueues); 237static LIST_HEAD(workqueues);
231static DEFINE_PER_CPU(struct ida, worker_ida);
232static bool workqueue_freezing; /* W: have wqs started freezing? */ 238static bool workqueue_freezing; /* W: have wqs started freezing? */
233 239
240static DEFINE_PER_CPU(struct global_cwq, global_cwq);
241
234static int worker_thread(void *__worker); 242static int worker_thread(void *__worker);
235 243
236static int singlethread_cpu __read_mostly; 244static int singlethread_cpu __read_mostly;
237 245
246static struct global_cwq *get_gcwq(unsigned int cpu)
247{
248 return &per_cpu(global_cwq, cpu);
249}
250
238static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, 251static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
239 struct workqueue_struct *wq) 252 struct workqueue_struct *wq)
240{ 253{
@@ -303,7 +316,7 @@ static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
303 * Insert @work into @cwq after @head. 316 * Insert @work into @cwq after @head.
304 * 317 *
305 * CONTEXT: 318 * CONTEXT:
306 * spin_lock_irq(cwq->lock). 319 * spin_lock_irq(gcwq->lock).
307 */ 320 */
308static void insert_work(struct cpu_workqueue_struct *cwq, 321static void insert_work(struct cpu_workqueue_struct *cwq,
309 struct work_struct *work, struct list_head *head, 322 struct work_struct *work, struct list_head *head,
@@ -326,12 +339,13 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
326 struct work_struct *work) 339 struct work_struct *work)
327{ 340{
328 struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq); 341 struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq);
342 struct global_cwq *gcwq = cwq->gcwq;
329 struct list_head *worklist; 343 struct list_head *worklist;
330 unsigned long flags; 344 unsigned long flags;
331 345
332 debug_work_activate(work); 346 debug_work_activate(work);
333 347
334 spin_lock_irqsave(&cwq->lock, flags); 348 spin_lock_irqsave(&gcwq->lock, flags);
335 BUG_ON(!list_empty(&work->entry)); 349 BUG_ON(!list_empty(&work->entry));
336 350
337 cwq->nr_in_flight[cwq->work_color]++; 351 cwq->nr_in_flight[cwq->work_color]++;
@@ -344,7 +358,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
344 358
345 insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); 359 insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
346 360
347 spin_unlock_irqrestore(&cwq->lock, flags); 361 spin_unlock_irqrestore(&gcwq->lock, flags);
348} 362}
349 363
350/** 364/**
@@ -483,39 +497,41 @@ static struct worker *alloc_worker(void)
483 */ 497 */
484static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind) 498static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
485{ 499{
500 struct global_cwq *gcwq = cwq->gcwq;
486 int id = -1; 501 int id = -1;
487 struct worker *worker = NULL; 502 struct worker *worker = NULL;
488 503
489 spin_lock(&workqueue_lock); 504 spin_lock_irq(&gcwq->lock);
490 while (ida_get_new(&per_cpu(worker_ida, cwq->cpu), &id)) { 505 while (ida_get_new(&gcwq->worker_ida, &id)) {
491 spin_unlock(&workqueue_lock); 506 spin_unlock_irq(&gcwq->lock);
492 if (!ida_pre_get(&per_cpu(worker_ida, cwq->cpu), GFP_KERNEL)) 507 if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
493 goto fail; 508 goto fail;
494 spin_lock(&workqueue_lock); 509 spin_lock_irq(&gcwq->lock);
495 } 510 }
496 spin_unlock(&workqueue_lock); 511 spin_unlock_irq(&gcwq->lock);
497 512
498 worker = alloc_worker(); 513 worker = alloc_worker();
499 if (!worker) 514 if (!worker)
500 goto fail; 515 goto fail;
501 516
517 worker->gcwq = gcwq;
502 worker->cwq = cwq; 518 worker->cwq = cwq;
503 worker->id = id; 519 worker->id = id;
504 520
505 worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d", 521 worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
506 cwq->cpu, id); 522 gcwq->cpu, id);
507 if (IS_ERR(worker->task)) 523 if (IS_ERR(worker->task))
508 goto fail; 524 goto fail;
509 525
510 if (bind) 526 if (bind)
511 kthread_bind(worker->task, cwq->cpu); 527 kthread_bind(worker->task, gcwq->cpu);
512 528
513 return worker; 529 return worker;
514fail: 530fail:
515 if (id >= 0) { 531 if (id >= 0) {
516 spin_lock(&workqueue_lock); 532 spin_lock_irq(&gcwq->lock);
517 ida_remove(&per_cpu(worker_ida, cwq->cpu), id); 533 ida_remove(&gcwq->worker_ida, id);
518 spin_unlock(&workqueue_lock); 534 spin_unlock_irq(&gcwq->lock);
519 } 535 }
520 kfree(worker); 536 kfree(worker);
521 return NULL; 537 return NULL;
@@ -528,7 +544,7 @@ fail:
528 * Start @worker. 544 * Start @worker.
529 * 545 *
530 * CONTEXT: 546 * CONTEXT:
531 * spin_lock_irq(cwq->lock). 547 * spin_lock_irq(gcwq->lock).
532 */ 548 */
533static void start_worker(struct worker *worker) 549static void start_worker(struct worker *worker)
534{ 550{
@@ -543,7 +559,7 @@ static void start_worker(struct worker *worker)
543 */ 559 */
544static void destroy_worker(struct worker *worker) 560static void destroy_worker(struct worker *worker)
545{ 561{
546 int cpu = worker->cwq->cpu; 562 struct global_cwq *gcwq = worker->gcwq;
547 int id = worker->id; 563 int id = worker->id;
548 564
549 /* sanity check frenzy */ 565 /* sanity check frenzy */
@@ -553,9 +569,9 @@ static void destroy_worker(struct worker *worker)
553 kthread_stop(worker->task); 569 kthread_stop(worker->task);
554 kfree(worker); 570 kfree(worker);
555 571
556 spin_lock(&workqueue_lock); 572 spin_lock_irq(&gcwq->lock);
557 ida_remove(&per_cpu(worker_ida, cpu), id); 573 ida_remove(&gcwq->worker_ida, id);
558 spin_unlock(&workqueue_lock); 574 spin_unlock_irq(&gcwq->lock);
559} 575}
560 576
561/** 577/**
@@ -573,7 +589,7 @@ static void destroy_worker(struct worker *worker)
573 * nested inside outer list_for_each_entry_safe(). 589 * nested inside outer list_for_each_entry_safe().
574 * 590 *
575 * CONTEXT: 591 * CONTEXT:
576 * spin_lock_irq(cwq->lock). 592 * spin_lock_irq(gcwq->lock).
577 */ 593 */
578static void move_linked_works(struct work_struct *work, struct list_head *head, 594static void move_linked_works(struct work_struct *work, struct list_head *head,
579 struct work_struct **nextp) 595 struct work_struct **nextp)
@@ -617,7 +633,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
617 * decrement nr_in_flight of its cwq and handle workqueue flushing. 633 * decrement nr_in_flight of its cwq and handle workqueue flushing.
618 * 634 *
619 * CONTEXT: 635 * CONTEXT:
620 * spin_lock_irq(cwq->lock). 636 * spin_lock_irq(gcwq->lock).
621 */ 637 */
622static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) 638static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
623{ 639{
@@ -664,11 +680,12 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
664 * call this function to process a work. 680 * call this function to process a work.
665 * 681 *
666 * CONTEXT: 682 * CONTEXT:
667 * spin_lock_irq(cwq->lock) which is released and regrabbed. 683 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
668 */ 684 */
669static void process_one_work(struct worker *worker, struct work_struct *work) 685static void process_one_work(struct worker *worker, struct work_struct *work)
670{ 686{
671 struct cpu_workqueue_struct *cwq = worker->cwq; 687 struct cpu_workqueue_struct *cwq = worker->cwq;
688 struct global_cwq *gcwq = cwq->gcwq;
672 work_func_t f = work->func; 689 work_func_t f = work->func;
673 int work_color; 690 int work_color;
674#ifdef CONFIG_LOCKDEP 691#ifdef CONFIG_LOCKDEP
@@ -687,7 +704,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
687 work_color = get_work_color(work); 704 work_color = get_work_color(work);
688 list_del_init(&work->entry); 705 list_del_init(&work->entry);
689 706
690 spin_unlock_irq(&cwq->lock); 707 spin_unlock_irq(&gcwq->lock);
691 708
692 BUG_ON(get_wq_data(work) != cwq); 709 BUG_ON(get_wq_data(work) != cwq);
693 work_clear_pending(work); 710 work_clear_pending(work);
@@ -707,7 +724,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
707 dump_stack(); 724 dump_stack();
708 } 725 }
709 726
710 spin_lock_irq(&cwq->lock); 727 spin_lock_irq(&gcwq->lock);
711 728
712 /* we're done with it, release */ 729 /* we're done with it, release */
713 worker->current_work = NULL; 730 worker->current_work = NULL;
@@ -723,7 +740,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
723 * fetches a work from the top and executes it. 740 * fetches a work from the top and executes it.
724 * 741 *
725 * CONTEXT: 742 * CONTEXT:
726 * spin_lock_irq(cwq->lock) which may be released and regrabbed 743 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
727 * multiple times. 744 * multiple times.
728 */ 745 */
729static void process_scheduled_works(struct worker *worker) 746static void process_scheduled_works(struct worker *worker)
@@ -744,6 +761,7 @@ static void process_scheduled_works(struct worker *worker)
744static int worker_thread(void *__worker) 761static int worker_thread(void *__worker)
745{ 762{
746 struct worker *worker = __worker; 763 struct worker *worker = __worker;
764 struct global_cwq *gcwq = worker->gcwq;
747 struct cpu_workqueue_struct *cwq = worker->cwq; 765 struct cpu_workqueue_struct *cwq = worker->cwq;
748 DEFINE_WAIT(wait); 766 DEFINE_WAIT(wait);
749 767
@@ -758,11 +776,11 @@ static int worker_thread(void *__worker)
758 break; 776 break;
759 777
760 if (unlikely(!cpumask_equal(&worker->task->cpus_allowed, 778 if (unlikely(!cpumask_equal(&worker->task->cpus_allowed,
761 get_cpu_mask(cwq->cpu)))) 779 get_cpu_mask(gcwq->cpu))))
762 set_cpus_allowed_ptr(worker->task, 780 set_cpus_allowed_ptr(worker->task,
763 get_cpu_mask(cwq->cpu)); 781 get_cpu_mask(gcwq->cpu));
764 782
765 spin_lock_irq(&cwq->lock); 783 spin_lock_irq(&gcwq->lock);
766 784
767 while (!list_empty(&cwq->worklist)) { 785 while (!list_empty(&cwq->worklist)) {
768 struct work_struct *work = 786 struct work_struct *work =
@@ -782,7 +800,7 @@ static int worker_thread(void *__worker)
782 } 800 }
783 } 801 }
784 802
785 spin_unlock_irq(&cwq->lock); 803 spin_unlock_irq(&gcwq->lock);
786 } 804 }
787 805
788 return 0; 806 return 0;
@@ -821,7 +839,7 @@ static void wq_barrier_func(struct work_struct *work)
821 * underneath us, so we can't reliably determine cwq from @target. 839 * underneath us, so we can't reliably determine cwq from @target.
822 * 840 *
823 * CONTEXT: 841 * CONTEXT:
824 * spin_lock_irq(cwq->lock). 842 * spin_lock_irq(gcwq->lock).
825 */ 843 */
826static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, 844static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
827 struct wq_barrier *barr, 845 struct wq_barrier *barr,
@@ -831,7 +849,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
831 unsigned int linked = 0; 849 unsigned int linked = 0;
832 850
833 /* 851 /*
834 * debugobject calls are safe here even with cwq->lock locked 852 * debugobject calls are safe here even with gcwq->lock locked
835 * as we know for sure that this will not trigger any of the 853 * as we know for sure that this will not trigger any of the
836 * checks and call back into the fixup functions where we 854 * checks and call back into the fixup functions where we
837 * might deadlock. 855 * might deadlock.
@@ -904,8 +922,9 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
904 922
905 for_each_possible_cpu(cpu) { 923 for_each_possible_cpu(cpu) {
906 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 924 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
925 struct global_cwq *gcwq = cwq->gcwq;
907 926
908 spin_lock_irq(&cwq->lock); 927 spin_lock_irq(&gcwq->lock);
909 928
910 if (flush_color >= 0) { 929 if (flush_color >= 0) {
911 BUG_ON(cwq->flush_color != -1); 930 BUG_ON(cwq->flush_color != -1);
@@ -922,7 +941,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
922 cwq->work_color = work_color; 941 cwq->work_color = work_color;
923 } 942 }
924 943
925 spin_unlock_irq(&cwq->lock); 944 spin_unlock_irq(&gcwq->lock);
926 } 945 }
927 946
928 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) 947 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
@@ -1097,17 +1116,19 @@ int flush_work(struct work_struct *work)
1097{ 1116{
1098 struct worker *worker = NULL; 1117 struct worker *worker = NULL;
1099 struct cpu_workqueue_struct *cwq; 1118 struct cpu_workqueue_struct *cwq;
1119 struct global_cwq *gcwq;
1100 struct wq_barrier barr; 1120 struct wq_barrier barr;
1101 1121
1102 might_sleep(); 1122 might_sleep();
1103 cwq = get_wq_data(work); 1123 cwq = get_wq_data(work);
1104 if (!cwq) 1124 if (!cwq)
1105 return 0; 1125 return 0;
1126 gcwq = cwq->gcwq;
1106 1127
1107 lock_map_acquire(&cwq->wq->lockdep_map); 1128 lock_map_acquire(&cwq->wq->lockdep_map);
1108 lock_map_release(&cwq->wq->lockdep_map); 1129 lock_map_release(&cwq->wq->lockdep_map);
1109 1130
1110 spin_lock_irq(&cwq->lock); 1131 spin_lock_irq(&gcwq->lock);
1111 if (!list_empty(&work->entry)) { 1132 if (!list_empty(&work->entry)) {
1112 /* 1133 /*
1113 * See the comment near try_to_grab_pending()->smp_rmb(). 1134 * See the comment near try_to_grab_pending()->smp_rmb().
@@ -1124,12 +1145,12 @@ int flush_work(struct work_struct *work)
1124 } 1145 }
1125 1146
1126 insert_wq_barrier(cwq, &barr, work, worker); 1147 insert_wq_barrier(cwq, &barr, work, worker);
1127 spin_unlock_irq(&cwq->lock); 1148 spin_unlock_irq(&gcwq->lock);
1128 wait_for_completion(&barr.done); 1149 wait_for_completion(&barr.done);
1129 destroy_work_on_stack(&barr.work); 1150 destroy_work_on_stack(&barr.work);
1130 return 1; 1151 return 1;
1131already_gone: 1152already_gone:
1132 spin_unlock_irq(&cwq->lock); 1153 spin_unlock_irq(&gcwq->lock);
1133 return 0; 1154 return 0;
1134} 1155}
1135EXPORT_SYMBOL_GPL(flush_work); 1156EXPORT_SYMBOL_GPL(flush_work);
@@ -1140,6 +1161,7 @@ EXPORT_SYMBOL_GPL(flush_work);
1140 */ 1161 */
1141static int try_to_grab_pending(struct work_struct *work) 1162static int try_to_grab_pending(struct work_struct *work)
1142{ 1163{
1164 struct global_cwq *gcwq;
1143 struct cpu_workqueue_struct *cwq; 1165 struct cpu_workqueue_struct *cwq;
1144 int ret = -1; 1166 int ret = -1;
1145 1167
@@ -1154,8 +1176,9 @@ static int try_to_grab_pending(struct work_struct *work)
1154 cwq = get_wq_data(work); 1176 cwq = get_wq_data(work);
1155 if (!cwq) 1177 if (!cwq)
1156 return ret; 1178 return ret;
1179 gcwq = cwq->gcwq;
1157 1180
1158 spin_lock_irq(&cwq->lock); 1181 spin_lock_irq(&gcwq->lock);
1159 if (!list_empty(&work->entry)) { 1182 if (!list_empty(&work->entry)) {
1160 /* 1183 /*
1161 * This work is queued, but perhaps we locked the wrong cwq. 1184 * This work is queued, but perhaps we locked the wrong cwq.
@@ -1170,7 +1193,7 @@ static int try_to_grab_pending(struct work_struct *work)
1170 ret = 1; 1193 ret = 1;
1171 } 1194 }
1172 } 1195 }
1173 spin_unlock_irq(&cwq->lock); 1196 spin_unlock_irq(&gcwq->lock);
1174 1197
1175 return ret; 1198 return ret;
1176} 1199}
@@ -1178,10 +1201,11 @@ static int try_to_grab_pending(struct work_struct *work)
1178static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, 1201static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
1179 struct work_struct *work) 1202 struct work_struct *work)
1180{ 1203{
1204 struct global_cwq *gcwq = cwq->gcwq;
1181 struct wq_barrier barr; 1205 struct wq_barrier barr;
1182 struct worker *worker; 1206 struct worker *worker;
1183 1207
1184 spin_lock_irq(&cwq->lock); 1208 spin_lock_irq(&gcwq->lock);
1185 1209
1186 worker = NULL; 1210 worker = NULL;
1187 if (unlikely(cwq->worker && cwq->worker->current_work == work)) { 1211 if (unlikely(cwq->worker && cwq->worker->current_work == work)) {
@@ -1189,7 +1213,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
1189 insert_wq_barrier(cwq, &barr, work, worker); 1213 insert_wq_barrier(cwq, &barr, work, worker);
1190 } 1214 }
1191 1215
1192 spin_unlock_irq(&cwq->lock); 1216 spin_unlock_irq(&gcwq->lock);
1193 1217
1194 if (unlikely(worker)) { 1218 if (unlikely(worker)) {
1195 wait_for_completion(&barr.done); 1219 wait_for_completion(&barr.done);
@@ -1567,13 +1591,13 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
1567 */ 1591 */
1568 for_each_possible_cpu(cpu) { 1592 for_each_possible_cpu(cpu) {
1569 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 1593 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1594 struct global_cwq *gcwq = get_gcwq(cpu);
1570 1595
1571 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); 1596 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
1572 cwq->cpu = cpu; 1597 cwq->gcwq = gcwq;
1573 cwq->wq = wq; 1598 cwq->wq = wq;
1574 cwq->flush_color = -1; 1599 cwq->flush_color = -1;
1575 cwq->max_active = max_active; 1600 cwq->max_active = max_active;
1576 spin_lock_init(&cwq->lock);
1577 INIT_LIST_HEAD(&cwq->worklist); 1601 INIT_LIST_HEAD(&cwq->worklist);
1578 INIT_LIST_HEAD(&cwq->delayed_works); 1602 INIT_LIST_HEAD(&cwq->delayed_works);
1579 init_waitqueue_head(&cwq->more_work); 1603 init_waitqueue_head(&cwq->more_work);
@@ -1744,7 +1768,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
1744 * list instead of the cwq ones. 1768 * list instead of the cwq ones.
1745 * 1769 *
1746 * CONTEXT: 1770 * CONTEXT:
1747 * Grabs and releases workqueue_lock and cwq->lock's. 1771 * Grabs and releases workqueue_lock and gcwq->lock's.
1748 */ 1772 */
1749void freeze_workqueues_begin(void) 1773void freeze_workqueues_begin(void)
1750{ 1774{
@@ -1757,16 +1781,18 @@ void freeze_workqueues_begin(void)
1757 workqueue_freezing = true; 1781 workqueue_freezing = true;
1758 1782
1759 for_each_possible_cpu(cpu) { 1783 for_each_possible_cpu(cpu) {
1784 struct global_cwq *gcwq = get_gcwq(cpu);
1785
1786 spin_lock_irq(&gcwq->lock);
1787
1760 list_for_each_entry(wq, &workqueues, list) { 1788 list_for_each_entry(wq, &workqueues, list) {
1761 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 1789 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1762 1790
1763 spin_lock_irq(&cwq->lock);
1764
1765 if (wq->flags & WQ_FREEZEABLE) 1791 if (wq->flags & WQ_FREEZEABLE)
1766 cwq->max_active = 0; 1792 cwq->max_active = 0;
1767
1768 spin_unlock_irq(&cwq->lock);
1769 } 1793 }
1794
1795 spin_unlock_irq(&gcwq->lock);
1770 } 1796 }
1771 1797
1772 spin_unlock(&workqueue_lock); 1798 spin_unlock(&workqueue_lock);
@@ -1825,7 +1851,7 @@ out_unlock:
1825 * frozen works are transferred to their respective cwq worklists. 1851 * frozen works are transferred to their respective cwq worklists.
1826 * 1852 *
1827 * CONTEXT: 1853 * CONTEXT:
1828 * Grabs and releases workqueue_lock and cwq->lock's. 1854 * Grabs and releases workqueue_lock and gcwq->lock's.
1829 */ 1855 */
1830void thaw_workqueues(void) 1856void thaw_workqueues(void)
1831{ 1857{
@@ -1838,14 +1864,16 @@ void thaw_workqueues(void)
1838 goto out_unlock; 1864 goto out_unlock;
1839 1865
1840 for_each_possible_cpu(cpu) { 1866 for_each_possible_cpu(cpu) {
1867 struct global_cwq *gcwq = get_gcwq(cpu);
1868
1869 spin_lock_irq(&gcwq->lock);
1870
1841 list_for_each_entry(wq, &workqueues, list) { 1871 list_for_each_entry(wq, &workqueues, list) {
1842 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 1872 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1843 1873
1844 if (!(wq->flags & WQ_FREEZEABLE)) 1874 if (!(wq->flags & WQ_FREEZEABLE))
1845 continue; 1875 continue;
1846 1876
1847 spin_lock_irq(&cwq->lock);
1848
1849 /* restore max_active and repopulate worklist */ 1877 /* restore max_active and repopulate worklist */
1850 cwq->max_active = wq->saved_max_active; 1878 cwq->max_active = wq->saved_max_active;
1851 1879
@@ -1854,9 +1882,9 @@ void thaw_workqueues(void)
1854 cwq_activate_first_delayed(cwq); 1882 cwq_activate_first_delayed(cwq);
1855 1883
1856 wake_up(&cwq->more_work); 1884 wake_up(&cwq->more_work);
1857
1858 spin_unlock_irq(&cwq->lock);
1859 } 1885 }
1886
1887 spin_unlock_irq(&gcwq->lock);
1860 } 1888 }
1861 1889
1862 workqueue_freezing = false; 1890 workqueue_freezing = false;
@@ -1869,11 +1897,19 @@ void __init init_workqueues(void)
1869{ 1897{
1870 unsigned int cpu; 1898 unsigned int cpu;
1871 1899
1872 for_each_possible_cpu(cpu)
1873 ida_init(&per_cpu(worker_ida, cpu));
1874
1875 singlethread_cpu = cpumask_first(cpu_possible_mask); 1900 singlethread_cpu = cpumask_first(cpu_possible_mask);
1876 hotcpu_notifier(workqueue_cpu_callback, 0); 1901 hotcpu_notifier(workqueue_cpu_callback, 0);
1902
1903 /* initialize gcwqs */
1904 for_each_possible_cpu(cpu) {
1905 struct global_cwq *gcwq = get_gcwq(cpu);
1906
1907 spin_lock_init(&gcwq->lock);
1908 gcwq->cpu = cpu;
1909
1910 ida_init(&gcwq->worker_ida);
1911 }
1912
1877 keventd_wq = create_workqueue("events"); 1913 keventd_wq = create_workqueue("events");
1878 BUG_ON(!keventd_wq); 1914 BUG_ON(!keventd_wq);
1879} 1915}