author		Tejun Heo <tj@kernel.org>	2012-07-12 17:46:37 -0400
committer	Tejun Heo <tj@kernel.org>	2012-07-12 17:46:37 -0400
commit		bd7bdd43dcb81bb08240b9401b36a104f77dc135 (patch)
tree		4d12a15e7e72f2d64fb6e58a145e56b4da1a341a
parent		974271c485a4d8bb801decc616748f90aafb07ec (diff)
workqueue: factor out worker_pool from global_cwq
Move worklist and all worker management fields from global_cwq into
the new struct worker_pool. worker_pool points back to the containing
gcwq. worker and cpu_workqueue_struct are updated to point to
worker_pool instead of gcwq too.
This change is mechanical and doesn't introduce any functional
difference other than rearranging of fields and an added level of
indirection in some places. This is to prepare for multiple pools per
gcwq.
v2: Comment typo fixes as suggested by Namhyung.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
-rw-r--r--	include/trace/events/workqueue.h	  2
-rw-r--r--	kernel/workqueue.c			216
2 files changed, 118 insertions, 100 deletions
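To orient the reader before the hunks: the change carves the worker-management half of global_cwq out into a worker_pool that the gcwq embeds, and redirects worker->gcwq and cwq->gcwq through the pool. A condensed sketch of the resulting layout (field comments trimmed; see the kernel/workqueue.c hunks below for the full definitions):

	struct worker_pool {
		struct global_cwq *gcwq;	/* owning gcwq */
		struct list_head worklist;	/* pending works */
		int nr_workers;
		int nr_idle;
		struct list_head idle_list;
		struct timer_list idle_timer;
		struct timer_list mayday_timer;
		struct ida worker_ida;
		struct worker *first_idle;
	};

	struct global_cwq {
		spinlock_t lock;		/* the gcwq lock */
		unsigned int cpu;
		unsigned int flags;
		struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
		struct worker_pool pool;	/* exactly one pool, for now */
		/* trustee fields omitted */
	} ____cacheline_aligned_in_smp;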
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 4018f5058f27..f28d1b65f178 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -54,7 +54,7 @@ TRACE_EVENT(workqueue_queue_work,
 		__entry->function = work->func;
 		__entry->workqueue = cwq->wq;
 		__entry->req_cpu = req_cpu;
-		__entry->cpu = cwq->gcwq->cpu;
+		__entry->cpu = cwq->pool->gcwq->cpu;
 	),
 
 	TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 27637c284cb9..61f154467026 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -115,6 +115,7 @@ enum {
  */
 
 struct global_cwq;
+struct worker_pool;
 
 /*
  * The poor guys doing the actual heavy lifting. All on-duty workers
@@ -131,7 +132,7 @@ struct worker {
 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
 	struct list_head scheduled;	/* L: scheduled works */
 	struct task_struct *task;	/* I: worker task */
-	struct global_cwq *gcwq;	/* I: the associated gcwq */
+	struct worker_pool *pool;	/* I: the associated pool */
 	/* 64 bytes boundary on 64bit, 32 on 32bit */
 	unsigned long last_active;	/* L: last active timestamp */
 	unsigned int flags;		/* X: flags */
@@ -139,6 +140,21 @@ struct worker {
 	struct work_struct rebind_work;	/* L: rebind worker to cpu */
 };
 
+struct worker_pool {
+	struct global_cwq *gcwq;	/* I: the owning gcwq */
+
+	struct list_head worklist;	/* L: list of pending works */
+	int nr_workers;			/* L: total number of workers */
+	int nr_idle;			/* L: currently idle ones */
+
+	struct list_head idle_list;	/* X: list of idle workers */
+	struct timer_list idle_timer;	/* L: worker idle timeout */
+	struct timer_list mayday_timer;	/* L: SOS timer for workers */
+
+	struct ida worker_ida;		/* L: for worker IDs */
+	struct worker *first_idle;	/* L: first idle worker */
+};
+
 /*
  * Global per-cpu workqueue. There's one and only one for each cpu
  * and all works are queued and processed here regardless of their
@@ -146,27 +162,18 @@ struct worker {
  */
 struct global_cwq {
 	spinlock_t lock;		/* the gcwq lock */
-	struct list_head worklist;	/* L: list of pending works */
 	unsigned int cpu;		/* I: the associated cpu */
 	unsigned int flags;		/* L: GCWQ_* flags */
 
-	int nr_workers;			/* L: total number of workers */
-	int nr_idle;			/* L: currently idle ones */
-
-	/* workers are chained either in the idle_list or busy_hash */
-	struct list_head idle_list;	/* X: list of idle workers */
+	/* workers are chained either in busy_hash or pool idle_list */
 	struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
 					/* L: hash of busy workers */
 
-	struct timer_list idle_timer;	/* L: worker idle timeout */
-	struct timer_list mayday_timer;	/* L: SOS timer for dworkers */
-
-	struct ida worker_ida;		/* L: for worker IDs */
+	struct worker_pool pool;	/* the worker pools */
 
 	struct task_struct *trustee;	/* L: for gcwq shutdown */
 	unsigned int trustee_state;	/* L: trustee state */
 	wait_queue_head_t trustee_wait;	/* trustee wait */
-	struct worker *first_idle;	/* L: first idle worker */
 } ____cacheline_aligned_in_smp;
 
 /*
@@ -175,7 +182,7 @@ struct global_cwq {
  * aligned at two's power of the number of flag bits.
  */
 struct cpu_workqueue_struct {
-	struct global_cwq *gcwq;	/* I: the associated gcwq */
+	struct worker_pool *pool;	/* I: the associated pool */
 	struct workqueue_struct *wq;	/* I: the owning workqueue */
 	int work_color;			/* L: current color */
 	int flush_color;		/* L: flushing color */
@@ -555,7 +562,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
 
 	if (data & WORK_STRUCT_CWQ)
 		return ((struct cpu_workqueue_struct *)
-			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
+			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
 
 	cpu = data >> WORK_STRUCT_FLAG_BITS;
 	if (cpu == WORK_CPU_NONE)
@@ -587,13 +594,13 @@ static bool __need_more_worker(struct global_cwq *gcwq)
  */
 static bool need_more_worker(struct global_cwq *gcwq)
 {
-	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
+	return !list_empty(&gcwq->pool.worklist) && __need_more_worker(gcwq);
 }
 
 /* Can I start working? Called from busy but !running workers. */
 static bool may_start_working(struct global_cwq *gcwq)
 {
-	return gcwq->nr_idle;
+	return gcwq->pool.nr_idle;
 }
 
 /* Do I need to keep working? Called from currently running workers. */
@@ -601,7 +608,7 @@ static bool keep_working(struct global_cwq *gcwq)
 {
 	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
 
-	return !list_empty(&gcwq->worklist) &&
+	return !list_empty(&gcwq->pool.worklist) &&
 		(atomic_read(nr_running) <= 1 ||
 		 gcwq->flags & GCWQ_HIGHPRI_PENDING);
 }
@@ -622,8 +629,8 @@ static bool need_to_manage_workers(struct global_cwq *gcwq)
 static bool too_many_workers(struct global_cwq *gcwq)
 {
 	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
-	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
-	int nr_busy = gcwq->nr_workers - nr_idle;
+	int nr_idle = gcwq->pool.nr_idle + managing; /* manager is considered idle */
+	int nr_busy = gcwq->pool.nr_workers - nr_idle;
 
 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 }
@@ -635,10 +642,10 @@ static bool too_many_workers(struct global_cwq *gcwq)
 /* Return the first worker. Safe with preemption disabled */
 static struct worker *first_worker(struct global_cwq *gcwq)
 {
-	if (unlikely(list_empty(&gcwq->idle_list)))
+	if (unlikely(list_empty(&gcwq->pool.idle_list)))
 		return NULL;
 
-	return list_first_entry(&gcwq->idle_list, struct worker, entry);
+	return list_first_entry(&gcwq->pool.idle_list, struct worker, entry);
 }
 
 /**
@@ -696,7 +703,8 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
 				       unsigned int cpu)
 {
 	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
-	struct global_cwq *gcwq = get_gcwq(cpu);
+	struct worker_pool *pool = worker->pool;
+	struct global_cwq *gcwq = pool->gcwq;
 	atomic_t *nr_running = get_gcwq_nr_running(cpu);
 
 	if (worker->flags & WORKER_NOT_RUNNING)
@@ -716,7 +724,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
 	 * could be manipulating idle_list, so dereferencing idle_list
 	 * without gcwq lock is safe.
 	 */
-	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
+	if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
 		to_wakeup = first_worker(gcwq);
 	return to_wakeup ? to_wakeup->task : NULL;
 }
@@ -737,7 +745,8 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
 				    bool wakeup)
 {
-	struct global_cwq *gcwq = worker->gcwq;
+	struct worker_pool *pool = worker->pool;
+	struct global_cwq *gcwq = pool->gcwq;
 
 	WARN_ON_ONCE(worker->task != current);
 
@@ -752,7 +761,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
 
 	if (wakeup) {
 		if (atomic_dec_and_test(nr_running) &&
-		    !list_empty(&gcwq->worklist))
+		    !list_empty(&pool->worklist))
 			wake_up_worker(gcwq);
 	} else
 		atomic_dec(nr_running);
@@ -773,7 +782,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
  */
 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 {
-	struct global_cwq *gcwq = worker->gcwq;
+	struct global_cwq *gcwq = worker->pool->gcwq;
 	unsigned int oflags = worker->flags;
 
 	WARN_ON_ONCE(worker->task != current);
@@ -894,9 +903,9 @@ static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
 	struct work_struct *twork;
 
 	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
-		return &gcwq->worklist;
+		return &gcwq->pool.worklist;
 
-	list_for_each_entry(twork, &gcwq->worklist, entry) {
+	list_for_each_entry(twork, &gcwq->pool.worklist, entry) {
 		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
 
 		if (!(tcwq->wq->flags & WQ_HIGHPRI))
@@ -924,7 +933,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 			struct work_struct *work, struct list_head *head,
 			unsigned int extra_flags)
 {
-	struct global_cwq *gcwq = cwq->gcwq;
+	struct global_cwq *gcwq = cwq->pool->gcwq;
 
 	/* we own @work, set data and link */
 	set_work_cwq(work, cwq, extra_flags);
@@ -1196,7 +1205,8 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
  */
 static void worker_enter_idle(struct worker *worker)
 {
-	struct global_cwq *gcwq = worker->gcwq;
+	struct worker_pool *pool = worker->pool;
+	struct global_cwq *gcwq = pool->gcwq;
 
 	BUG_ON(worker->flags & WORKER_IDLE);
 	BUG_ON(!list_empty(&worker->entry) &&
@@ -1204,15 +1214,15 @@ static void worker_enter_idle(struct worker *worker)
 
 	/* can't use worker_set_flags(), also called from start_worker() */
 	worker->flags |= WORKER_IDLE;
-	gcwq->nr_idle++;
+	pool->nr_idle++;
 	worker->last_active = jiffies;
 
 	/* idle_list is LIFO */
-	list_add(&worker->entry, &gcwq->idle_list);
+	list_add(&worker->entry, &pool->idle_list);
 
 	if (likely(!(worker->flags & WORKER_ROGUE))) {
-		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
-			mod_timer(&gcwq->idle_timer,
+		if (too_many_workers(gcwq) && !timer_pending(&pool->idle_timer))
+			mod_timer(&pool->idle_timer,
 				  jiffies + IDLE_WORKER_TIMEOUT);
 	} else
 		wake_up_all(&gcwq->trustee_wait);
@@ -1223,7 +1233,7 @@ static void worker_enter_idle(struct worker *worker)
 	 * warning may trigger spuriously. Check iff trustee is idle.
 	 */
 	WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
-		     gcwq->nr_workers == gcwq->nr_idle &&
+		     pool->nr_workers == pool->nr_idle &&
		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
 }
 
@@ -1238,11 +1248,11 @@ static void worker_enter_idle(struct worker *worker)
  */
 static void worker_leave_idle(struct worker *worker)
 {
-	struct global_cwq *gcwq = worker->gcwq;
+	struct worker_pool *pool = worker->pool;
 
 	BUG_ON(!(worker->flags & WORKER_IDLE));
 	worker_clr_flags(worker, WORKER_IDLE);
-	gcwq->nr_idle--;
+	pool->nr_idle--;
 	list_del_init(&worker->entry);
 }
 
@@ -1279,7 +1289,7 @@ static void worker_leave_idle(struct worker *worker)
 static bool worker_maybe_bind_and_lock(struct worker *worker)
 __acquires(&gcwq->lock)
 {
-	struct global_cwq *gcwq = worker->gcwq;
+	struct global_cwq *gcwq = worker->pool->gcwq;
 	struct task_struct *task = worker->task;
 
 	while (true) {
@@ -1321,7 +1331,7 @@ __acquires(&gcwq->lock)
 static void worker_rebind_fn(struct work_struct *work)
 {
 	struct worker *worker = container_of(work, struct worker, rebind_work);
-	struct global_cwq *gcwq = worker->gcwq;
+	struct global_cwq *gcwq = worker->pool->gcwq;
 
 	if (worker_maybe_bind_and_lock(worker))
 		worker_clr_flags(worker, WORKER_REBIND);
@@ -1362,13 +1372,14 @@ static struct worker *alloc_worker(void)
 static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
 {
 	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
+	struct worker_pool *pool = &gcwq->pool;
 	struct worker *worker = NULL;
 	int id = -1;
 
 	spin_lock_irq(&gcwq->lock);
-	while (ida_get_new(&gcwq->worker_ida, &id)) {
+	while (ida_get_new(&pool->worker_ida, &id)) {
 		spin_unlock_irq(&gcwq->lock);
-		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
+		if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
 			goto fail;
 		spin_lock_irq(&gcwq->lock);
 	}
@@ -1378,7 +1389,7 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
 	if (!worker)
 		goto fail;
 
-	worker->gcwq = gcwq;
+	worker->pool = pool;
 	worker->id = id;
 
 	if (!on_unbound_cpu)
@@ -1409,7 +1420,7 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
 fail:
 	if (id >= 0) {
 		spin_lock_irq(&gcwq->lock);
-		ida_remove(&gcwq->worker_ida, id);
+		ida_remove(&pool->worker_ida, id);
 		spin_unlock_irq(&gcwq->lock);
 	}
 	kfree(worker);
@@ -1428,7 +1439,7 @@ fail:
 static void start_worker(struct worker *worker)
 {
 	worker->flags |= WORKER_STARTED;
-	worker->gcwq->nr_workers++;
+	worker->pool->nr_workers++;
 	worker_enter_idle(worker);
 	wake_up_process(worker->task);
 }
@@ -1444,7 +1455,8 @@ static void start_worker(struct worker *worker)
  */
 static void destroy_worker(struct worker *worker)
 {
-	struct global_cwq *gcwq = worker->gcwq;
+	struct worker_pool *pool = worker->pool;
+	struct global_cwq *gcwq = pool->gcwq;
 	int id = worker->id;
 
 	/* sanity check frenzy */
@@ -1452,9 +1464,9 @@ static void destroy_worker(struct worker *worker)
 	BUG_ON(!list_empty(&worker->scheduled));
 
 	if (worker->flags & WORKER_STARTED)
-		gcwq->nr_workers--;
+		pool->nr_workers--;
 	if (worker->flags & WORKER_IDLE)
-		gcwq->nr_idle--;
+		pool->nr_idle--;
 
 	list_del_init(&worker->entry);
 	worker->flags |= WORKER_DIE;
@@ -1465,7 +1477,7 @@ static void destroy_worker(struct worker *worker)
 	kfree(worker);
 
 	spin_lock_irq(&gcwq->lock);
-	ida_remove(&gcwq->worker_ida, id);
+	ida_remove(&pool->worker_ida, id);
 }
 
 static void idle_worker_timeout(unsigned long __gcwq)
@@ -1479,11 +1491,12 @@ static void idle_worker_timeout(unsigned long __gcwq)
 		unsigned long expires;
 
 		/* idle_list is kept in LIFO order, check the last one */
-		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
+		worker = list_entry(gcwq->pool.idle_list.prev, struct worker,
+				    entry);
 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
 
 		if (time_before(jiffies, expires))
-			mod_timer(&gcwq->idle_timer, expires);
+			mod_timer(&gcwq->pool.idle_timer, expires);
 		else {
 			/* it's been idle for too long, wake up manager */
 			gcwq->flags |= GCWQ_MANAGE_WORKERS;
@@ -1504,7 +1517,7 @@ static bool send_mayday(struct work_struct *work)
 		return false;
 
 	/* mayday mayday mayday */
-	cpu = cwq->gcwq->cpu;
+	cpu = cwq->pool->gcwq->cpu;
 	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
 	if (cpu == WORK_CPU_UNBOUND)
 		cpu = 0;
@@ -1527,13 +1540,13 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
 		 * allocation deadlock. Send distress signals to
 		 * rescuers.
 		 */
-		list_for_each_entry(work, &gcwq->worklist, entry)
+		list_for_each_entry(work, &gcwq->pool.worklist, entry)
 			send_mayday(work);
 	}
 
 	spin_unlock_irq(&gcwq->lock);
 
-	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
+	mod_timer(&gcwq->pool.mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
 
 /**
@@ -1568,14 +1581,14 @@ restart:
 	spin_unlock_irq(&gcwq->lock);
 
 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
-	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
+	mod_timer(&gcwq->pool.mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
 
 	while (true) {
 		struct worker *worker;
 
 		worker = create_worker(gcwq, true);
 		if (worker) {
-			del_timer_sync(&gcwq->mayday_timer);
+			del_timer_sync(&gcwq->pool.mayday_timer);
 			spin_lock_irq(&gcwq->lock);
 			start_worker(worker);
 			BUG_ON(need_to_create_worker(gcwq));
@@ -1592,7 +1605,7 @@ restart:
 			break;
 	}
 
-	del_timer_sync(&gcwq->mayday_timer);
+	del_timer_sync(&gcwq->pool.mayday_timer);
 	spin_lock_irq(&gcwq->lock);
 	if (need_to_create_worker(gcwq))
 		goto restart;
@@ -1622,11 +1635,12 @@ static bool maybe_destroy_workers(struct global_cwq *gcwq)
 		struct worker *worker;
 		unsigned long expires;
 
-		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
+		worker = list_entry(gcwq->pool.idle_list.prev, struct worker,
+				    entry);
 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
 
 		if (time_before(jiffies, expires)) {
-			mod_timer(&gcwq->idle_timer, expires);
+			mod_timer(&gcwq->pool.idle_timer, expires);
 			break;
 		}
 
@@ -1659,7 +1673,7 @@ static bool maybe_destroy_workers(struct global_cwq *gcwq)
  */
 static bool manage_workers(struct worker *worker)
 {
-	struct global_cwq *gcwq = worker->gcwq;
+	struct global_cwq *gcwq = worker->pool->gcwq;
 	bool ret = false;
 
 	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
@@ -1732,7 +1746,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
 {
 	struct work_struct *work = list_first_entry(&cwq->delayed_works,
 						    struct work_struct, entry);
-	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
+	struct list_head *pos = gcwq_determine_ins_pos(cwq->pool->gcwq, cwq);
 
 	trace_workqueue_activate_work(work);
 	move_linked_works(work, pos, NULL);
@@ -1808,7 +1822,8 @@ __releases(&gcwq->lock)
 __acquires(&gcwq->lock)
 {
 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
-	struct global_cwq *gcwq = cwq->gcwq;
+	struct worker_pool *pool = worker->pool;
+	struct global_cwq *gcwq = pool->gcwq;
 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
 	work_func_t f = work->func;
@@ -1854,10 +1869,10 @@ __acquires(&gcwq->lock)
 	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
 	 */
 	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
-		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
+		struct work_struct *nwork = list_first_entry(&pool->worklist,
 					struct work_struct, entry);
 
-		if (!list_empty(&gcwq->worklist) &&
+		if (!list_empty(&pool->worklist) &&
 		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
 			wake_up_worker(gcwq);
 		else
@@ -1950,7 +1965,8 @@ static void process_scheduled_works(struct worker *worker)
 static int worker_thread(void *__worker)
 {
 	struct worker *worker = __worker;
-	struct global_cwq *gcwq = worker->gcwq;
+	struct worker_pool *pool = worker->pool;
+	struct global_cwq *gcwq = pool->gcwq;
 
 	/* tell the scheduler that this is a workqueue worker */
 	worker->task->flags |= PF_WQ_WORKER;
@@ -1990,7 +2006,7 @@ recheck:
 
 	do {
 		struct work_struct *work =
-			list_first_entry(&gcwq->worklist,
+			list_first_entry(&pool->worklist,
 					 struct work_struct, entry);
 
 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
@@ -2064,14 +2080,15 @@ repeat:
 	for_each_mayday_cpu(cpu, wq->mayday_mask) {
 		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
 		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
-		struct global_cwq *gcwq = cwq->gcwq;
+		struct worker_pool *pool = cwq->pool;
+		struct global_cwq *gcwq = pool->gcwq;
 		struct work_struct *work, *n;
 
 		__set_current_state(TASK_RUNNING);
 		mayday_clear_cpu(cpu, wq->mayday_mask);
 
 		/* migrate to the target cpu if possible */
-		rescuer->gcwq = gcwq;
+		rescuer->pool = pool;
 		worker_maybe_bind_and_lock(rescuer);
 
 		/*
@@ -2079,7 +2096,7 @@ repeat:
 		 * process'em.
 		 */
 		BUG_ON(!list_empty(&rescuer->scheduled));
-		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
+		list_for_each_entry_safe(work, n, &pool->worklist, entry)
 			if (get_work_cwq(work) == cwq)
 				move_linked_works(work, scheduled, &n);
 
@@ -2216,7 +2233,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
 
 	for_each_cwq_cpu(cpu, wq) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-		struct global_cwq *gcwq = cwq->gcwq;
+		struct global_cwq *gcwq = cwq->pool->gcwq;
 
 		spin_lock_irq(&gcwq->lock);
 
@@ -2432,9 +2449,9 @@ reflush:
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 		bool drained;
 
-		spin_lock_irq(&cwq->gcwq->lock);
+		spin_lock_irq(&cwq->pool->gcwq->lock);
 		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
-		spin_unlock_irq(&cwq->gcwq->lock);
+		spin_unlock_irq(&cwq->pool->gcwq->lock);
 
 		if (drained)
 			continue;
@@ -2474,7 +2491,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 		 */
 		smp_rmb();
 		cwq = get_work_cwq(work);
-		if (unlikely(!cwq || gcwq != cwq->gcwq))
+		if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
 			goto already_gone;
 	} else if (wait_executing) {
 		worker = find_worker_executing_work(gcwq, work);
@@ -3017,7 +3034,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		struct global_cwq *gcwq = get_gcwq(cpu);
 
 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
-		cwq->gcwq = gcwq;
+		cwq->pool = &gcwq->pool;
 		cwq->wq = wq;
 		cwq->flush_color = -1;
 		cwq->max_active = max_active;
@@ -3344,7 +3361,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 
 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
 
-	list_for_each_entry(worker, &gcwq->idle_list, entry)
+	list_for_each_entry(worker, &gcwq->pool.idle_list, entry)
 		worker->flags |= WORKER_ROGUE;
 
 	for_each_busy_worker(worker, i, pos, gcwq)
@@ -3369,7 +3386,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
 
 	spin_unlock_irq(&gcwq->lock);
-	del_timer_sync(&gcwq->idle_timer);
+	del_timer_sync(&gcwq->pool.idle_timer);
 	spin_lock_irq(&gcwq->lock);
 
 	/*
@@ -3391,17 +3408,17 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * may be frozen works in freezable cwqs. Don't declare
 	 * completion while frozen.
 	 */
-	while (gcwq->nr_workers != gcwq->nr_idle ||
+	while (gcwq->pool.nr_workers != gcwq->pool.nr_idle ||
 	       gcwq->flags & GCWQ_FREEZING ||
 	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
 		int nr_works = 0;
 
-		list_for_each_entry(work, &gcwq->worklist, entry) {
+		list_for_each_entry(work, &gcwq->pool.worklist, entry) {
 			send_mayday(work);
 			nr_works++;
 		}
 
-		list_for_each_entry(worker, &gcwq->idle_list, entry) {
+		list_for_each_entry(worker, &gcwq->pool.idle_list, entry) {
 			if (!nr_works--)
 				break;
 			wake_up_process(worker->task);
@@ -3428,11 +3445,11 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * all workers till we're canceled.
 	 */
 	do {
-		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
-		while (!list_empty(&gcwq->idle_list))
-			destroy_worker(list_first_entry(&gcwq->idle_list,
+		rc = trustee_wait_event(!list_empty(&gcwq->pool.idle_list));
+		while (!list_empty(&gcwq->pool.idle_list))
+			destroy_worker(list_first_entry(&gcwq->pool.idle_list,
 							struct worker, entry));
-	} while (gcwq->nr_workers && rc >= 0);
+	} while (gcwq->pool.nr_workers && rc >= 0);
 
 	/*
 	 * At this point, either draining has completed and no worker
@@ -3441,7 +3458,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * Tell the remaining busy ones to rebind once it finishes the
 	 * currently scheduled works by scheduling the rebind_work.
 	 */
-	WARN_ON(!list_empty(&gcwq->idle_list));
+	WARN_ON(!list_empty(&gcwq->pool.idle_list));
 
 	for_each_busy_worker(worker, i, pos, gcwq) {
 		struct work_struct *rebind_work = &worker->rebind_work;
@@ -3522,7 +3539,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		kthread_bind(new_trustee, cpu);
 		/* fall through */
 	case CPU_UP_PREPARE:
-		BUG_ON(gcwq->first_idle);
+		BUG_ON(gcwq->pool.first_idle);
 		new_worker = create_worker(gcwq, false);
 		if (!new_worker) {
 			if (new_trustee)
@@ -3544,8 +3561,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
 		/* fall through */
 	case CPU_UP_PREPARE:
-		BUG_ON(gcwq->first_idle);
-		gcwq->first_idle = new_worker;
+		BUG_ON(gcwq->pool.first_idle);
+		gcwq->pool.first_idle = new_worker;
 		break;
 
 	case CPU_DYING:
@@ -3562,8 +3579,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		gcwq->trustee_state = TRUSTEE_BUTCHER;
 		/* fall through */
 	case CPU_UP_CANCELED:
-		destroy_worker(gcwq->first_idle);
-		gcwq->first_idle = NULL;
+		destroy_worker(gcwq->pool.first_idle);
+		gcwq->pool.first_idle = NULL;
 		break;
 
 	case CPU_DOWN_FAILED:
@@ -3581,11 +3598,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		 * take a look.
 		 */
 		spin_unlock_irq(&gcwq->lock);
-		kthread_bind(gcwq->first_idle->task, cpu);
+		kthread_bind(gcwq->pool.first_idle->task, cpu);
 		spin_lock_irq(&gcwq->lock);
 		gcwq->flags |= GCWQ_MANAGE_WORKERS;
-		start_worker(gcwq->first_idle);
-		gcwq->first_idle = NULL;
+		start_worker(gcwq->pool.first_idle);
+		gcwq->pool.first_idle = NULL;
 		break;
 	}
 
@@ -3794,22 +3811,23 @@ static int __init init_workqueues(void)
 		struct global_cwq *gcwq = get_gcwq(cpu);
 
 		spin_lock_init(&gcwq->lock);
-		INIT_LIST_HEAD(&gcwq->worklist);
+		gcwq->pool.gcwq = gcwq;
+		INIT_LIST_HEAD(&gcwq->pool.worklist);
 		gcwq->cpu = cpu;
 		gcwq->flags |= GCWQ_DISASSOCIATED;
 
-		INIT_LIST_HEAD(&gcwq->idle_list);
+		INIT_LIST_HEAD(&gcwq->pool.idle_list);
 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
 
-		init_timer_deferrable(&gcwq->idle_timer);
-		gcwq->idle_timer.function = idle_worker_timeout;
-		gcwq->idle_timer.data = (unsigned long)gcwq;
+		init_timer_deferrable(&gcwq->pool.idle_timer);
+		gcwq->pool.idle_timer.function = idle_worker_timeout;
+		gcwq->pool.idle_timer.data = (unsigned long)gcwq;
 
-		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
+		setup_timer(&gcwq->pool.mayday_timer, gcwq_mayday_timeout,
 			    (unsigned long)gcwq);
 
-		ida_init(&gcwq->worker_ida);
+		ida_init(&gcwq->pool.worker_ida);
 
 		gcwq->trustee_state = TRUSTEE_DONE;
 		init_waitqueue_head(&gcwq->trustee_wait);
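The "added level of indirection" the changelog mentions is visible throughout the hunks above as one extra hop wherever code used to cache the gcwq from a worker or cwq. A minimal before/after sketch of the pattern (illustrative only, not itself a hunk from this patch):

	/* before: worker and cwq pointed straight at the gcwq */
	struct global_cwq *gcwq = worker->gcwq;		/* or cwq->gcwq */

	/* after: they point at the pool, which points back at its gcwq */
	struct worker_pool *pool = worker->pool;	/* or cwq->pool */
	struct global_cwq *gcwq = pool->gcwq;

Since every gcwq still embeds exactly one pool, gcwq->pool.worklist and friends reach the same data the old gcwq->worklist did; only the addressing changes, until multiple pools per gcwq arrive.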