-rw-r--r--  include/linux/workqueue.h        |  12
-rw-r--r--  include/trace/events/workqueue.h |  10
-rw-r--r--  kernel/workqueue.c               | 433
-rw-r--r--  kernel/workqueue_internal.h      |   2
4 files changed, 228 insertions(+), 229 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index a3d7556510c3..8afab27cdbc2 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -27,7 +27,7 @@ void delayed_work_timer_fn(unsigned long __data);
 enum {
         WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
         WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
-        WORK_STRUCT_CWQ_BIT     = 2,    /* data points to cwq */
+        WORK_STRUCT_PWQ_BIT     = 2,    /* data points to pwq */
         WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
         WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
@@ -40,7 +40,7 @@ enum {
 
         WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
         WORK_STRUCT_DELAYED     = 1 << WORK_STRUCT_DELAYED_BIT,
-        WORK_STRUCT_CWQ         = 1 << WORK_STRUCT_CWQ_BIT,
+        WORK_STRUCT_PWQ         = 1 << WORK_STRUCT_PWQ_BIT,
         WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
         WORK_STRUCT_STATIC      = 1 << WORK_STRUCT_STATIC_BIT,
@@ -60,14 +60,14 @@ enum {
         WORK_CPU_END            = NR_CPUS + 1,
 
         /*
-         * Reserve 7 bits off of cwq pointer w/ debugobjects turned
-         * off.  This makes cwqs aligned to 256 bytes and allows 15
-         * workqueue flush colors.
+         * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
+         * This makes pwqs aligned to 256 bytes and allows 15 workqueue
+         * flush colors.
          */
         WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                   WORK_STRUCT_COLOR_BITS,
 
-        /* data contains off-queue information when !WORK_STRUCT_CWQ */
+        /* data contains off-queue information when !WORK_STRUCT_PWQ */
         WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_FLAG_BITS,
 
         WORK_OFFQ_CANCELING     = (1 << WORK_OFFQ_FLAG_BASE),
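
[Aside, not part of the patch] The arithmetic behind the comment above is easy to check outside the kernel. The following standalone C program is an illustrative sketch, not kernel code; it assumes the !CONFIG_DEBUG_OBJECTS_WORK values (color shift 4, four color bits), which is consistent with the 256-byte alignment and 15 flush colors the comment cites.

/*
 * Standalone sketch of the flag-bit budget described above; constants
 * assume the !CONFIG_DEBUG_OBJECTS_WORK layout. Build: cc flagbits.c
 */
#include <stdio.h>

enum {
        COLOR_SHIFT = 4,                        /* bits 0-3: PENDING, DELAYED, PWQ, LINKED */
        COLOR_BITS  = 4,
        FLAG_BITS   = COLOR_SHIFT + COLOR_BITS, /* low bits stolen from the pwq pointer */
};

int main(void)
{
        /* 8 flag bits => pwqs must be aligned to 1 << 8 = 256 bytes */
        printf("required pwq alignment: %d bytes\n", 1 << FLAG_BITS);
        /* 4 color bits encode 16 values; one is reserved as NO_COLOR */
        printf("usable flush colors:    %d\n", (1 << COLOR_BITS) - 1);
        return 0;
}
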
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 4e798e384a6a..bf0e18ba6cfb 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(workqueue_work,
 /**
  * workqueue_queue_work - called when a work gets queued
  * @req_cpu: the requested cpu
- * @cwq: pointer to struct cpu_workqueue_struct
+ * @pwq: pointer to struct pool_workqueue
  * @work: pointer to struct work_struct
  *
  * This event occurs when a work is queued immediately or once a
@@ -36,10 +36,10 @@ DECLARE_EVENT_CLASS(workqueue_work,
  */
 TRACE_EVENT(workqueue_queue_work,
 
-        TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
+        TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
                  struct work_struct *work),
 
-        TP_ARGS(req_cpu, cwq, work),
+        TP_ARGS(req_cpu, pwq, work),
 
         TP_STRUCT__entry(
                 __field( void *,        work    )
@@ -52,9 +52,9 @@ TRACE_EVENT(workqueue_queue_work,
         TP_fast_assign(
                 __entry->work           = work;
                 __entry->function       = work->func;
-                __entry->workqueue      = cwq->wq;
+                __entry->workqueue      = pwq->wq;
                 __entry->req_cpu        = req_cpu;
-                __entry->cpu            = cwq->pool->cpu;
+                __entry->cpu            = pwq->pool->cpu;
         ),
 
         TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ea7f696f1060..0f1a264d0f22 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -154,11 +154,12 @@ struct worker_pool {
 } ____cacheline_aligned_in_smp;
 
 /*
- * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
- * work_struct->data are used for flags and thus cwqs need to be
- * aligned at two's power of the number of flag bits.
+ * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
+ * of work_struct->data are used for flags and the remaining high bits
+ * point to the pwq; thus, pwqs need to be aligned at two's power of the
+ * number of flag bits.
  */
-struct cpu_workqueue_struct {
+struct pool_workqueue {
         struct worker_pool      *pool;          /* I: the associated pool */
         struct workqueue_struct *wq;            /* I: the owning workqueue */
         int                     work_color;     /* L: current color */
@@ -207,16 +208,16 @@ typedef unsigned long mayday_mask_t;
 struct workqueue_struct {
         unsigned int            flags;          /* W: WQ_* flags */
         union {
-                struct cpu_workqueue_struct __percpu   *pcpu;
-                struct cpu_workqueue_struct             *single;
+                struct pool_workqueue __percpu          *pcpu;
+                struct pool_workqueue                   *single;
                 unsigned long                           v;
-        } cpu_wq;                               /* I: cwq's */
+        } pool_wq;                              /* I: pwq's */
         struct list_head        list;           /* W: list of all workqueues */
 
         struct mutex            flush_mutex;    /* protects wq flushing */
         int                     work_color;     /* F: current work color */
         int                     flush_color;    /* F: current flush color */
-        atomic_t                nr_cwqs_to_flush; /* flush in progress */
+        atomic_t                nr_pwqs_to_flush; /* flush in progress */
         struct wq_flusher       *first_flusher; /* F: first flusher */
         struct list_head        flusher_queue;  /* F: flush waiters */
         struct list_head        flusher_overflow; /* F: flush overflow list */
@@ -225,7 +226,7 @@ struct workqueue_struct {
         struct worker           *rescuer;       /* I: rescue worker */
 
         int                     nr_drainers;    /* W: drain in progress */
-        int                     saved_max_active; /* W: saved cwq max_active */
+        int                     saved_max_active; /* W: saved pwq max_active */
 #ifdef CONFIG_LOCKDEP
         struct lockdep_map      lockdep_map;
 #endif
@@ -268,7 +269,7 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
         return WORK_CPU_END;
 }
 
-static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
+static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
                                  struct workqueue_struct *wq)
 {
         return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
@@ -284,7 +285,7 @@ static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
  *
  * for_each_wq_cpu()            : possible CPUs + WORK_CPU_UNBOUND
  * for_each_online_wq_cpu()     : online CPUs + WORK_CPU_UNBOUND
- * for_each_cwq_cpu()           : possible CPUs for bound workqueues,
+ * for_each_pwq_cpu()           : possible CPUs for bound workqueues,
  *                                WORK_CPU_UNBOUND for unbound workqueues
  */
 #define for_each_wq_cpu(cpu)                                            \
@@ -297,10 +298,10 @@ static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
              (cpu) < WORK_CPU_END;                                      \
              (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
 
-#define for_each_cwq_cpu(cpu, wq)                                       \
-        for ((cpu) = __next_cwq_cpu(-1, cpu_possible_mask, (wq));       \
+#define for_each_pwq_cpu(cpu, wq)                                       \
+        for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq));       \
              (cpu) < WORK_CPU_END;                                      \
-             (cpu) = __next_cwq_cpu((cpu), cpu_possible_mask, (wq)))
+             (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq)))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
@@ -479,14 +480,14 @@ static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
         return &pools[highpri];
 }
 
-static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
-                                            struct workqueue_struct *wq)
+static struct pool_workqueue *get_pwq(unsigned int cpu,
+                                      struct workqueue_struct *wq)
 {
         if (!(wq->flags & WQ_UNBOUND)) {
                 if (likely(cpu < nr_cpu_ids))
-                        return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
+                        return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
         } else if (likely(cpu == WORK_CPU_UNBOUND))
-                return wq->cpu_wq.single;
+                return wq->pool_wq.single;
         return NULL;
 }
 
@@ -507,18 +508,18 @@ static int work_next_color(int color)
 }
 
 /*
- * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
- * contain the pointer to the queued cwq.  Once execution starts, the flag
+ * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
+ * contain the pointer to the queued pwq.  Once execution starts, the flag
  * is cleared and the high bits contain OFFQ flags and pool ID.
  *
- * set_work_cwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
- * and clear_work_data() can be used to set the cwq, pool or clear
+ * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
+ * and clear_work_data() can be used to set the pwq, pool or clear
  * work->data.  These functions should only be called while the work is
  * owned - ie. while the PENDING bit is set.
 *
- * get_work_pool() and get_work_cwq() can be used to obtain the pool or cwq
+ * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
  * corresponding to a work.  Pool is available once the work has been
- * queued anywhere after initialization until it is sync canceled.  cwq is
+ * queued anywhere after initialization until it is sync canceled.  pwq is
  * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
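
[Aside, not part of the patch] The comment block above fully specifies the work->data encoding, so it can be modeled in a few lines of ordinary C. The sketch below is an illustration only; the mock_* names, the 8-bit flag budget, and the simplified off-queue encoding are assumptions for the demo, not the kernel's implementation.

/*
 * Userspace model of the work->data encoding: while "queued", the high
 * bits carry an aligned pwq pointer; once "running", they carry a pool ID.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_BITS       8
#define FLAG_PENDING    (1UL << 0)
#define FLAG_PWQ        (1UL << 2)
#define DATA_MASK       (~((uintptr_t)(1UL << FLAG_BITS) - 1))

struct mock_pwq { int dummy; } __attribute__((aligned(1 << FLAG_BITS)));

static uintptr_t data;  /* stands in for atomic work->data */

static void set_work_pwq_model(struct mock_pwq *pwq)
{
        /* the aligned pointer leaves the low FLAG_BITS free for flags */
        assert(((uintptr_t)pwq & ~DATA_MASK) == 0);
        data = (uintptr_t)pwq | FLAG_PENDING | FLAG_PWQ;
}

static void set_work_pool_model(unsigned long pool_id)
{
        /* execution started: drop the pwq pointer, keep only the pool ID */
        data = (uintptr_t)pool_id << FLAG_BITS;
}

int main(void)
{
        struct mock_pwq *pwq = aligned_alloc(1 << FLAG_BITS, sizeof(*pwq));

        set_work_pwq_model(pwq);
        printf("queued:  pwq=%p\n",
               (data & FLAG_PWQ) ? (void *)(data & DATA_MASK) : NULL);

        set_work_pool_model(42);
        printf("running: pwq=%p pool_id=%lu\n",
               (data & FLAG_PWQ) ? (void *)(data & DATA_MASK) : NULL,
               (unsigned long)(data >> FLAG_BITS));

        free(pwq);
        return 0;
}
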
@@ -533,12 +534,11 @@ static inline void set_work_data(struct work_struct *work, unsigned long data,
         atomic_long_set(&work->data, data | flags | work_static(work));
 }
 
-static void set_work_cwq(struct work_struct *work,
-                         struct cpu_workqueue_struct *cwq,
+static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
                          unsigned long extra_flags)
 {
-        set_work_data(work, (unsigned long)cwq,
-                      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
+        set_work_data(work, (unsigned long)pwq,
+                      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
 }
 
 static void set_work_pool_and_keep_pending(struct work_struct *work,
@@ -567,11 +567,11 @@ static void clear_work_data(struct work_struct *work)
         set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 }
 
-static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
+static struct pool_workqueue *get_work_pwq(struct work_struct *work)
 {
         unsigned long data = atomic_long_read(&work->data);
 
-        if (data & WORK_STRUCT_CWQ)
+        if (data & WORK_STRUCT_PWQ)
                 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
         else
                 return NULL;
@@ -589,8 +589,8 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
         struct worker_pool *pool;
         int pool_id;
 
-        if (data & WORK_STRUCT_CWQ)
-                return ((struct cpu_workqueue_struct *)
+        if (data & WORK_STRUCT_PWQ)
+                return ((struct pool_workqueue *)
                         (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 
         pool_id = data >> WORK_OFFQ_POOL_SHIFT;
@@ -613,8 +613,8 @@ static int get_work_pool_id(struct work_struct *work)
 {
         unsigned long data = atomic_long_read(&work->data);
 
-        if (data & WORK_STRUCT_CWQ)
-                return ((struct cpu_workqueue_struct *)
+        if (data & WORK_STRUCT_PWQ)
+                return ((struct pool_workqueue *)
                         (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
 
         return data >> WORK_OFFQ_POOL_SHIFT;
@@ -632,7 +632,7 @@ static bool work_is_canceling(struct work_struct *work)
 {
         unsigned long data = atomic_long_read(&work->data);
 
-        return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
+        return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
 }
 
 /*
@@ -961,67 +961,67 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
         *nextp = n;
 }
 
-static void cwq_activate_delayed_work(struct work_struct *work)
+static void pwq_activate_delayed_work(struct work_struct *work)
 {
-        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+        struct pool_workqueue *pwq = get_work_pwq(work);
 
         trace_workqueue_activate_work(work);
-        move_linked_works(work, &cwq->pool->worklist, NULL);
+        move_linked_works(work, &pwq->pool->worklist, NULL);
         __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
-        cwq->nr_active++;
+        pwq->nr_active++;
 }
 
-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
 {
-        struct work_struct *work = list_first_entry(&cwq->delayed_works,
+        struct work_struct *work = list_first_entry(&pwq->delayed_works,
                                                     struct work_struct, entry);
 
-        cwq_activate_delayed_work(work);
+        pwq_activate_delayed_work(work);
 }
 
 /**
- * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
- * @cwq: cwq of interest
+ * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
+ * @pwq: pwq of interest
  * @color: color of work which left the queue
  *
  * A work either has completed or is removed from pending queue,
- * decrement nr_in_flight of its cwq and handle workqueue flushing.
+ * decrement nr_in_flight of its pwq and handle workqueue flushing.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
 {
         /* ignore uncolored works */
         if (color == WORK_NO_COLOR)
                 return;
 
-        cwq->nr_in_flight[color]--;
+        pwq->nr_in_flight[color]--;
 
-        cwq->nr_active--;
-        if (!list_empty(&cwq->delayed_works)) {
+        pwq->nr_active--;
+        if (!list_empty(&pwq->delayed_works)) {
                 /* one down, submit a delayed one */
-                if (cwq->nr_active < cwq->max_active)
-                        cwq_activate_first_delayed(cwq);
+                if (pwq->nr_active < pwq->max_active)
+                        pwq_activate_first_delayed(pwq);
         }
 
         /* is flush in progress and are we at the flushing tip? */
-        if (likely(cwq->flush_color != color))
+        if (likely(pwq->flush_color != color))
                 return;
 
         /* are there still in-flight works? */
-        if (cwq->nr_in_flight[color])
+        if (pwq->nr_in_flight[color])
                 return;
 
-        /* this cwq is done, clear flush_color */
-        cwq->flush_color = -1;
+        /* this pwq is done, clear flush_color */
+        pwq->flush_color = -1;
 
         /*
-         * If this was the last cwq, wake up the first flusher.  It
+         * If this was the last pwq, wake up the first flusher.  It
          * will handle the rest.
          */
-        if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
-                complete(&cwq->wq->first_flusher->done);
+        if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
+                complete(&pwq->wq->first_flusher->done);
 }
 
 /**
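
[Aside, not part of the patch] pwq_dec_nr_in_flight() is the consumer half of the color accounting: each queued work is stamped with the pwq's current work color, and a flush waits for the stamped color to drain. A toy single-threaded model of that bookkeeping (invented names, no locking, one pwq):

/* Toy model of per-color in-flight accounting; illustration only. */
#include <stdio.h>

#define NR_COLORS 15

struct toy_pwq {
        int work_color;                 /* color new works are stamped with */
        int flush_color;                /* color being flushed, -1 if none */
        int nr_in_flight[NR_COLORS];    /* queued + running works per color */
};

static int toy_queue(struct toy_pwq *pwq)
{
        pwq->nr_in_flight[pwq->work_color]++;
        return pwq->work_color;         /* caller stores this in the work */
}

static void toy_complete(struct toy_pwq *pwq, int color)
{
        if (--pwq->nr_in_flight[color] == 0 && pwq->flush_color == color) {
                pwq->flush_color = -1;  /* this pwq is done flushing */
                printf("color %d drained, flusher may proceed\n", color);
        }
}

int main(void)
{
        struct toy_pwq pwq = { .flush_color = -1 };
        int c0 = toy_queue(&pwq), c1 = toy_queue(&pwq);

        /* start a flush: close the current color, advance work_color */
        pwq.flush_color = pwq.work_color;
        pwq.work_color = (pwq.work_color + 1) % NR_COLORS;

        toy_complete(&pwq, c0);
        toy_complete(&pwq, c1);         /* last one of the flushed color */
        return 0;
}
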
@@ -1053,7 +1053,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                                unsigned long *flags)
 {
         struct worker_pool *pool;
-        struct cpu_workqueue_struct *cwq;
+        struct pool_workqueue *pwq;
 
         local_irq_save(*flags);
 
@@ -1084,31 +1084,31 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 
         spin_lock(&pool->lock);
         /*
-         * work->data is guaranteed to point to cwq only while the work
-         * item is queued on cwq->wq, and both updating work->data to point
-         * to cwq on queueing and to pool on dequeueing are done under
-         * cwq->pool->lock.  This in turn guarantees that, if work->data
-         * points to cwq which is associated with a locked pool, the work
+         * work->data is guaranteed to point to pwq only while the work
+         * item is queued on pwq->wq, and both updating work->data to point
+         * to pwq on queueing and to pool on dequeueing are done under
+         * pwq->pool->lock.  This in turn guarantees that, if work->data
+         * points to pwq which is associated with a locked pool, the work
          * item is currently queued on that pool.
          */
-        cwq = get_work_cwq(work);
-        if (cwq && cwq->pool == pool) {
+        pwq = get_work_pwq(work);
+        if (pwq && pwq->pool == pool) {
                 debug_work_deactivate(work);
 
                 /*
                  * A delayed work item cannot be grabbed directly because
                  * it might have linked NO_COLOR work items which, if left
-                 * on the delayed_list, will confuse cwq->nr_active
+                 * on the delayed_list, will confuse pwq->nr_active
                  * management later on and cause stall.  Make sure the work
                  * item is activated before grabbing.
                  */
                 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
-                        cwq_activate_delayed_work(work);
+                        pwq_activate_delayed_work(work);
 
                 list_del_init(&work->entry);
-                cwq_dec_nr_in_flight(get_work_cwq(work), get_work_color(work));
+                pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
 
-                /* work->data points to cwq iff queued, point to pool */
+                /* work->data points to pwq iff queued, point to pool */
                 set_work_pool_and_keep_pending(work, pool->id);
 
                 spin_unlock(&pool->lock);
@@ -1125,25 +1125,24 @@ fail:
 
 /**
  * insert_work - insert a work into a pool
- * @cwq: cwq @work belongs to
+ * @pwq: pwq @work belongs to
  * @work: work to insert
  * @head: insertion point
  * @extra_flags: extra WORK_STRUCT_* flags to set
  *
- * Insert @work which belongs to @cwq after @head.  @extra_flags is or'd to
+ * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
  * work_struct flags.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  */
-static void insert_work(struct cpu_workqueue_struct *cwq,
-                        struct work_struct *work, struct list_head *head,
-                        unsigned int extra_flags)
+static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
+                        struct list_head *head, unsigned int extra_flags)
 {
-        struct worker_pool *pool = cwq->pool;
+        struct worker_pool *pool = pwq->pool;
 
         /* we own @work, set data and link */
-        set_work_cwq(work, cwq, extra_flags);
+        set_work_pwq(work, pwq, extra_flags);
         list_add_tail(&work->entry, head);
 
         /*
@@ -1170,13 +1169,13 @@ static bool is_chained_work(struct workqueue_struct *wq)
          * Return %true iff I'm a worker execuing a work item on @wq.  If
          * I'm @worker, it's safe to dereference it without locking.
          */
-        return worker && worker->current_cwq->wq == wq;
+        return worker && worker->current_pwq->wq == wq;
 }
 
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                          struct work_struct *work)
 {
-        struct cpu_workqueue_struct *cwq;
+        struct pool_workqueue *pwq;
         struct list_head *worklist;
         unsigned int work_flags;
         unsigned int req_cpu = cpu;
@@ -1196,7 +1195,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
             WARN_ON_ONCE(!is_chained_work(wq)))
                 return;
 
-        /* determine the cwq to use */
+        /* determine the pwq to use */
         if (!(wq->flags & WQ_UNBOUND)) {
                 struct worker_pool *last_pool;
 
@@ -1209,54 +1208,54 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                  * work needs to be queued on that cpu to guarantee
                  * non-reentrancy.
                  */
-                cwq = get_cwq(cpu, wq);
+                pwq = get_pwq(cpu, wq);
                 last_pool = get_work_pool(work);
 
-                if (last_pool && last_pool != cwq->pool) {
+                if (last_pool && last_pool != pwq->pool) {
                         struct worker *worker;
 
                         spin_lock(&last_pool->lock);
 
                         worker = find_worker_executing_work(last_pool, work);
 
-                        if (worker && worker->current_cwq->wq == wq) {
-                                cwq = get_cwq(last_pool->cpu, wq);
+                        if (worker && worker->current_pwq->wq == wq) {
+                                pwq = get_pwq(last_pool->cpu, wq);
                         } else {
                                 /* meh... not running there, queue here */
                                 spin_unlock(&last_pool->lock);
-                                spin_lock(&cwq->pool->lock);
+                                spin_lock(&pwq->pool->lock);
                         }
                 } else {
-                        spin_lock(&cwq->pool->lock);
+                        spin_lock(&pwq->pool->lock);
                 }
         } else {
-                cwq = get_cwq(WORK_CPU_UNBOUND, wq);
-                spin_lock(&cwq->pool->lock);
+                pwq = get_pwq(WORK_CPU_UNBOUND, wq);
+                spin_lock(&pwq->pool->lock);
         }
 
-        /* cwq determined, queue */
-        trace_workqueue_queue_work(req_cpu, cwq, work);
+        /* pwq determined, queue */
+        trace_workqueue_queue_work(req_cpu, pwq, work);
 
         if (WARN_ON(!list_empty(&work->entry))) {
-                spin_unlock(&cwq->pool->lock);
+                spin_unlock(&pwq->pool->lock);
                 return;
         }
 
-        cwq->nr_in_flight[cwq->work_color]++;
-        work_flags = work_color_to_flags(cwq->work_color);
+        pwq->nr_in_flight[pwq->work_color]++;
+        work_flags = work_color_to_flags(pwq->work_color);
 
-        if (likely(cwq->nr_active < cwq->max_active)) {
+        if (likely(pwq->nr_active < pwq->max_active)) {
                 trace_workqueue_activate_work(work);
-                cwq->nr_active++;
-                worklist = &cwq->pool->worklist;
+                pwq->nr_active++;
+                worklist = &pwq->pool->worklist;
         } else {
                 work_flags |= WORK_STRUCT_DELAYED;
-                worklist = &cwq->delayed_works;
+                worklist = &pwq->delayed_works;
         }
 
-        insert_work(cwq, work, worklist, work_flags);
+        insert_work(pwq, work, worklist, work_flags);
 
-        spin_unlock(&cwq->pool->lock);
+        spin_unlock(&pwq->pool->lock);
 }
 
 /**
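
[Aside, not part of the patch] The tail of __queue_work() above is where max_active throttling happens: a work either becomes immediately runnable on the pool's worklist or is parked on pwq->delayed_works with WORK_STRUCT_DELAYED set. A minimal userspace model of that gate and of its release in pwq_dec_nr_in_flight() (hypothetical names, plain counters instead of lists):

#include <stdbool.h>
#include <stdio.h>

struct gate_pwq {
        int nr_active;          /* works currently runnable on the pool */
        int max_active;         /* per-pwq concurrency limit */
        int nr_delayed;         /* works parked on pwq->delayed_works */
};

/* returns true if the work went on the pool worklist, false if parked */
static bool gate_queue(struct gate_pwq *pwq)
{
        if (pwq->nr_active < pwq->max_active) {
                pwq->nr_active++;       /* run now: pool->worklist */
                return true;
        }
        pwq->nr_delayed++;              /* WORK_STRUCT_DELAYED: wait for a slot */
        return false;
}

/* mirrors pwq_dec_nr_in_flight(): a completion frees a slot */
static void gate_complete(struct gate_pwq *pwq)
{
        pwq->nr_active--;
        if (pwq->nr_delayed && pwq->nr_active < pwq->max_active) {
                pwq->nr_delayed--;      /* pwq_activate_first_delayed() */
                pwq->nr_active++;
        }
}

int main(void)
{
        struct gate_pwq pwq = { .max_active = 1 };

        printf("first queued runnable?  %d\n", gate_queue(&pwq));      /* 1 */
        printf("second queued runnable? %d\n", gate_queue(&pwq));      /* 0: parked */
        gate_complete(&pwq);
        printf("after completion: active=%d delayed=%d\n",
               pwq.nr_active, pwq.nr_delayed);                         /* 1 / 0 */
        return 0;
}
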
@@ -1661,14 +1660,14 @@ static void rebind_workers(struct worker_pool *pool)
 
                 /*
                  * wq doesn't really matter but let's keep @worker->pool
-                 * and @cwq->pool consistent for sanity.
+                 * and @pwq->pool consistent for sanity.
                  */
                 if (std_worker_pool_pri(worker->pool))
                         wq = system_highpri_wq;
                 else
                         wq = system_wq;
 
-                insert_work(get_cwq(pool->cpu, wq), rebind_work,
+                insert_work(get_pwq(pool->cpu, wq), rebind_work,
                             worker->scheduled.next,
                             work_color_to_flags(WORK_NO_COLOR));
         }
@@ -1845,15 +1844,15 @@ static void idle_worker_timeout(unsigned long __pool)
 
 static bool send_mayday(struct work_struct *work)
 {
-        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
-        struct workqueue_struct *wq = cwq->wq;
+        struct pool_workqueue *pwq = get_work_pwq(work);
+        struct workqueue_struct *wq = pwq->wq;
         unsigned int cpu;
 
         if (!(wq->flags & WQ_RESCUER))
                 return false;
 
         /* mayday mayday mayday */
-        cpu = cwq->pool->cpu;
+        cpu = pwq->pool->cpu;
         /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
         if (cpu == WORK_CPU_UNBOUND)
                 cpu = 0;
@@ -2082,9 +2081,9 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 __releases(&pool->lock)
 __acquires(&pool->lock)
 {
-        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+        struct pool_workqueue *pwq = get_work_pwq(work);
         struct worker_pool *pool = worker->pool;
-        bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
+        bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
         int work_color;
         struct worker *collision;
 #ifdef CONFIG_LOCKDEP
@@ -2125,7 +2124,7 @@ __acquires(&pool->lock)
         hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
         worker->current_work = work;
         worker->current_func = work->func;
-        worker->current_cwq = cwq;
+        worker->current_pwq = pwq;
         work_color = get_work_color(work);
 
         list_del_init(&work->entry);
@@ -2154,7 +2153,7 @@ __acquires(&pool->lock)
 
         spin_unlock_irq(&pool->lock);
 
-        lock_map_acquire_read(&cwq->wq->lockdep_map);
+        lock_map_acquire_read(&pwq->wq->lockdep_map);
         lock_map_acquire(&lockdep_map);
         trace_workqueue_execute_start(work);
         worker->current_func(work);
@@ -2164,7 +2163,7 @@ __acquires(&pool->lock)
          */
         trace_workqueue_execute_end(work);
         lock_map_release(&lockdep_map);
-        lock_map_release(&cwq->wq->lockdep_map);
+        lock_map_release(&pwq->wq->lockdep_map);
 
         if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
@@ -2185,8 +2184,8 @@ __acquires(&pool->lock)
         hash_del(&worker->hentry);
         worker->current_work = NULL;
         worker->current_func = NULL;
-        worker->current_cwq = NULL;
-        cwq_dec_nr_in_flight(cwq, work_color);
+        worker->current_pwq = NULL;
+        pwq_dec_nr_in_flight(pwq, work_color);
 }
 
 /**
@@ -2353,8 +2352,8 @@ repeat:
          */
         for_each_mayday_cpu(cpu, wq->mayday_mask) {
                 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
-                struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
-                struct worker_pool *pool = cwq->pool;
+                struct pool_workqueue *pwq = get_pwq(tcpu, wq);
+                struct worker_pool *pool = pwq->pool;
                 struct work_struct *work, *n;
 
                 __set_current_state(TASK_RUNNING);
@@ -2370,7 +2369,7 @@ repeat:
                  */
                 BUG_ON(!list_empty(&rescuer->scheduled));
                 list_for_each_entry_safe(work, n, &pool->worklist, entry)
-                        if (get_work_cwq(work) == cwq)
+                        if (get_work_pwq(work) == pwq)
                                 move_linked_works(work, scheduled, &n);
 
                 process_scheduled_works(rescuer);
@@ -2405,7 +2404,7 @@ static void wq_barrier_func(struct work_struct *work)
 
 /**
  * insert_wq_barrier - insert a barrier work
- * @cwq: cwq to insert barrier into
+ * @pwq: pwq to insert barrier into
  * @barr: wq_barrier to insert
  * @target: target work to attach @barr to
  * @worker: worker currently executing @target, NULL if @target is not executing
@@ -2422,12 +2421,12 @@ static void wq_barrier_func(struct work_struct *work)
  * after a work with LINKED flag set.
  *
  * Note that when @worker is non-NULL, @target may be modified
- * underneath us, so we can't reliably determine cwq from @target.
+ * underneath us, so we can't reliably determine pwq from @target.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  */
-static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
+static void insert_wq_barrier(struct pool_workqueue *pwq,
                               struct wq_barrier *barr,
                               struct work_struct *target, struct worker *worker)
 {
@@ -2460,23 +2459,23 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
         }
 
         debug_work_activate(&barr->work);
-        insert_work(cwq, &barr->work, head,
+        insert_work(pwq, &barr->work, head,
                     work_color_to_flags(WORK_NO_COLOR) | linked);
 }
 
 /**
- * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
+ * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
  * @wq: workqueue being flushed
  * @flush_color: new flush color, < 0 for no-op
  * @work_color: new work color, < 0 for no-op
  *
- * Prepare cwqs for workqueue flushing.
+ * Prepare pwqs for workqueue flushing.
  *
- * If @flush_color is non-negative, flush_color on all cwqs should be
- * -1.  If no cwq has in-flight commands at the specified color, all
- * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
- * has in flight commands, its cwq->flush_color is set to
- * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
+ * If @flush_color is non-negative, flush_color on all pwqs should be
+ * -1.  If no pwq has in-flight commands at the specified color, all
+ * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
+ * has in flight commands, its pwq->flush_color is set to
+ * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
  * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
@@ -2484,7 +2483,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
- * If @work_color is non-negative, all cwqs should have the same
+ * If @work_color is non-negative, all pwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
@@ -2495,42 +2494,42 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
  * %true if @flush_color >= 0 and there's something to flush.  %false
  * otherwise.
  */
-static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
+static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
                                       int flush_color, int work_color)
 {
         bool wait = false;
         unsigned int cpu;
 
         if (flush_color >= 0) {
-                BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
-                atomic_set(&wq->nr_cwqs_to_flush, 1);
+                BUG_ON(atomic_read(&wq->nr_pwqs_to_flush));
+                atomic_set(&wq->nr_pwqs_to_flush, 1);
         }
 
-        for_each_cwq_cpu(cpu, wq) {
-                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-                struct worker_pool *pool = cwq->pool;
+        for_each_pwq_cpu(cpu, wq) {
+                struct pool_workqueue *pwq = get_pwq(cpu, wq);
+                struct worker_pool *pool = pwq->pool;
 
                 spin_lock_irq(&pool->lock);
 
                 if (flush_color >= 0) {
-                        BUG_ON(cwq->flush_color != -1);
+                        BUG_ON(pwq->flush_color != -1);
 
-                        if (cwq->nr_in_flight[flush_color]) {
-                                cwq->flush_color = flush_color;
-                                atomic_inc(&wq->nr_cwqs_to_flush);
+                        if (pwq->nr_in_flight[flush_color]) {
+                                pwq->flush_color = flush_color;
+                                atomic_inc(&wq->nr_pwqs_to_flush);
                                 wait = true;
                         }
                 }
 
                 if (work_color >= 0) {
-                        BUG_ON(work_color != work_next_color(cwq->work_color));
-                        cwq->work_color = work_color;
+                        BUG_ON(work_color != work_next_color(pwq->work_color));
+                        pwq->work_color = work_color;
                 }
 
                 spin_unlock_irq(&pool->lock);
         }
 
-        if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
+        if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
                 complete(&wq->first_flusher->done);
 
         return wait;
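
[Aside, not part of the patch] flush_workqueue_prep_pwqs() seeds nr_pwqs_to_flush with 1 before walking the pwqs and drops that reference afterwards; the bias keeps the completion from firing while the walk is still in progress. A hedged userspace sketch of the same pattern using C11 atomics (names invented, single-threaded for clarity):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int nr_to_flush;

static void flusher_done(void)
{
        printf("flush complete\n");     /* complete(&wq->first_flusher->done) */
}

/* called for each pwq that still has works of the flushed color */
static void arm_one_pwq(void)
{
        atomic_fetch_add(&nr_to_flush, 1);
}

/* called when a pwq drains; the scanner calls it once to drop the bias */
static void put_ref(void)
{
        if (atomic_fetch_sub(&nr_to_flush, 1) == 1)
                flusher_done();
}

int main(void)
{
        atomic_store(&nr_to_flush, 1);  /* bias: the scan itself holds a ref */
        arm_one_pwq();                  /* imagine one busy pwq was found */

        /*
         * Without the bias, a pwq draining here, mid-scan, could drive the
         * count to zero and complete the flusher prematurely.
         */
        put_ref();      /* scan finished: drop the bias */
        put_ref();      /* the busy pwq drains: count hits zero, fire */
        return 0;
}
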
@@ -2581,7 +2580,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 
                 wq->first_flusher = &this_flusher;
 
-                if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
+                if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
                                                wq->work_color)) {
                         /* nothing to flush, done */
                         wq->flush_color = next_color;
@@ -2592,7 +2591,7 @@ void flush_workqueue(struct workqueue_struct *wq)
                         /* wait in queue */
                         BUG_ON(wq->flush_color == this_flusher.flush_color);
                         list_add_tail(&this_flusher.list, &wq->flusher_queue);
-                        flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
+                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
                 }
         } else {
                 /*
@@ -2659,7 +2658,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 
                         list_splice_tail_init(&wq->flusher_overflow,
                                               &wq->flusher_queue);
-                        flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
+                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
                 }
 
                 if (list_empty(&wq->flusher_queue)) {
@@ -2669,7 +2668,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 
                 /*
                  * Need to flush more colors.  Make the next flusher
-                 * the new first flusher and arm cwqs.
+                 * the new first flusher and arm pwqs.
                  */
                 BUG_ON(wq->flush_color == wq->work_color);
                 BUG_ON(wq->flush_color != next->flush_color);
@@ -2677,7 +2676,7 @@ void flush_workqueue(struct workqueue_struct *wq)
                 list_del_init(&next->list);
                 wq->first_flusher = next;
 
-                if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
+                if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
                         break;
 
                 /*
@@ -2720,13 +2719,13 @@ void drain_workqueue(struct workqueue_struct *wq)
 reflush:
         flush_workqueue(wq);
 
-        for_each_cwq_cpu(cpu, wq) {
-                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+        for_each_pwq_cpu(cpu, wq) {
+                struct pool_workqueue *pwq = get_pwq(cpu, wq);
                 bool drained;
 
-                spin_lock_irq(&cwq->pool->lock);
-                drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
-                spin_unlock_irq(&cwq->pool->lock);
+                spin_lock_irq(&pwq->pool->lock);
+                drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
+                spin_unlock_irq(&pwq->pool->lock);
 
                 if (drained)
                         continue;
@@ -2749,7 +2748,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 {
         struct worker *worker = NULL;
         struct worker_pool *pool;
-        struct cpu_workqueue_struct *cwq;
+        struct pool_workqueue *pwq;
 
         might_sleep();
         pool = get_work_pool(work);
@@ -2758,18 +2757,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 
         spin_lock_irq(&pool->lock);
         /* see the comment in try_to_grab_pending() with the same code */
-        cwq = get_work_cwq(work);
-        if (cwq) {
-                if (unlikely(cwq->pool != pool))
+        pwq = get_work_pwq(work);
+        if (pwq) {
+                if (unlikely(pwq->pool != pool))
                         goto already_gone;
         } else {
                 worker = find_worker_executing_work(pool, work);
                 if (!worker)
                         goto already_gone;
-                cwq = worker->current_cwq;
+                pwq = worker->current_pwq;
         }
 
-        insert_wq_barrier(cwq, barr, work, worker);
+        insert_wq_barrier(pwq, barr, work, worker);
         spin_unlock_irq(&pool->lock);
 
         /*
@@ -2778,11 +2777,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
          * flusher is not running on the same workqueue by verifying write
          * access.
          */
-        if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
-                lock_map_acquire(&cwq->wq->lockdep_map);
+        if (pwq->wq->saved_max_active == 1 || pwq->wq->flags & WQ_RESCUER)
+                lock_map_acquire(&pwq->wq->lockdep_map);
         else
-                lock_map_acquire_read(&cwq->wq->lockdep_map);
-        lock_map_release(&cwq->wq->lockdep_map);
+                lock_map_acquire_read(&pwq->wq->lockdep_map);
+        lock_map_release(&pwq->wq->lockdep_map);
 
         return true;
 already_gone:
@@ -3092,46 +3091,46 @@ int keventd_up(void)
         return system_wq != NULL;
 }
 
-static int alloc_cwqs(struct workqueue_struct *wq)
+static int alloc_pwqs(struct workqueue_struct *wq)
 {
         /*
-         * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
+         * pwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
          * Make sure that the alignment isn't lower than that of
         * unsigned long long.
         */
-        const size_t size = sizeof(struct cpu_workqueue_struct);
+        const size_t size = sizeof(struct pool_workqueue);
         const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
                                    __alignof__(unsigned long long));
 
         if (!(wq->flags & WQ_UNBOUND))
-                wq->cpu_wq.pcpu = __alloc_percpu(size, align);
+                wq->pool_wq.pcpu = __alloc_percpu(size, align);
         else {
                 void *ptr;
 
                 /*
-                 * Allocate enough room to align cwq and put an extra
+                 * Allocate enough room to align pwq and put an extra
                  * pointer at the end pointing back to the originally
                  * allocated pointer which will be used for free.
                  */
                 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
                 if (ptr) {
-                        wq->cpu_wq.single = PTR_ALIGN(ptr, align);
-                        *(void **)(wq->cpu_wq.single + 1) = ptr;
+                        wq->pool_wq.single = PTR_ALIGN(ptr, align);
+                        *(void **)(wq->pool_wq.single + 1) = ptr;
                 }
         }
 
         /* just in case, make sure it's actually aligned */
-        BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
-        return wq->cpu_wq.v ? 0 : -ENOMEM;
+        BUG_ON(!IS_ALIGNED(wq->pool_wq.v, align));
+        return wq->pool_wq.v ? 0 : -ENOMEM;
 }
 
-static void free_cwqs(struct workqueue_struct *wq)
+static void free_pwqs(struct workqueue_struct *wq)
 {
         if (!(wq->flags & WQ_UNBOUND))
-                free_percpu(wq->cpu_wq.pcpu);
-        else if (wq->cpu_wq.single) {
-                /* the pointer to free is stored right after the cwq */
-                kfree(*(void **)(wq->cpu_wq.single + 1));
+                free_percpu(wq->pool_wq.pcpu);
+        else if (wq->pool_wq.single) {
+                /* the pointer to free is stored right after the pwq */
+                kfree(*(void **)(wq->pool_wq.single + 1));
         }
 }
 
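[Aside, not part of the patch] The over-allocate-and-align trick in alloc_pwqs()/free_pwqs() works just as well in user space: align the object within an oversized block and stash the original pointer right after the object so the free path can recover it. A sketch under those assumptions (names invented, minimal error handling):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(p, a)  ((void *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

struct obj { long payload[4]; };

static struct obj *obj_alloc(size_t align)
{
        /* room for the object, worst-case alignment slack, and one pointer */
        void *raw = calloc(1, sizeof(struct obj) + align + sizeof(void *));
        struct obj *o;

        if (!raw)
                return NULL;
        o = ALIGN_UP(raw, align);       /* PTR_ALIGN(ptr, align) */
        *(void **)(o + 1) = raw;        /* *(void **)(single + 1) = ptr */
        return o;
}

static void obj_free(struct obj *o)
{
        /* the pointer to free is stored right after the object */
        free(*(void **)(o + 1));
}

int main(void)
{
        struct obj *o = obj_alloc(256);

        printf("aligned? %s\n", ((uintptr_t)o & 255) == 0 ? "yes" : "no");
        obj_free(o);
        return 0;
}
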
@@ -3185,25 +3184,25 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
         wq->flags = flags;
         wq->saved_max_active = max_active;
         mutex_init(&wq->flush_mutex);
-        atomic_set(&wq->nr_cwqs_to_flush, 0);
+        atomic_set(&wq->nr_pwqs_to_flush, 0);
         INIT_LIST_HEAD(&wq->flusher_queue);
         INIT_LIST_HEAD(&wq->flusher_overflow);
 
         lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
         INIT_LIST_HEAD(&wq->list);
 
-        if (alloc_cwqs(wq) < 0)
+        if (alloc_pwqs(wq) < 0)
                 goto err;
 
-        for_each_cwq_cpu(cpu, wq) {
-                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+        for_each_pwq_cpu(cpu, wq) {
+                struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-                BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
-                cwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
-                cwq->wq = wq;
-                cwq->flush_color = -1;
-                cwq->max_active = max_active;
-                INIT_LIST_HEAD(&cwq->delayed_works);
+                BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
+                pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
+                pwq->wq = wq;
+                pwq->flush_color = -1;
+                pwq->max_active = max_active;
+                INIT_LIST_HEAD(&pwq->delayed_works);
         }
 
         if (flags & WQ_RESCUER) {
@@ -3234,8 +3233,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
         spin_lock(&workqueue_lock);
 
         if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
-                for_each_cwq_cpu(cpu, wq)
-                        get_cwq(cpu, wq)->max_active = 0;
+                for_each_pwq_cpu(cpu, wq)
+                        get_pwq(cpu, wq)->max_active = 0;
 
         list_add(&wq->list, &workqueues);
 
@@ -3244,7 +3243,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
         return wq;
 err:
         if (wq) {
-                free_cwqs(wq);
+                free_pwqs(wq);
                 free_mayday_mask(wq->mayday_mask);
                 kfree(wq->rescuer);
                 kfree(wq);
@@ -3275,14 +3274,14 @@ void destroy_workqueue(struct workqueue_struct *wq)
         spin_unlock(&workqueue_lock);
 
         /* sanity check */
-        for_each_cwq_cpu(cpu, wq) {
-                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+        for_each_pwq_cpu(cpu, wq) {
+                struct pool_workqueue *pwq = get_pwq(cpu, wq);
                 int i;
 
                 for (i = 0; i < WORK_NR_COLORS; i++)
-                        BUG_ON(cwq->nr_in_flight[i]);
-                BUG_ON(cwq->nr_active);
-                BUG_ON(!list_empty(&cwq->delayed_works));
+                        BUG_ON(pwq->nr_in_flight[i]);
+                BUG_ON(pwq->nr_active);
+                BUG_ON(!list_empty(&pwq->delayed_works));
         }
 
         if (wq->flags & WQ_RESCUER) {
@@ -3291,29 +3290,29 @@ void destroy_workqueue(struct workqueue_struct *wq)
                 kfree(wq->rescuer);
         }
 
-        free_cwqs(wq);
+        free_pwqs(wq);
         kfree(wq);
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);
 
 /**
- * cwq_set_max_active - adjust max_active of a cwq
- * @cwq: target cpu_workqueue_struct
+ * pwq_set_max_active - adjust max_active of a pwq
+ * @pwq: target pool_workqueue
  * @max_active: new max_active value.
  *
- * Set @cwq->max_active to @max_active and activate delayed works if
+ * Set @pwq->max_active to @max_active and activate delayed works if
  * increased.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
-static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
+static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active)
 {
-        cwq->max_active = max_active;
+        pwq->max_active = max_active;
 
-        while (!list_empty(&cwq->delayed_works) &&
-               cwq->nr_active < cwq->max_active)
-                cwq_activate_first_delayed(cwq);
+        while (!list_empty(&pwq->delayed_works) &&
+               pwq->nr_active < pwq->max_active)
+                pwq_activate_first_delayed(pwq);
 }
 
 /**
@@ -3336,15 +3335,15 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
         wq->saved_max_active = max_active;
 
-        for_each_cwq_cpu(cpu, wq) {
-                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-                struct worker_pool *pool = cwq->pool;
+        for_each_pwq_cpu(cpu, wq) {
+                struct pool_workqueue *pwq = get_pwq(cpu, wq);
+                struct worker_pool *pool = pwq->pool;
 
                 spin_lock_irq(&pool->lock);
 
                 if (!(wq->flags & WQ_FREEZABLE) ||
                     !(pool->flags & POOL_FREEZING))
-                        cwq_set_max_active(cwq, max_active);
+                        pwq_set_max_active(pwq, max_active);
 
                 spin_unlock_irq(&pool->lock);
         }
@@ -3367,9 +3366,9 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  */
 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
 {
-        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+        struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-        return !list_empty(&cwq->delayed_works);
+        return !list_empty(&pwq->delayed_works);
 }
 EXPORT_SYMBOL_GPL(workqueue_congested);
 
@@ -3408,7 +3407,7 @@ EXPORT_SYMBOL_GPL(work_busy);
 * CPU hotplug.
 *
 * There are two challenges in supporting CPU hotplug.  Firstly, there
- * are a lot of assumptions on strong associations among work, cwq and
+ * are a lot of assumptions on strong associations among work, pwq and
 * pool which make migrating pending and scheduled works very
 * difficult to implement without impacting hot paths.  Secondly,
 * worker pools serve mix of short, long and very long running works making
@@ -3612,11 +3611,11 @@ void freeze_workqueues_begin(void)
                 pool->flags |= POOL_FREEZING;
 
                 list_for_each_entry(wq, &workqueues, list) {
-                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+                        struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-                        if (cwq && cwq->pool == pool &&
+                        if (pwq && pwq->pool == pool &&
                             (wq->flags & WQ_FREEZABLE))
-                                cwq->max_active = 0;
+                                pwq->max_active = 0;
                 }
 
                 spin_unlock_irq(&pool->lock);
@@ -3655,13 +3654,13 @@ bool freeze_workqueues_busy(void)
                  * to peek without lock.
                  */
                 list_for_each_entry(wq, &workqueues, list) {
-                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+                        struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-                        if (!cwq || !(wq->flags & WQ_FREEZABLE))
+                        if (!pwq || !(wq->flags & WQ_FREEZABLE))
                                 continue;
 
-                        BUG_ON(cwq->nr_active < 0);
-                        if (cwq->nr_active) {
+                        BUG_ON(pwq->nr_active < 0);
+                        if (pwq->nr_active) {
                                 busy = true;
                                 goto out_unlock;
                         }
@@ -3701,14 +3700,14 @@ void thaw_workqueues(void)
                 pool->flags &= ~POOL_FREEZING;
 
                 list_for_each_entry(wq, &workqueues, list) {
-                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+                        struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-                        if (!cwq || cwq->pool != pool ||
+                        if (!pwq || pwq->pool != pool ||
                             !(wq->flags & WQ_FREEZABLE))
                                 continue;
 
                         /* restore max_active and repopulate worklist */
-                        cwq_set_max_active(cwq, wq->saved_max_active);
+                        pwq_set_max_active(pwq, wq->saved_max_active);
                 }
 
                 wake_up_worker(pool);
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 328be4a269aa..07650264ec15 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -28,7 +28,7 @@ struct worker {
 
         struct work_struct      *current_work;  /* L: work being processed */
         work_func_t             current_func;   /* L: current_work's fn */
-        struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
+        struct pool_workqueue   *current_pwq;   /* L: current_work's pwq */
         struct list_head        scheduled;      /* L: scheduled works */
         struct task_struct      *task;          /* I: worker task */
         struct worker_pool      *pool;          /* I: the associated pool */