-rw-r--r--  Documentation/workqueue.txt       |  4
-rw-r--r--  drivers/memstick/core/memstick.c  |  2
-rw-r--r--  drivers/misc/tifm_core.c          |  2
-rw-r--r--  drivers/misc/vmw_balloon.c        |  2
-rw-r--r--  drivers/mtd/nand/r852.c           |  2
-rw-r--r--  drivers/mtd/sm_ftl.c              |  2
-rw-r--r--  drivers/net/can/mcp251x.c         |  2
-rw-r--r--  drivers/tty/serial/max3100.c      |  2
-rw-r--r--  drivers/tty/serial/max3107.c      |  2
-rw-r--r--  fs/gfs2/glock.c                   |  4
-rw-r--r--  fs/gfs2/main.c                    |  2
-rw-r--r--  include/linux/freezer.h           |  2
-rw-r--r--  include/linux/sched.h             |  2
-rw-r--r--  include/linux/workqueue.h         |  8
-rw-r--r--  kernel/power/main.c               |  2
-rw-r--r--  kernel/power/process.c            |  6
-rw-r--r--  kernel/workqueue.c                | 37
17 files changed, 47 insertions(+), 36 deletions(-)
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
index 996a27d9b8db..01c513fac40e 100644
--- a/Documentation/workqueue.txt
+++ b/Documentation/workqueue.txt
@@ -190,9 +190,9 @@ resources, scheduled and executed.
   * Long running CPU intensive workloads which can be better
     managed by the system scheduler.
 
-  WQ_FREEZEABLE
+  WQ_FREEZABLE
 
-  A freezeable wq participates in the freeze phase of the system
+  A freezable wq participates in the freeze phase of the system
   suspend operations.  Work items on the wq are drained and no
   new work item starts execution until thawed.
 
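As a concrete illustration of the renamed WQ_FREEZABLE flag described above, a driver can allocate a freezable workqueue directly with alloc_workqueue(). The following is a minimal, hypothetical module sketch (the example_* identifiers are made up for illustration and do not appear in this patch):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
        /* Work queued here is drained before the freeze phase of system
         * suspend and resumes execution only after the wq is thawed. */
        example_wq = alloc_workqueue("example_wq", WQ_FREEZABLE, 0);
        if (!example_wq)
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");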
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index e9a3eab7b0cf..8c1d85e27be4 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -621,7 +621,7 @@ static int __init memstick_init(void)
 {
         int rc;
 
-        workqueue = create_freezeable_workqueue("kmemstick");
+        workqueue = create_freezable_workqueue("kmemstick");
         if (!workqueue)
                 return -ENOMEM;
 
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 5f6852dff40b..44d4475a09dd 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -329,7 +329,7 @@ static int __init tifm_init(void)
 {
         int rc;
 
-        workqueue = create_freezeable_workqueue("tifm");
+        workqueue = create_freezable_workqueue("tifm");
         if (!workqueue)
                 return -ENOMEM;
 
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 4d2ea8e80140..6df5a55da110 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void)
         if (x86_hyper != &x86_hyper_vmware)
                 return -ENODEV;
 
-        vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+        vmballoon_wq = create_freezable_workqueue("vmmemctl");
         if (!vmballoon_wq) {
                 pr_err("failed to create workqueue\n");
                 return -ENOMEM;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efbc77cc..6322d1fb5d62 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 
         init_completion(&dev->dma_done);
 
-        dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+        dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
 
         if (!dev->card_workqueue)
                 goto error9;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 67822cf6c025..ac0d6a8613b5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
 static __init int sm_module_init(void)
 {
         int error = 0;
-        cache_flush_workqueue = create_freezeable_workqueue("smflush");
+        cache_flush_workqueue = create_freezable_workqueue("smflush");
 
         if (IS_ERR(cache_flush_workqueue))
                 return PTR_ERR(cache_flush_workqueue);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534aee452..7513c4523ac4 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
                 goto open_unlock;
         }
 
-        priv->wq = create_freezeable_workqueue("mcp251x_wq");
+        priv->wq = create_freezable_workqueue("mcp251x_wq");
         INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
         INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index beb1afa27d8d..7b951adac54b 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port)
         s->rts = 0;
 
         sprintf(b, "max3100-%d", s->minor);
-        s->workqueue = create_freezeable_workqueue(b);
+        s->workqueue = create_freezable_workqueue(b);
         if (!s->workqueue) {
                 dev_warn(&s->spi->dev, "cannot create workqueue\n");
                 return -EBUSY;
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 910870edf708..750b4f627315 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port)
         struct max3107_port *s = container_of(port, struct max3107_port, port);
 
         /* Initialize work queue */
-        s->workqueue = create_freezeable_workqueue("max3107");
+        s->workqueue = create_freezable_workqueue("max3107");
         if (!s->workqueue) {
                 dev_err(&s->spi->dev, "Workqueue creation failed\n");
                 return -EBUSY;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 08a8beb152e6..7cd9a5a68d59 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1779,11 +1779,11 @@ int __init gfs2_glock_init(void)
 #endif
 
         glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
-                                          WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+                                          WQ_HIGHPRI | WQ_FREEZABLE, 0);
         if (IS_ERR(glock_workqueue))
                 return PTR_ERR(glock_workqueue);
         gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
-                                                WQ_MEM_RECLAIM | WQ_FREEZEABLE,
+                                                WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                 0);
         if (IS_ERR(gfs2_delete_workqueue)) {
                 destroy_workqueue(glock_workqueue);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index ebef7ab6e17e..85ba027d1c4d 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -144,7 +144,7 @@ static int __init init_gfs2_fs(void)
 
         error = -ENOMEM;
         gfs_recovery_wq = alloc_workqueue("gfs_recovery",
-                                          WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
+                                          WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
         if (!gfs_recovery_wq)
                 goto fail_wq;
 
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index da7e52b099f3..1effc8b56b4e 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -109,7 +109,7 @@ static inline void freezer_count(void)
 }
 
 /*
- * Check if the task should be counted as freezeable by the freezer
+ * Check if the task should be counted as freezable by the freezer
  */
 static inline int freezer_should_skip(struct task_struct *p)
 {
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d747f948b34e..777d8a5ed06b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1744,7 +1744,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
 #define PF_MEMPOLICY    0x10000000      /* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER 0x20000000      /* Thread belongs to the rt mutex tester */
-#define PF_FREEZER_SKIP 0x40000000      /* Freezer should not count it as freezeable */
+#define PF_FREEZER_SKIP 0x40000000      /* Freezer should not count it as freezable */
 #define PF_FREEZER_NOSIG 0x80000000     /* Freezer won't send signals to it */
 
 /*
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 1ac11586a2f5..f7998a3bf020 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -250,7 +250,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 enum {
         WQ_NON_REENTRANT        = 1 << 0, /* guarantee non-reentrance */
         WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
-        WQ_FREEZEABLE           = 1 << 2, /* freeze during suspend */
+        WQ_FREEZABLE            = 1 << 2, /* freeze during suspend */
         WQ_MEM_RECLAIM          = 1 << 3, /* may be used for memory reclaim */
         WQ_HIGHPRI              = 1 << 4, /* high priority */
         WQ_CPU_INTENSIVE        = 1 << 5, /* cpu instensive workqueue */
@@ -318,7 +318,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @name: name of the workqueue
- * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
@@ -335,8 +335,8 @@ alloc_ordered_workqueue(const char *name, unsigned int flags)
 
 #define create_workqueue(name)                                  \
         alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
-#define create_freezeable_workqueue(name)                       \
-        alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
+#define create_freezable_workqueue(name)                        \
+        alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)                     \
         alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
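After this hunk, the renamed helper is plain shorthand for the alloc_workqueue() form shown in its expansion, so the two calls below are equivalent (a sketch only; "kmemstick" is reused from the memstick hunk above):

        struct workqueue_struct *wq1, *wq2;

        wq1 = create_freezable_workqueue("kmemstick");
        wq2 = alloc_workqueue("kmemstick",
                              WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);

Callers that do not need the unbound, single-active, memory-reclaim behaviour bundled into the macro can pass just WQ_FREEZABLE to alloc_workqueue() instead.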
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 7b5db6a8561e..701853042c28 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq);
 
 static int __init pm_start_workqueue(void)
 {
-        pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0);
+        pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
 
         return pm_wq ? 0 : -ENOMEM;
 }
diff --git a/kernel/power/process.c b/kernel/power/process.c
index d6d2a10320e0..0cf3a27a6c9d 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,7 +22,7 @@
  */
 #define TIMEOUT (20 * HZ)
 
-static inline int freezeable(struct task_struct * p)
+static inline int freezable(struct task_struct * p)
 {
         if ((p == current) ||
             (p->flags & PF_NOFREEZE) ||
@@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only)
                 todo = 0;
                 read_lock(&tasklist_lock);
                 do_each_thread(g, p) {
-                        if (frozen(p) || !freezeable(p))
+                        if (frozen(p) || !freezable(p))
                                 continue;
 
                         if (!freeze_task(p, sig_only))
@@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only)
 
         read_lock(&tasklist_lock);
         do_each_thread(g, p) {
-                if (!freezeable(p))
+                if (!freezable(p))
                         continue;
 
                 if (nosig_only && should_send_signal(p))
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 11869faa6819..ee6578b578ad 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -79,7 +79,9 @@ enum {
         MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
         IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
 
-        MAYDAY_INITIAL_TIMEOUT  = HZ / 100,     /* call for help after 10ms */
+        MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+                                                /* call for help after 10ms
+                                                   (min two ticks) */
         MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
         CREATE_COOLDOWN         = HZ,           /* time to breath after fail */
         TRUSTEE_COOLDOWN        = HZ / 10,      /* for trustee draining */
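The clamp above matters because HZ / 100 can round down to a single jiffy (e.g. with HZ=100), and a one-jiffy timer may expire almost immediately depending on where in the current tick it is armed; forcing a minimum of two ticks guarantees a real delay before rescuers are summoned. A small user-space sketch of the arithmetic (plain C, not kernel code; the helper name is made up):

#include <stdio.h>

/* mirrors the expression: HZ / 100 >= 2 ? HZ / 100 : 2 */
static long mayday_initial_timeout(long hz)
{
        return hz / 100 >= 2 ? hz / 100 : 2;
}

int main(void)
{
        const long hz_values[] = { 100, 250, 300, 1000 };

        for (unsigned int i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
                long hz = hz_values[i];
                long ticks = mayday_initial_timeout(hz);

                printf("HZ=%4ld: %ld ticks (~%ld ms)\n", hz, ticks, ticks * 1000 / hz);
        }
        return 0;
}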
@@ -2047,6 +2049,15 @@ repeat:
                                 move_linked_works(work, scheduled, &n);
 
                 process_scheduled_works(rescuer);
+
+                /*
+                 * Leave this gcwq.  If keep_working() is %true, notify a
+                 * regular worker; otherwise, we end up with 0 concurrency
+                 * and stalling the execution.
+                 */
+                if (keep_working(gcwq))
+                        wake_up_worker(gcwq);
+
                 spin_unlock_irq(&gcwq->lock);
         }
 
@@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
          */
         spin_lock(&workqueue_lock);
 
-        if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+        if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
                 for_each_cwq_cpu(cpu, wq)
                         get_cwq(cpu, wq)->max_active = 0;
 
@@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
                 spin_lock_irq(&gcwq->lock);
 
-                if (!(wq->flags & WQ_FREEZEABLE) ||
+                if (!(wq->flags & WQ_FREEZABLE) ||
                     !(gcwq->flags & GCWQ_FREEZING))
                         get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
          * want to get it over with ASAP - spam rescuers, wake up as
          * many idlers as necessary and create new ones till the
          * worklist is empty.  Note that if the gcwq is frozen, there
-         * may be frozen works in freezeable cwqs.  Don't declare
+         * may be frozen works in freezable cwqs.  Don't declare
          * completion while frozen.
          */
         while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and gcwq->lock's.
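For orientation, the PM freezer is the consumer of these entry points, and the sequence is roughly begin, poll busy, thaw. The sketch below shows only that intended sequence under the assumption that it runs from the freezer after user space is already frozen; it is not a verbatim excerpt of kernel/power/process.c, the example_* names are made up, and timeout handling is omitted:

#include <linux/workqueue.h>
#include <linux/delay.h>

static void example_freeze_wqs(void)
{
        /* freezable wqs stop starting new work items from here on */
        freeze_workqueues_begin();

        /* wait until in-flight work on freezable wqs has drained */
        while (freeze_workqueues_busy())
                msleep(10);
}

static void example_thaw_wqs(void)
{
        /* restore max_active and let frozen work items run again */
        thaw_workqueues();
}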
@@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void)
                 list_for_each_entry(wq, &workqueues, list) {
                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                        if (cwq && wq->flags & WQ_FREEZEABLE)
+                        if (cwq && wq->flags & WQ_FREEZABLE)
                                 cwq->max_active = 0;
                 }
 
@@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void)
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
  * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void)
  * Grabs and releases workqueue_lock.
  *
  * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
  */
 bool freeze_workqueues_busy(void)
 {
@@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void)
                 list_for_each_entry(wq, &workqueues, list) {
                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                        if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+                        if (!cwq || !(wq->flags & WQ_FREEZABLE))
                                 continue;
 
                         BUG_ON(cwq->nr_active < 0);
@@ -3690,7 +3701,7 @@ void thaw_workqueues(void)
                 list_for_each_entry(wq, &workqueues, list) {
                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                        if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+                        if (!cwq || !(wq->flags & WQ_FREEZABLE))
                                 continue;
 
                         /* restore max_active and repopulate worklist */