Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c                 7
-rw-r--r--  kernel/irq/internals.h          6
-rw-r--r--  kernel/irq/irqdesc.c           11
-rw-r--r--  kernel/irq/manage.c             2
-rw-r--r--  kernel/irq/resend.c             2
-rw-r--r--  kernel/perf_event.c            19
-rw-r--r--  kernel/power/main.c             2
-rw-r--r--  kernel/power/process.c          6
-rw-r--r--  kernel/power/snapshot.c         7
-rw-r--r--  kernel/ptrace.c                 6
-rw-r--r--  kernel/time/tick-broadcast.c   10
-rw-r--r--  kernel/time/tick-common.c       6
-rw-r--r--  kernel/time/tick-internal.h     3
-rw-r--r--  kernel/trace/blktrace.c        16
-rw-r--r--  kernel/workqueue.c             37
15 files changed, 89 insertions(+), 51 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4349935c2ad8..e92e98189032 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1575,8 +1575,10 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
 		return -ENODEV;
 
 	trialcs = alloc_trial_cpuset(cs);
-	if (!trialcs)
-		return -ENOMEM;
+	if (!trialcs) {
+		retval = -ENOMEM;
+		goto out;
+	}
 
 	switch (cft->private) {
 	case FILE_CPULIST:
@@ -1591,6 +1593,7 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
 	}
 
 	free_trial_cpuset(trialcs);
+out:
 	cgroup_unlock();
 	return retval;
 }
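[Annotation: the hunk above fixes an error path that returned with cgroup_lock() still held; the allocation failure now funnels through the new out: label so cgroup_unlock() always runs. A minimal compilable sketch of that unlock-on-error pattern follows; lock(), unlock(), and the malloc() stand in for cgroup_lock()/cgroup_unlock() and alloc_trial_cpuset()/free_trial_cpuset(), and are illustrative only.]

    #include <stdlib.h>
    #include <stdio.h>

    static void lock(void)   { /* e.g. pthread_mutex_lock() in real code */ }
    static void unlock(void) { /* matching release */ }

    static int write_resmask_like(void)
    {
            int retval = 0;
            void *trialcs;

            lock();
            trialcs = malloc(64);
            if (!trialcs) {
                    retval = -12;       /* -ENOMEM */
                    goto out;           /* a bare return would leak the lock */
            }
            /* ... apply the new resource mask via trialcs ... */
            free(trialcs);
    out:
            unlock();                   /* runs on success and failure alike */
            return retval;
    }

    int main(void)
    {
            printf("retval=%d\n", write_resmask_like());
            return 0;
    }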
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4571ae7e085a..99c3bc8a6fb4 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -3,6 +3,12 @@
  */
 #include <linux/irqdesc.h>
 
+#ifdef CONFIG_SPARSE_IRQ
+# define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
+#else
+# define IRQ_BITMAP_BITS	NR_IRQS
+#endif
+
 extern int noirqdebug;
 
 #define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 282f20230e67..2039bea31bdf 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -94,7 +94,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 static DEFINE_MUTEX(sparse_irq_lock);
-static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
+static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
@@ -217,6 +217,15 @@ int __init early_irq_init(void)
 	initcnt = arch_probe_nr_irqs();
 	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
 
+	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
+		nr_irqs = IRQ_BITMAP_BITS;
+
+	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
+		initcnt = IRQ_BITMAP_BITS;
+
+	if (initcnt > nr_irqs)
+		nr_irqs = initcnt;
+
 	for (i = 0; i < initcnt; i++) {
 		desc = alloc_desc(i, node);
 		set_bit(i, allocated_irqs);
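[Annotation: with CONFIG_SPARSE_IRQ the allocation bitmaps are now sized IRQ_BITMAP_BITS = NR_IRQS + 8196, leaving slack above NR_IRQS for dynamically allocated descriptors, and early_irq_init() clamps both nr_irqs and the architecture's initcnt to the bitmap size before pre-allocating. A small standalone model of that clamp sequence, using fprintf() in place of the kernel's WARN_ON(); the values are made up for illustration.]

    #include <stdio.h>

    #define NR_IRQS         16
    #define IRQ_BITMAP_BITS (NR_IRQS + 8196)  /* slack for sparse allocation */

    /* Never let a count exceed what the allocation bitmap can track. */
    static int clamp_to_bitmap(const char *name, int count)
    {
            if (count > IRQ_BITMAP_BITS) {
                    fprintf(stderr, "warning: %s=%d exceeds bitmap\n",
                            name, count);
                    count = IRQ_BITMAP_BITS;
            }
            return count;
    }

    int main(void)
    {
            int nr_irqs = clamp_to_bitmap("nr_irqs", 9000); /* arch-reported */
            int initcnt = clamp_to_bitmap("initcnt", 24);   /* to pre-allocate */

            if (initcnt > nr_irqs) /* every pre-allocated IRQ stays addressable */
                    nr_irqs = initcnt;
            printf("nr_irqs=%d initcnt=%d\n", nr_irqs, initcnt);
            return 0;
    }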
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0caa59f747dd..9033c1c70828 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1100,7 +1100,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	if (retval)
 		kfree(action);
 
-#ifdef CONFIG_DEBUG_SHIRQ
+#ifdef CONFIG_DEBUG_SHIRQ_FIXME
 	if (!retval && (irqflags & IRQF_SHARED)) {
 		/*
 		 * It's a shared IRQ -- the driver ought to be prepared for it
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 891115a929aa..dc49358b73fa 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -23,7 +23,7 @@
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 
 /* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, NR_IRQS);
+static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
 
 /*
  * Run software resends of IRQ's
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 999835b6112b..656222fcf767 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -782,6 +782,10 @@ retry:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
 	event->oncpu = smp_processor_id();
+
+	/*
+	 * Unthrottle events, since we scheduled we might have missed several
+	 * ticks already, also for a heavily scheduling task there is little
+	 * guarantee it'll get a tick in a timely manner.
+	 */
+	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+		perf_log_throttle(event, 1);
+		event->hw.interrupts = 0;
+	}
+
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	}
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
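[Annotation: MAX_INTERRUPTS and the perf_log_throttle() declaration move earlier in the file so event_sched_in() can unthrottle an event the moment it is scheduled back in, instead of waiting for a tick that a heavily scheduling task may not receive promptly. A simplified user-space model of the sentinel-and-reset idea; struct hw_counter and log_unthrottle() are hypothetical stand-ins, not perf's types.]

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_INTERRUPTS (~0ULL)      /* sentinel: "currently throttled" */

    struct hw_counter {
            uint64_t interrupts;        /* holds MAX_INTERRUPTS while throttled */
    };

    /* Stand-in for perf_log_throttle(event, 1): record the unthrottle. */
    static void log_unthrottle(struct hw_counter *hw)
    {
            printf("unthrottled counter %p\n", (void *)hw);
    }

    /* Called when the event is scheduled onto a CPU: if the tick-based
     * unthrottle never ran, clear the sentinel here so the event can
     * start counting interrupts again immediately. */
    static void sched_in(struct hw_counter *hw)
    {
            if (hw->interrupts == MAX_INTERRUPTS) {
                    log_unthrottle(hw);
                    hw->interrupts = 0;
            }
    }

    int main(void)
    {
            struct hw_counter hw = { .interrupts = MAX_INTERRUPTS };

            sched_in(&hw);              /* resets the throttle state */
            return hw.interrupts == 0 ? 0 : 1;
    }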
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 7b5db6a8561e..701853042c28 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq);
 
 static int __init pm_start_workqueue(void)
 {
-	pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0);
+	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
 
 	return pm_wq ? 0 : -ENOMEM;
 }
diff --git a/kernel/power/process.c b/kernel/power/process.c
index d6d2a10320e0..0cf3a27a6c9d 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,7 +22,7 @@
  */
 #define TIMEOUT	(20 * HZ)
 
-static inline int freezeable(struct task_struct * p)
+static inline int freezable(struct task_struct * p)
 {
 	if ((p == current) ||
 	    (p->flags & PF_NOFREEZE) ||
@@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only)
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezeable(p))
+			if (frozen(p) || !freezable(p))
 				continue;
 
 			if (!freeze_task(p, sig_only))
@@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only)
 
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
-		if (!freezeable(p))
+		if (!freezable(p))
 			continue;
 
 		if (nosig_only && should_send_signal(p))
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0dac75ea4456..64db648ff911 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1519,11 +1519,8 @@ static int
 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 		unsigned int nr_pages, unsigned int nr_highmem)
 {
-	int error = 0;
-
 	if (nr_highmem > 0) {
-		error = get_highmem_buffer(PG_ANY);
-		if (error)
+		if (get_highmem_buffer(PG_ANY))
 			goto err_out;
 		if (nr_highmem > alloc_highmem) {
 			nr_highmem -= alloc_highmem;
@@ -1546,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 
  err_out:
 	swsusp_free();
-	return error;
+	return -ENOMEM;
 }
 
 asmlinkage int swsusp_save(void)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 1708b1e2972d..e2302e40b360 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -163,7 +163,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 	return !err;
 }
 
-int ptrace_attach(struct task_struct *task)
+static int ptrace_attach(struct task_struct *task)
 {
 	int retval;
 
@@ -219,7 +219,7 @@ out:
  * Performs checks and sets PT_PTRACED.
  * Should be used by all ptrace implementations for PTRACE_TRACEME.
  */
-int ptrace_traceme(void)
+static int ptrace_traceme(void)
 {
 	int ret = -EPERM;
 
@@ -293,7 +293,7 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
 	return false;
 }
 
-int ptrace_detach(struct task_struct *child, unsigned int data)
+static int ptrace_detach(struct task_struct *child, unsigned int data)
 {
 	bool dead = false;
 
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 48b2761b5668..a3b5aff62606 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void)
 	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
 }
 
+/*
+ * Check whether the broadcast device supports oneshot.
+ */
+bool tick_broadcast_oneshot_available(void)
+{
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
+}
+
 #endif
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 051bc80a0c43..ed228ef6f6b8 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -51,7 +51,11 @@ int tick_is_oneshot_available(void)
 {
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
-	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
+	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+		return 0;
+	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+		return 1;
+	return tick_broadcast_oneshot_available();
 }
 
 /*
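[Annotation: tick_is_oneshot_available() now refuses oneshot mode when the per-CPU clock event device stops in deep C-states (CLOCK_EVT_FEAT_C3STOP) and no oneshot-capable broadcast device can take over, via the new tick_broadcast_oneshot_available(). A compact model of that decision tree with simplified feature flags; the null-device check is elided and the names are illustrative.]

    #include <stdbool.h>
    #include <stdio.h>

    #define FEAT_ONESHOT    0x1     /* device can fire one-off events */
    #define FEAT_C3STOP     0x2     /* device stops in deep C-states */

    /* A C3STOP device is only usable for oneshot mode if a broadcast
     * device can stand in for it while the CPU sleeps. */
    static bool oneshot_available(unsigned dev_features, bool broadcast_oneshot)
    {
            if (!(dev_features & FEAT_ONESHOT))
                    return false;
            if (!(dev_features & FEAT_C3STOP))
                    return true;        /* keeps ticking on its own */
            return broadcast_oneshot;   /* needs a oneshot broadcast backup */
    }

    int main(void)
    {
            printf("%d\n", oneshot_available(FEAT_ONESHOT, false));                /* 1 */
            printf("%d\n", oneshot_available(FEAT_ONESHOT | FEAT_C3STOP, false));  /* 0 */
            printf("%d\n", oneshot_available(FEAT_ONESHOT | FEAT_C3STOP, true));   /* 1 */
            return 0;
    }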
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 290eefbc1f60..f65d3a723a64 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
 extern void tick_check_oneshot_broadcast(int cpu);
+bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
 static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 	return 0;
 }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline bool tick_broadcast_oneshot_available(void) { return false; }
 #endif /* !TICK_ONESHOT */
 
 /*
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index d95721f33702..cbafed7d4f38 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 	rwbs[i] = '\0';
 }
 
-void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
-{
-	int rw = rq->cmd_flags & 0x03;
-	int bytes;
-
-	if (rq->cmd_flags & REQ_DISCARD)
-		rw |= REQ_DISCARD;
-
-	if (rq->cmd_flags & REQ_SECURE)
-		rw |= REQ_SECURE;
-
-	bytes = blk_rq_bytes(rq);
-
-	blk_fill_rwbs(rwbs, rw, bytes);
-}
-
 #endif /* CONFIG_EVENT_TRACING */
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 11869faa6819..ee6578b578ad 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -79,7 +79,9 @@ enum {
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
 
-	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
+	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+						/* call for help after 10ms
+						   (min two ticks) */
 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
 	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
@@ -2047,6 +2049,15 @@ repeat:
 		move_linked_works(work, scheduled, &n);
 
 		process_scheduled_works(rescuer);
+
+		/*
+		 * Leave this gcwq.  If keep_working() is %true, notify a
+		 * regular worker; otherwise, we end up with 0 concurrency
+		 * and stalling the execution.
+		 */
+		if (keep_working(gcwq))
+			wake_up_worker(gcwq);
+
 		spin_unlock_irq(&gcwq->lock);
 	}
 
@@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 	 */
 	spin_lock(&workqueue_lock);
 
-	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
 		for_each_cwq_cpu(cpu, wq)
 			get_cwq(cpu, wq)->max_active = 0;
 
@@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
 		spin_lock_irq(&gcwq->lock);
 
-		if (!(wq->flags & WQ_FREEZEABLE) ||
+		if (!(wq->flags & WQ_FREEZABLE) ||
 		    !(gcwq->flags & GCWQ_FREEZING))
 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * want to get it over with ASAP - spam rescuers, wake up as
 	 * many idlers as necessary and create new ones till the
 	 * worklist is empty.  Note that if the gcwq is frozen, there
-	 * may be frozen works in freezeable cwqs.  Don't declare
+	 * may be frozen works in freezable cwqs.  Don't declare
 	 * completion while frozen.
 	 */
 	while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (cwq && wq->flags & WQ_FREEZEABLE)
+			if (cwq && wq->flags & WQ_FREEZABLE)
 				cwq->max_active = 0;
 		}
 
@@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void)
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete. This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void)
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
- * %true if some freezeable workqueues are still busy. %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy. %false if freezing
+ * is complete.
 */
 bool freeze_workqueues_busy(void)
 {
@@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			BUG_ON(cwq->nr_active < 0);
@@ -3690,7 +3701,7 @@ void thaw_workqueues(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			/* restore max_active and repopulate worklist */
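[Annotation: besides the WQ_FREEZEABLE -> WQ_FREEZABLE rename, two behavioral tweaks ride in this file: MAYDAY_INITIAL_TIMEOUT is clamped to at least two ticks, since a one-tick timer armed just before a tick boundary can expire almost immediately, and the rescuer wakes a regular worker before leaving a gcwq so concurrency does not drop to zero. A standalone check of the timeout arithmetic for several HZ values; the helper name is illustrative.]

    #include <stdio.h>

    /* MAYDAY_INITIAL_TIMEOUT as defined above: aim for 10ms, but never
     * fewer than two ticks. */
    static long mayday_initial_timeout(long hz)
    {
            return hz / 100 >= 2 ? hz / 100 : 2;
    }

    int main(void)
    {
            long hz_values[] = { 100, 250, 300, 1000 };
            size_t i;

            for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++)
                    printf("HZ=%-4ld -> %ld ticks\n", hz_values[i],
                           mayday_initial_timeout(hz_values[i]));
            /* HZ=100 -> 2 ticks (clamped); HZ=1000 -> 10 ticks (10ms) */
            return 0;
    }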