Diffstat (limited to 'kernel')
 kernel/exit.c                |  7
 kernel/fork.c                |  3
 kernel/irq/spurious.c        | 46
 kernel/kthread.c             |  7
 kernel/power/process.c       | 57
 kernel/power/swap.c          |  2
 kernel/sched.c               |  4
 kernel/signal.c              | 24
 kernel/time/tick-broadcast.c | 17
 kernel/time/tick-sched.c     | 12
 kernel/workqueue.c           | 84
 11 files changed, 159 insertions(+), 104 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index c6d14b8008..5b888c24e4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -762,11 +762,8 @@ static void exit_notify(struct task_struct *tsk)
         read_lock(&tasklist_lock);
         spin_lock_irq(&tsk->sighand->siglock);
         for (t = next_thread(tsk); t != tsk; t = next_thread(t))
-                if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
-                        recalc_sigpending_tsk(t);
-                        if (signal_pending(t))
-                                signal_wake_up(t, 0);
-                }
+                if (!signal_pending(t) && !(t->flags & PF_EXITING))
+                        recalc_sigpending_and_wake(t);
         spin_unlock_irq(&tsk->sighand->siglock);
         read_unlock(&tasklist_lock);
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 87069cfc18..73ad5cda1b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -45,6 +45,7 @@
 #include <linux/acct.h>
 #include <linux/tsacct_kern.h>
 #include <linux/cn_proc.h>
+#include <linux/freezer.h>
 #include <linux/delayacct.h>
 #include <linux/taskstats_kern.h>
 #include <linux/random.h>
@@ -1405,7 +1406,9 @@ long do_fork(unsigned long clone_flags,
         }

         if (clone_flags & CLONE_VFORK) {
+                freezer_do_not_count();
                 wait_for_completion(&vfork);
+                freezer_count();
                 if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
                         current->ptrace_message = nr;
                         ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
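Background on the helpers used above: freezer_do_not_count() and freezer_count() bracket the vfork wait so the freezer does not wait for (or count) a parent that is blocked until its child execs or exits. Roughly, the include/linux/freezer.h helpers of this era look like the sketch below; this is a simplified reconstruction for orientation, not part of the patch, and the exact definitions may differ.

static inline void freezer_do_not_count(void)
{
        /* Tell the freezer it may ignore us while we sleep in the vfork wait. */
        current->flags |= PF_FREEZER_SKIP;
}

static inline void freezer_count(void)
{
        current->flags &= ~PF_FREEZER_SKIP;
        try_to_freeze();        /* catch up on any freeze request we skipped */
}

static inline int freezer_should_skip(struct task_struct *p)
{
        return !!(p->flags & PF_FREEZER_SKIP);
}

The try_to_freeze_tasks() change later in this patch checks freezer_should_skip() so such a task does not count toward the "todo" total.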
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index b0d81aae47..bd9e272d55 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -135,6 +135,39 @@ report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
         }
 }
 
+static inline int try_misrouted_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
+{
+        struct irqaction *action;
+
+        if (!irqfixup)
+                return 0;
+
+        /* We didn't actually handle the IRQ - see if it was misrouted? */
+        if (action_ret == IRQ_NONE)
+                return 1;
+
+        /*
+         * But for 'irqfixup == 2' we also do it for handled interrupts if
+         * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
+         * traditional PC timer interrupt.. Legacy)
+         */
+        if (irqfixup < 2)
+                return 0;
+
+        if (!irq)
+                return 1;
+
+        /*
+         * Since we don't get the descriptor lock, "action" can
+         * change under us. We don't really care, but we don't
+         * want to follow a NULL pointer. So tell the compiler to
+         * just load it once by using a barrier.
+         */
+        action = desc->action;
+        barrier();
+        return action && (action->flags & IRQF_IRQPOLL);
+}
+
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
                     irqreturn_t action_ret)
 {
@@ -144,15 +177,10 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
                 report_bad_irq(irq, desc, action_ret);
         }
 
-        if (unlikely(irqfixup)) {
-                /* Don't punish working computers */
-                if ((irqfixup == 2 && ((irq == 0) ||
-                                (desc->action->flags & IRQF_IRQPOLL))) ||
-                                action_ret == IRQ_NONE) {
-                        int ok = misrouted_irq(irq);
-                        if (action_ret == IRQ_NONE)
-                                desc->irqs_unhandled -= ok;
-                }
+        if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
+                int ok = misrouted_irq(irq);
+                if (action_ret == IRQ_NONE)
+                        desc->irqs_unhandled -= ok;
         }
 
         desc->irq_count++;
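With irqfixup=2, the poller now only touches handled interrupts whose handlers are explicitly marked IRQF_IRQPOLL (plus irq 0). A driver opts in when it registers its handler; a minimal, hypothetical example (device and handler names are made up):

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
        /* ... acknowledge the device and do the work ... */
        return IRQ_HANDLED;
}

static int foo_request_irq(unsigned int irq, void *dev_id)
{
        /* IRQF_IRQPOLL: this handler tolerates being called by the
         * spurious-IRQ poller while it hunts for a misrouted interrupt. */
        return request_irq(irq, foo_interrupt, IRQF_IRQPOLL, "foo", dev_id);
}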
diff --git a/kernel/kthread.c b/kernel/kthread.c
index df8a8e8f6c..bbd51b81a3 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -70,7 +70,7 @@ static int kthread(void *_create)
         data = create->data;
 
         /* OK, tell user we're spawned, wait for stop or wakeup */
-        __set_current_state(TASK_INTERRUPTIBLE);
+        __set_current_state(TASK_UNINTERRUPTIBLE);
         complete(&create->started);
         schedule();
 
@@ -162,7 +162,10 @@ EXPORT_SYMBOL(kthread_create);
  */
 void kthread_bind(struct task_struct *k, unsigned int cpu)
 {
-        BUG_ON(k->state != TASK_INTERRUPTIBLE);
+        if (k->state != TASK_UNINTERRUPTIBLE) {
+                WARN_ON(1);
+                return;
+        }
         /* Must have done schedule() in kthread() before we set_task_cpu */
         wait_task_inactive(k);
         set_task_cpu(k, cpu);
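The relaxed state check covers the usual creation sequence, where kthread_bind() must run while the new thread is still parked in kthread() and has not been woken yet. A hedged sketch of that caller pattern (names are illustrative):

static int my_worker(void *unused)
{
        /* ... per-CPU work ... */
        return 0;
}

static struct task_struct *start_worker_on(int cpu)
{
        struct task_struct *t = kthread_create(my_worker, NULL, "my_worker/%d", cpu);

        if (!IS_ERR(t)) {
                kthread_bind(t, cpu);   /* thread is still asleep inside kthread() here */
                wake_up_process(t);
        }
        return t;
}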
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0884193873..e0233d8422 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -31,16 +31,36 @@ static inline int freezeable(struct task_struct * p)
         return 1;
 }
 
+/*
+ * freezing is complete, mark current process as frozen
+ */
+static inline void frozen_process(void)
+{
+        if (!unlikely(current->flags & PF_NOFREEZE)) {
+                current->flags |= PF_FROZEN;
+                wmb();
+        }
+        clear_tsk_thread_flag(current, TIF_FREEZE);
+}
+
 /* Refrigerator is place where frozen processes are stored :-). */
 void refrigerator(void)
 {
         /* Hmm, should we be allowed to suspend when there are realtime
            processes around? */
         long save;
+
+        task_lock(current);
+        if (freezing(current)) {
+                frozen_process();
+                task_unlock(current);
+        } else {
+                task_unlock(current);
+                return;
+        }
         save = current->state;
         pr_debug("%s entered refrigerator\n", current->comm);
 
-        frozen_process(current);
         spin_lock_irq(&current->sighand->siglock);
         recalc_sigpending(); /* We sent fake signal, clean it up */
         spin_unlock_irq(&current->sighand->siglock);
@@ -81,7 +101,7 @@ static void cancel_freezing(struct task_struct *p)
                 pr_debug(" clean up: %s\n", p->comm);
                 do_not_freeze(p);
                 spin_lock_irqsave(&p->sighand->siglock, flags);
-                recalc_sigpending_tsk(p);
+                recalc_sigpending_and_wake(p);
                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
         }
 }
@@ -112,22 +132,12 @@ static unsigned int try_to_freeze_tasks(int freeze_user_space)
                         cancel_freezing(p);
                         continue;
                 }
-                if (is_user_space(p)) {
-                        if (!freeze_user_space)
-                                continue;
-
-                        /* Freeze the task unless there is a vfork
-                         * completion pending
-                         */
-                        if (!p->vfork_done)
-                                freeze_process(p);
-                } else {
-                        if (freeze_user_space)
-                                continue;
-
-                        freeze_process(p);
-                }
-                todo++;
+                if (freeze_user_space && !is_user_space(p))
+                        continue;
+
+                freeze_process(p);
+                if (!freezer_should_skip(p))
+                        todo++;
         } while_each_thread(g, p);
         read_unlock(&tasklist_lock);
         yield();                        /* Yield is okay here */
@@ -149,13 +159,16 @@ static unsigned int try_to_freeze_tasks(int freeze_user_space)
                         TIMEOUT / HZ, todo);
                 read_lock(&tasklist_lock);
                 do_each_thread(g, p) {
-                        if (is_user_space(p) == !freeze_user_space)
+                        if (freeze_user_space && !is_user_space(p))
                                 continue;
 
-                        if (freezeable(p) && !frozen(p))
+                        task_lock(p);
+                        if (freezeable(p) && !frozen(p) &&
+                            !freezer_should_skip(p))
                                 printk(KERN_ERR " %s\n", p->comm);
 
                         cancel_freezing(p);
+                        task_unlock(p);
                 } while_each_thread(g, p);
                 read_unlock(&tasklist_lock);
         }
@@ -200,9 +213,7 @@ static void thaw_tasks(int thaw_user_space)
                 if (is_user_space(p) == !thaw_user_space)
                         continue;
 
-                if (!thaw_process(p))
-                        printk(KERN_WARNING " Strange, %s not stopped\n",
-                                p->comm );
+                thaw_process(p);
         } while_each_thread(g, p);
         read_unlock(&tasklist_lock);
 }
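For orientation, refrigerator() is normally reached through try_to_freeze() from a freezable kernel thread's main loop; the sketch below shows that typical call site (illustrative only, not part of the patch; in this era a kernel thread opted in by clearing PF_NOFREEZE itself).

static int my_freezable_thread(void *unused)
{
        current->flags &= ~PF_NOFREEZE;         /* opt in to the freezer */

        while (!kthread_should_stop()) {
                try_to_freeze();                /* enters refrigerator() if freezing(current) */
                /* ... do the real work ... */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}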
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b8b235cc19..8b1a1b8371 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -584,7 +584,7 @@ int swsusp_check(void)
         resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
         if (!IS_ERR(resume_bdev)) {
                 set_blocksize(resume_bdev, PAGE_SIZE);
-                memset(swsusp_header, 0, sizeof(PAGE_SIZE));
+                memset(swsusp_header, 0, PAGE_SIZE);
                 error = bio_read_page(swsusp_resume_block,
                                         swsusp_header, NULL);
                 if (error)
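The memset fix above addresses a classic sizeof misuse: sizeof(PAGE_SIZE) is the size of the constant's type (an unsigned long), not 4096, so the old code zeroed only a few bytes of the header page. A standalone userspace illustration (values are examples):

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* stand-in for the kernel macro */

int main(void)
{
        /* Prints something like "8 vs 4096" on a 64-bit build: sizeof()
         * applies to the expression's type, so memset(..., sizeof(PAGE_SIZE))
         * clears only those few bytes, not a whole page. */
        printf("%zu vs %lu\n", sizeof(PAGE_SIZE), PAGE_SIZE);
        return 0;
}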
diff --git a/kernel/sched.c b/kernel/sched.c
index 799d23b4e3..13cdab3b4c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4775,9 +4775,7 @@ int __sched cond_resched_softirq(void)
         BUG_ON(!in_softirq());
 
         if (need_resched() && system_state == SYSTEM_RUNNING) {
-                raw_local_irq_disable();
-                _local_bh_enable();
-                raw_local_irq_enable();
+                local_bh_enable();
                 __cond_resched();
                 local_bh_disable();
                 return 1;
diff --git a/kernel/signal.c b/kernel/signal.c
index 364fc95bf9..acdfc0549c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -96,15 +96,27 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 
-fastcall void recalc_sigpending_tsk(struct task_struct *t)
+static int recalc_sigpending_tsk(struct task_struct *t)
 {
         if (t->signal->group_stop_count > 0 ||
             (freezing(t)) ||
             PENDING(&t->pending, &t->blocked) ||
-            PENDING(&t->signal->shared_pending, &t->blocked))
+            PENDING(&t->signal->shared_pending, &t->blocked)) {
                 set_tsk_thread_flag(t, TIF_SIGPENDING);
-        else
-                clear_tsk_thread_flag(t, TIF_SIGPENDING);
+                return 1;
+        }
+        clear_tsk_thread_flag(t, TIF_SIGPENDING);
+        return 0;
+}
+
+/*
+ * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
+ * This is superfluous when called on current, the wakeup is a harmless no-op.
+ */
+void recalc_sigpending_and_wake(struct task_struct *t)
+{
+        if (recalc_sigpending_tsk(t))
+                signal_wake_up(t, 0);
 }
 
 void recalc_sigpending(void)
@@ -744,7 +756,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
                 action->sa.sa_handler = SIG_DFL;
                 if (blocked) {
                         sigdelset(&t->blocked, sig);
-                        recalc_sigpending_tsk(t);
+                        recalc_sigpending_and_wake(t);
                 }
         }
         ret = specific_send_sig_info(sig, info, t);
@@ -2273,7 +2285,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
                 rm_from_queue_full(&mask, &t->signal->shared_pending);
                 do {
                         rm_from_queue_full(&mask, &t->pending);
-                        recalc_sigpending_tsk(t);
+                        recalc_sigpending_and_wake(t);
                         t = next_thread(t);
                 } while (t != current);
         }
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index eadfce2fff..8001d37071 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -243,11 +243,18 @@ void tick_broadcast_on_off(unsigned long reason, int *oncpu)
 {
         int cpu = get_cpu();
 
-        if (cpu == *oncpu)
-                tick_do_broadcast_on_off(&reason);
-        else
-                smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
-                                         &reason, 1, 1);
+        if (!cpu_isset(*oncpu, cpu_online_map)) {
+                printk(KERN_ERR "tick-braodcast: ignoring broadcast for "
+                       "offline CPU #%d\n", *oncpu);
+        } else {
+
+                if (cpu == *oncpu)
+                        tick_do_broadcast_on_off(&reason);
+                else
+                        smp_call_function_single(*oncpu,
+                                                 tick_do_broadcast_on_off,
+                                                 &reason, 1, 1);
+        }
         put_cpu();
 }
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3483e6cb95..3e7ebc4646 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -167,9 +167,15 @@ void tick_nohz_stop_sched_tick(void)
                 goto end;
 
         cpu = smp_processor_id();
-        if (unlikely(local_softirq_pending()))
-                printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-                       local_softirq_pending());
+        if (unlikely(local_softirq_pending())) {
+                static int ratelimit;
+
+                if (ratelimit < 10) {
+                        printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+                               local_softirq_pending());
+                        ratelimit++;
+                }
+        }
 
         now = ktime_get();
         /*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fb56fedd5c..3bebf73be9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -47,7 +47,6 @@ struct cpu_workqueue_struct {
 
         struct workqueue_struct *wq;
         struct task_struct *thread;
-        int should_stop;
 
         int run_depth;          /* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
@@ -71,7 +70,13 @@ static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
 static cpumask_t cpu_singlethread_map __read_mostly;
-/* optimization, we could use cpu_possible_map */
+/*
+ * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
+ * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
+ * which comes in between can't use for_each_online_cpu(). We could
+ * use cpu_possible_map, the cpumask below is more a documentation
+ * than optimization.
+ */
 static cpumask_t cpu_populated_map __read_mostly;
 
 /* If it's single threaded, it isn't in the list of workqueues. */
77/* If it's single threaded, it isn't in the list of workqueues. */ 82/* If it's single threaded, it isn't in the list of workqueues. */
@@ -272,24 +277,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
         spin_unlock_irq(&cwq->lock);
 }
 
-/*
- * NOTE: the caller must not touch *cwq if this func returns true
- */
-static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
-{
-        int should_stop = cwq->should_stop;
-
-        if (unlikely(should_stop)) {
-                spin_lock_irq(&cwq->lock);
-                should_stop = cwq->should_stop && list_empty(&cwq->worklist);
-                if (should_stop)
-                        cwq->thread = NULL;
-                spin_unlock_irq(&cwq->lock);
-        }
-
-        return should_stop;
-}
-
 static int worker_thread(void *__cwq)
 {
         struct cpu_workqueue_struct *cwq = __cwq;
@@ -302,14 +289,15 @@ static int worker_thread(void *__cwq)
 
         for (;;) {
                 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
-                if (!freezing(current) && !cwq->should_stop
-                    && list_empty(&cwq->worklist))
+                if (!freezing(current) &&
+                    !kthread_should_stop() &&
+                    list_empty(&cwq->worklist))
                         schedule();
                 finish_wait(&cwq->more_work, &wait);
 
                 try_to_freeze();
 
-                if (cwq_should_stop(cwq))
+                if (kthread_should_stop())
                         break;
 
                 run_workqueue(cwq);
@@ -340,18 +328,21 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
         insert_work(cwq, &barr->work, tail);
 }
 
-static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
+static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
+        int active;
+
         if (cwq->thread == current) {
                 /*
                  * Probably keventd trying to flush its own queue. So simply run
                  * it by hand rather than deadlocking.
                  */
                 run_workqueue(cwq);
+                active = 1;
         } else {
                 struct wq_barrier barr;
-                int active = 0;
 
+                active = 0;
                 spin_lock_irq(&cwq->lock);
                 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                         insert_wq_barrier(cwq, &barr, 1);
@@ -362,6 +353,8 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
                 if (active)
                         wait_for_completion(&barr.done);
         }
+
+        return active;
 }
 
 /**
@@ -674,7 +667,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
                 return PTR_ERR(p);
 
         cwq->thread = p;
-        cwq->should_stop = 0;
 
         return 0;
 }
@@ -740,29 +732,27 @@ EXPORT_SYMBOL_GPL(__create_workqueue);
 
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
-        struct wq_barrier barr;
-        int alive = 0;
-
-        spin_lock_irq(&cwq->lock);
-        if (cwq->thread != NULL) {
-                insert_wq_barrier(cwq, &barr, 1);
-                cwq->should_stop = 1;
-                alive = 1;
-        }
-        spin_unlock_irq(&cwq->lock);
+        /*
+         * Our caller is either destroy_workqueue() or CPU_DEAD,
+         * workqueue_mutex protects cwq->thread
+         */
+        if (cwq->thread == NULL)
+                return;
 
-        if (alive) {
-                wait_for_completion(&barr.done);
+        /*
+         * If the caller is CPU_DEAD the single flush_cpu_workqueue()
+         * is not enough, a concurrent flush_workqueue() can insert a
+         * barrier after us.
+         * When ->worklist becomes empty it is safe to exit because no
+         * more work_structs can be queued on this cwq: flush_workqueue
+         * checks list_empty(), and a "normal" queue_work() can't use
+         * a dead CPU.
+         */
+        while (flush_cpu_workqueue(cwq))
+                ;
 
-                while (unlikely(cwq->thread != NULL))
-                        cpu_relax();
-                /*
-                 * Wait until cwq->thread unlocks cwq->lock,
-                 * it won't touch *cwq after that.
-                 */
-                smp_rmb();
-                spin_unlock_wait(&cwq->lock);
-        }
+        kthread_stop(cwq->thread);
+        cwq->thread = NULL;
 }
 
 /**
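The reworked cleanup path leans on the stock kthread_stop()/kthread_should_stop() contract instead of the private should_stop flag: kthread_stop() marks the thread, wakes it, and blocks until the thread function returns. A generic sketch of that contract (illustrative, not workqueue code):

static int my_thread_fn(void *unused)
{
        while (!kthread_should_stop()) {
                /* ... process pending work ... */
                schedule_timeout_interruptible(HZ);
        }
        return 0;                       /* becomes kthread_stop()'s return value */
}

static void my_thread_example(void)
{
        struct task_struct *t = kthread_run(my_thread_fn, NULL, "my_thread");

        if (!IS_ERR(t))
                kthread_stop(t);        /* wakes the thread and waits for it to exit */
}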