Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	84
1 file changed, 37 insertions(+), 47 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fb56fedd5c02..3bebf73be976 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -47,7 +47,6 @@ struct cpu_workqueue_struct {
 
 	struct workqueue_struct *wq;
 	struct task_struct *thread;
-	int should_stop;
 
 	int run_depth;		/* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
@@ -71,7 +70,13 @@ static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
 static cpumask_t cpu_singlethread_map __read_mostly;
-/* optimization, we could use cpu_possible_map */
+/*
+ * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
+ * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
+ * which comes in between can't use for_each_online_cpu(). We could
+ * use cpu_possible_map, the cpumask below is more a documentation
+ * than optimization.
+ */
 static cpumask_t cpu_populated_map __read_mostly;
 
 /* If it's single threaded, it isn't in the list of workqueues. */
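
Side note, not part of the patch: the comment above is about how a flusher iterates CPUs. A rough sketch of such an iteration is below; the wq_cpu_map() helper and the wq->cpu_wq per-cpu field are assumed from the rest of workqueue.c of this era and are not shown in this hunk.

/*
 * Illustrative sketch only. Walking cpu_populated_map (here via an
 * assumed wq_cpu_map() helper) instead of for_each_online_cpu() means
 * a CPU that has just left cpu_online_map, but whose worklist has not
 * yet been flushed by CPU_DEAD, is still visited.
 */
void example_flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);	/* assumed helper */
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}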
@@ -272,24 +277,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 	spin_unlock_irq(&cwq->lock);
 }
 
-/*
- * NOTE: the caller must not touch *cwq if this func returns true
- */
-static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
-{
-	int should_stop = cwq->should_stop;
-
-	if (unlikely(should_stop)) {
-		spin_lock_irq(&cwq->lock);
-		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
-		if (should_stop)
-			cwq->thread = NULL;
-		spin_unlock_irq(&cwq->lock);
-	}
-
-	return should_stop;
-}
-
 static int worker_thread(void *__cwq)
 {
 	struct cpu_workqueue_struct *cwq = __cwq;
@@ -302,14 +289,15 @@ static int worker_thread(void *__cwq)
 
 	for (;;) {
 		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
-		if (!freezing(current) && !cwq->should_stop
-		    && list_empty(&cwq->worklist))
+		if (!freezing(current) &&
+		    !kthread_should_stop() &&
+		    list_empty(&cwq->worklist))
 			schedule();
 		finish_wait(&cwq->more_work, &wait);
 
 		try_to_freeze();
 
-		if (cwq_should_stop(cwq))
+		if (kthread_should_stop())
 			break;
 
 		run_workqueue(cwq);
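
The new worker_thread() follows the standard kthread discipline: test kthread_should_stop() both before sleeping and again after waking, so a concurrent kthread_stop() can never be missed. A minimal self-contained sketch of that discipline, illustrative only (the demo_* names are made up):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_more_work);
static int demo_have_work;		/* producer sets this, then wake_up() */

static int demo_thread(void *unused)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* Register on the waitqueue before testing the condition. */
		prepare_to_wait(&demo_more_work, &wait, TASK_INTERRUPTIBLE);
		if (!kthread_should_stop() && !demo_have_work)
			schedule();
		finish_wait(&demo_more_work, &wait);

		/* Re-check after waking: kthread_stop() may have run. */
		if (kthread_should_stop())
			break;

		demo_have_work = 0;	/* stands in for run_workqueue() */
	}
	return 0;
}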
@@ -340,18 +328,21 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 	insert_work(cwq, &barr->work, tail);
 }
 
-static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
+static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
+	int active;
+
 	if (cwq->thread == current) {
 		/*
 		 * Probably keventd trying to flush its own queue. So simply run
 		 * it by hand rather than deadlocking.
 		 */
 		run_workqueue(cwq);
+		active = 1;
 	} else {
 		struct wq_barrier barr;
-		int active = 0;
 
+		active = 0;
 		spin_lock_irq(&cwq->lock);
 		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
 			insert_wq_barrier(cwq, &barr, 1);
@@ -362,6 +353,8 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 		if (active)
 			wait_for_completion(&barr.done);
 	}
+
+	return active;
 }
 
 /**
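
The barrier mechanism flush_cpu_workqueue() builds on is worth spelling out. The sketch below is illustrative only (the demo_* names are made up; the real wq_barrier is defined elsewhere in workqueue.c): queue a dummy work item whose only job is to fire a completion, then wait for it, which implicitly waits for everything queued ahead of it.

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void demo_barrier_func(struct work_struct *work)
{
	struct demo_barrier *barr = container_of(work, struct demo_barrier, work);

	complete(&barr->done);
}

/* Wait until all work queued on @wq before this call has run. */
static void demo_flush(struct workqueue_struct *wq)
{
	struct demo_barrier barr;

	INIT_WORK(&barr.work, demo_barrier_func);
	init_completion(&barr.done);

	queue_work(wq, &barr.work);
	wait_for_completion(&barr.done);
}

The point of making flush_cpu_workqueue() return "active" is simply so cleanup_workqueue_thread() below can repeat the flush until a pass finds nothing pending.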
@@ -674,7 +667,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 		return PTR_ERR(p);
 
 	cwq->thread = p;
-	cwq->should_stop = 0;
 
 	return 0;
 }
@@ -740,29 +732,27 @@ EXPORT_SYMBOL_GPL(__create_workqueue);
 
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
-	struct wq_barrier barr;
-	int alive = 0;
-
-	spin_lock_irq(&cwq->lock);
-	if (cwq->thread != NULL) {
-		insert_wq_barrier(cwq, &barr, 1);
-		cwq->should_stop = 1;
-		alive = 1;
-	}
-	spin_unlock_irq(&cwq->lock);
+	/*
+	 * Our caller is either destroy_workqueue() or CPU_DEAD,
+	 * workqueue_mutex protects cwq->thread
+	 */
+	if (cwq->thread == NULL)
+		return;
 
-	if (alive) {
-		wait_for_completion(&barr.done);
+	/*
+	 * If the caller is CPU_DEAD the single flush_cpu_workqueue()
+	 * is not enough, a concurrent flush_workqueue() can insert a
+	 * barrier after us.
+	 * When ->worklist becomes empty it is safe to exit because no
+	 * more work_structs can be queued on this cwq: flush_workqueue
+	 * checks list_empty(), and a "normal" queue_work() can't use
+	 * a dead CPU.
+	 */
+	while (flush_cpu_workqueue(cwq))
+		;
 
-		while (unlikely(cwq->thread != NULL))
-			cpu_relax();
-		/*
-		 * Wait until cwq->thread unlocks cwq->lock,
-		 * it won't touch *cwq after that.
-		 */
-		smp_rmb();
-		spin_unlock_wait(&cwq->lock);
-	}
+	kthread_stop(cwq->thread);
+	cwq->thread = NULL;
 }
 
 /**
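
Why the plain kthread_stop()/cwq->thread = NULL pair is enough here comes down to the contract of the kthread API itself: kthread_stop() wakes the thread, waits for the thread function to return, and only then comes back, so no hand-rolled should_stop flag, cpu_relax() loop or spin_unlock_wait() handshake is needed. A minimal self-contained illustration of that lifecycle (demo_* names are made up):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_fn(void *unused)
{
	/* kthread_should_stop() becomes true once kthread_stop() is called. */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int demo_start(void)
{
	demo_task = kthread_run(demo_fn, NULL, "demo");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void demo_stop(void)
{
	/* Blocks until demo_fn() has returned; demo_task is then gone. */
	kthread_stop(demo_task);
	demo_task = NULL;
}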