Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  55
1 file changed, 25 insertions, 30 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ce72d45c7fd8..6308a4bc6a82 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -69,6 +69,7 @@ static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
+static cpumask_t cpu_singlethread_map __read_mostly;
 /* optimization, we could use cpu_possible_map */
 static cpumask_t cpu_populated_map __read_mostly;
 
@@ -78,6 +79,12 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
 	return list_empty(&wq->list);
 }
 
+static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
+{
+	return is_single_threaded(wq)
+		? &cpu_singlethread_map : &cpu_populated_map;
+}
+
 /*
  * Set the workqueue on which a work item is to be run
  * - Must *only* be called if the pending flag is set
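
The new wq_cpu_map() helper folds the single-threaded/multi-threaded special cases into one mask lookup: a single-threaded workqueue maps to the one-bit cpu_singlethread_map, any other workqueue to cpu_populated_map. Every hunk below then reduces to the same per-CPU loop; a minimal sketch of that shared pattern, using only names from this file except for the hypothetical helper apply_to_cwqs() and its callback:

	static void apply_to_cwqs(struct workqueue_struct *wq,
				  void (*fn)(struct cpu_workqueue_struct *cwq))
	{
		const cpumask_t *cpu_map = wq_cpu_map(wq);
		int cpu;

		/* Visit the cpu_workqueue_struct of each CPU in the mask. */
		for_each_cpu_mask(cpu, *cpu_map)
			fn(per_cpu_ptr(wq->cpu_wq, cpu));
	}
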
@@ -393,16 +400,12 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
-	might_sleep();
-
-	if (is_single_threaded(wq))
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
-	else {
-		int cpu;
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
+	int cpu;
 
-		for_each_cpu_mask(cpu, cpu_populated_map)
-			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-	}
+	might_sleep();
+	for_each_cpu_mask(cpu, *cpu_map)
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
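
For a single-threaded workqueue the rewritten flush_workqueue() loop visits exactly one CPU, so callers see no behavioural change. A caller-side sketch with hypothetical mydrv_* names, assuming the standard workqueue API of this kernel series:

	#include <linux/workqueue.h>
	#include <linux/errno.h>

	static void mydrv_work_fn(struct work_struct *work)
	{
		/* hypothetical deferred work */
	}
	static DECLARE_WORK(mydrv_work, mydrv_work_fn);
	static struct workqueue_struct *mydrv_wq;

	static int mydrv_init(void)
	{
		mydrv_wq = create_singlethread_workqueue("mydrv");
		if (!mydrv_wq)
			return -ENOMEM;
		queue_work(mydrv_wq, &mydrv_work);
		/* With this patch the flush iterates cpu_singlethread_map,
		 * i.e. exactly the CPU picked in init_workqueues(). */
		flush_workqueue(mydrv_wq);
		return 0;
	}
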
@@ -439,7 +442,9 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
  */
 void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	int cpu;
 
 	might_sleep();
 
@@ -457,14 +462,8 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 	work_release(work);
 	spin_unlock_irq(&cwq->lock);
 
-	if (is_single_threaded(wq))
-		wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
-	else {
-		int cpu;
-
-		for_each_cpu_mask(cpu, cpu_populated_map)
-			wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
-	}
+	for_each_cpu_mask(cpu, *cpu_map)
+		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -757,22 +756,17 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	int cpu;
 
-	if (is_single_threaded(wq)) {
-		cwq = per_cpu_ptr(wq->cpu_wq, singlethread_cpu);
-		cleanup_workqueue_thread(cwq, singlethread_cpu);
-	} else {
-		int cpu;
+	mutex_lock(&workqueue_mutex);
+	list_del(&wq->list);
+	mutex_unlock(&workqueue_mutex);
 
-		mutex_lock(&workqueue_mutex);
-		list_del(&wq->list);
-		mutex_unlock(&workqueue_mutex);
-
-		for_each_cpu_mask(cpu, cpu_populated_map) {
-			cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-			cleanup_workqueue_thread(cwq, cpu);
-		}
+	for_each_cpu_mask(cpu, *cpu_map) {
+		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+		cleanup_workqueue_thread(cwq, cpu);
 	}
 
 	free_percpu(wq->cpu_wq);
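
destroy_workqueue() now takes the same shape for both workqueue flavours: unlink the workqueue under workqueue_mutex, then stop one thread per CPU in wq_cpu_map(wq). The matching teardown for the hypothetical driver above:

	static void mydrv_exit(void)
	{
		/* Stops and flushes the per-CPU (here: single) worker thread
		 * listed in wq_cpu_map(), then frees the per-CPU data. */
		destroy_workqueue(mydrv_wq);
	}
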
@@ -831,6 +825,7 @@ void init_workqueues(void)
 {
 	cpu_populated_map = cpu_online_map;
 	singlethread_cpu = first_cpu(cpu_possible_map);
+	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);
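
cpumask_of_cpu() yields a mask with a single bit set, so for single-threaded workqueues the generic for_each_cpu_mask() loops above degenerate to the old one-CPU calls. A sketch of that invariant after init_workqueues(), with a hypothetical check function:

	/* cpu_singlethread_map contains only singlethread_cpu. */
	static void check_singlethread_map(void)
	{
		int cpu;

		BUG_ON(cpus_weight(cpu_singlethread_map) != 1);
		for_each_cpu_mask(cpu, cpu_singlethread_map)
			BUG_ON(cpu != singlethread_cpu);	/* sole iteration */
	}
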