Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--    kernel/workqueue.c    24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 00ff4d08e370..7db251a959c5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -158,8 +158,8 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  *
- * We queue the work to the CPU it was submitted, but there is no
- * guarantee that it will be processed by that CPU.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
  */
 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
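
The reworded comment above is the whole change in this hunk: queue_work() queues the work on the submitting CPU, and the work migrates only if that CPU dies. A minimal caller, as a sketch (work_fn, my_work and submit_example are hypothetical names, not part of this patch):

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void work_fn(struct work_struct *unused)
{
        /* Normally runs on the CPU that queued it; after CPU_DEAD it
         * may be drained by a thread on another CPU. */
        printk(KERN_INFO "work ran on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(my_work, work_fn);

static int submit_example(struct workqueue_struct *wq)
{
        /* Non-zero return means the work was not already pending. */
        return queue_work(wq, &my_work);
}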
@@ -772,7 +772,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
-static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
         /*
          * Our caller is either destroy_workqueue() or CPU_DEAD,
@@ -808,19 +808,16 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 void destroy_workqueue(struct workqueue_struct *wq)
 {
         const cpumask_t *cpu_map = wq_cpu_map(wq);
-        struct cpu_workqueue_struct *cwq;
         int cpu;
 
         get_online_cpus();
         spin_lock(&workqueue_lock);
         list_del(&wq->list);
         spin_unlock(&workqueue_lock);
-        put_online_cpus();
 
-        for_each_cpu_mask(cpu, *cpu_map) {
-                cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-                cleanup_workqueue_thread(cwq, cpu);
-        }
+        for_each_cpu_mask(cpu, *cpu_map)
+                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
+        put_online_cpus();
 
         free_percpu(wq->cpu_wq);
         kfree(wq);
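
The key reordering in this hunk: put_online_cpus() now comes after the cleanup loop, so every cleanup_workqueue_thread() call runs with CPU hotplug excluded and the cpumask cannot change underneath it. The bracketing pattern, as a sketch (per_cpu_teardown and teardown_all are hypothetical, stubbed here to keep the sketch self-contained):

#include <linux/cpu.h>

/* Hypothetical per-CPU cleanup, stubbed for the sketch. */
static void per_cpu_teardown(int cpu)
{
}

static void teardown_all(void)
{
        int cpu;

        get_online_cpus();              /* hold off CPU hotplug */
        for_each_online_cpu(cpu)
                per_cpu_teardown(cpu);  /* mask is stable inside the bracket */
        put_online_cpus();              /* allow hotplug again */
}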
@@ -838,7 +835,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
         action &= ~CPU_TASKS_FROZEN;
 
         switch (action) {
-
         case CPU_UP_PREPARE:
                 cpu_set(cpu, cpu_populated_map);
         }
@@ -861,11 +857,17 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                 case CPU_UP_CANCELED:
                         start_workqueue_thread(cwq, -1);
                 case CPU_DEAD:
-                        cleanup_workqueue_thread(cwq, cpu);
+                        cleanup_workqueue_thread(cwq);
                         break;
                 }
         }
 
+        switch (action) {
+        case CPU_UP_CANCELED:
+        case CPU_DEAD:
+                cpu_clear(cpu, cpu_populated_map);
+        }
+
         return NOTIFY_OK;
 }
 
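
The added switch is the point of the patch: a CPU that is gone (CPU_DEAD) or never came up (CPU_UP_CANCELED) is now cleared from cpu_populated_map, so later workqueue creation and flushing stop visiting it. The same grow/shrink bookkeeping in a self-contained notifier, as a sketch against the 2.6.25-era hotplug API (my_mask and my_cpu_callback are hypothetical):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

static cpumask_t my_mask = CPU_MASK_NONE;

static int my_cpu_callback(struct notifier_block *nfb,
                           unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        action &= ~CPU_TASKS_FROZEN;    /* fold the _FROZEN variants in */

        switch (action) {
        case CPU_UP_PREPARE:
                cpu_set(cpu, my_mask);          /* grow when a CPU comes up */
                break;
        case CPU_UP_CANCELED:
        case CPU_DEAD:
                cpu_clear(cpu, my_mask);        /* shrink when it goes away */
                break;
        }
        return NOTIFY_OK;
}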