author		Oleg Nesterov <oleg@tv-sign.ru>	2008-04-29 04:00:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-29 11:06:11 -0400
commit		00dfcaf748f46de89efe41baa298b5cf9adda67e (patch)
tree		4420dbfac9ba213e1604320cf9615a505ca909fd /kernel/workqueue.c
parent		786083667e0ced85ce17c4c0b6c57a9f47c5b9f2 (diff)
workqueues: shrink cpu_populated_map when CPU dies
When cpu_populated_map was introduced, it was assumed that cwq->thread could survive after CPU_DEAD; that is why we never shrink cpu_populated_map.

This is not very nice: we can safely remove an already dead CPU from the map. The only required change is that destroy_workqueue() must hold the hotplug lock until it has destroyed all cwq->thread's, to protect cpu_populated_map. We could make a local copy of the cpu mask and drop the lock, but sizeof(cpumask_t) may be very large.

Also, fix the comment near queue_work(). Unless _cpu_down() happens, we do guarantee the cpu-affinity of the work_struct, and we have users which rely on this.

[akpm@linux-foundation.org: repair comment]
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
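The core of the patch is an ordering change in destroy_workqueue(): the hotplug lock is now released only after every cwq->thread has been torn down, so cpu_populated_map cannot be shrunk underneath the cleanup loop. Below is a simplified sketch of the resulting function, assembled from the hunks that follow; it is not the verbatim kernel code, and helper names such as wq_cpu_map() are taken from the workqueue code of that era.

/* Sketch of destroy_workqueue() after this patch (simplified, not verbatim). */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);	/* &cpu_populated_map for multithread wqs */
	struct cpu_workqueue_struct *cwq;
	int cpu;

	get_online_cpus();			/* block CPU hotplug */
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}
	put_online_cpus();			/* only now may cpu_populated_map shrink */

	free_percpu(wq->cpu_wq);
	kfree(wq);
}

The alternative mentioned in the changelog, copying the cpumask and dropping the lock before the loop, was rejected because a local cpumask_t copy may be very large on big-NR_CPUS configurations.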
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 00ff4d08e370..1ad0ee489cd1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -158,8 +158,8 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  *
- * We queue the work to the CPU it was submitted, but there is no
- * guarantee that it will be processed by that CPU.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
  */
 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
@@ -815,12 +815,12 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
-	put_online_cpus();
 
 	for_each_cpu_mask(cpu, *cpu_map) {
 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 		cleanup_workqueue_thread(cwq, cpu);
 	}
+	put_online_cpus();
 
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
@@ -838,7 +838,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	action &= ~CPU_TASKS_FROZEN;
 
 	switch (action) {
-
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
 	}
@@ -866,6 +865,12 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		}
 	}
 
+	switch (action) {
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		cpu_clear(cpu, cpu_populated_map);
+	}
+
 	return NOTIFY_OK;
 }
 
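Read together, the notifier hunks amount to symmetric bookkeeping of cpu_populated_map: the bit is set on CPU_UP_PREPARE and, new with this patch, cleared again on CPU_UP_CANCELED or CPU_DEAD. A condensed sketch of workqueue_cpu_callback() follows, with the per-workqueue thread handling elided; the assumption, consistent with the hunk headers above, is that the elided middle section is unchanged by this patch.

/* Condensed sketch of the cpu_populated_map bookkeeping after this patch. */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);	/* grow the map */
	}

	/* ... create/start/cleanup the per-workqueue threads for @cpu ... */

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		cpu_clear(cpu, cpu_populated_map);	/* shrink the map: new with this patch */
	}

	return NOTIFY_OK;
}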