-rw-r--r--  include/linux/cpu.h      | 15
-rw-r--r--  include/linux/notifier.h |  2
-rw-r--r--  kernel/cpu.c             |  5
-rw-r--r--  kernel/workqueue.c       | 18
4 files changed, 27 insertions(+), 13 deletions(-)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7464ba3b4333..d7faf8808497 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -69,10 +69,11 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
 #endif
 
 int cpu_up(unsigned int cpu);
-
 extern void cpu_hotplug_init(void);
+extern void cpu_maps_update_begin(void);
+extern void cpu_maps_update_done(void);
 
-#else
+#else /* CONFIG_SMP */
 
 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
@@ -87,10 +88,16 @@ static inline void cpu_hotplug_init(void)
 {
 }
 
+static inline void cpu_maps_update_begin(void)
+{
+}
+
+static inline void cpu_maps_update_done(void)
+{
+}
+
 #endif /* CONFIG_SMP */
 extern struct sysdev_class cpu_sysdev_class;
-extern void cpu_maps_update_begin(void);
-extern void cpu_maps_update_done(void);
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index bd3d72ddf333..da2698b0fdd1 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -214,6 +214,8 @@ static inline int notifier_to_errno(int ret)
 #define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
 #define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
 					* not handling interrupts, soon dead */
+#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
+					* lock is dropped */
 
 /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
  * operation in progress
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2cc409ce0a8f..10ba5f1004a5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -285,6 +285,11 @@ out_allowed:
 	set_cpus_allowed_ptr(current, &old_allowed);
 out_release:
 	cpu_hotplug_done();
+	if (!err) {
+		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
+					    hcpu) == NOTIFY_BAD)
+			BUG();
+	}
 	return err;
 }
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5fbffd302eb5..828e58230cbc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -828,7 +828,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		err = create_workqueue_thread(cwq, singlethread_cpu);
 		start_workqueue_thread(cwq, -1);
 	} else {
-		get_online_cpus();
+		cpu_maps_update_begin();
 		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
 		spin_unlock(&workqueue_lock);
@@ -840,7 +840,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 			err = create_workqueue_thread(cwq, cpu);
 			start_workqueue_thread(cwq, cpu);
 		}
-		put_online_cpus();
+		cpu_maps_update_done();
 	}
 
 	if (err) {
@@ -854,8 +854,8 @@ EXPORT_SYMBOL_GPL(__create_workqueue_key);
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
 	/*
-	 * Our caller is either destroy_workqueue() or CPU_DEAD,
-	 * get_online_cpus() protects cwq->thread.
+	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
+	 * cpu_add_remove_lock protects cwq->thread.
 	 */
 	if (cwq->thread == NULL)
 		return;
@@ -865,7 +865,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 
 	flush_cpu_workqueue(cwq);
 	/*
-	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
+	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
 	 * a concurrent flush_workqueue() can insert a barrier after us.
 	 * However, in that case run_workqueue() won't return and check
 	 * kthread_should_stop() until it flushes all work_struct's.
@@ -889,14 +889,14 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	int cpu;
 
-	get_online_cpus();
+	cpu_maps_update_begin();
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
 	for_each_cpu_mask_nr(cpu, *cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
-	put_online_cpus();
+	cpu_maps_update_done();
 
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
@@ -935,7 +935,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 		case CPU_UP_CANCELED:
 			start_workqueue_thread(cwq, -1);
-		case CPU_DEAD:
+		case CPU_POST_DEAD:
 			cleanup_workqueue_thread(cwq);
 			break;
 		}
@@ -943,7 +943,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_CANCELED:
-	case CPU_DEAD:
+	case CPU_POST_DEAD:
 		cpu_clear(cpu, cpu_populated_map);
 	}
 
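
Illustrative sketch (not part of the patch above): a minimal, hypothetical CPU hotplug notifier showing how a client of the chain might consume the new CPU_POST_DEAD event, which is sent only after cpu_hotplug_done(), i.e. with the hotplug lock already dropped. The function and variable names below are made up for illustration.

/* Hypothetical example, not from this patch. */
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		/* Still inside the hotplug section: keep teardown light. */
		break;
	case CPU_POST_DEAD:
		/*
		 * The cpu_hotplug lock has been dropped, so work done here
		 * (e.g. flushing per-cpu resources for this cpu) may itself
		 * use get_online_cpus() without deadlocking.
		 */
		pr_debug("cpu %u is fully offline\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};

/* Registered once at init time, e.g.: register_cpu_notifier(&example_cpu_nb); */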