Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c                38
-rw-r--r--  kernel/profile.c            20
-rw-r--r--  kernel/trace/ring_buffer.c  19
3 files changed, 62 insertions, 15 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index deff2e693766..a9e710eef0e2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -19,6 +19,7 @@
 #include <linux/mutex.h>
 #include <linux/gfp.h>
 #include <linux/suspend.h>
+#include <linux/lockdep.h>
 
 #include "smpboot.h"
 
@@ -27,18 +28,23 @@
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
 /*
- * The following two API's must be used when attempting
- * to serialize the updates to cpu_online_mask, cpu_present_mask.
+ * The following two APIs (cpu_maps_update_begin/done) must be used when
+ * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
+ * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
+ * hotplug callback (un)registration performed using __register_cpu_notifier()
+ * or __unregister_cpu_notifier().
  */
 void cpu_maps_update_begin(void)
 {
 	mutex_lock(&cpu_add_remove_lock);
 }
+EXPORT_SYMBOL(cpu_notifier_register_begin);
 
 void cpu_maps_update_done(void)
 {
 	mutex_unlock(&cpu_add_remove_lock);
 }
+EXPORT_SYMBOL(cpu_notifier_register_done);
 
 static RAW_NOTIFIER_HEAD(cpu_chain);
 
@@ -57,17 +63,30 @@ static struct {
 	 * an ongoing cpu hotplug operation.
 	 */
 	int refcount;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } cpu_hotplug = {
 	.active_writer = NULL,
 	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
 	.refcount = 0,
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	.dep_map = {.name = "cpu_hotplug.lock" },
+#endif
 };
 
+/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
+#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
+#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
+#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
+
 void get_online_cpus(void)
 {
 	might_sleep();
 	if (cpu_hotplug.active_writer == current)
 		return;
+	cpuhp_lock_acquire_read();
 	mutex_lock(&cpu_hotplug.lock);
 	cpu_hotplug.refcount++;
 	mutex_unlock(&cpu_hotplug.lock);
@@ -87,6 +106,7 @@ void put_online_cpus(void)
 	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
 		wake_up_process(cpu_hotplug.active_writer);
 	mutex_unlock(&cpu_hotplug.lock);
+	cpuhp_lock_release();
 
 }
 EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -117,6 +137,7 @@ void cpu_hotplug_begin(void)
 {
 	cpu_hotplug.active_writer = current;
 
+	cpuhp_lock_acquire();
 	for (;;) {
 		mutex_lock(&cpu_hotplug.lock);
 		if (likely(!cpu_hotplug.refcount))
@@ -131,6 +152,7 @@ void cpu_hotplug_done(void)
 {
 	cpu_hotplug.active_writer = NULL;
 	mutex_unlock(&cpu_hotplug.lock);
+	cpuhp_lock_release();
 }
 
 /*
@@ -166,6 +188,11 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
 	return ret;
 }
 
+int __ref __register_cpu_notifier(struct notifier_block *nb)
+{
+	return raw_notifier_chain_register(&cpu_chain, nb);
+}
+
 static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
 			int *nr_calls)
 {
@@ -189,6 +216,7 @@ static void cpu_notify_nofail(unsigned long val, void *v)
 	BUG_ON(cpu_notify(val, v));
 }
 EXPORT_SYMBOL(register_cpu_notifier);
+EXPORT_SYMBOL(__register_cpu_notifier);
 
 void __ref unregister_cpu_notifier(struct notifier_block *nb)
 {
@@ -198,6 +226,12 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
+void __ref __unregister_cpu_notifier(struct notifier_block *nb)
+{
+	raw_notifier_chain_unregister(&cpu_chain, nb);
+}
+EXPORT_SYMBOL(__unregister_cpu_notifier);
+
 /**
  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
  * @cpu: a CPU id
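
Note: the kernel/cpu.c hunks above only add the interfaces. As a rough illustration of how a subsystem would combine them, here is a minimal sketch of the intended registration pattern. Everything named "foobar" (the callback, the notifier block, the per-CPU setup helper, the initcall) is hypothetical; only cpu_notifier_register_begin()/cpu_notifier_register_done(), __register_cpu_notifier() and the standard notifier/cpumask helpers come from the diff above or from existing kernel APIs.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical per-CPU setup for a made-up "foobar" subsystem. */
static void foobar_setup_cpu(unsigned int cpu)
{
	/* allocate/initialize per-CPU state here */
}

static int foobar_cpu_callback(struct notifier_block *nb,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		foobar_setup_cpu(cpu);	/* CPU came online */
		break;
	case CPU_DEAD:
		/* hypothetical per-CPU teardown for a dead CPU */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foobar_cpu_notifier = {
	.notifier_call = foobar_cpu_callback,
};

static int __init foobar_init(void)
{
	unsigned int cpu;

	cpu_notifier_register_begin();

	/*
	 * cpu_add_remove_lock is held, so the online mask cannot change
	 * between this loop and the notifier registration below.
	 */
	for_each_online_cpu(cpu)
		foobar_setup_cpu(cpu);

	/* Lockless variant: the begin/done pair provides the protection. */
	__register_cpu_notifier(&foobar_cpu_notifier);

	cpu_notifier_register_done();

	return 0;
}
device_initcall(foobar_init);
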
diff --git a/kernel/profile.c b/kernel/profile.c
index 1b266dbe755a..cb980f0c731b 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -591,18 +591,28 @@ out_cleanup:
 int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
 {
 	struct proc_dir_entry *entry;
+	int err = 0;
 
 	if (!prof_on)
 		return 0;
-	if (create_hash_tables())
-		return -ENOMEM;
+
+	cpu_notifier_register_begin();
+
+	if (create_hash_tables()) {
+		err = -ENOMEM;
+		goto out;
+	}
+
 	entry = proc_create("profile", S_IWUSR | S_IRUGO,
 			NULL, &proc_profile_operations);
 	if (!entry)
-		return 0;
+		goto out;
 	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
-	hotcpu_notifier(profile_cpu_callback, 0);
-	return 0;
+	__hotcpu_notifier(profile_cpu_callback, 0);
+
+out:
+	cpu_notifier_register_done();
+	return err;
 }
 subsys_initcall(create_proc_profile);
 #endif /* CONFIG_PROC_FS */
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index fc4da2d97f9b..c634868c2921 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1301,7 +1301,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	 * In that off case, we need to allocate for all possible cpus.
 	 */
 #ifdef CONFIG_HOTPLUG_CPU
-	get_online_cpus();
+	cpu_notifier_register_begin();
 	cpumask_copy(buffer->cpumask, cpu_online_mask);
 #else
 	cpumask_copy(buffer->cpumask, cpu_possible_mask);
@@ -1324,10 +1324,10 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 #ifdef CONFIG_HOTPLUG_CPU
 	buffer->cpu_notify.notifier_call = rb_cpu_notify;
 	buffer->cpu_notify.priority = 0;
-	register_cpu_notifier(&buffer->cpu_notify);
+	__register_cpu_notifier(&buffer->cpu_notify);
+	cpu_notifier_register_done();
 #endif
 
-	put_online_cpus();
 	mutex_init(&buffer->mutex);
 
 	return buffer;
@@ -1341,7 +1341,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 
 fail_free_cpumask:
 	free_cpumask_var(buffer->cpumask);
-	put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+	cpu_notifier_register_done();
+#endif
 
 fail_free_buffer:
 	kfree(buffer);
@@ -1358,16 +1360,17 @@ ring_buffer_free(struct ring_buffer *buffer)
 {
 	int cpu;
 
-	get_online_cpus();
-
 #ifdef CONFIG_HOTPLUG_CPU
-	unregister_cpu_notifier(&buffer->cpu_notify);
+	cpu_notifier_register_begin();
+	__unregister_cpu_notifier(&buffer->cpu_notify);
 #endif
 
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
-	put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+	cpu_notifier_register_done();
+#endif
 
 	kfree(buffer->buffers);
 	free_cpumask_var(buffer->cpumask);
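
For the teardown direction, the ring_buffer_free() hunk above is the model. Here is a hedged sketch of the same unregistration pattern, reusing the hypothetical foobar names from the sketch after the kernel/cpu.c diff (none of these identifiers exist in the kernel):

/* Hypothetical per-CPU cleanup helper for the made-up "foobar" subsystem. */
static void foobar_teardown_cpu(unsigned int cpu)
{
	/* free per-CPU state here */
}

static void foobar_exit(void)
{
	unsigned int cpu;

	cpu_notifier_register_begin();

	/*
	 * Unlink the callback while hotplug is excluded, so it can never
	 * run against the per-CPU state freed below.
	 */
	__unregister_cpu_notifier(&foobar_cpu_notifier);

	for_each_online_cpu(cpu)
		foobar_teardown_cpu(cpu);

	cpu_notifier_register_done();
}
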