diff options (diffstat)

-rw-r--r--  include/linux/cpuhotplug.h |  2
-rw-r--r--  include/linux/perf_event.h |  9
-rw-r--r--  kernel/cpu.c               | 11
-rw-r--r--  kernel/events/core.c       | 56
4 files changed, 32 insertions(+), 46 deletions(-)
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index d769ec941fd3..067082e3fd41 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -4,6 +4,7 @@
 enum cpuhp_state {
 	CPUHP_OFFLINE,
 	CPUHP_CREATE_THREADS,
+	CPUHP_PERF_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
@@ -22,6 +23,7 @@ enum cpuhp_state {
 	CPUHP_AP_ONLINE_IDLE,
 	CPUHP_AP_SMPBOOT_THREADS,
 	CPUHP_AP_X86_VDSO_VMA_ONLINE,
+	CPUHP_AP_PERF_ONLINE,
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END		= CPUHP_AP_ONLINE_DYN + 30,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1a827cecd62f..9abeb6948e70 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1354,4 +1354,13 @@ _name##_show(struct device *dev, \
 									\
 static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
 
+/* Performance counter hotplug functions */
+#ifdef CONFIG_PERF_EVENTS
+int perf_event_init_cpu(unsigned int cpu);
+int perf_event_exit_cpu(unsigned int cpu);
+#else
+#define perf_event_init_cpu	NULL
+#define perf_event_exit_cpu	NULL
+#endif
+
 #endif /* _LINUX_PERF_EVENT_H */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fe71ce4e60f1..3705d9043c08 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1180,6 +1180,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.teardown		= NULL,
 		.cant_stop		= true,
 	},
+	[CPUHP_PERF_PREPARE] = {
+		.name			= "perf prepare",
+		.startup		= perf_event_init_cpu,
+		.teardown		= perf_event_exit_cpu,
+	},
 	/*
 	 * Preparatory and dead notifiers. Will be replaced once the notifiers
 	 * are converted to states.
@@ -1257,6 +1262,12 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup		= smpboot_unpark_threads,
 		.teardown		= NULL,
 	},
+	[CPUHP_AP_PERF_ONLINE] = {
+		.name			= "perf online",
+		.startup		= perf_event_init_cpu,
+		.teardown		= perf_event_exit_cpu,
+	},
+
 	/*
 	 * Online/down_prepare notifiers. Will be removed once the notifiers
 	 * are converted to states.
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 43d43a2d5811..f3ef1c29a7c9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10255,7 +10255,7 @@ static void __init perf_event_init_all_cpus(void)
 	}
 }
 
-static void perf_event_init_cpu(int cpu)
+int perf_event_init_cpu(unsigned int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
@@ -10268,6 +10268,7 @@ static void perf_event_init_cpu(int cpu)
 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
 	}
 	mutex_unlock(&swhash->hlist_mutex);
+	return 0;
 }
 
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
@@ -10299,14 +10300,17 @@ static void perf_event_exit_cpu_context(int cpu)
 	}
 	srcu_read_unlock(&pmus_srcu, idx);
 }
+#else
+
+static void perf_event_exit_cpu_context(int cpu) { }
+
+#endif
 
-static void perf_event_exit_cpu(int cpu)
+int perf_event_exit_cpu(unsigned int cpu)
 {
 	perf_event_exit_cpu_context(cpu);
+	return 0;
 }
-#else
-static inline void perf_event_exit_cpu(int cpu) { }
-#endif
 
 static int
 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
@@ -10328,46 +10332,6 @@ static struct notifier_block perf_reboot_notifier = {
 	.priority = INT_MIN,
 };
 
-static int
-perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-
-	case CPU_UP_PREPARE:
-		/*
-		 * This must be done before the CPU comes alive, because the
-		 * moment we can run tasks we can encounter (software) events.
-		 *
-		 * Specifically, someone can have inherited events on kthreadd
-		 * or a pre-existing worker thread that gets re-bound.
-		 */
-		perf_event_init_cpu(cpu);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		/*
-		 * This must be done before the CPU dies because after that an
-		 * active event might want to IPI the CPU and that'll not work
-		 * so great for dead CPUs.
-		 *
-		 * XXX smp_call_function_single() return -ENXIO without a warn
-		 * so we could possibly deal with this.
-		 *
-		 * This is safe against new events arriving because
-		 * sys_perf_event_open() serializes against hotplug using
-		 * get_online_cpus().
-		 */
-		perf_event_exit_cpu(cpu);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
 void __init perf_event_init(void)
 {
 	int ret;
@@ -10380,7 +10344,7 @@ void __init perf_event_init(void)
 	perf_pmu_register(&perf_cpu_clock, NULL, -1);
 	perf_pmu_register(&perf_task_clock, NULL, -1);
 	perf_tp_register();
-	perf_cpu_notifier(perf_cpu_notify);
+	perf_event_init_cpu(smp_processor_id());
 	register_reboot_notifier(&perf_reboot_notifier);
 
 	ret = init_hw_breakpoint();