author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-03-05 07:01:18 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-03-10 07:22:24 -0500
commit		3f6da3905398826d85731247e7fbcf53400c18bd (patch)
tree		3e01248974385999fb8e7f8d5daa53b46228f649 /arch/x86
parent		dc1d628a67a8f042e711ea5accc0beedc3ef0092 (diff)
perf: Rework and fix the arch CPU-hotplug hooks
Remove the hw_perf_event_*() hotplug hooks in favour of per PMU hotplug notifiers. This has the advantage of reducing the static weak interface as well as exposing all hotplug actions to the PMU.

Use this to fix x86 hotplug usage where we did things in ONLINE which should have been done in UP_PREPARE or STARTING.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
LKML-Reference: <20100305154128.736225361@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
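For illustration only (not part of this patch): a minimal sketch of how a PMU driver plugs into the new per-PMU hooks. The example_pmu_* names are hypothetical; only the x86_pmu fields (cpu_prepare, cpu_starting, cpu_dying, cpu_dead) and perf_cpu_notifier() come from the patch below.

/*
 * Hypothetical PMU adopting the new per-PMU hotplug hooks.
 * cpu_prepare is called from the CPU_UP_PREPARE notifier on a
 * control CPU (process context, may sleep); cpu_starting runs on
 * the incoming CPU itself, early enough to program per-CPU state.
 */
static void example_pmu_cpu_prepare(int cpu)
{
	/* allocate per-CPU resources for @cpu */
}

static void example_pmu_cpu_starting(int cpu)
{
	/* program per-CPU hardware state on the CPU coming up */
}

static __initconst struct x86_pmu example_pmu = {
	/* ... the usual x86_pmu methods ... */
	.cpu_prepare	= example_pmu_cpu_prepare,
	.cpu_starting	= example_pmu_cpu_starting,
};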
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	70
-rw-r--r--	arch/x86/kernel/cpu/perf_event_amd.c	60
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	5
3 files changed, 71 insertions(+), 64 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 42aafd11e170..585d5608ae6b 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -157,6 +157,11 @@ struct x86_pmu {
 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
 						 struct perf_event *event);
 	struct event_constraint *event_constraints;
+
+	void		(*cpu_prepare)(int cpu);
+	void		(*cpu_starting)(int cpu);
+	void		(*cpu_dying)(int cpu);
+	void		(*cpu_dead)(int cpu);
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -293,7 +298,7 @@ static inline bool bts_available(void)
 	return x86_pmu.enable_bts != NULL;
 }
 
-static inline void init_debug_store_on_cpu(int cpu)
+static void init_debug_store_on_cpu(int cpu)
 {
 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
 
@@ -305,7 +310,7 @@ static inline void init_debug_store_on_cpu(int cpu)
 		     (u32)((u64)(unsigned long)ds >> 32));
 }
 
-static inline void fini_debug_store_on_cpu(int cpu)
+static void fini_debug_store_on_cpu(int cpu)
 {
 	if (!per_cpu(cpu_hw_events, cpu).ds)
 		return;
@@ -1337,6 +1342,39 @@ undo:
 #include "perf_event_p6.c"
 #include "perf_event_intel.c"
 
+static int __cpuinit
+x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (long)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_UP_PREPARE:
+		if (x86_pmu.cpu_prepare)
+			x86_pmu.cpu_prepare(cpu);
+		break;
+
+	case CPU_STARTING:
+		if (x86_pmu.cpu_starting)
+			x86_pmu.cpu_starting(cpu);
+		break;
+
+	case CPU_DYING:
+		if (x86_pmu.cpu_dying)
+			x86_pmu.cpu_dying(cpu);
+		break;
+
+	case CPU_DEAD:
+		if (x86_pmu.cpu_dead)
+			x86_pmu.cpu_dead(cpu);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
 static void __init pmu_check_apic(void)
 {
 	if (cpu_has_apic)
@@ -1415,6 +1453,8 @@ void __init init_hw_perf_events(void)
 	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
 	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
 	pr_info("... event mask: %016Lx\n", perf_event_mask);
+
+	perf_cpu_notifier(x86_pmu_notifier);
 }
 
 static inline void x86_pmu_read(struct perf_event *event)
@@ -1674,29 +1714,3 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 
 	return entry;
 }
-
-void hw_perf_event_setup_online(int cpu)
-{
-	init_debug_store_on_cpu(cpu);
-
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		amd_pmu_cpu_online(cpu);
-		break;
-	default:
-		return;
-	}
-}
-
-void hw_perf_event_setup_offline(int cpu)
-{
-	init_debug_store_on_cpu(cpu);
-
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		amd_pmu_cpu_offline(cpu);
-		break;
-	default:
-		return;
-	}
-}
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 8f3dbfda3c4f..014528ba7d57 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -271,28 +271,6 @@ done:
 	return &emptyconstraint;
 }
 
-static __initconst struct x86_pmu amd_pmu = {
-	.name			= "AMD",
-	.handle_irq		= x86_pmu_handle_irq,
-	.disable_all		= x86_pmu_disable_all,
-	.enable_all		= x86_pmu_enable_all,
-	.enable			= x86_pmu_enable_event,
-	.disable		= x86_pmu_disable_event,
-	.eventsel		= MSR_K7_EVNTSEL0,
-	.perfctr		= MSR_K7_PERFCTR0,
-	.event_map		= amd_pmu_event_map,
-	.raw_event		= amd_pmu_raw_event,
-	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_events		= 4,
-	.event_bits		= 48,
-	.event_mask		= (1ULL << 48) - 1,
-	.apic			= 1,
-	/* use highest bit to detect overflow */
-	.max_period		= (1ULL << 47) - 1,
-	.get_event_constraints	= amd_get_event_constraints,
-	.put_event_constraints	= amd_put_event_constraints
-};
-
 static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 {
 	struct amd_nb *nb;
@@ -378,6 +356,31 @@ static void amd_pmu_cpu_offline(int cpu)
 	raw_spin_unlock(&amd_nb_lock);
 }
 
+static __initconst struct x86_pmu amd_pmu = {
+	.name			= "AMD",
+	.handle_irq		= x86_pmu_handle_irq,
+	.disable_all		= x86_pmu_disable_all,
+	.enable_all		= x86_pmu_enable_all,
+	.enable			= x86_pmu_enable_event,
+	.disable		= x86_pmu_disable_event,
+	.eventsel		= MSR_K7_EVNTSEL0,
+	.perfctr		= MSR_K7_PERFCTR0,
+	.event_map		= amd_pmu_event_map,
+	.raw_event		= amd_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
+	.num_events		= 4,
+	.event_bits		= 48,
+	.event_mask		= (1ULL << 48) - 1,
+	.apic			= 1,
+	/* use highest bit to detect overflow */
+	.max_period		= (1ULL << 47) - 1,
+	.get_event_constraints	= amd_get_event_constraints,
+	.put_event_constraints	= amd_put_event_constraints,
+
+	.cpu_prepare		= amd_pmu_cpu_online,
+	.cpu_dead		= amd_pmu_cpu_offline,
+};
+
 static __init int amd_pmu_init(void)
 {
 	/* Performance-monitoring supported from K7 and later: */
@@ -390,11 +393,6 @@ static __init int amd_pmu_init(void)
 	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
 	       sizeof(hw_cache_event_ids));
 
-	/*
-	 * explicitly initialize the boot cpu, other cpus will get
-	 * the cpu hotplug callbacks from smp_init()
-	 */
-	amd_pmu_cpu_online(smp_processor_id());
 	return 0;
 }
 
@@ -405,12 +403,4 @@ static int amd_pmu_init(void)
 	return 0;
 }
 
-static void amd_pmu_cpu_online(int cpu)
-{
-}
-
-static void amd_pmu_cpu_offline(int cpu)
-{
-}
-
 #endif
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 44b60c852107..12e811a7d747 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -870,7 +870,10 @@ static __initconst struct x86_pmu intel_pmu = {
 	.max_period		= (1ULL << 31) - 1,
 	.enable_bts		= intel_pmu_enable_bts,
 	.disable_bts		= intel_pmu_disable_bts,
-	.get_event_constraints	= intel_get_event_constraints
+	.get_event_constraints	= intel_get_event_constraints,
+
+	.cpu_starting		= init_debug_store_on_cpu,
+	.cpu_dying		= fini_debug_store_on_cpu,
 };
 
 static __init int intel_pmu_init(void)