-rw-r--r--	arch/powerpc/kernel/perf_event.c	21
-rw-r--r--	arch/sh/kernel/perf_event.c	20
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	70
-rw-r--r--	arch/x86/kernel/cpu/perf_event_amd.c	60
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	5
-rw-r--r--	include/linux/perf_event.h	16
-rw-r--r--	kernel/perf_event.c	15
7 files changed, 126 insertions(+), 81 deletions(-)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 5120bd44f69a..fbe101d7505d 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1287,7 +1287,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
 	irq_exit();
 }
 
-void hw_perf_event_setup(int cpu)
+static void power_pmu_setup(int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
@@ -1297,6 +1297,23 @@ void hw_perf_event_setup(int cpu)
 	cpuhw->mmcr[0] = MMCR0_FC;
 }
 
+static int __cpuinit
+power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (long)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_UP_PREPARE:
+		power_pmu_setup(cpu);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
 int register_power_pmu(struct power_pmu *pmu)
 {
 	if (ppmu)
@@ -1314,5 +1331,7 @@ int register_power_pmu(struct power_pmu *pmu)
 		freeze_events_kernel = MMCR0_FCHV;
 #endif /* CONFIG_PPC64 */
 
+	perf_cpu_notifier(power_pmu_notifier);
+
 	return 0;
 }
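
For reference, the perf_cpu_notifier(power_pmu_notifier) call added above expands roughly as follows (per the macro introduced in the include/linux/perf_event.h hunk further down): the callback is replayed by hand for the boot CPU, which never goes through the hotplug path at boot, and only then registered for the remaining CPUs.

	/* approximate expansion of perf_cpu_notifier(power_pmu_notifier) */
	static struct notifier_block power_pmu_notifier_nb __cpuinitdata =
		{ .notifier_call = power_pmu_notifier, .priority = 20 };

	power_pmu_notifier(&power_pmu_notifier_nb, (unsigned long)CPU_UP_PREPARE,
		(void *)(unsigned long)smp_processor_id());
	power_pmu_notifier(&power_pmu_notifier_nb, (unsigned long)CPU_STARTING,
		(void *)(unsigned long)smp_processor_id());
	power_pmu_notifier(&power_pmu_notifier_nb, (unsigned long)CPU_ONLINE,
		(void *)(unsigned long)smp_processor_id());
	register_cpu_notifier(&power_pmu_notifier_nb);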
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7ff0943e7a08..9f253e9cce01 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -275,13 +275,30 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 	return &pmu;
 }
 
-void hw_perf_event_setup(int cpu)
+static void sh_pmu_setup(int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
 	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
 }
 
+static int __cpuinit
+sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (long)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_UP_PREPARE:
+		sh_pmu_setup(cpu);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
 void hw_perf_enable(void)
 {
 	if (!sh_pmu_initialized())
@@ -308,5 +325,6 @@ int register_sh_pmu(struct sh_pmu *pmu)
 
 	WARN_ON(pmu->num_events > MAX_HWEVENTS);
 
+	perf_cpu_notifier(sh_pmu_notifier);
 	return 0;
 }
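
Both new notifiers mask the action with ~CPU_TASKS_FROZEN, so the suspend/resume variants take the same path as ordinary hotplug: the _FROZEN action codes are defined as the base action with the CPU_TASKS_FROZEN bit or-ed in (include/linux/notifier.h in this era), e.g.:

	#define CPU_TASKS_FROZEN	0x0010
	#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)

	/* hence, in sh_pmu_notifier() and friends: */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:	/* matches CPU_UP_PREPARE_FROZEN too */
		sh_pmu_setup(cpu);
		break;
	}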
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 42aafd11e170..585d5608ae6b 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -157,6 +157,11 @@ struct x86_pmu {
 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
 						 struct perf_event *event);
 	struct event_constraint *event_constraints;
+
+	void		(*cpu_prepare)(int cpu);
+	void		(*cpu_starting)(int cpu);
+	void		(*cpu_dying)(int cpu);
+	void		(*cpu_dead)(int cpu);
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -293,7 +298,7 @@ static inline bool bts_available(void)
 	return x86_pmu.enable_bts != NULL;
 }
 
-static inline void init_debug_store_on_cpu(int cpu)
+static void init_debug_store_on_cpu(int cpu)
 {
 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
 
@@ -305,7 +310,7 @@ static inline void init_debug_store_on_cpu(int cpu)
 		     (u32)((u64)(unsigned long)ds >> 32));
 }
 
-static inline void fini_debug_store_on_cpu(int cpu)
+static void fini_debug_store_on_cpu(int cpu)
 {
 	if (!per_cpu(cpu_hw_events, cpu).ds)
 		return;
@@ -1337,6 +1342,39 @@ undo:
 #include "perf_event_p6.c"
 #include "perf_event_intel.c"
 
+static int __cpuinit
+x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (long)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_UP_PREPARE:
+		if (x86_pmu.cpu_prepare)
+			x86_pmu.cpu_prepare(cpu);
+		break;
+
+	case CPU_STARTING:
+		if (x86_pmu.cpu_starting)
+			x86_pmu.cpu_starting(cpu);
+		break;
+
+	case CPU_DYING:
+		if (x86_pmu.cpu_dying)
+			x86_pmu.cpu_dying(cpu);
+		break;
+
+	case CPU_DEAD:
+		if (x86_pmu.cpu_dead)
+			x86_pmu.cpu_dead(cpu);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
 static void __init pmu_check_apic(void)
 {
 	if (cpu_has_apic)
@@ -1415,6 +1453,8 @@ void __init init_hw_perf_events(void)
 	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
 	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
 	pr_info("... event mask:             %016Lx\n", perf_event_mask);
+
+	perf_cpu_notifier(x86_pmu_notifier);
 }
 
 static inline void x86_pmu_read(struct perf_event *event)
@@ -1674,29 +1714,3 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 
 	return entry;
 }
-
-void hw_perf_event_setup_online(int cpu)
-{
-	init_debug_store_on_cpu(cpu);
-
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		amd_pmu_cpu_online(cpu);
-		break;
-	default:
-		return;
-	}
-}
-
-void hw_perf_event_setup_offline(int cpu)
-{
-	init_debug_store_on_cpu(cpu);
-
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		amd_pmu_cpu_offline(cpu);
-		break;
-	default:
-		return;
-	}
-}
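
The deleted hw_perf_event_setup_online()/_offline() pair dispatched on boot_cpu_data.x86_vendor; that role moves into the optional cpu_* callbacks of struct x86_pmu, which x86_pmu_notifier() forwards to when present. A vendor PMU opts in by filling only the hooks it needs; a hypothetical minimal user (foo_* names are illustrative, not from this patch):

	static void foo_pmu_cpu_prepare(int cpu)
	{
		/* allocate and initialize per-cpu state for 'cpu' */
	}

	static void foo_pmu_cpu_dead(int cpu)
	{
		/* release per-cpu state of the now-offline 'cpu' */
	}

	static __initconst struct x86_pmu foo_pmu = {
		.name		= "foo",
		/* ... mandatory operations elided ... */
		.cpu_prepare	= foo_pmu_cpu_prepare,
		.cpu_dead	= foo_pmu_cpu_dead,
		/* .cpu_starting/.cpu_dying left NULL: notifier skips them */
	};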
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 8f3dbfda3c4f..014528ba7d57 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -271,28 +271,6 @@ done:
 	return &emptyconstraint;
 }
 
-static __initconst struct x86_pmu amd_pmu = {
-	.name			= "AMD",
-	.handle_irq		= x86_pmu_handle_irq,
-	.disable_all		= x86_pmu_disable_all,
-	.enable_all		= x86_pmu_enable_all,
-	.enable			= x86_pmu_enable_event,
-	.disable		= x86_pmu_disable_event,
-	.eventsel		= MSR_K7_EVNTSEL0,
-	.perfctr		= MSR_K7_PERFCTR0,
-	.event_map		= amd_pmu_event_map,
-	.raw_event		= amd_pmu_raw_event,
-	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_events		= 4,
-	.event_bits		= 48,
-	.event_mask		= (1ULL << 48) - 1,
-	.apic			= 1,
-	/* use highest bit to detect overflow */
-	.max_period		= (1ULL << 47) - 1,
-	.get_event_constraints	= amd_get_event_constraints,
-	.put_event_constraints	= amd_put_event_constraints
-};
-
 static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 {
 	struct amd_nb *nb;
@@ -378,6 +356,31 @@ static void amd_pmu_cpu_offline(int cpu)
 	raw_spin_unlock(&amd_nb_lock);
 }
 
+static __initconst struct x86_pmu amd_pmu = {
+	.name			= "AMD",
+	.handle_irq		= x86_pmu_handle_irq,
+	.disable_all		= x86_pmu_disable_all,
+	.enable_all		= x86_pmu_enable_all,
+	.enable			= x86_pmu_enable_event,
+	.disable		= x86_pmu_disable_event,
+	.eventsel		= MSR_K7_EVNTSEL0,
+	.perfctr		= MSR_K7_PERFCTR0,
+	.event_map		= amd_pmu_event_map,
+	.raw_event		= amd_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
+	.num_events		= 4,
+	.event_bits		= 48,
+	.event_mask		= (1ULL << 48) - 1,
+	.apic			= 1,
+	/* use highest bit to detect overflow */
+	.max_period		= (1ULL << 47) - 1,
+	.get_event_constraints	= amd_get_event_constraints,
+	.put_event_constraints	= amd_put_event_constraints,
+
+	.cpu_prepare		= amd_pmu_cpu_online,
+	.cpu_dead		= amd_pmu_cpu_offline,
+};
+
 static __init int amd_pmu_init(void)
 {
 	/* Performance-monitoring supported from K7 and later: */
@@ -390,11 +393,6 @@ static __init int amd_pmu_init(void)
 	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
 	       sizeof(hw_cache_event_ids));
 
-	/*
-	 * explicitly initialize the boot cpu, other cpus will get
-	 * the cpu hotplug callbacks from smp_init()
-	 */
-	amd_pmu_cpu_online(smp_processor_id());
 	return 0;
 }
 
@@ -405,12 +403,4 @@ static int amd_pmu_init(void)
 	return 0;
 }
 
-static void amd_pmu_cpu_online(int cpu)
-{
-}
-
-static void amd_pmu_cpu_offline(int cpu)
-{
-}
-
 #endif
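
With .cpu_prepare pointing at amd_pmu_cpu_online, the explicit boot-CPU call deleted from amd_pmu_init() becomes redundant; the expected call flow, sketched under the assumption that init_hw_perf_events() runs before secondary CPUs boot:

	/*
	 * boot CPU:   init_hw_perf_events()
	 *               -> perf_cpu_notifier(x86_pmu_notifier)
	 *                  -> x86_pmu_notifier(CPU_UP_PREPARE)
	 *                     -> x86_pmu.cpu_prepare == amd_pmu_cpu_online
	 * other CPUs: cpu_up()/cpu_down() notifier chain
	 *               -> x86_pmu_notifier(CPU_UP_PREPARE / CPU_DEAD)
	 *                  -> amd_pmu_cpu_online / amd_pmu_cpu_offline
	 */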
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 44b60c852107..12e811a7d747 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -870,7 +870,10 @@ static __initconst struct x86_pmu intel_pmu = {
 	.max_period		= (1ULL << 31) - 1,
 	.enable_bts		= intel_pmu_enable_bts,
 	.disable_bts		= intel_pmu_disable_bts,
-	.get_event_constraints	= intel_get_event_constraints
+	.get_event_constraints	= intel_get_event_constraints,
+
+	.cpu_starting		= init_debug_store_on_cpu,
+	.cpu_dying		= fini_debug_store_on_cpu,
 };
 
 static __init int intel_pmu_init(void)
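
Intel hooks the pre-existing debug-store helpers in at CPU_STARTING/CPU_DYING because the DS area backing BTS is per-cpu MSR state that has to be programmed and torn down on the CPU in question. For reference, the .cpu_starting helper as reconstructed from the context in the perf_event.c hunks above (the !ds guard and the wrmsr_on_cpu() call are taken from the surrounding upstream code, not shown in this diff):

	static void init_debug_store_on_cpu(int cpu)
	{
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			return;

		wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
			     (u32)((u64)(unsigned long)ds),
			     (u32)((u64)(unsigned long)ds >> 32));
	}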
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6f8cd7da1a01..80acbf3d5de1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -936,5 +936,21 @@ static inline void perf_event_disable(struct perf_event *event) { }
 #define perf_output_put(handle, x) \
 	perf_output_copy((handle), &(x), sizeof(x))
 
+/*
+ * This has to have a higher priority than migration_notifier in sched.c.
+ */
+#define perf_cpu_notifier(fn)					\
+do {								\
+	static struct notifier_block fn##_nb __cpuinitdata =	\
+		{ .notifier_call = fn, .priority = 20 };	\
+	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,		\
+		(void *)(unsigned long)smp_processor_id());	\
+	fn(&fn##_nb, (unsigned long)CPU_STARTING,		\
+		(void *)(unsigned long)smp_processor_id());	\
+	fn(&fn##_nb, (unsigned long)CPU_ONLINE,			\
+		(void *)(unsigned long)smp_processor_id());	\
+	register_cpu_notifier(&fn##_nb);			\
+} while (0)
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_PERF_EVENT_H */
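
The macro registers the block at priority 20 (higher than the migration notifier in sched.c, per the comment) and first invokes the callback directly for CPU_UP_PREPARE, CPU_STARTING and CPU_ONLINE on the current CPU, so the CPU already running at init time sees the same sequence a hotplugged CPU would. A hypothetical caller (foo_* names are illustrative):

	static int __cpuinit
	foo_cpu_notifier(struct notifier_block *self, unsigned long action,
			 void *hcpu)
	{
		unsigned int cpu = (long)hcpu;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_UP_PREPARE:
			foo_setup(cpu);	/* hypothetical per-cpu init */
			break;
		default:
			break;
		}

		return NOTIFY_OK;
	}

	/* in an __init path: covers the boot CPU at once, later CPUs
	 * through the notifier chain */
	perf_cpu_notifier(foo_cpu_notifier);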
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 4393b9e73740..73329dedb5ad 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -81,10 +81,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
 void __weak hw_perf_disable(void)		{ barrier(); }
 void __weak hw_perf_enable(void)		{ barrier(); }
 
-void __weak hw_perf_event_setup(int cpu)	{ barrier(); }
-void __weak hw_perf_event_setup_online(int cpu)	{ barrier(); }
-void __weak hw_perf_event_setup_offline(int cpu)	{ barrier(); }
-
 int __weak
 hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
@@ -5382,8 +5378,6 @@ static void __cpuinit perf_event_init_cpu(int cpu)
 	spin_lock(&perf_resource_lock);
 	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
 	spin_unlock(&perf_resource_lock);
-
-	hw_perf_event_setup(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -5423,20 +5417,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		perf_event_init_cpu(cpu);
 		break;
 
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		hw_perf_event_setup_online(cpu);
-		break;
-
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		perf_event_exit_cpu(cpu);
 		break;
 
-	case CPU_DEAD:
-		hw_perf_event_setup_offline(cpu);
-		break;
-
 	default:
 		break;
 	}
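
With the weak hooks gone, the generic notifier is left handling only the generic per-cpu context; the surviving switch in perf_cpu_notify() reduces to the following (reconstructed; the CPU_UP_PREPARE labels sit just above the hunk shown):

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_event_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}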