path: root/arch/x86/kernel/cpu/perf_event.c
author	Robert Richter <robert.richter@amd.com>	2010-03-29 12:36:50 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-04-02 13:52:02 -0400
commit	948b1bb89a44561560531394c18da4a99215f772 (patch)
tree	b44054c805d049ffeba328c3bfc3063f5d8d11ba /arch/x86/kernel/cpu/perf_event.c
parent	ec5e61aabeac58670691bd0613388d16697d0d81 (diff)
perf, x86: Undo some *_counter* -> *_event* renames
The big rename,

    cdd6c48 perf: Do the big rename: Performance Counters -> Performance Events

accidentally renamed some members of structs that were named after registers in the spec. To avoid confusion this patch reverts some changes. The related specs are the MSR descriptions in AMD's BKDGs and the ARCHITECTURAL PERFORMANCE MONITORING section in the Intel 64 and IA-32 Architectures Software Developer's Manuals.

This patch does:

    $ sed -i -e 's:num_events:num_counters:g' \
        arch/x86/include/asm/perf_event.h \
        arch/x86/kernel/cpu/perf_event_amd.c \
        arch/x86/kernel/cpu/perf_event.c \
        arch/x86/kernel/cpu/perf_event_intel.c \
        arch/x86/kernel/cpu/perf_event_p6.c \
        arch/x86/kernel/cpu/perf_event_p4.c \
        arch/x86/oprofile/op_model_ppro.c

    $ sed -i -e 's:event_bits:cntval_bits:g' -e 's:event_mask:cntval_mask:g' \
        arch/x86/kernel/cpu/perf_event_amd.c \
        arch/x86/kernel/cpu/perf_event.c \
        arch/x86/kernel/cpu/perf_event_intel.c \
        arch/x86/kernel/cpu/perf_event_p6.c \
        arch/x86/kernel/cpu/perf_event_p4.c

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1269880612-25800-2-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
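For reference, a quick sanity check one might run after such a sed pass (not part of the commit; the pattern and pathspecs below are only an illustration) is to grep the touched directories for any remaining uses of the old x86_pmu field names. An empty result means the rename caught every user:

    $ git grep -nE 'x86_pmu\.(num_events|event_bits|event_mask)' -- \
        arch/x86/kernel/cpu/ arch/x86/oprofile/ arch/x86/include/asm/perf_event.h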
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c  74
1 file changed, 37 insertions(+), 37 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b5343566181..9daaa1ef504 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -195,10 +195,10 @@ struct x86_pmu {
 	u64		(*event_map)(int);
 	u64		(*raw_event)(u64);
 	int		max_events;
-	int		num_events;
-	int		num_events_fixed;
-	int		event_bits;
-	u64		event_mask;
+	int		num_counters;
+	int		num_counters_fixed;
+	int		cntval_bits;
+	u64		cntval_mask;
 	int		apic;
 	u64		max_period;
 	struct event_constraint *
@@ -268,7 +268,7 @@ static u64
 x86_perf_event_update(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	int shift = 64 - x86_pmu.event_bits;
+	int shift = 64 - x86_pmu.cntval_bits;
 	u64 prev_raw_count, new_raw_count;
 	int idx = hwc->idx;
 	s64 delta;
@@ -320,12 +320,12 @@ static bool reserve_pmc_hardware(void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		disable_lapic_nmi_watchdog();
 
-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
 		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
 			goto perfctr_fail;
 	}
 
-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
 		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
 			goto eventsel_fail;
 	}
@@ -336,7 +336,7 @@ eventsel_fail:
 	for (i--; i >= 0; i--)
 		release_evntsel_nmi(x86_pmu.eventsel + i);
 
-	i = x86_pmu.num_events;
+	i = x86_pmu.num_counters;
 
 perfctr_fail:
 	for (i--; i >= 0; i--)
@@ -352,7 +352,7 @@ static void release_pmc_hardware(void)
 {
 	int i;
 
-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
 		release_perfctr_nmi(x86_pmu.perfctr + i);
 		release_evntsel_nmi(x86_pmu.eventsel + i);
 	}
@@ -547,7 +547,7 @@ static void x86_pmu_disable_all(void)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
@@ -582,7 +582,7 @@ static void x86_pmu_enable_all(int added)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		struct perf_event *event = cpuc->events[idx];
 		u64 val;
 
@@ -657,14 +657,14 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	 * assign events to counters starting with most
 	 * constrained events.
 	 */
-	wmax = x86_pmu.num_events;
+	wmax = x86_pmu.num_counters;
 
 	/*
 	 * when fixed event counters are present,
 	 * wmax is incremented by 1 to account
 	 * for one more choice
 	 */
-	if (x86_pmu.num_events_fixed)
+	if (x86_pmu.num_counters_fixed)
 		wmax++;
 
 	for (w = 1, num = n; num && w <= wmax; w++) {
@@ -714,7 +714,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 	struct perf_event *event;
 	int n, max_count;
 
-	max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
+	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
 
 	/* current number of events already accepted */
 	n = cpuc->n_events;
@@ -904,7 +904,7 @@ x86_perf_event_set_period(struct perf_event *event)
 	atomic64_set(&hwc->prev_count, (u64)-left);
 
 	wrmsrl(hwc->event_base + idx,
-			(u64)(-left) & x86_pmu.event_mask);
+			(u64)(-left) & x86_pmu.cntval_mask);
 
 	perf_event_update_userpage(event);
 
@@ -987,7 +987,7 @@ void perf_event_print_debug(void)
 	unsigned long flags;
 	int cpu, idx;
 
-	if (!x86_pmu.num_events)
+	if (!x86_pmu.num_counters)
 		return;
 
 	local_irq_save(flags);
@@ -1011,7 +1011,7 @@ void perf_event_print_debug(void)
 	}
 	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
 		rdmsrl(x86_pmu.perfctr + idx, pmc_count);
 
@@ -1024,7 +1024,7 @@ void perf_event_print_debug(void)
 		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
 			cpu, idx, prev_left);
 	}
-	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
 
 		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1089,7 +1089,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 
@@ -1097,7 +1097,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 		hwc = &event->hw;
 
 		val = x86_perf_event_update(event);
-		if (val & (1ULL << (x86_pmu.event_bits - 1)))
+		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
 			continue;
 
 		/*
@@ -1401,46 +1401,46 @@ void __init init_hw_perf_events(void)
 	if (x86_pmu.quirks)
 		x86_pmu.quirks();
 
-	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
+	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
-		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
+		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
 	}
-	x86_pmu.intel_ctrl = (1 << x86_pmu.num_events) - 1;
-	perf_max_events = x86_pmu.num_events;
+	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+	perf_max_events = x86_pmu.num_counters;
 
-	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
+	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
-		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
+		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
 	}
 
 	x86_pmu.intel_ctrl |=
-		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
+		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
 	perf_events_lapic_init();
 	register_die_notifier(&perf_event_nmi_notifier);
 
 	unconstrained = (struct event_constraint)
-		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
-				   0, x86_pmu.num_events);
+		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
+				   0, x86_pmu.num_counters);
 
 	if (x86_pmu.event_constraints) {
 		for_each_event_constraint(c, x86_pmu.event_constraints) {
 			if (c->cmask != INTEL_ARCH_FIXED_MASK)
 				continue;
 
-			c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
-			c->weight += x86_pmu.num_events;
+			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+			c->weight += x86_pmu.num_counters;
 		}
 	}
 
 	pr_info("... version: %d\n", x86_pmu.version);
-	pr_info("... bit width: %d\n", x86_pmu.event_bits);
-	pr_info("... generic registers: %d\n", x86_pmu.num_events);
-	pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
+	pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
+	pr_info("... generic registers: %d\n", x86_pmu.num_counters);
+	pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
 	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
-	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
+	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
 	pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
 
 	perf_cpu_notifier(x86_pmu_notifier);