author     Robert Richter <robert.richter@amd.com>   2010-03-29 12:36:50 -0400
committer  Ingo Molnar <mingo@elte.hu>               2010-04-02 13:52:02 -0400
commit     948b1bb89a44561560531394c18da4a99215f772 (patch)
tree       b44054c805d049ffeba328c3bfc3063f5d8d11ba /arch
parent     ec5e61aabeac58670691bd0613388d16697d0d81 (diff)
perf, x86: Undo some *_counter* -> *_event* renames
The big rename:

  cdd6c48 perf: Do the big rename: Performance Counters -> Performance Events

accidentally renamed some members of structs that were named after
registers in the spec. To avoid confusion this patch reverts some
changes. The related specs are MSR descriptions in AMD's BKDGs and the
ARCHITECTURAL PERFORMANCE MONITORING section in the Intel 64 and IA-32
Architectures Software Developer's Manuals.

This patch does:

 $ sed -i -e 's:num_events:num_counters:g' \
   arch/x86/include/asm/perf_event.h \
   arch/x86/kernel/cpu/perf_event_amd.c \
   arch/x86/kernel/cpu/perf_event.c \
   arch/x86/kernel/cpu/perf_event_intel.c \
   arch/x86/kernel/cpu/perf_event_p6.c \
   arch/x86/kernel/cpu/perf_event_p4.c \
   arch/x86/oprofile/op_model_ppro.c

 $ sed -i -e 's:event_bits:cntval_bits:g' -e 's:event_mask:cntval_mask:g' \
   arch/x86/kernel/cpu/perf_event_amd.c \
   arch/x86/kernel/cpu/perf_event.c \
   arch/x86/kernel/cpu/perf_event_intel.c \
   arch/x86/kernel/cpu/perf_event_p6.c \
   arch/x86/kernel/cpu/perf_event_p4.c

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1269880612-25800-2-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
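For reference, the renamed bitfields mirror CPUID leaf 0xA as laid out in the
cpuid10_eax/cpuid10_edx unions touched by the first hunk below. A minimal
userspace sketch (illustrative only, not part of this patch; it assumes GCC's
__get_cpuid() from <cpuid.h>) that decodes the same fields:

 /* Decode CPUID leaf 0xA the way the cpuid10_eax/cpuid10_edx bitfields do. */
 #include <stdio.h>
 #include <cpuid.h>

 int main(void)
 {
 	unsigned int eax, ebx, ecx, edx;

 	/* __get_cpuid() returns 0 if the leaf is not supported. */
 	if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
 		fprintf(stderr, "CPUID leaf 0xA not supported\n");
 		return 1;
 	}

 	/* eax layout: version_id:8, num_counters:8, bit_width:8, mask_length:8 */
 	printf("version:            %u\n", eax & 0xff);
 	printf("num_counters:       %u\n", (eax >> 8) & 0xff);
 	printf("cntval_bits:        %u\n", (eax >> 16) & 0xff);
 	/* edx layout: num_counters_fixed:4, remaining bits reserved */
 	printf("num_counters_fixed: %u\n", edx & 0xf);
 	return 0;
 }

On Intel hardware with architectural perfmon the output should line up with the
"... generic registers", "... bit width" and "... fixed-purpose events" lines
printed by init_hw_perf_events() in the perf_event.c hunks below.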
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/perf_event.h        4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c        74
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c    12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  16
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c     14
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c      6
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c        4
7 files changed, 65 insertions, 65 deletions
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 124dddd598f3..987bf673141e 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -67,7 +67,7 @@
 union cpuid10_eax {
 	struct {
 		unsigned int version_id:8;
-		unsigned int num_events:8;
+		unsigned int num_counters:8;
 		unsigned int bit_width:8;
 		unsigned int mask_length:8;
 	} split;
@@ -76,7 +76,7 @@ union cpuid10_eax {
 
 union cpuid10_edx {
 	struct {
-		unsigned int num_events_fixed:4;
+		unsigned int num_counters_fixed:4;
 		unsigned int reserved:28;
 	} split;
 	unsigned int full;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b53435661813..9daaa1ef504c 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -195,10 +195,10 @@ struct x86_pmu {
 	u64 (*event_map)(int);
 	u64 (*raw_event)(u64);
 	int max_events;
-	int num_events;
-	int num_events_fixed;
-	int event_bits;
-	u64 event_mask;
+	int num_counters;
+	int num_counters_fixed;
+	int cntval_bits;
+	u64 cntval_mask;
 	int apic;
 	u64 max_period;
 	struct event_constraint *
@@ -268,7 +268,7 @@ static u64
 x86_perf_event_update(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	int shift = 64 - x86_pmu.event_bits;
+	int shift = 64 - x86_pmu.cntval_bits;
 	u64 prev_raw_count, new_raw_count;
 	int idx = hwc->idx;
 	s64 delta;
@@ -320,12 +320,12 @@ static bool reserve_pmc_hardware(void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		disable_lapic_nmi_watchdog();
 
-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
 		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
 			goto perfctr_fail;
 	}
 
-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
 		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
 			goto eventsel_fail;
 	}
@@ -336,7 +336,7 @@ eventsel_fail:
 	for (i--; i >= 0; i--)
 		release_evntsel_nmi(x86_pmu.eventsel + i);
 
-	i = x86_pmu.num_events;
+	i = x86_pmu.num_counters;
 
 perfctr_fail:
 	for (i--; i >= 0; i--)
@@ -352,7 +352,7 @@ static void release_pmc_hardware(void)
 {
 	int i;
 
-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
 		release_perfctr_nmi(x86_pmu.perfctr + i);
 		release_evntsel_nmi(x86_pmu.eventsel + i);
 	}
@@ -547,7 +547,7 @@ static void x86_pmu_disable_all(void)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
@@ -582,7 +582,7 @@ static void x86_pmu_enable_all(int added)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		struct perf_event *event = cpuc->events[idx];
 		u64 val;
 
@@ -657,14 +657,14 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	 * assign events to counters starting with most
 	 * constrained events.
 	 */
-	wmax = x86_pmu.num_events;
+	wmax = x86_pmu.num_counters;
 
 	/*
 	 * when fixed event counters are present,
 	 * wmax is incremented by 1 to account
 	 * for one more choice
 	 */
-	if (x86_pmu.num_events_fixed)
+	if (x86_pmu.num_counters_fixed)
 		wmax++;
 
 	for (w = 1, num = n; num && w <= wmax; w++) {
@@ -714,7 +714,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 	struct perf_event *event;
 	int n, max_count;
 
-	max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
+	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
 
 	/* current number of events already accepted */
 	n = cpuc->n_events;
@@ -904,7 +904,7 @@ x86_perf_event_set_period(struct perf_event *event)
 	atomic64_set(&hwc->prev_count, (u64)-left);
 
 	wrmsrl(hwc->event_base + idx,
-			(u64)(-left) & x86_pmu.event_mask);
+			(u64)(-left) & x86_pmu.cntval_mask);
 
 	perf_event_update_userpage(event);
 
@@ -987,7 +987,7 @@ void perf_event_print_debug(void)
 	unsigned long flags;
 	int cpu, idx;
 
-	if (!x86_pmu.num_events)
+	if (!x86_pmu.num_counters)
 		return;
 
 	local_irq_save(flags);
@@ -1011,7 +1011,7 @@ void perf_event_print_debug(void)
 	}
 	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
 		rdmsrl(x86_pmu.perfctr + idx, pmc_count);
 
@@ -1024,7 +1024,7 @@ void perf_event_print_debug(void)
 		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
 			cpu, idx, prev_left);
 	}
-	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
 
 		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1089,7 +1089,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 
@@ -1097,7 +1097,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 		hwc = &event->hw;
 
 		val = x86_perf_event_update(event);
-		if (val & (1ULL << (x86_pmu.event_bits - 1)))
+		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
 			continue;
 
 		/*
@@ -1401,46 +1401,46 @@ void __init init_hw_perf_events(void)
 	if (x86_pmu.quirks)
 		x86_pmu.quirks();
 
-	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
+	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
-		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
+		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
 	}
-	x86_pmu.intel_ctrl = (1 << x86_pmu.num_events) - 1;
-	perf_max_events = x86_pmu.num_events;
+	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+	perf_max_events = x86_pmu.num_counters;
 
-	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
+	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
-		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
+		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
 	}
 
 	x86_pmu.intel_ctrl |=
-		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
+		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
 	perf_events_lapic_init();
 	register_die_notifier(&perf_event_nmi_notifier);
 
 	unconstrained = (struct event_constraint)
-		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
-				   0, x86_pmu.num_events);
+		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
+				   0, x86_pmu.num_counters);
 
 	if (x86_pmu.event_constraints) {
 		for_each_event_constraint(c, x86_pmu.event_constraints) {
 			if (c->cmask != INTEL_ARCH_FIXED_MASK)
 				continue;
 
-			c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
-			c->weight += x86_pmu.num_events;
+			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+			c->weight += x86_pmu.num_counters;
 		}
 	}
 
 	pr_info("... version: %d\n", x86_pmu.version);
-	pr_info("... bit width: %d\n", x86_pmu.event_bits);
-	pr_info("... generic registers: %d\n", x86_pmu.num_events);
-	pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
+	pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
+	pr_info("... generic registers: %d\n", x86_pmu.num_counters);
+	pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
 	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
-	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
+	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
 	pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
 
 	perf_cpu_notifier(x86_pmu_notifier);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 285623bc3cc8..7753a5c76535 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -165,7 +165,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 	 * be removed on one CPU at a time AND PMU is disabled
 	 * when we come here
 	 */
-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
 		if (nb->owners[i] == event) {
 			cmpxchg(nb->owners+i, event, NULL);
 			break;
@@ -215,7 +215,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	struct amd_nb *nb = cpuc->amd_nb;
 	struct perf_event *old = NULL;
-	int max = x86_pmu.num_events;
+	int max = x86_pmu.num_counters;
 	int i, j, k = -1;
 
 	/*
@@ -293,7 +293,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	/*
 	 * initialize all possible NB constraints
 	 */
-	for (i = 0; i < x86_pmu.num_events; i++) {
+	for (i = 0; i < x86_pmu.num_counters; i++) {
 		__set_bit(i, nb->event_constraints[i].idxmsk);
 		nb->event_constraints[i].weight = 1;
 	}
@@ -385,9 +385,9 @@ static __initconst struct x86_pmu amd_pmu = {
 	.event_map = amd_pmu_event_map,
 	.raw_event = amd_pmu_raw_event,
 	.max_events = ARRAY_SIZE(amd_perfmon_event_map),
-	.num_events = 4,
-	.event_bits = 48,
-	.event_mask = (1ULL << 48) - 1,
+	.num_counters = 4,
+	.cntval_bits = 48,
+	.cntval_mask = (1ULL << 48) - 1,
 	.apic = 1,
 	/* use highest bit to detect overflow */
 	.max_period = (1ULL << 47) - 1,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 676aac27aca4..cc4d90a13d53 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -653,20 +653,20 @@ static void intel_pmu_reset(void)
 	unsigned long flags;
 	int idx;
 
-	if (!x86_pmu.num_events)
+	if (!x86_pmu.num_counters)
 		return;
 
 	local_irq_save(flags);
 
 	printk("clearing PMU state on CPU#%d\n", smp_processor_id());
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
 		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
 	}
-	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
 		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
-	}
+
 	if (ds)
 		ds->bts_index = ds->bts_buffer_base;
 
@@ -901,16 +901,16 @@ static __init int intel_pmu_init(void)
 	x86_pmu = intel_pmu;
 
 	x86_pmu.version = version;
-	x86_pmu.num_events = eax.split.num_events;
-	x86_pmu.event_bits = eax.split.bit_width;
-	x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
+	x86_pmu.num_counters = eax.split.num_counters;
+	x86_pmu.cntval_bits = eax.split.bit_width;
+	x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
 
 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
 	 * assume at least 3 events:
 	 */
 	if (version > 1)
-		x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
+		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
 
 	/*
 	 * v2 and above have a perf capabilities MSR
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 0d1be36cbe9e..4139100404e8 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -483,7 +483,7 @@ static void p4_pmu_disable_all(void)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		struct perf_event *event = cpuc->events[idx];
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
@@ -540,7 +540,7 @@ static void p4_pmu_enable_all(int added)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		struct perf_event *event = cpuc->events[idx];
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
@@ -562,7 +562,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
-	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
@@ -579,7 +579,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		p4_pmu_clear_cccr_ovf(hwc);
 
 		val = x86_perf_event_update(event);
-		if (val & (1ULL << (x86_pmu.event_bits - 1)))
+		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
 			continue;
 
 		/*
@@ -794,10 +794,10 @@ static __initconst struct x86_pmu p4_pmu = {
 	 * though leave it restricted at moment assuming
 	 * HT is on
 	 */
-	.num_events = ARCH_P4_MAX_CCCR,
+	.num_counters = ARCH_P4_MAX_CCCR,
 	.apic = 1,
-	.event_bits = 40,
-	.event_mask = (1ULL << 40) - 1,
+	.cntval_bits = 40,
+	.cntval_mask = (1ULL << 40) - 1,
 	.max_period = (1ULL << 39) - 1,
 	.hw_config = p4_hw_config,
 	.schedule_events = p4_pmu_schedule_events,
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 877182c850df..b26fbc7eb93c 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -119,7 +119,7 @@ static __initconst struct x86_pmu p6_pmu = {
 	.apic = 1,
 	.max_period = (1ULL << 31) - 1,
 	.version = 0,
-	.num_events = 2,
+	.num_counters = 2,
 	/*
 	 * Events have 40 bits implemented. However they are designed such
 	 * that bits [32-39] are sign extensions of bit 31. As such the
@@ -127,8 +127,8 @@ static __initconst struct x86_pmu p6_pmu = {
 	 *
 	 * See IA-32 Intel Architecture Software developer manual Vol 3B
 	 */
-	.event_bits = 32,
-	.event_mask = (1ULL << 32) - 1,
+	.cntval_bits = 32,
+	.cntval_mask = (1ULL << 32) - 1,
 	.get_event_constraints = x86_get_event_constraints,
 	.event_constraints = p6_event_constraints,
 };
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 2bf90fafa7b5..c8abc4d1bf35 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -239,11 +239,11 @@ static void arch_perfmon_setup_counters(void)
 	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
 	    current_cpu_data.x86_model == 15) {
 		eax.split.version_id = 2;
-		eax.split.num_events = 2;
+		eax.split.num_counters = 2;
 		eax.split.bit_width = 40;
 	}
 
-	num_counters = eax.split.num_events;
+	num_counters = eax.split.num_counters;
 
 	op_arch_perfmon_spec.num_counters = num_counters;
 	op_arch_perfmon_spec.num_controls = num_counters;