path: root/arch/x86/kernel/cpu
author     Robert Richter <robert.richter@amd.com>   2012-06-20 14:46:33 -0400
committer  Ingo Molnar <mingo@kernel.org>            2012-07-05 15:19:39 -0400
commit     15c7ad51ad58cbd3b46112c1840bc7228bd354bf
tree       010a876843be5a32d065618d8834c5acc101888c  /arch/x86/kernel/cpu
parent     1070505d18534076bda8ca13b1bc1ab2e09546da
perf/x86: Rename Intel specific macros
There are macros that are Intel specific and not x86 generic. Rename
them into INTEL_*.

This patch removes X86_PMC_IDX_GENERIC and does:

 $ sed -i -e 's/X86_PMC_MAX_/INTEL_PMC_MAX_/g'            \
         arch/x86/include/asm/kvm_host.h                  \
         arch/x86/include/asm/perf_event.h                \
         arch/x86/kernel/cpu/perf_event.c                 \
         arch/x86/kernel/cpu/perf_event_p4.c              \
         arch/x86/kvm/pmu.c

 $ sed -i -e 's/X86_PMC_IDX_FIXED/INTEL_PMC_IDX_FIXED/g'  \
         arch/x86/include/asm/perf_event.h                \
         arch/x86/kernel/cpu/perf_event.c                 \
         arch/x86/kernel/cpu/perf_event_intel.c           \
         arch/x86/kernel/cpu/perf_event_intel_ds.c        \
         arch/x86/kvm/pmu.c

 $ sed -i -e 's/X86_PMC_MSK_/INTEL_PMC_MSK_/g'            \
         arch/x86/include/asm/perf_event.h                \
         arch/x86/kernel/cpu/perf_event.c

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1340217996-2254-2-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
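Editorial note: for orientation, the sketch below shows how the renamed macros carve up the hardware counter index space as used throughout the hunks that follow. The numeric values are assumptions matching typical definitions in arch/x86/include/asm/perf_event.h of this era, not something this patch changes, and is_fixed_idx() is a hypothetical helper added purely for illustration.

/* Assumed values -- check arch/x86/include/asm/perf_event.h in your tree. */
#define INTEL_PMC_MAX_GENERIC   32  /* generic counters use indices 0..31      */
#define INTEL_PMC_IDX_FIXED     32  /* fixed-function counters start here      */
#define INTEL_PMC_MAX_FIXED      3  /* e.g. instructions, cycles, ref-cycles   */
#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16)  /* BTS pseudo slot */

/* Hypothetical helper: an index at or above INTEL_PMC_IDX_FIXED is "fixed". */
static inline int is_fixed_idx(int idx)
{
        return idx >= INTEL_PMC_IDX_FIXED;
}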
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c           38
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c     14
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c   4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c         2
4 files changed, 29 insertions, 29 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e677d9923f4f..668050002602 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -63,7 +63,7 @@ u64 x86_perf_event_update(struct perf_event *event)
         int idx = hwc->idx;
         s64 delta;
 
-        if (idx == X86_PMC_IDX_FIXED_BTS)
+        if (idx == INTEL_PMC_IDX_FIXED_BTS)
                 return 0;
 
         /*
@@ -626,8 +626,8 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
         c = sched->constraints[sched->state.event];
 
         /* Prefer fixed purpose counters */
-        if (c->idxmsk64 & (~0ULL << X86_PMC_IDX_FIXED)) {
-                idx = X86_PMC_IDX_FIXED;
+        if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
+                idx = INTEL_PMC_IDX_FIXED;
                 for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
                         if (!__test_and_set_bit(idx, sched->state.used))
                                 goto done;
@@ -635,7 +635,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
         }
         /* Grab the first unused counter starting with idx */
         idx = sched->state.counter;
-        for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
+        for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
                 if (!__test_and_set_bit(idx, sched->state.used))
                         goto done;
         }
@@ -813,13 +813,13 @@ static inline void x86_assign_hw_event(struct perf_event *event,
         hwc->last_cpu = smp_processor_id();
         hwc->last_tag = ++cpuc->tags[i];
 
-        if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
+        if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
                 hwc->config_base = 0;
                 hwc->event_base = 0;
-        } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
+        } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
-                hwc->event_base_rdpmc = (hwc->idx - X86_PMC_IDX_FIXED) | 1<<30;
+                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
+                hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
         } else {
                 hwc->config_base = x86_pmu_config_addr(hwc->idx);
                 hwc->event_base  = x86_pmu_event_addr(hwc->idx);
@@ -921,7 +921,7 @@ int x86_perf_event_set_period(struct perf_event *event)
         s64 period = hwc->sample_period;
         int ret = 0, idx = hwc->idx;
 
-        if (idx == X86_PMC_IDX_FIXED_BTS)
+        if (idx == INTEL_PMC_IDX_FIXED_BTS)
                 return 0;
 
         /*
@@ -1338,21 +1338,21 @@ static int __init init_hw_perf_events(void)
         for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
                 quirk->func();
 
-        if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
+        if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-                     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
-                x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
+                     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+                x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
         }
         x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
 
-        if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
+        if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-                     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
-                x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
+                     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+                x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
         }
 
         x86_pmu.intel_ctrl |=
-                ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+                ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
 
         perf_events_lapic_init();
         register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
@@ -1368,7 +1368,7 @@ static int __init init_hw_perf_events(void)
                  */
                 for_each_event_constraint(c, x86_pmu.event_constraints) {
                         if (c->cmask != X86_RAW_EVENT_MASK
-                            || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
+                            || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
                                 continue;
                         }
 
@@ -1611,8 +1611,8 @@ static int x86_pmu_event_idx(struct perf_event *event)
         if (!x86_pmu.attr_rdpmc)
                 return 0;
 
-        if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
-                idx -= X86_PMC_IDX_FIXED;
+        if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
+                idx -= INTEL_PMC_IDX_FIXED;
                 idx |= 1 << 30;
         }
 
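Editorial note: two bit manipulations in the perf_event.c hunks above are worth spelling out. x86_assign_hw_event() and x86_pmu_event_idx() expose a fixed counter to RDPMC by rebasing its index to zero and setting bit 30, and init_hw_perf_events() ORs the fixed counters into intel_ctrl as a contiguous mask starting at INTEL_PMC_IDX_FIXED. A standalone user-space sketch with made-up counter counts (4 generic, 3 fixed) and an assumed INTEL_PMC_IDX_FIXED of 32:

#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32          /* assumed value, see perf_event.h */

int main(void)
{
        int num_counters = 4;           /* example: 4 generic counters */
        int num_counters_fixed = 3;     /* example: 3 fixed counters   */

        /* Generic counters occupy the low bits of the global control mask. */
        uint64_t intel_ctrl = (1ULL << num_counters) - 1;

        /* Fixed counters form a contiguous mask starting at bit 32. */
        intel_ctrl |= ((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;

        /* RDPMC addresses fixed counter N as (N | 1 << 30). */
        int hw_idx = INTEL_PMC_IDX_FIXED + 1;   /* fixed counter 1 */
        unsigned int rdpmc_idx = (unsigned int)(hw_idx - INTEL_PMC_IDX_FIXED) | 1u << 30;

        printf("intel_ctrl = %#llx\n", (unsigned long long)intel_ctrl); /* 0x70000000f */
        printf("rdpmc_idx  = %#x\n", rdpmc_idx);                        /* 0x40000001  */
        return 0;
}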
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 8408e37f5fa4..5b0b362c7ae6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -747,7 +747,7 @@ static void intel_pmu_disable_all(void)
 
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
-        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                 intel_pmu_disable_bts();
 
         intel_pmu_pebs_disable_all();
@@ -763,9 +763,9 @@ static void intel_pmu_enable_all(int added)
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
                         x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
 
-        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
                 struct perf_event *event =
-                        cpuc->events[X86_PMC_IDX_FIXED_BTS];
+                        cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
 
                 if (WARN_ON_ONCE(!event))
                         return;
@@ -871,7 +871,7 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-        int idx = hwc->idx - X86_PMC_IDX_FIXED;
+        int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
         u64 ctrl_val, mask;
 
         mask = 0xfULL << (idx * 4);
@@ -886,7 +886,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
         struct hw_perf_event *hwc = &event->hw;
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+        if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
                 intel_pmu_disable_bts();
                 intel_pmu_drain_bts_buffer();
                 return;
@@ -915,7 +915,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 
 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-        int idx = hwc->idx - X86_PMC_IDX_FIXED;
+        int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
         u64 ctrl_val, bits, mask;
 
         /*
@@ -949,7 +949,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
         struct hw_perf_event *hwc = &event->hw;
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+        if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
                 if (!__this_cpu_read(cpu_hw_events.enabled))
                         return;
 
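Editorial note: in the two fixed-counter hunks above, hwc->idx is first rebased to a 0-based fixed-counter number; each fixed counter then owns a 4-bit control field inside MSR_ARCH_PERFMON_FIXED_CTR_CTRL, which is why the mask is 0xfULL << (idx * 4). A hedged user-space sketch of that field arithmetic, with the MSR access replaced by a plain variable and fixed_ctrl_disable() being a made-up name:

#include <stdint.h>

#define INTEL_PMC_IDX_FIXED 32   /* assumed value, matches the rebasing above */

/*
 * Clear the 4-bit control field belonging to one fixed counter in a copy of
 * the FIXED_CTR_CTRL value.  Mirrors the shape of intel_pmu_disable_fixed(),
 * but operates on a local variable instead of rdmsrl()/wrmsrl().
 */
static uint64_t fixed_ctrl_disable(uint64_t ctrl_val, int hw_idx)
{
        int idx = hw_idx - INTEL_PMC_IDX_FIXED;   /* 0-based fixed counter */
        uint64_t mask = 0xfULL << (idx * 4);      /* its 4-bit field       */

        return ctrl_val & ~mask;
}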
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 026373edef7f..629ae0b7ad90 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -248,7 +248,7 @@ void reserve_ds_buffers(void)
  */
 
 struct event_constraint bts_constraint =
-        EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
+        EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
 
 void intel_pmu_enable_bts(u64 config)
 {
@@ -295,7 +295,7 @@ int intel_pmu_drain_bts_buffer(void)
                 u64     to;
                 u64     flags;
         };
-        struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
+        struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
         struct bts_record *at, *top;
         struct perf_output_handle handle;
         struct perf_event_header header;
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 6c82e4037989..92c7e39a079f 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1325,7 +1325,7 @@ __init int p4_pmu_init(void)
         unsigned int low, high;
 
         /* If we get stripped -- indexing fails */
-        BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
+        BUILD_BUG_ON(ARCH_P4_MAX_CCCR > INTEL_PMC_MAX_GENERIC);
 
         rdmsr(MSR_IA32_MISC_ENABLE, low, high);
         if (!(low & (1 << 7))) {
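Editorial note: the p4 hunk only renames the limit used in a compile-time assertion. If ARCH_P4_MAX_CCCR ever exceeded INTEL_PMC_MAX_GENERIC, indexing into the generic counter arrays would break, so the build is made to fail instead. A minimal user-space stand-in for that pattern (MY_BUILD_BUG_ON and the example constants are illustrative; the kernel's BUILD_BUG_ON is more elaborate):

/* Fails to compile when cond is true: the array would have negative size. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

static void sanity_check(void)
{
        /* Example constants; substitute the real macros when in-tree. */
        enum { ARCH_P4_MAX_CCCR_EX = 18, INTEL_PMC_MAX_GENERIC_EX = 32 };

        MY_BUILD_BUG_ON(ARCH_P4_MAX_CCCR_EX > INTEL_PMC_MAX_GENERIC_EX);
}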