about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorJacob Shin <jacob.shin@amd.com>2013-02-06 12:26:26 -0500
committerIngo Molnar <mingo@kernel.org>2013-02-06 13:45:23 -0500
commit9f19010af8c651879ac2c36f1a808a3a4419cd40 (patch)
tree280428c36e64bf75b0a9549cd880cdb05d497166 /arch
parent4dd4c2ae555d8a91e8c5bf1cd56807a35764436a (diff)
perf/x86/amd: Use proper naming scheme for AMD bit field definitions
Update these AMD bit field names to be consistent with naming convention
followed by the rest of the file.

Signed-off-by: Jacob Shin <jacob.shin@amd.com>
Acked-by: Stephane Eranian <eranian@google.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1360171589-6381-4-git-send-email-jacob.shin@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/perf_event.h      | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c   | 8
2 files changed, 6 insertions, 6 deletions
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 4fabcdf1cfa7..2234eaaecb52 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -29,8 +29,8 @@
 #define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
 #define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
 
-#define AMD_PERFMON_EVENTSEL_GUESTONLY			(1ULL << 40)
-#define AMD_PERFMON_EVENTSEL_HOSTONLY			(1ULL << 41)
+#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
+#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)
 
 #define AMD64_EVENTSEL_EVENT	\
 	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index f8c9dfbd6613..aea8c2021f78 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -156,9 +156,9 @@ static int amd_pmu_hw_config(struct perf_event *event)
 		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
 				      ARCH_PERFMON_EVENTSEL_OS);
 	else if (event->attr.exclude_host)
-		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
+		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
 	else if (event->attr.exclude_guest)
-		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;
+		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
 
 	if (event->attr.type != PERF_TYPE_RAW)
 		return 0;
@@ -336,7 +336,7 @@ static void amd_pmu_cpu_starting(int cpu)
 	struct amd_nb *nb;
 	int i, nb_id;
 
-	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
 	if (boot_cpu_data.x86_max_cores < 2)
 		return;
@@ -669,7 +669,7 @@ void amd_pmu_disable_virt(void)
 	 * SVM is disabled the Guest-only bits still gets set and the counter
 	 * will not count anything.
 	 */
-	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
 	/* Reload all events */
 	x86_pmu_disable_all();