author		Robert Richter <robert.richter@amd.com>	2010-03-30 05:28:21 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-04-02 13:52:03 -0400
commit		a098f4484bc7dae23f5b62360954007b99b64600 (patch)
tree		11d9d6c8f9008ca191cb8d093eb3c9c3c13d99d3 /arch
parent		948b1bb89a44561560531394c18da4a99215f772 (diff)
perf, x86: implement ARCH_PERFMON_EVENTSEL bit masks
ARCH_PERFMON_EVENTSEL bit masks are often used in the kernel. This patch
adds macros for the bit masks and removes the local defines. The function
intel_pmu_raw_event() becomes x86_pmu_raw_event(), which is generic across
x86 models, including P6. Duplicate code is removed.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100330092821.GH11907@erda.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
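For reference, the new macros follow the architectural EVENTSEL layout: event select in bits 7:0, unit mask in bits 15:8, the USR, OS, EDGE, INT, ANY, ENABLE and INV control bits in bits 16-23, and the counter mask in bits 31:24. The following is a minimal, stand-alone user-space sketch of the filtering that the new x86_pmu_raw_event() performs; the mask values are copied from the patch, while the main() harness and the sample event are illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	/* Copies of the masks this patch adds to asm/perf_event.h. */
	#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL
	#define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL
	#define ARCH_PERFMON_EVENTSEL_USR	(1ULL << 16)
	#define ARCH_PERFMON_EVENTSEL_OS	(1ULL << 17)
	#define ARCH_PERFMON_EVENTSEL_EDGE	(1ULL << 18)
	#define ARCH_PERFMON_EVENTSEL_INT	(1ULL << 20)
	#define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)
	#define ARCH_PERFMON_EVENTSEL_INV	(1ULL << 23)
	#define ARCH_PERFMON_EVENTSEL_CMASK	0xFF000000ULL

	#define X86_RAW_EVENT_MASK		\
		(ARCH_PERFMON_EVENTSEL_EVENT |	\
		 ARCH_PERFMON_EVENTSEL_UMASK |	\
		 ARCH_PERFMON_EVENTSEL_EDGE  |	\
		 ARCH_PERFMON_EVENTSEL_INV   |	\
		 ARCH_PERFMON_EVENTSEL_CMASK)

	int main(void)
	{
		/* Hypothetical raw config: event 0x3c, umask 0x00, plus
		 * control bits (USR/OS/INT/ENABLE) that the kernel sets
		 * itself and therefore strips from user-supplied raw
		 * events. */
		uint64_t hw_event = 0x3cULL
				  | ARCH_PERFMON_EVENTSEL_USR
				  | ARCH_PERFMON_EVENTSEL_OS
				  | ARCH_PERFMON_EVENTSEL_INT
				  | ARCH_PERFMON_EVENTSEL_ENABLE;

		/* Same operation as the new x86_pmu_raw_event(): only the
		 * event, umask, edge, inv and cmask fields survive. */
		uint64_t filtered = hw_event & X86_RAW_EVENT_MASK;

		printf("raw      = 0x%016llx\n", (unsigned long long)hw_event);
		printf("filtered = 0x%016llx\n", (unsigned long long)filtered);
		return 0;
	}

Running this shows the USR/OS/INT/ENABLE bits being cleared while the 0x3c event code passes through, which is the contract the shared helper now gives every x86 PMU model.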
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/perf_event.h	58
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	19
-rw-r--r--	arch/x86/kernel/cpu/perf_event_amd.c	15
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	22
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p6.c	20
5 files changed, 45 insertions(+), 89 deletions(-)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 987bf673141e..f6d43dbfd8e7 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -18,39 +18,31 @@
 #define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
 #define MSR_ARCH_PERFMON_EVENTSEL1			     0x187
 
-#define ARCH_PERFMON_EVENTSEL_ENABLE			  (1 << 22)
-#define ARCH_PERFMON_EVENTSEL_ANY			  (1 << 21)
-#define ARCH_PERFMON_EVENTSEL_INT			  (1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS			  (1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR			  (1 << 16)
-
-/*
- * Includes eventsel and unit mask as well:
- */
-
-
-#define INTEL_ARCH_EVTSEL_MASK		0x000000FFULL
-#define INTEL_ARCH_UNIT_MASK		0x0000FF00ULL
-#define INTEL_ARCH_EDGE_MASK		0x00040000ULL
-#define INTEL_ARCH_INV_MASK		0x00800000ULL
-#define INTEL_ARCH_CNT_MASK		0xFF000000ULL
-#define INTEL_ARCH_EVENT_MASK	(INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)
-
-/*
- * filter mask to validate fixed counter events.
- * the following filters disqualify for fixed counters:
- *  - inv
- *  - edge
- *  - cnt-mask
- *  The other filters are supported by fixed counters.
- *  The any-thread option is supported starting with v3.
- */
-#define INTEL_ARCH_FIXED_MASK \
-	(INTEL_ARCH_CNT_MASK| \
-	 INTEL_ARCH_INV_MASK| \
-	 INTEL_ARCH_EDGE_MASK|\
-	 INTEL_ARCH_UNIT_MASK|\
-	 INTEL_ARCH_EVENT_MASK)
+#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
+#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
+#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
+#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
+#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
+#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
+#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
+#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
+
+#define AMD64_EVENTSEL_EVENT	\
+	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
+#define INTEL_ARCH_EVENT_MASK	\
+	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+
+#define X86_RAW_EVENT_MASK		\
+	(ARCH_PERFMON_EVENTSEL_EVENT |	\
+	 ARCH_PERFMON_EVENTSEL_UMASK |	\
+	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
+	 ARCH_PERFMON_EVENTSEL_INV   |	\
+	 ARCH_PERFMON_EVENTSEL_CMASK)
+#define AMD64_RAW_EVENT_MASK		\
+	(X86_RAW_EVENT_MASK          |  \
+	 AMD64_EVENTSEL_EVENT)
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		      0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
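One consequence worth noting: AMD64_EVENTSEL_EVENT extends the event-select field into bits 35:32 of the control register, so AMD64_RAW_EVENT_MASK accepts extended event codes that the plain X86_RAW_EVENT_MASK would truncate. A small stand-alone sketch follows; the mask values are taken from the hunk above, while the event code 0x1C0 and its encoding into bits 35:32 are illustrative assumptions.

	#include <stdint.h>
	#include <stdio.h>

	/* Copies of the masks defined in the hunk above. */
	#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL
	#define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL
	#define ARCH_PERFMON_EVENTSEL_EDGE	(1ULL << 18)
	#define ARCH_PERFMON_EVENTSEL_INV	(1ULL << 23)
	#define ARCH_PERFMON_EVENTSEL_CMASK	0xFF000000ULL

	#define AMD64_EVENTSEL_EVENT	\
		(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))

	#define X86_RAW_EVENT_MASK		\
		(ARCH_PERFMON_EVENTSEL_EVENT |	\
		 ARCH_PERFMON_EVENTSEL_UMASK |	\
		 ARCH_PERFMON_EVENTSEL_EDGE  |	\
		 ARCH_PERFMON_EVENTSEL_INV   |	\
		 ARCH_PERFMON_EVENTSEL_CMASK)

	#define AMD64_RAW_EVENT_MASK	\
		(X86_RAW_EVENT_MASK | AMD64_EVENTSEL_EVENT)

	int main(void)
	{
		/* Hypothetical extended event select 0x1C0: bits 11:8 of
		 * the event code live in bits 35:32 of the control
		 * register, bits 7:0 stay in bits 7:0; unit mask 0x02. */
		uint64_t event = 0x1C0;
		uint64_t hw_event = ((event & 0xF00) << 24) | (event & 0xFF)
				  | (0x02ULL << 8);

		printf("X86_RAW_EVENT_MASK keeps   0x%016llx\n",
		       (unsigned long long)(hw_event & X86_RAW_EVENT_MASK));
		printf("AMD64_RAW_EVENT_MASK keeps 0x%016llx\n",
		       (unsigned long long)(hw_event & AMD64_RAW_EVENT_MASK));
		return 0;
	}

This is why amd_pmu_raw_event() below keeps its own body using AMD64_RAW_EVENT_MASK, while the Intel and P6 models switch to the shared x86_pmu_raw_event() helper.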
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9daaa1ef504c..1dd42c18f1cb 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -143,13 +143,21 @@ struct cpu_hw_events {
  * Constraint on the Event code.
  */
 #define INTEL_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
+	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
 
 /*
  * Constraint on the Event code + UMask + fixed-mask
+ *
+ * filter mask to validate fixed counter events.
+ * the following filters disqualify for fixed counters:
+ *  - inv
+ *  - edge
+ *  - cnt-mask
+ *  The other filters are supported by fixed counters.
+ *  The any-thread option is supported starting with v3.
  */
 #define FIXED_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
+	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
 
 /*
  * Constraint on the Event code + UMask
@@ -437,6 +445,11 @@ static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc
 	return 0;
 }
 
+static u64 x86_pmu_raw_event(u64 hw_event)
+{
+	return hw_event & X86_RAW_EVENT_MASK;
+}
+
 /*
  * Setup the hardware configuration for a given attr_type
  */
@@ -1427,7 +1440,7 @@ void __init init_hw_perf_events(void)
 
 	if (x86_pmu.event_constraints) {
 		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			if (c->cmask != INTEL_ARCH_FIXED_MASK)
+			if (c->cmask != X86_RAW_EVENT_MASK)
 				continue;
 
 			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 7753a5c76535..37e9517729df 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -113,20 +113,7 @@ static u64 amd_pmu_event_map(int hw_event)
 
 static u64 amd_pmu_raw_event(u64 hw_event)
 {
-#define K7_EVNTSEL_EVENT_MASK	0xF000000FFULL
-#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
-#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
-#define K7_EVNTSEL_INV_MASK	0x000800000ULL
-#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL
-
-#define K7_EVNTSEL_MASK			\
-	(K7_EVNTSEL_EVENT_MASK |	\
-	 K7_EVNTSEL_UNIT_MASK  |	\
-	 K7_EVNTSEL_EDGE_MASK  |	\
-	 K7_EVNTSEL_INV_MASK   |	\
-	 K7_EVNTSEL_REG_MASK)
-
-	return hw_event & K7_EVNTSEL_MASK;
+	return hw_event & AMD64_RAW_EVENT_MASK;
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index cc4d90a13d53..dfdd6f90fc8e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -452,24 +452,6 @@ static __initconst u64 atom_hw_cache_event_ids
 	},
 };
 
-static u64 intel_pmu_raw_event(u64 hw_event)
-{
-#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
-#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
-#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
-#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL
-
-#define CORE_EVNTSEL_MASK		\
-	(INTEL_ARCH_EVTSEL_MASK |	\
-	 INTEL_ARCH_UNIT_MASK   |	\
-	 INTEL_ARCH_EDGE_MASK   |	\
-	 INTEL_ARCH_INV_MASK    |	\
-	 INTEL_ARCH_CNT_MASK)
-
-	return hw_event & CORE_EVNTSEL_MASK;
-}
-
 static void intel_pmu_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -788,7 +770,7 @@ static __initconst struct x86_pmu core_pmu = {
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map		= intel_pmu_event_map,
-	.raw_event		= intel_pmu_raw_event,
+	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
 	/*
@@ -827,7 +809,7 @@ static __initconst struct x86_pmu intel_pmu = {
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map		= intel_pmu_event_map,
-	.raw_event		= intel_pmu_raw_event,
+	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
 	/*
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index b26fbc7eb93c..03c139a67baa 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -27,24 +27,6 @@ static u64 p6_pmu_event_map(int hw_event)
  */
 #define P6_NOP_EVENT			0x0000002EULL
 
-static u64 p6_pmu_raw_event(u64 hw_event)
-{
-#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
-#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
-#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
-#define P6_EVNTSEL_INV_MASK		0x00800000ULL
-#define P6_EVNTSEL_REG_MASK		0xFF000000ULL
-
-#define P6_EVNTSEL_MASK			\
-	(P6_EVNTSEL_EVENT_MASK |	\
-	 P6_EVNTSEL_UNIT_MASK  |	\
-	 P6_EVNTSEL_EDGE_MASK  |	\
-	 P6_EVNTSEL_INV_MASK   |	\
-	 P6_EVNTSEL_REG_MASK)
-
-	return hw_event & P6_EVNTSEL_MASK;
-}
-
 static struct event_constraint p6_event_constraints[] =
 {
 	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
@@ -114,7 +96,7 @@ static __initconst struct x86_pmu p6_pmu = {
 	.eventsel		= MSR_P6_EVNTSEL0,
 	.perfctr		= MSR_P6_PERFCTR0,
 	.event_map		= p6_pmu_event_map,
-	.raw_event		= p6_pmu_raw_event,
+	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
 	.apic			= 1,
 	.max_period		= (1ULL << 31) - 1,