author      Yan, Zheng <zheng.z.yan@intel.com>    2012-06-15 02:31:35 -0400
committer   Ingo Molnar <mingo@kernel.org>        2012-06-18 06:13:22 -0400
commit      fcde10e916326545e8fec1807357c68ef08dc443
tree        57a856776f7f5c6c5a59034a9a3967f65c5612c5
parent      087bfbb032691262f2f7d52b910652450c5554b8
perf/x86: Add Intel Nehalem and Sandy Bridge uncore PMU support
Signed-off-by: Zheng Yan <zheng.z.yan@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1339741902-8449-7-git-send-email-zheng.z.yan@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--   arch/x86/kernel/cpu/perf_event_intel_uncore.c | 195
-rw-r--r--   arch/x86/kernel/cpu/perf_event_intel_uncore.h |  50
2 files changed, 245 insertions(+), 0 deletions(-)
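[Illustrative note, not part of the commit] The DEFINE_UNCORE_FORMAT_ATTR() lines below publish the raw config layout through sysfs (event in config:0-7, umask in config:8-15, cmask in config:24-28 or 24-31), so an uncore event can be opened from user space with perf_event_open(). The stand-alone sketch that follows is a rough example under extra assumptions: the single Nehalem/Westmere uncore PMU is assumed to appear as /sys/bus/event_source/devices/uncore (the exact name depends on the registration code in the parent commit), and opening a system-wide counter typically needs root or a permissive perf_event_paranoid setting. It counts QMC_NORMAL_READS_ANY (event=0x2c, umask=0xf) on CPU 0 for one second.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        unsigned long long count;
        FILE *f;
        int type, fd;

        /* PMU type id is assigned dynamically when the uncore PMU registers. */
        f = fopen("/sys/bus/event_source/devices/uncore/type", "r");
        if (!f || fscanf(f, "%d", &type) != 1)
                return 1;
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;
        /* event in config:0-7, umask in config:8-15, as the format attrs describe */
        attr.config = 0x2c | (0xf << 8);

        /* uncore counters are socket-wide: pid == -1, bind to one CPU */
        fd = perf_event_open(&attr, -1, 0, -1, 0);
        if (fd < 0)
                return 1;

        sleep(1);
        if (read(fd, &count, sizeof(count)) != sizeof(count))
                return 1;
        printf("QMC_NORMAL_READS_ANY: %llu\n", count);
        return 0;
}

perf tools that understand dynamic PMUs can express the same event directly, e.g. as uncore/event=0x2c,umask=0xf/, once these format attributes are exported.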
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index fe76a07dfdbc..3ed941ac3745 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -10,6 +10,192 @@ static cpumask_t uncore_cpu_mask;
 static struct event_constraint constraint_fixed =
         EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
 
+DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
+
+/* Sandy Bridge uncore support */
+static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                        struct perf_event *event)
+{
+        struct hw_perf_event *hwc = &event->hw;
+
+        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+        else
+                wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
+}
+
+static void snb_uncore_msr_disable_event(struct intel_uncore_box *box,
+                                         struct perf_event *event)
+{
+        wrmsrl(event->hw.config_base, 0);
+}
+
+static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+        u64 count;
+        rdmsrl(event->hw.event_base, count);
+        return count;
+}
+
+static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+        if (box->pmu->pmu_idx == 0) {
+                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+                        SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
+        }
+}
+
+static struct attribute *snb_uncore_formats_attr[] = {
+        &format_attr_event.attr,
+        &format_attr_umask.attr,
+        &format_attr_edge.attr,
+        &format_attr_inv.attr,
+        &format_attr_cmask5.attr,
+        NULL,
+};
+
+static struct attribute_group snb_uncore_format_group = {
+        .name = "format",
+        .attrs = snb_uncore_formats_attr,
+};
+
+static struct intel_uncore_ops snb_uncore_msr_ops = {
+        .init_box       = snb_uncore_msr_init_box,
+        .disable_event  = snb_uncore_msr_disable_event,
+        .enable_event   = snb_uncore_msr_enable_event,
+        .read_counter   = snb_uncore_msr_read_counter,
+};
+
+static struct event_constraint snb_uncore_cbox_constraints[] = {
+        UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
+        UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
+        EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snb_uncore_cbox = {
+        .name           = "cbox",
+        .num_counters   = 2,
+        .num_boxes      = 4,
+        .perf_ctr_bits  = 44,
+        .fixed_ctr_bits = 48,
+        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
+        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
+        .fixed_ctr      = SNB_UNC_FIXED_CTR,
+        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
+        .single_fixed   = 1,
+        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
+        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
+        .constraints    = snb_uncore_cbox_constraints,
+        .ops            = &snb_uncore_msr_ops,
+        .format_group   = &snb_uncore_format_group,
+};
+
+static struct intel_uncore_type *snb_msr_uncores[] = {
+        &snb_uncore_cbox,
+        NULL,
+};
+/* end of Sandy Bridge uncore support */
+
+/* Nehalem uncore support */
+static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL,
+                NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
+}
+
+static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                        struct perf_event *event)
+{
+        struct hw_perf_event *hwc = &event->hw;
+
+        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+        else
+                wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
+}
+
+static struct attribute *nhm_uncore_formats_attr[] = {
+        &format_attr_event.attr,
+        &format_attr_umask.attr,
+        &format_attr_edge.attr,
+        &format_attr_inv.attr,
+        &format_attr_cmask8.attr,
+        NULL,
+};
+
+static struct attribute_group nhm_uncore_format_group = {
+        .name = "format",
+        .attrs = nhm_uncore_formats_attr,
+};
+
+static struct uncore_event_desc nhm_uncore_events[] = {
+        INTEL_UNCORE_EVENT_DESC(CLOCKTICKS, "config=0xffff"),
+        /* full cache line writes to DRAM */
+        INTEL_UNCORE_EVENT_DESC(QMC_WRITES_FULL_ANY, "event=0x2f,umask=0xf"),
+        /* Quickpath Memory Controller normal priority read requests */
+        INTEL_UNCORE_EVENT_DESC(QMC_NORMAL_READS_ANY, "event=0x2c,umask=0xf"),
+        /* Quickpath Home Logic read requests from the IOH */
+        INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_IOH_READS,
+                                "event=0x20,umask=0x1"),
+        /* Quickpath Home Logic write requests from the IOH */
+        INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_IOH_WRITES,
+                                "event=0x20,umask=0x2"),
+        /* Quickpath Home Logic read requests from a remote socket */
+        INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_REMOTE_READS,
+                                "event=0x20,umask=0x4"),
+        /* Quickpath Home Logic write requests from a remote socket */
+        INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_REMOTE_WRITES,
+                                "event=0x20,umask=0x8"),
+        /* Quickpath Home Logic read requests from the local socket */
+        INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_LOCAL_READS,
+                                "event=0x20,umask=0x10"),
+        /* Quickpath Home Logic write requests from the local socket */
+        INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_LOCAL_WRITES,
+                                "event=0x20,umask=0x20"),
+        { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhm_uncore_msr_ops = {
+        .disable_box    = nhm_uncore_msr_disable_box,
+        .enable_box     = nhm_uncore_msr_enable_box,
+        .disable_event  = snb_uncore_msr_disable_event,
+        .enable_event   = nhm_uncore_msr_enable_event,
+        .read_counter   = snb_uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type nhm_uncore = {
+        .name           = "",
+        .num_counters   = 8,
+        .num_boxes      = 1,
+        .perf_ctr_bits  = 48,
+        .fixed_ctr_bits = 48,
+        .event_ctl      = NHM_UNC_PERFEVTSEL0,
+        .perf_ctr       = NHM_UNC_UNCORE_PMC0,
+        .fixed_ctr      = NHM_UNC_FIXED_CTR,
+        .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
+        .event_mask     = NHM_UNC_RAW_EVENT_MASK,
+        .event_descs    = nhm_uncore_events,
+        .ops            = &nhm_uncore_msr_ops,
+        .format_group   = &nhm_uncore_format_group,
+};
+
+static struct intel_uncore_type *nhm_msr_uncores[] = {
+        &nhm_uncore,
+        NULL,
+};
+/* end of Nehalem uncore support */
+
 static void uncore_assign_hw_event(struct intel_uncore_box *box,
                                 struct perf_event *event, int idx)
 {
@@ -808,6 +994,15 @@ static int __init uncore_cpu_init(void)
         int ret, cpu;
 
         switch (boot_cpu_data.x86_model) {
+        case 26: /* Nehalem */
+        case 30:
+        case 37: /* Westmere */
+        case 44:
+                msr_uncores = nhm_msr_uncores;
+                break;
+        case 42: /* Sandy Bridge */
+                msr_uncores = snb_msr_uncores;
+                break;
         default:
                 return 0;
         }
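[Illustrative note, not part of the commit] The switch above keys on boot_cpu_data.x86_model, i.e. the CPUID display model: 26/30 are Nehalem, 37/44 Westmere, and 42 Sandy Bridge. A rough user-space sketch of the same model computation, assuming GCC/Clang's <cpuid.h>, shows which uncore table a given machine would select:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx, family, model;

        /* CPUID leaf 1: family in bits 11:8, model in 7:4, ext. model in 19:16 */
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;

        family = (eax >> 8) & 0xf;
        model  = (eax >> 4) & 0xf;
        if (family == 0x6 || family == 0xf)
                model += ((eax >> 16) & 0xf) << 4;      /* extended model bits */

        /* 26/30/37/44 -> nhm_msr_uncores, 42 -> snb_msr_uncores */
        printf("x86 family %u, model %u\n", family, model);
        return 0;
}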
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 49a6bfbba0de..eeb5ca5815a8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -15,6 +15,56 @@
 
 #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
 
+/* SNB event control */
+#define SNB_UNC_CTL_EV_SEL_MASK         0x000000ff
+#define SNB_UNC_CTL_UMASK_MASK          0x0000ff00
+#define SNB_UNC_CTL_EDGE_DET            (1 << 18)
+#define SNB_UNC_CTL_EN                  (1 << 22)
+#define SNB_UNC_CTL_INVERT              (1 << 23)
+#define SNB_UNC_CTL_CMASK_MASK          0x1f000000
+#define NHM_UNC_CTL_CMASK_MASK          0xff000000
+#define NHM_UNC_FIXED_CTR_CTL_EN        (1 << 0)
+
+#define SNB_UNC_RAW_EVENT_MASK          (SNB_UNC_CTL_EV_SEL_MASK | \
+                                         SNB_UNC_CTL_UMASK_MASK | \
+                                         SNB_UNC_CTL_EDGE_DET | \
+                                         SNB_UNC_CTL_INVERT | \
+                                         SNB_UNC_CTL_CMASK_MASK)
+
+#define NHM_UNC_RAW_EVENT_MASK          (SNB_UNC_CTL_EV_SEL_MASK | \
+                                         SNB_UNC_CTL_UMASK_MASK | \
+                                         SNB_UNC_CTL_EDGE_DET | \
+                                         SNB_UNC_CTL_INVERT | \
+                                         NHM_UNC_CTL_CMASK_MASK)
+
+/* SNB global control register */
+#define SNB_UNC_PERF_GLOBAL_CTL         0x391
+#define SNB_UNC_FIXED_CTR_CTRL          0x394
+#define SNB_UNC_FIXED_CTR               0x395
+
+/* SNB uncore global control */
+#define SNB_UNC_GLOBAL_CTL_CORE_ALL     ((1 << 4) - 1)
+#define SNB_UNC_GLOBAL_CTL_EN           (1 << 29)
+
+/* SNB Cbo register */
+#define SNB_UNC_CBO_0_PERFEVTSEL0       0x700
+#define SNB_UNC_CBO_0_PER_CTR0          0x706
+#define SNB_UNC_CBO_MSR_OFFSET          0x10
+
+/* NHM global control register */
+#define NHM_UNC_PERF_GLOBAL_CTL         0x391
+#define NHM_UNC_FIXED_CTR               0x394
+#define NHM_UNC_FIXED_CTR_CTRL          0x395
+
+/* NHM uncore global control */
+#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL    ((1ULL << 8) - 1)
+#define NHM_UNC_GLOBAL_CTL_EN_FC        (1ULL << 32)
+
+/* NHM uncore register */
+#define NHM_UNC_PERFEVTSEL0             0x3c0
+#define NHM_UNC_UNCORE_PMC0             0x3b0
+
+
 struct intel_uncore_ops;
 struct intel_uncore_pmu;
 struct intel_uncore_box;
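[Illustrative note, not part of the commit] The SNB C-box defines above give the MSR addresses for box 0; the generic uncore code from the parent commit reaches box N by adding N * SNB_UNC_CBO_MSR_OFFSET. A stand-alone sketch of the addresses implied for the four C-boxes declared by snb_uncore_cbox:

#include <stdio.h>

/* mirrors the defines from perf_event_intel_uncore.h above */
#define SNB_UNC_CBO_0_PERFEVTSEL0       0x700
#define SNB_UNC_CBO_0_PER_CTR0          0x706
#define SNB_UNC_CBO_MSR_OFFSET          0x10

int main(void)
{
        int box;

        /* snb_uncore_cbox.num_boxes == 4 */
        for (box = 0; box < 4; box++)
                printf("cbox%d: perfevtsel0 = 0x%x, ctr0 = 0x%x\n", box,
                       SNB_UNC_CBO_0_PERFEVTSEL0 + box * SNB_UNC_CBO_MSR_OFFSET,
                       SNB_UNC_CBO_0_PER_CTR0 + box * SNB_UNC_CBO_MSR_OFFSET);
        return 0;
}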