author     Tony Lindgren <tony@atomide.com>  2010-05-20 14:37:23 -0400
committer  Tony Lindgren <tony@atomide.com>  2010-05-20 14:37:23 -0400
commit     f6304f5804f228b6c2fea9e3dfac25c5b2db9b38 (patch)
tree       362b9b6a3bd32b868e5917a32d448ac75c5854df /arch/x86/kernel/cpu/perf_event_p4.c
parent     4fa73a1bf89ebea4eba8a9982b5f64d266d8b5e9 (diff)
parent     6daa642d9b8ec762b3c5641cd5e5fa855a5405bf (diff)

Merge branch 'omap4-i2c-init' into omap-for-linus

Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_p4.c')

 -rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c | 857
 1 file changed, 857 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
new file mode 100644
index 00000000000..424fc8de68e
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -0,0 +1,857 @@
/*
 * Netburst Performance Events (P4, old Xeon)
 *
 * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
 * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#ifdef CONFIG_CPU_SUP_INTEL

#include <asm/perf_event_p4.h>

#define P4_CNTR_LIMIT 3
/*
 * array indices: 0,1 - HT threads, used with HT enabled cpu
 */
struct p4_event_bind {
        unsigned int opcode;                    /* Event code and ESCR selector */
        unsigned int escr_msr[2];               /* ESCR MSR for this event */
        char cntr[2][P4_CNTR_LIMIT];            /* counter index (offset), -1 on absence */
};

struct p4_cache_event_bind {
        unsigned int metric_pebs;
        unsigned int metric_vert;
};

#define P4_GEN_CACHE_EVENT_BIND(name)           \
        [P4_CACHE__##name] = {                  \
                .metric_pebs = P4_PEBS__##name, \
                .metric_vert = P4_VERT__##name, \
        }

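/*
 * For illustration only, P4_GEN_CACHE_EVENT_BIND(dtlb_load_miss_retired)
 * expands to the designated initializer
 *
 *      [P4_CACHE__dtlb_load_miss_retired] = {
 *              .metric_pebs = P4_PEBS__dtlb_load_miss_retired,
 *              .metric_vert = P4_VERT__dtlb_load_miss_retired,
 *      },
 *
 * i.e. the cache event index selects the pair of metric values that
 * p4_pmu_enable_event() below writes into the PEBS MSRs.
 */
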
static struct p4_cache_event_bind p4_cache_event_bind_map[] = {
        P4_GEN_CACHE_EVENT_BIND(1stl_cache_load_miss_retired),
        P4_GEN_CACHE_EVENT_BIND(2ndl_cache_load_miss_retired),
        P4_GEN_CACHE_EVENT_BIND(dtlb_load_miss_retired),
        P4_GEN_CACHE_EVENT_BIND(dtlb_store_miss_retired),
};

/*
 * Note that we don't use CCCR1 here; P4_BSQ_ALLOCATION would be an
 * exception, but we have no workaround for it.
 *
 * Consider these bindings as the resources a particular event may
 * borrow; they don't contain EventMask, Tags and friends -- those
 * are left to the caller.
 */
static struct p4_event_bind p4_event_bind_map[] = {
        [P4_EVENT_TC_DELIVER_MODE] = {
                .opcode         = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
                .escr_msr       = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_BPU_FETCH_REQUEST] = {
                .opcode         = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
                .escr_msr       = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_ITLB_REFERENCE] = {
                .opcode         = P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
                .escr_msr       = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_MEMORY_CANCEL] = {
                .opcode         = P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
                .escr_msr       = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_MEMORY_COMPLETE] = {
                .opcode         = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
                .escr_msr       = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_LOAD_PORT_REPLAY] = {
                .opcode         = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
                .escr_msr       = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_STORE_PORT_REPLAY] = {
                .opcode         = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
                .escr_msr       = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_MOB_LOAD_REPLAY] = {
                .opcode         = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
                .escr_msr       = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_PAGE_WALK_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
                .escr_msr       = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_BSQ_CACHE_REFERENCE] = {
                .opcode         = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
                .escr_msr       = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_IOQ_ALLOCATION] = {
                .opcode         = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_IOQ_ACTIVE_ENTRIES] = {       /* shared ESCR */
                .opcode         = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
                .escr_msr       = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
                .cntr           = { {2, -1, -1}, {3, -1, -1} },
        },
        [P4_EVENT_FSB_DATA_ACTIVITY] = {
                .opcode         = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_BSQ_ALLOCATION] = {           /* shared ESCR, broken CCCR1 */
                .opcode         = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
                .escr_msr       = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
                .cntr           = { {0, -1, -1}, {1, -1, -1} },
        },
        [P4_EVENT_BSQ_ACTIVE_ENTRIES] = {       /* shared ESCR */
                .opcode         = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
                .escr_msr       = { MSR_P4_BSU_ESCR1, MSR_P4_BSU_ESCR1 },
                .cntr           = { {2, -1, -1}, {3, -1, -1} },
        },
        [P4_EVENT_SSE_INPUT_ASSIST] = {
                .opcode         = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_PACKED_SP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_PACKED_DP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_SCALAR_SP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_SCALAR_DP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_64BIT_MMX_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_128BIT_MMX_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_X87_FP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_X87_FP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_TC_MISC] = {
                .opcode         = P4_OPCODE(P4_EVENT_TC_MISC),
                .escr_msr       = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_GLOBAL_POWER_EVENTS] = {
                .opcode         = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_TC_MS_XFER] = {
                .opcode         = P4_OPCODE(P4_EVENT_TC_MS_XFER),
                .escr_msr       = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_UOP_QUEUE_WRITES] = {
                .opcode         = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
                .escr_msr       = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
                .escr_msr       = { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR0 },
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_RETIRED_BRANCH_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
                .escr_msr       = { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 },
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_RESOURCE_STALL] = {
                .opcode         = P4_OPCODE(P4_EVENT_RESOURCE_STALL),
                .escr_msr       = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_WC_BUFFER] = {
                .opcode         = P4_OPCODE(P4_EVENT_WC_BUFFER),
                .escr_msr       = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_B2B_CYCLES] = {
                .opcode         = P4_OPCODE(P4_EVENT_B2B_CYCLES),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_BNR] = {
                .opcode         = P4_OPCODE(P4_EVENT_BNR),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_SNOOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_SNOOP),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_RESPONSE] = {
                .opcode         = P4_OPCODE(P4_EVENT_RESPONSE),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_FRONT_END_EVENT] = {
                .opcode         = P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_EXECUTION_EVENT] = {
                .opcode         = P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_REPLAY_EVENT] = {
                .opcode         = P4_OPCODE(P4_EVENT_REPLAY_EVENT),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_INSTR_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_INSTR_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_UOPS_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_UOPS_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_UOP_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_UOP_TYPE),
                .escr_msr       = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_BRANCH_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_MISPRED_BRANCH_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_X87_ASSIST] = {
                .opcode         = P4_OPCODE(P4_EVENT_X87_ASSIST),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_MACHINE_CLEAR] = {
                .opcode         = P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_INSTR_COMPLETED] = {
                .opcode         = P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
};

#define P4_GEN_CACHE_EVENT(event, bit, cache_event)                       \
        p4_config_pack_escr(P4_ESCR_EVENT(event)                        | \
                            P4_ESCR_EMASK_BIT(event, bit))              | \
        p4_config_pack_cccr(cache_event                                 | \
                            P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))

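/*
 * For illustration: P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
 * P4_CACHE__1stl_cache_load_miss_retired) packs the REPLAY_EVENT code
 * and its NBOGUS mask bit into the ESCR half of the config, and the
 * cache event index plus the matching ESCR select into the CCCR half,
 * so a single u64 carries everything needed to program both MSRs later.
 */
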
static __initconst const u64 p4_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
                                                P4_CACHE__1stl_cache_load_miss_retired),
        },
 },
 [ C(LL ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
                                                P4_CACHE__2ndl_cache_load_miss_retired),
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
                                                P4_CACHE__dtlb_load_miss_retired),
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
                                                P4_CACHE__dtlb_store_miss_retired),
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
                                                P4_CACHE__itlb_reference_hit),
                [ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
                                                P4_CACHE__itlb_reference_miss),
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
        /* non-halted CPU clocks */
        [PERF_COUNT_HW_CPU_CYCLES] =
        p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS)         |
                P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),

        /*
         * retired instructions
         * for the sake of simplicity we don't use the FSB tagging
         */
        [PERF_COUNT_HW_INSTRUCTIONS] =
        p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED)               |
                P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG)           |
                P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),

        /* cache hits */
        [PERF_COUNT_HW_CACHE_REFERENCES] =
        p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)         |
                P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS)   |
                P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE)   |
                P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM)   |
                P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS)   |
                P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE)   |
                P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),

        /* cache misses */
        [PERF_COUNT_HW_CACHE_MISSES] =
        p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)         |
                P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS)   |
                P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS)   |
                P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),

        /* branch instructions retired */
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
        p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE)         |
                P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL)    |
                P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL)           |
                P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN)         |
                P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),

        /* mispredicted branches retired */
        [PERF_COUNT_HW_BRANCH_MISSES] =
        p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED)      |
                P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),

        /* bus ready clocks (cpu is driving #DRDY_DRV or #DRDY_OWN): */
        [PERF_COUNT_HW_BUS_CYCLES] =
        p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY)           |
                P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV)         |
                P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN))        |
        p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
};

static struct p4_event_bind *p4_config_get_bind(u64 config)
{
        unsigned int evnt = p4_config_unpack_event(config);
        struct p4_event_bind *bind = NULL;

        if (evnt < ARRAY_SIZE(p4_event_bind_map))
                bind = &p4_event_bind_map[evnt];

        return bind;
}

static u64 p4_pmu_event_map(int hw_event)
{
        struct p4_event_bind *bind;
        unsigned int esel;
        u64 config;

        config = p4_general_events[hw_event];
        bind = p4_config_get_bind(config);
        esel = P4_OPCODE_ESEL(bind->opcode);
        config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));

        return config;
}

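/*
 * A hypothetical usage sketch (illustration only, not part of this
 * file's API): a userspace caller would reach the mapping above via a
 * generic hardware event, e.g.
 *
 *      struct perf_event_attr attr = {
 *              .type   = PERF_TYPE_HARDWARE,
 *              .config = PERF_COUNT_HW_CPU_CYCLES,
 *      };
 *
 * and the core x86 perf code translates that generic index through
 * p4_pmu_event_map() into the packed ESCR/CCCR config used below.
 */
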
static int p4_hw_config(struct perf_event *event)
{
        int cpu = get_cpu();
        int rc = 0;
        unsigned int evnt;
        u32 escr, cccr;

        /*
         * The reason we take the cpu this early is that if we get
         * scheduled for the first time on the same cpu, we will not
         * need to swap the thread-specific flags in the config (and
         * will save some cpu cycles).
         */

        cccr = p4_default_cccr_conf(cpu);
        escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
                                         event->attr.exclude_user);
        event->hw.config = p4_config_pack_escr(escr) |
                           p4_config_pack_cccr(cccr);

        if (p4_ht_active() && p4_ht_thread(cpu))
                event->hw.config = p4_set_ht_bit(event->hw.config);

        if (event->attr.type == PERF_TYPE_RAW) {

                /* user data may have an out-of-bound event index */
                evnt = p4_config_unpack_event(event->attr.config);
                if (evnt >= ARRAY_SIZE(p4_event_bind_map)) {
                        rc = -EINVAL;
                        goto out;
                }

                /*
                 * We don't control raw events, so it's up to the caller
                 * to pass sane values (and we don't count the thread
                 * number on an HT machine, but allow HT-compatible
                 * specifics to be passed on).
                 *
                 * XXX: HT wide things should check perf_paranoid_cpu() &&
                 *      CAP_SYS_ADMIN
                 */
                event->hw.config |= event->attr.config &
                        (p4_config_pack_escr(P4_ESCR_MASK_HT) |
                         p4_config_pack_cccr(P4_CCCR_MASK_HT));
        }

        rc = x86_setup_perfctr(event);
out:
        put_cpu();
        return rc;
}

static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
        unsigned long dummy;

        rdmsrl(hwc->config_base + hwc->idx, dummy);
        if (dummy & P4_CCCR_OVF) {
                (void)checking_wrmsrl(hwc->config_base + hwc->idx,
                        ((u64)dummy) & ~P4_CCCR_OVF);
        }
}

static inline void p4_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /*
         * If the event gets disabled while the counter is in an
         * overflowed state, we need to clear P4_CCCR_OVF; otherwise
         * the interrupt gets asserted again and again.
         */
        (void)checking_wrmsrl(hwc->config_base + hwc->idx,
                (u64)(p4_config_unpack_cccr(hwc->config)) &
                        ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}

static void p4_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                struct perf_event *event = cpuc->events[idx];
                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                p4_pmu_disable_event(event);
        }
}

static void p4_pmu_enable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int thread = p4_ht_config_thread(hwc->config);
        u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
        unsigned int idx = p4_config_unpack_event(hwc->config);
        unsigned int idx_cache = p4_config_unpack_cache_event(hwc->config);
        struct p4_event_bind *bind;
        struct p4_cache_event_bind *bind_cache;
        u64 escr_addr, cccr;

        bind = &p4_event_bind_map[idx];
        escr_addr = (u64)bind->escr_msr[thread];

        /*
         * - we don't support cascaded counters yet
         * - and counter 1 is broken (erratum)
         */
        WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
        WARN_ON_ONCE(hwc->idx == 1);

        /* we need a real Event value */
        escr_conf &= ~P4_ESCR_EVENT_MASK;
        escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));

        cccr = p4_config_unpack_cccr(hwc->config);

        /*
         * it could be a Cache event, in which case we need to
         * set the metrics into the additional MSRs
         */
        BUILD_BUG_ON(P4_CACHE__MAX > P4_CCCR_CACHE_OPS_MASK);
        if (idx_cache > P4_CACHE__NONE &&
            idx_cache < ARRAY_SIZE(p4_cache_event_bind_map)) {
                bind_cache = &p4_cache_event_bind_map[idx_cache];
                (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind_cache->metric_pebs);
                (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind_cache->metric_vert);
        }

        (void)checking_wrmsrl(escr_addr, escr_conf);
        (void)checking_wrmsrl(hwc->config_base + hwc->idx,
                (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}

static void p4_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                struct perf_event *event = cpuc->events[idx];
                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                p4_pmu_enable_event(event);
        }
}

static int p4_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int idx, handled = 0;
        u64 val;

        data.addr = 0;
        data.raw = NULL;

        cpuc = &__get_cpu_var(cpu_hw_events);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                event = cpuc->events[idx];
                hwc = &event->hw;

                WARN_ON_ONCE(hwc->idx != idx);

                /*
                 * FIXME: Redundant call, actually not needed
                 * but just to check if we're screwed
                 */
                p4_pmu_clear_cccr_ovf(hwc);

                val = x86_perf_event_update(event);
                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
                        continue;

                /*
                 * event overflow
                 */
                handled = 1;
                data.period = event->hw.last_period;

                if (!x86_perf_event_set_period(event))
                        continue;
                if (perf_event_overflow(event, 1, &data, regs))
                        p4_pmu_disable_event(event);
        }

        if (handled) {
                /* p4 quirk: unmask it again */
                apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
                inc_irq_stat(apic_perf_irqs);
        }

        return handled;
}

/*
 * swap thread-specific fields according to the thread
 * we are going to run on
 */
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
{
        u32 escr, cccr;

        /*
         * either we are lucky and continue on the same cpu,
         * or there is no HT support
         */
        if (!p4_should_swap_ts(hwc->config, cpu))
                return;

        /*
         * the event has migrated from another logical
         * cpu, so we need to swap the thread-specific flags
         */

        escr = p4_config_unpack_escr(hwc->config);
        cccr = p4_config_unpack_cccr(hwc->config);

        if (p4_ht_thread(cpu)) {
                cccr &= ~P4_CCCR_OVF_PMI_T0;
                cccr |= P4_CCCR_OVF_PMI_T1;
                if (escr & P4_ESCR_T0_OS) {
                        escr &= ~P4_ESCR_T0_OS;
                        escr |= P4_ESCR_T1_OS;
                }
                if (escr & P4_ESCR_T0_USR) {
                        escr &= ~P4_ESCR_T0_USR;
                        escr |= P4_ESCR_T1_USR;
                }
                hwc->config  = p4_config_pack_escr(escr);
                hwc->config |= p4_config_pack_cccr(cccr);
                hwc->config |= P4_CONFIG_HT;
        } else {
                cccr &= ~P4_CCCR_OVF_PMI_T1;
                cccr |= P4_CCCR_OVF_PMI_T0;
                if (escr & P4_ESCR_T1_OS) {
                        escr &= ~P4_ESCR_T1_OS;
                        escr |= P4_ESCR_T0_OS;
                }
                if (escr & P4_ESCR_T1_USR) {
                        escr &= ~P4_ESCR_T1_USR;
                        escr |= P4_ESCR_T0_USR;
                }
                hwc->config  = p4_config_pack_escr(escr);
                hwc->config |= p4_config_pack_cccr(cccr);
                hwc->config &= ~P4_CONFIG_HT;
        }
}

/*
 * ESCR address hashing is tricky: ESCRs are not sequential in memory,
 * but they all start from MSR_P4_BSU_ESCR0 (0x3a0) and every ESCR
 * address lies in the range [0x3a0, 0x3e1],
 *
 * so we end up with a hashtable that is ~70% filled
 */

#define P4_ESCR_MSR_BASE                0x000003a0
#define P4_ESCR_MSR_MAX                 0x000003e1
#define P4_ESCR_MSR_TABLE_SIZE          (P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
#define P4_ESCR_MSR_IDX(msr)            (msr - P4_ESCR_MSR_BASE)
#define P4_ESCR_MSR_TABLE_ENTRY(msr)    [P4_ESCR_MSR_IDX(msr)] = msr

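/*
 * Worked example of the indexing above (the values follow from the
 * #defines): MSR_P4_BSU_ESCR0 (0x3a0) hashes to index 0 and the
 * topmost ESCR (0x3e1) to index 0x41, so the table spans
 * P4_ESCR_MSR_TABLE_SIZE == 0x42 slots; unpopulated slots stay zero,
 * which is exactly what p4_get_escr_idx() below rejects.
 */
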
static const unsigned int p4_escr_table[P4_ESCR_MSR_TABLE_SIZE] = {
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR2),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR3),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR4),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR5),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR1),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR0),
        P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR1),
};

static int p4_get_escr_idx(unsigned int addr)
{
        unsigned int idx = P4_ESCR_MSR_IDX(addr);

        if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
                        !p4_escr_table[idx])) {
                WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
                return -1;
        }

        return idx;
}

static int p4_next_cntr(int thread, unsigned long *used_mask,
                        struct p4_event_bind *bind)
{
        int i, j;

        for (i = 0; i < P4_CNTR_LIMIT; i++) {
                j = bind->cntr[thread][i];
                if (j != -1 && !test_bit(j, used_mask))
                        return j;
        }

        return -1;
}

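/*
 * Illustration: for an event bound as .cntr = { {12, 13, 16}, {14, 15, 17} }
 * (see p4_event_bind_map above), a request on thread 0 probes counters
 * 12, 13 and 16 in order and returns the first one not yet set in
 * used_mask, or -1 if all three are already taken.
 */
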
static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
        int cpu = raw_smp_processor_id();
        struct hw_perf_event *hwc;
        struct p4_event_bind *bind;
        unsigned int i, thread, num;
        int cntr_idx, escr_idx;

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);
        bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);

        for (i = 0, num = n; i < n; i++, num--) {

                hwc = &cpuc->event_list[i]->hw;
                thread = p4_ht_thread(cpu);
                bind = p4_config_get_bind(hwc->config);
                escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
                if (unlikely(escr_idx == -1))
                        goto done;

                if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
                        cntr_idx = hwc->idx;
                        if (assign)
                                assign[i] = hwc->idx;
                        goto reserve;
                }

                cntr_idx = p4_next_cntr(thread, used_mask, bind);
                if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
                        goto done;

                p4_pmu_swap_config_ts(hwc, cpu);
                if (assign)
                        assign[i] = cntr_idx;
reserve:
                set_bit(cntr_idx, used_mask);
                set_bit(escr_idx, escr_mask);
        }

done:
        return num ? -ENOSPC : 0;
}

static __initconst const struct x86_pmu p4_pmu = {
        .name                   = "Netburst P4/Xeon",
        .handle_irq             = p4_pmu_handle_irq,
        .disable_all            = p4_pmu_disable_all,
        .enable_all             = p4_pmu_enable_all,
        .enable                 = p4_pmu_enable_event,
        .disable                = p4_pmu_disable_event,
        .eventsel               = MSR_P4_BPU_CCCR0,
        .perfctr                = MSR_P4_BPU_PERFCTR0,
        .event_map              = p4_pmu_event_map,
        .max_events             = ARRAY_SIZE(p4_general_events),
        .get_event_constraints  = x86_get_event_constraints,
        /*
         * If HT is disabled we may need to use all
         * ARCH_P4_MAX_CCCR counters simultaneously;
         * for the moment leave this restricted, assuming
         * HT is on.
         */
        .num_counters           = ARCH_P4_MAX_CCCR,
        .apic                   = 1,
        .cntval_bits            = 40,
        .cntval_mask            = (1ULL << 40) - 1,
        .max_period             = (1ULL << 39) - 1,
        .hw_config              = p4_hw_config,
        .schedule_events        = p4_pmu_schedule_events,
};

static __init int p4_pmu_init(void)
{
        unsigned int low, high;

        /* If we get stripped -- indexing fails */
        BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);

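        /*
         * IA32_MISC_ENABLE bit 7 is the read-only "Performance
         * Monitoring Available" flag; without it the Netburst PMU
         * cannot be used.
         */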
        rdmsr(MSR_IA32_MISC_ENABLE, low, high);
        if (!(low & (1 << 7))) {
                pr_cont("unsupported Netburst CPU model %d ",
                        boot_cpu_data.x86_model);
                return -ENODEV;
        }

        memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
                sizeof(hw_cache_event_ids));

        pr_cont("Netburst events, ");

        x86_pmu = p4_pmu;

        return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */