author     Robert Richter <robert.richter@amd.com>  2010-02-26 04:54:56 -0500
committer  Robert Richter <robert.richter@amd.com>  2010-05-04 05:35:28 -0400
commit     da759fe5be24ec3b236a76c007b460cf6caf2009 (patch)
tree       881639946d460a9fc5d9528793bd10e703f826a4 /arch/x86/oprofile
parent     8617f98c001d00b176422d707e6a67b88bcd7e0d (diff)
oprofile/x86: move IBS code
Moving code to make future changes easier. This groups all IBS code
together.

Signed-off-by: Robert Richter <robert.richter@amd.com>
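All of the moved counter code follows the same read-modify-write discipline on the per-counter control MSRs: read the event-select register, mask it down to the model's reserved bits, OR in the new control value, and write it back. A minimal user-space sketch of that pattern, with the MSR emulated by a plain variable and made-up mask/control constants rather than AMD's real EVNTSEL bit layout:

/*
 * Sketch of the rdmsrl/mask/merge/wrmsrl pattern used by
 * op_mux_switch_ctrl() and op_amd_setup_ctrs() in the hunks below.
 * The "MSR" here is an ordinary variable; the constants are
 * illustrative assumptions, not the real hardware bit layout.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_evntsel;   /* stands in for msrs->controls[i].addr */

static void switch_ctrl(uint64_t reserved, uint64_t ctrl)
{
        uint64_t val;

        val = fake_evntsel;     /* rdmsrl(addr, val)                  */
        val &= reserved;        /* keep only the reserved bits        */
        val |= ctrl;            /* merge in the new event selection   */
        fake_evntsel = val;     /* wrmsrl(addr, val)                  */
}

int main(void)
{
        fake_evntsel = 0xdeadbeefULL;           /* leftover state      */
        switch_ctrl(0xffff0000ULL, 0x1234ULL);  /* illustrative values */
        printf("evntsel = %#llx\n", (unsigned long long)fake_evntsel);
        return 0;
}

The point of the pattern is that bits the model marks as reserved survive the update untouched, so switching between multiplexed virtual counters never clobbers state the hardware owns.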
Diffstat (limited to 'arch/x86/oprofile')
-rw-r--r--  arch/x86/oprofile/op_model_amd.c | 220
1 file changed, 110 insertions(+), 110 deletions(-)
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 536d0b0b39a5..e159254fb7cd 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -102,116 +102,6 @@ static u32 get_ibs_caps(void)
 	return ibs_caps;
 }
 
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
-                               struct op_msrs const * const msrs)
-{
-        u64 val;
-        int i;
-
-        /* enable active counters */
-        for (i = 0; i < NUM_COUNTERS; ++i) {
-                int virt = op_x86_phys_to_virt(i);
-                if (!reset_value[virt])
-                        continue;
-                rdmsrl(msrs->controls[i].addr, val);
-                val &= model->reserved;
-                val |= op_x86_get_ctrl(model, &counter_config[virt]);
-                wrmsrl(msrs->controls[i].addr, val);
-        }
-}
-
-#endif
-
-/* functions for op_amd_spec */
-
-static void op_amd_shutdown(struct op_msrs const * const msrs)
-{
-        int i;
-
-        for (i = 0; i < NUM_COUNTERS; ++i) {
-                if (!msrs->counters[i].addr)
-                        continue;
-                release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-                release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-        }
-}
-
-static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
-{
-        int i;
-
-        for (i = 0; i < NUM_COUNTERS; i++) {
-                if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
-                        goto fail;
-                if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
-                        release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-                        goto fail;
-                }
-                /* both registers must be reserved */
-                msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
-                msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
-                continue;
-        fail:
-                if (!counter_config[i].enabled)
-                        continue;
-                op_x86_warn_reserved(i);
-                op_amd_shutdown(msrs);
-                return -EBUSY;
-        }
-
-        return 0;
-}
-
-static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
-                              struct op_msrs const * const msrs)
-{
-        u64 val;
-        int i;
-
-        /* setup reset_value */
-        for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
-                if (counter_config[i].enabled
-                    && msrs->counters[op_x86_virt_to_phys(i)].addr)
-                        reset_value[i] = counter_config[i].count;
-                else
-                        reset_value[i] = 0;
-        }
-
-        /* clear all counters */
-        for (i = 0; i < NUM_COUNTERS; ++i) {
-                if (!msrs->controls[i].addr)
-                        continue;
-                rdmsrl(msrs->controls[i].addr, val);
-                if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
-                        op_x86_warn_in_use(i);
-                val &= model->reserved;
-                wrmsrl(msrs->controls[i].addr, val);
-                /*
-                 * avoid a false detection of ctr overflows in NMI
-                 * handler
-                 */
-                wrmsrl(msrs->counters[i].addr, -1LL);
-        }
-
-        /* enable active counters */
-        for (i = 0; i < NUM_COUNTERS; ++i) {
-                int virt = op_x86_phys_to_virt(i);
-                if (!reset_value[virt])
-                        continue;
-
-                /* setup counter registers */
-                wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
-
-                /* setup control registers */
-                rdmsrl(msrs->controls[i].addr, val);
-                val &= model->reserved;
-                val |= op_x86_get_ctrl(model, &counter_config[virt]);
-                wrmsrl(msrs->controls[i].addr, val);
-        }
-}
-
 /*
  * 16-bit Linear Feedback Shift Register (LFSR)
  *
@@ -376,6 +266,116 @@ static void op_amd_stop_ibs(void)
 	wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
+                               struct op_msrs const * const msrs)
+{
+        u64 val;
+        int i;
+
+        /* enable active counters */
+        for (i = 0; i < NUM_COUNTERS; ++i) {
+                int virt = op_x86_phys_to_virt(i);
+                if (!reset_value[virt])
+                        continue;
+                rdmsrl(msrs->controls[i].addr, val);
+                val &= model->reserved;
+                val |= op_x86_get_ctrl(model, &counter_config[virt]);
+                wrmsrl(msrs->controls[i].addr, val);
+        }
+}
+
+#endif
+
+/* functions for op_amd_spec */
+
+static void op_amd_shutdown(struct op_msrs const * const msrs)
+{
+        int i;
+
+        for (i = 0; i < NUM_COUNTERS; ++i) {
+                if (!msrs->counters[i].addr)
+                        continue;
+                release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+                release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+        }
+}
+
+static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
+{
+        int i;
+
+        for (i = 0; i < NUM_COUNTERS; i++) {
+                if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
+                        goto fail;
+                if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
+                        release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+                        goto fail;
+                }
+                /* both registers must be reserved */
+                msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
+                msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
+                continue;
+        fail:
+                if (!counter_config[i].enabled)
+                        continue;
+                op_x86_warn_reserved(i);
+                op_amd_shutdown(msrs);
+                return -EBUSY;
+        }
+
+        return 0;
+}
+
+static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
+                              struct op_msrs const * const msrs)
+{
+        u64 val;
+        int i;
+
+        /* setup reset_value */
+        for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
+                if (counter_config[i].enabled
+                    && msrs->counters[op_x86_virt_to_phys(i)].addr)
+                        reset_value[i] = counter_config[i].count;
+                else
+                        reset_value[i] = 0;
+        }
+
+        /* clear all counters */
+        for (i = 0; i < NUM_COUNTERS; ++i) {
+                if (!msrs->controls[i].addr)
+                        continue;
+                rdmsrl(msrs->controls[i].addr, val);
+                if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
+                        op_x86_warn_in_use(i);
+                val &= model->reserved;
+                wrmsrl(msrs->controls[i].addr, val);
+                /*
+                 * avoid a false detection of ctr overflows in NMI
+                 * handler
+                 */
+                wrmsrl(msrs->counters[i].addr, -1LL);
+        }
+
+        /* enable active counters */
+        for (i = 0; i < NUM_COUNTERS; ++i) {
+                int virt = op_x86_phys_to_virt(i);
+                if (!reset_value[virt])
+                        continue;
+
+                /* setup counter registers */
+                wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
+
+                /* setup control registers */
+                rdmsrl(msrs->controls[i].addr, val);
+                val &= model->reserved;
+                val |= op_x86_get_ctrl(model, &counter_config[virt]);
+                wrmsrl(msrs->controls[i].addr, val);
+        }
+}
+
 static int op_amd_check_ctrs(struct pt_regs * const regs,
                              struct op_msrs const * const msrs)
 {
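The trailing context of the first hunk shows where the moved block now sits: directly above the file's 16-bit Linear Feedback Shift Register, which the IBS code uses for pseudo-random values. As a self-contained illustration of that technique, here is a minimal 16-bit Fibonacci LFSR; the tap positions (16, 14, 13, 11) are a common maximal-length choice and an assumption here, not read from this diff:

/*
 * Sketch of a 16-bit Fibonacci LFSR. With taps at bits 16, 14, 13 and 11
 * (x^16 + x^14 + x^13 + x^11 + 1) it cycles through all 65535 non-zero
 * states, giving cheap pseudo-random values suitable for jittering a
 * sampling interval. The tap choice is an assumption for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t lfsr = 0xF00D;          /* any non-zero seed works */

static uint16_t lfsr_next(void)
{
        /* xor the tapped bits to form the bit shifted in at the top */
        uint16_t bit = ((lfsr >> 0) ^ (lfsr >> 2) ^
                        (lfsr >> 3) ^ (lfsr >> 5)) & 1;

        lfsr = (uint16_t)((lfsr >> 1) | (bit << 15));
        return lfsr;
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("%#06x\n", lfsr_next());
        return 0;
}

A zero state is the one fixed point of the recurrence, which is why the seed must be non-zero.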