about summary refs log tree commit diff stats
path: root/arch/x86/oprofile/op_model_amd.c
diff options
context:
space:
mode:
authorRobert Richter <robert.richter@amd.com>2009-07-16 07:04:43 -0400
committerRobert Richter <robert.richter@amd.com>2009-07-20 10:43:19 -0400
commitd8471ad3ab613a1ba7abd3aad46659de39a2871c (patch)
tree34aff4195cd8a00342439ed63a8d0ab041132dd6 /arch/x86/oprofile/op_model_amd.c
parenta5659d17adb815fb35e11745e2f39c3f0bfd579f (diff)
oprofile: Introduce op_x86_phys_to_virt()
This new function translates physical to virtual counter numbers.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch/x86/oprofile/op_model_amd.c')
-rw-r--r--arch/x86/oprofile/op_model_amd.c80
1 file changed, 31 insertions, 49 deletions
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index dcfd4505cacc..67f830d12e0e 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -42,9 +42,6 @@
42#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21)) 42#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
43 43
44static unsigned long reset_value[NUM_VIRT_COUNTERS]; 44static unsigned long reset_value[NUM_VIRT_COUNTERS];
45#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
46DECLARE_PER_CPU(int, switch_index);
47#endif
48 45
49#ifdef CONFIG_OPROFILE_IBS 46#ifdef CONFIG_OPROFILE_IBS
50 47
@@ -141,21 +138,20 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
141 138
142 /* enable active counters */ 139 /* enable active counters */
143 for (i = 0; i < NUM_COUNTERS; ++i) { 140 for (i = 0; i < NUM_COUNTERS; ++i) {
144#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 141 int virt = op_x86_phys_to_virt(i);
145 int offset = i + __get_cpu_var(switch_index); 142 if (!counter_config[virt].enabled)
146#else 143 continue;
147 int offset = i; 144 if (!msrs->counters[i].addr)
148#endif 145 continue;
149 if (counter_config[offset].enabled && msrs->counters[i].addr) { 146
150 /* setup counter registers */ 147 /* setup counter registers */
151 wrmsrl(msrs->counters[i].addr, -(u64)reset_value[offset]); 148 wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
152 149
153 /* setup control registers */ 150 /* setup control registers */
154 rdmsrl(msrs->controls[i].addr, val); 151 rdmsrl(msrs->controls[i].addr, val);
155 val &= model->reserved; 152 val &= model->reserved;
156 val |= op_x86_get_ctrl(model, &counter_config[offset]); 153 val |= op_x86_get_ctrl(model, &counter_config[virt]);
157 wrmsrl(msrs->controls[i].addr, val); 154 wrmsrl(msrs->controls[i].addr, val);
158 }
159 } 155 }
160} 156}
161 157
@@ -170,14 +166,13 @@ static void op_amd_switch_ctrl(struct op_x86_model_spec const *model,
170 166
171 /* enable active counters */ 167 /* enable active counters */
172 for (i = 0; i < NUM_COUNTERS; ++i) { 168 for (i = 0; i < NUM_COUNTERS; ++i) {
173 int offset = i + __get_cpu_var(switch_index); 169 int virt = op_x86_phys_to_virt(i);
174 if (counter_config[offset].enabled) { 170 if (!counter_config[virt].enabled)
175 /* setup control registers */ 171 continue;
176 rdmsrl(msrs->controls[i].addr, val); 172 rdmsrl(msrs->controls[i].addr, val);
177 val &= model->reserved; 173 val &= model->reserved;
178 val |= op_x86_get_ctrl(model, &counter_config[offset]); 174 val |= op_x86_get_ctrl(model, &counter_config[virt]);
179 wrmsrl(msrs->controls[i].addr, val); 175 wrmsrl(msrs->controls[i].addr, val);
180 }
181 } 176 }
182} 177}
183 178
@@ -292,19 +287,15 @@ static int op_amd_check_ctrs(struct pt_regs * const regs,
292 int i; 287 int i;
293 288
294 for (i = 0; i < NUM_COUNTERS; ++i) { 289 for (i = 0; i < NUM_COUNTERS; ++i) {
295#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 290 int virt = op_x86_phys_to_virt(i);
296 int offset = i + __get_cpu_var(switch_index); 291 if (!reset_value[virt])
297#else
298 int offset = i;
299#endif
300 if (!reset_value[offset])
301 continue; 292 continue;
302 rdmsrl(msrs->counters[i].addr, val); 293 rdmsrl(msrs->counters[i].addr, val);
303 /* bit is clear if overflowed: */ 294 /* bit is clear if overflowed: */
304 if (val & OP_CTR_OVERFLOW) 295 if (val & OP_CTR_OVERFLOW)
305 continue; 296 continue;
306 oprofile_add_sample(regs, offset); 297 oprofile_add_sample(regs, virt);
307 wrmsrl(msrs->counters[i].addr, -(u64)reset_value[offset]); 298 wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
308 } 299 }
309 300
310 op_amd_handle_ibs(regs, msrs); 301 op_amd_handle_ibs(regs, msrs);
@@ -319,16 +310,11 @@ static void op_amd_start(struct op_msrs const * const msrs)
319 int i; 310 int i;
320 311
321 for (i = 0; i < NUM_COUNTERS; ++i) { 312 for (i = 0; i < NUM_COUNTERS; ++i) {
322#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 313 if (!reset_value[op_x86_phys_to_virt(i)])
323 int offset = i + __get_cpu_var(switch_index); 314 continue;
324#else 315 rdmsrl(msrs->controls[i].addr, val);
325 int offset = i; 316 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
326#endif 317 wrmsrl(msrs->controls[i].addr, val);
327 if (reset_value[offset]) {
328 rdmsrl(msrs->controls[i].addr, val);
329 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
330 wrmsrl(msrs->controls[i].addr, val);
331 }
332 } 318 }
333 319
334 op_amd_start_ibs(); 320 op_amd_start_ibs();
@@ -344,11 +330,7 @@ static void op_amd_stop(struct op_msrs const * const msrs)
344 * pm callback 330 * pm callback
345 */ 331 */
346 for (i = 0; i < NUM_COUNTERS; ++i) { 332 for (i = 0; i < NUM_COUNTERS; ++i) {
347#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 333 if (!reset_value[op_x86_phys_to_virt(i)])
348 if (!reset_value[i + per_cpu(switch_index, smp_processor_id())])
349#else
350 if (!reset_value[i])
351#endif
352 continue; 334 continue;
353 rdmsrl(msrs->controls[i].addr, val); 335 rdmsrl(msrs->controls[i].addr, val);
354 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; 336 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;