path: root/arch/x86/oprofile/op_model_ppro.c
author    Robert Richter <robert.richter@amd.com>  2010-03-23 14:33:21 -0400
committer Robert Richter <robert.richter@amd.com>  2010-05-04 05:35:26 -0400
commit    d0e4120fda6f87eead438eed4d49032e12060e58 (patch)
tree      57f3ab727aa12bc63f19437a0a026e2ea5bd6d67 /arch/x86/oprofile/op_model_ppro.c
parent    8f5a2dd83a1f8e89fdc17eb0f2f07c2e713e635a (diff)
oprofile/x86: reserve counter msrs pairwise
AMD's and Intel's P6 generic performance counters have pairwise counter and control MSRs. This patch changes the counter reservation so that both MSRs of a pair must be successfully registered. It also joins some counter loops and removes the now unnecessary NUM_CONTROLS macro from the AMD implementation.

Signed-off-by: Robert Richter <robert.richter@amd.com>
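The pairwise pattern the patch introduces can be sketched outside the diff roughly as below. This is a simplified illustration, not the patched oprofile functions themselves: the reserve_*/release_* NMI helpers and the MSR_P6_* constants are the ones the patch uses, while struct msr_slot, reserve_pairwise() and release_pairwise() are hypothetical stand-ins for oprofile's struct op_msr arrays and per-model setup/shutdown hooks.

	/*
	 * Sketch only: reserve and release a counter/control msr pair
	 * together. A slot is marked usable only if both reservations
	 * succeed, so teardown can release both msrs in a single loop.
	 */
	struct msr_slot {
		unsigned long counter;	/* MSR_P6_PERFCTR0 + i, or 0 if unused */
		unsigned long control;	/* MSR_P6_EVNTSEL0 + i, or 0 if unused */
	};

	static void reserve_pairwise(struct msr_slot *slots, int nr)
	{
		int i;

		for (i = 0; i < nr; i++) {
			if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
				continue;	/* counter msr unavailable, skip slot */
			if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
				/* control msr unavailable: hand the counter back */
				release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
				continue;
			}
			/* both msrs reserved: only now is the slot marked usable */
			slots[i].counter = MSR_P6_PERFCTR0 + i;
			slots[i].control = MSR_P6_EVNTSEL0 + i;
		}
	}

	static void release_pairwise(struct msr_slot *slots, int nr)
	{
		int i;

		for (i = 0; i < nr; i++) {
			if (!slots[i].counter)
				continue;
			/* a set counter address implies the control msr was
			 * reserved too, so both are released here */
			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
		}
	}

Because an address is recorded only when both reservations succeed, the shutdown path no longer needs the separate check of the controls array that the old code performed in a second loop.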
Diffstat (limited to 'arch/x86/oprofile/op_model_ppro.c')
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c | 36
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 2bf90fafa7b5..f8e268e8e992 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -35,13 +35,15 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 	int i;
 
 	for (i = 0; i < num_counters; i++) {
-		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
-			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
-	}
-
-	for (i = 0; i < num_counters; i++) {
-		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
-			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+		if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
+			continue;
+		if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
+			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
+			continue;
+		}
+		/* both registers must be reserved */
+		msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
+		msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
 	}
 }
 
@@ -92,12 +94,10 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 			op_x86_warn_in_use(i);
 		val &= model->reserved;
 		wrmsrl(msrs->controls[i].addr, val);
-	}
-
-	/* avoid a false detection of ctr overflows in NMI handler */
-	for (i = 0; i < num_counters; ++i) {
-		if (unlikely(!msrs->counters[i].addr))
-			continue;
+		/*
+		 * avoid a false detection of ctr overflows in NMI *
+		 * handler
+		 */
 		wrmsrl(msrs->counters[i].addr, -1LL);
 	}
 
@@ -194,12 +194,10 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
 	int i;
 
 	for (i = 0; i < num_counters; ++i) {
-		if (msrs->counters[i].addr)
-			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
-	}
-	for (i = 0; i < num_counters; ++i) {
-		if (msrs->controls[i].addr)
-			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
+		if (!msrs->counters[i].addr)
+			continue;
+		release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
+		release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
 	}
 	if (reset_value) {
 		kfree(reset_value);