author     Robert Richter <robert.richter@amd.com>   2009-05-25 09:10:32 -0400
committer  Robert Richter <robert.richter@amd.com>   2009-06-11 13:42:14 -0400
commit     3370d358569755625aba4d9a846a040ce691d9ed
tree       97b712208843a33dd29d1bfd9f90bc8aec30a595   /arch/x86/oprofile/op_model_ppro.c
parent     ef8828ddf828174785421af67c281144d4b8e796
x86/oprofile: replace macros to calculate control register
This patch introduces op_x86_get_ctrl() to calculate the value of the
performance control register. This is generic code usable for all
models. The event and reserved masks are model-specific and stored in
struct op_x86_model_spec. The 64-bit MSR functions are now used. The patch
removes many hard-to-read macros previously used for the ctrl calculation.
The function op_x86_get_ctrl() is common code and a first step toward
further merging the performance counter implementations for the x86 models.
Signed-off-by: Robert Richter <robert.richter@amd.com>
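
As a rough illustration of what such a ctrl-assembly helper has to do, here is a
minimal sketch built only from the fields this patch already references
(counter_config[i].user/.kernel/.unit_mask/.event and the per-model masks). The
bit positions follow the architectural event-select layout; the local EVENTSEL_*
names and the ->event_mask field name are assumptions for the sketch, not the
literal code added by this series.

    /*
     * Illustrative sketch only -- not the actual op_x86_get_ctrl().
     * struct op_x86_model_spec and struct op_counter_config come from the
     * x86 oprofile code; the ->event_mask field name is an assumption (the
     * commit only says the event mask is stored in struct op_x86_model_spec).
     */
    #include "op_x86_model.h"
    #include "op_counter.h"

    /* Architectural event-select bit positions (Intel SDM layout). */
    #define EVENTSEL_USR    (1ULL << 16)    /* count in user mode */
    #define EVENTSEL_OS     (1ULL << 17)    /* count in kernel mode */
    #define EVENTSEL_INT    (1ULL << 20)    /* raise an interrupt (NMI) on overflow */

    static u64 example_get_ctrl(struct op_x86_model_spec const *model,
                                struct op_counter_config *counter_config)
    {
            u64 val = 0;

            val |= EVENTSEL_INT;
            if (counter_config->user)
                    val |= EVENTSEL_USR;
            if (counter_config->kernel)
                    val |= EVENTSEL_OS;
            val |= (counter_config->unit_mask & 0xFFULL) << 8;     /* unit mask, bits 8-15 */
            val |= counter_config->event & model->event_mask;      /* model-specific event bits */

            return val;
    }

The per-model ->reserved mask complements this: in the hunks below, the old MSR
value is AND-ed with model->reserved so that the reserved bits keep their current
contents, and the value returned by op_x86_get_ctrl() is then OR-ed in. The global
enable bit is left out of the sketch; in the oprofile code the counters are
switched on separately when profiling is started.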
Diffstat (limited to 'arch/x86/oprofile/op_model_ppro.c')
-rw-r--r--   arch/x86/oprofile/op_model_ppro.c | 29
1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 40b44ee521d5..3092f998baf2 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -10,6 +10,7 @@
  * @author Philippe Elie
  * @author Graydon Hoare
  * @author Andi Kleen
+ * @author Robert Richter <robert.richter@amd.com>
  */
 
 #include <linux/oprofile.h>
@@ -26,8 +27,8 @@ static int num_counters = 2;
 static int counter_width = 32;
 
 #define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
-#define CTRL_CLEAR(x) (x &= (1<<21))
-#define CTRL_SET_EVENT(val, e) (val |= e)
+
+#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))
 
 static u64 *reset_value;
 
@@ -54,7 +55,7 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
                             struct op_msrs const * const msrs)
 {
-        unsigned int low, high;
+        u64 val;
         int i;
 
         if (!reset_value) {
@@ -85,9 +86,9 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
         for (i = 0 ; i < num_counters; ++i) {
                 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
                         continue;
-                rdmsr(msrs->controls[i].addr, low, high);
-                CTRL_CLEAR(low);
-                wrmsr(msrs->controls[i].addr, low, high);
+                rdmsrl(msrs->controls[i].addr, val);
+                val &= model->reserved;
+                wrmsrl(msrs->controls[i].addr, val);
         }
 
         /* avoid a false detection of ctr overflows in NMI handler */
@@ -101,17 +102,11 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
         for (i = 0; i < num_counters; ++i) {
                 if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
                         reset_value[i] = counter_config[i].count;
-
                         wrmsrl(msrs->counters[i].addr, -reset_value[i]);
-
-                        rdmsr(msrs->controls[i].addr, low, high);
-                        CTRL_CLEAR(low);
-                        CTRL_SET_ENABLE(low);
-                        CTRL_SET_USR(low, counter_config[i].user);
-                        CTRL_SET_KERN(low, counter_config[i].kernel);
-                        CTRL_SET_UM(low, counter_config[i].unit_mask);
-                        CTRL_SET_EVENT(low, counter_config[i].event);
-                        wrmsr(msrs->controls[i].addr, low, high);
+                        rdmsrl(msrs->controls[i].addr, val);
+                        val &= model->reserved;
+                        val |= op_x86_get_ctrl(model, &counter_config[i]);
+                        wrmsrl(msrs->controls[i].addr, val);
                 } else {
                         reset_value[i] = 0;
                 }
@@ -205,6 +200,7 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
 struct op_x86_model_spec const op_ppro_spec = {
         .num_counters = 2,
         .num_controls = 2,
+        .reserved = MSR_PPRO_EVENTSEL_RESERVED,
         .fill_in_addresses = &ppro_fill_in_addresses,
         .setup_ctrs = &ppro_setup_ctrs,
         .check_ctrs = &ppro_check_ctrs,
@@ -249,6 +245,7 @@ static int arch_perfmon_init(struct oprofile_operations *ignore)
 }
 
 struct op_x86_model_spec op_arch_perfmon_spec = {
+        .reserved = MSR_PPRO_EVENTSEL_RESERVED,
         .init = &arch_perfmon_init,
         /* num_counters/num_controls filled in at runtime */
         .fill_in_addresses = &ppro_fill_in_addresses,