path: root/arch/x86/oprofile/op_model_amd.c
author	Robert Richter <robert.richter@amd.com>	2009-05-25 09:10:32 -0400
committer	Robert Richter <robert.richter@amd.com>	2009-06-11 13:42:14 -0400
commit	3370d358569755625aba4d9a846a040ce691d9ed (patch)
tree	97b712208843a33dd29d1bfd9f90bc8aec30a595 /arch/x86/oprofile/op_model_amd.c
parent	ef8828ddf828174785421af67c281144d4b8e796 (diff)
x86/oprofile: replace macros to calculate control register
This patch introduces op_x86_get_ctrl() to calculate the value of the
performance control register. This is generic code usable for all
models. The event and reserved masks are model specific and stored in
struct op_x86_model_spec. 64-bit MSR functions are used now. The patch
removes many hard-to-read macros used for ctrl calculation.

The function op_x86_get_ctrl() is common code and the first step to
further merge performance counter implementations for x86 models.

Signed-off-by: Robert Richter <robert.richter@amd.com>
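The helper itself lands in the shared x86 oprofile code, which this page (limited to op_model_amd.c) does not show. The following is a sketch of how such a helper can assemble the control value from the per-model masks; the body is a reconstruction from the macros removed below, not necessarily the committed code, and it assumes the ARCH_PERFMON_EVENTSEL_* constants from the kernel's perf headers:

	u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
			    struct op_counter_config *counter_config)
	{
		u64 val = 0;
		u16 event = (u16)counter_config->event;

		val |= ARCH_PERFMON_EVENTSEL_INT;	/* interrupt (NMI) on overflow */
		val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
		val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
		val |= (counter_config->unit_mask & 0xFF) << 8;
		event &= model->event_mask ? model->event_mask : 0xFF; /* 0x0FFF on AMD */
		val |= event & 0xFF;			/* event select, bits 7:0 */
		val |= (u64)(event & 0x0F00) << 24;	/* event bits 11:8 -> bits 35:32 */

		return val;
	}

With event_mask = 0x0FFF, AMD models keep all twelve event-select bits; models that leave event_mask unset fall back to the classic 8-bit event codes.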
Diffstat (limited to 'arch/x86/oprofile/op_model_amd.c')
-rw-r--r--	arch/x86/oprofile/op_model_amd.c	41
1 file changed, 16 insertions(+), 25 deletions(-)
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 86e0a01ba125..2406ab863605 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -25,12 +25,11 @@
 
 #define NUM_COUNTERS 4
 #define NUM_CONTROLS 4
+#define OP_EVENT_MASK			0x0FFF
+
+#define MSR_AMD_EVENTSEL_RESERVED	((0xFFFFFCF0ULL<<32)|(1ULL<<21))
 
 #define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
-#define CTRL_CLEAR_LO(x) (x &= (1<<21))
-#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
-#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
-#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
 
 static unsigned long reset_value[NUM_COUNTERS];
 
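The new 64-bit reserved mask is exactly the two old 32-bit clear macros folded into one value: CTRL_CLEAR_LO kept only bit 21 of the low half, and CTRL_CLEAR_HI kept the 0xfffffcf0 bits of the high half. A standalone check of that equivalence (plain C, with an arbitrary test value chosen just for illustration):

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		const uint64_t reserved = (0xFFFFFCF0ULL << 32) | (1ULL << 21);
		uint32_t low = 0x12345678, high = 0x9ABCDEF0;	/* arbitrary */
		uint64_t val = ((uint64_t)high << 32) | low;

		/* old path: CTRL_CLEAR_LO(low); CTRL_CLEAR_HI(high); */
		low &= 1 << 21;
		high &= 0xFFFFFCF0;

		/* new path: val &= model->reserved; */
		val &= reserved;

		assert(val == (((uint64_t)high << 32) | low));
		return 0;
	}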
@@ -84,21 +83,19 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
 	}
 }
 
-
 static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 			      struct op_msrs const * const msrs)
 {
-	unsigned int low, high;
+	u64 val;
 	int i;
 
 	/* clear all counters */
 	for (i = 0 ; i < NUM_CONTROLS; ++i) {
 		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
 			continue;
-		rdmsr(msrs->controls[i].addr, low, high);
-		CTRL_CLEAR_LO(low);
-		CTRL_CLEAR_HI(high);
-		wrmsr(msrs->controls[i].addr, low, high);
+		rdmsrl(msrs->controls[i].addr, val);
+		val &= model->reserved;
+		wrmsrl(msrs->controls[i].addr, val);
 	}
 
 	/* avoid a false detection of ctr overflows in NMI handler */
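rdmsrl()/wrmsrl() are the kernel's 64-bit MSR accessors: one u64 instead of the low/high register pair that rdmsr()/wrmsr() take. Conceptually the halves relate as below (a userspace toy, with a fake MSR variable standing in for the real RDMSR instruction):

	#include <stdint.h>
	#include <assert.h>

	static uint64_t fake_msr = 0xFFFFFCF000200000ULL;	/* stand-in MSR */

	/* toy models of the accessors; the real ones are inline asm */
	static void toy_rdmsr(uint32_t *lo, uint32_t *hi)
	{
		*lo = (uint32_t)fake_msr;
		*hi = (uint32_t)(fake_msr >> 32);
	}

	static void toy_rdmsrl(uint64_t *val)
	{
		*val = fake_msr;
	}

	int main(void)
	{
		uint32_t lo, hi;
		uint64_t val;

		toy_rdmsr(&lo, &hi);	/* old style: two 32-bit halves */
		toy_rdmsrl(&val);	/* new style: one 64-bit value */
		assert(val == (((uint64_t)hi << 32) | lo));
		return 0;
	}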
@@ -112,19 +109,11 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 	for (i = 0; i < NUM_COUNTERS; ++i) {
 		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
 			reset_value[i] = counter_config[i].count;
-
 			wrmsr(msrs->counters[i].addr, -(unsigned int)counter_config[i].count, -1);
-
-			rdmsr(msrs->controls[i].addr, low, high);
-			CTRL_CLEAR_LO(low);
-			CTRL_CLEAR_HI(high);
-			CTRL_SET_ENABLE(low);
-			CTRL_SET_USR(low, counter_config[i].user);
-			CTRL_SET_KERN(low, counter_config[i].kernel);
-			CTRL_SET_UM(low, counter_config[i].unit_mask);
-			CTRL_SET_EVENT_LOW(low, counter_config[i].event);
-			CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
-			wrmsr(msrs->controls[i].addr, low, high);
+			rdmsrl(msrs->controls[i].addr, val);
+			val &= model->reserved;
+			val |= op_x86_get_ctrl(model, &counter_config[i]);
+			wrmsrl(msrs->controls[i].addr, val);
 		} else {
 			reset_value[i] = 0;
 		}
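The counter write itself is untouched: loading the MSR with the two's complement of the requested count makes the counter overflow, and thus fire the NMI, after exactly that many events, at which point CTR_OVERFLOWED() sees bit 31 clear. A quick standalone check of the arithmetic:

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		uint32_t count = 100000;		/* requested sample interval */
		uint32_t ctr = -(unsigned int)count;	/* value written to the MSR */

		/* after `count` increments the 32-bit value wraps to 0 ... */
		assert((uint32_t)(ctr + count) == 0);
		/* ... so CTR_OVERFLOWED(n) (!((n) & (1U<<31))) reports overflow */
		assert(!((uint32_t)(ctr + count) & (1U << 31)));
		return 0;
	}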
@@ -486,14 +475,16 @@ static void op_amd_exit(void) {}
 #endif /* CONFIG_OPROFILE_IBS */
 
 struct op_x86_model_spec const op_amd_spec = {
-	.init			= op_amd_init,
-	.exit			= op_amd_exit,
 	.num_counters		= NUM_COUNTERS,
 	.num_controls		= NUM_CONTROLS,
+	.reserved		= MSR_AMD_EVENTSEL_RESERVED,
+	.event_mask		= OP_EVENT_MASK,
+	.init			= op_amd_init,
+	.exit			= op_amd_exit,
 	.fill_in_addresses	= &op_amd_fill_in_addresses,
 	.setup_ctrs		= &op_amd_setup_ctrs,
 	.check_ctrs		= &op_amd_check_ctrs,
 	.start			= &op_amd_start,
 	.stop			= &op_amd_stop,
-	.shutdown		= &op_amd_shutdown
+	.shutdown		= &op_amd_shutdown,
 };
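For reference, with the two new fields the per-model descriptor looks roughly like this; a sketch reconstructed from the initializer above, so the exact types and member order in arch/x86/oprofile/op_x86_model.h may differ:

	struct op_x86_model_spec {
		int		(*init)(struct oprofile_operations *ops);
		void		(*exit)(void);
		unsigned int	num_counters;
		unsigned int	num_controls;
		u64		reserved;	/* ctrl MSR bits preserved across reprogramming */
		u16		event_mask;	/* valid event-select bits for this model */
		void		(*fill_in_addresses)(struct op_msrs * const msrs);
		void		(*setup_ctrs)(struct op_x86_model_spec const *model,
					      struct op_msrs const * const msrs);
		/* check_ctrs, start, stop, shutdown follow the same pattern */
	};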