author	Robert Richter <robert.richter@amd.com>	2009-07-09 09:11:45 -0400
committer	Robert Richter <robert.richter@amd.com>	2009-07-20 10:43:21 -0400
commit	39e97f40c3a5e71de0532368deaa683e09b74ba2 (patch)
tree	d30e542c6aa87a68ba16bd412067682efc606758 /arch
parent	52471c67ee2fa5ed6f700ef57bf27833c63b2192 (diff)
x86/oprofile: Add function has_mux() to check multiplexing support
The check is used to prevent running multiplexing code for models not supporting multiplexing. Before, the code was running but without effect.

Signed-off-by: Robert Richter <robert.richter@amd.com>
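The pattern the patch introduces is easy to see in isolation: multiplexing support is signalled by a model setting a non-NULL ->switch_ctrl callback, and has_mux() folds that test into one helper that the setup/shutdown paths use as an early-out guard. Below is a minimal, hypothetical userspace sketch of that guard pattern; struct model_spec, demo_switch_ctrl() and main() are simplified stand-ins invented for illustration, not the kernel's op_x86_model_spec or the real driver code.

/*
 * Sketch of the has_mux() guard pattern (userspace, simplified).
 * A model advertises multiplexing support by providing a non-NULL
 * ->switch_ctrl callback; code paths that only matter for
 * multiplexing bail out early when it is absent.
 */
#include <stdio.h>
#include <stddef.h>

struct model_spec {
	void (*switch_ctrl)(void);	/* NULL => no multiplexing support */
};

static const struct model_spec *model;

static inline int has_mux(void)
{
	return !!model->switch_ctrl;	/* non-NULL callback => mux supported */
}

/* Simplified stand-in for the driver's mux setup path. */
static int setup_mux(void)
{
	if (!has_mux())
		return 1;		/* nothing to set up, report success */
	/* ... allocate and initialize per-CPU multiplex state here ... */
	return 1;
}

static void demo_switch_ctrl(void)
{
	puts("switching counter set");
}

int main(void)
{
	static const struct model_spec no_mux   = { .switch_ctrl = NULL };
	static const struct model_spec with_mux = { .switch_ctrl = demo_switch_ctrl };

	model = &no_mux;
	printf("no_mux:   has_mux()=%d setup=%d\n", has_mux(), setup_mux());

	model = &with_mux;
	printf("with_mux: has_mux()=%d setup=%d\n", has_mux(), setup_mux());
	return 0;
}

In the patched driver the same idea means that models which do not set ->switch_ctrl now skip the per-CPU multiplex allocation and setup entirely, instead of running that code to no effect.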
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/oprofile/nmi_int.c	19
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 82ee29517f16..dca7240aeb26 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -124,6 +124,11 @@ static void nmi_stop(void)
 
 static DEFINE_PER_CPU(int, switch_index);
 
+static inline int has_mux(void)
+{
+	return !!model->switch_ctrl;
+}
+
 inline int op_x86_phys_to_virt(int phys)
 {
 	return __get_cpu_var(switch_index) + phys;
@@ -132,6 +137,10 @@ inline int op_x86_phys_to_virt(int phys)
 static void nmi_shutdown_mux(void)
 {
 	int i;
+
+	if (!has_mux())
+		return;
+
 	for_each_possible_cpu(i) {
 		kfree(per_cpu(cpu_msrs, i).multiplex);
 		per_cpu(cpu_msrs, i).multiplex = NULL;
@@ -144,12 +153,17 @@ static int nmi_setup_mux(void)
 	size_t multiplex_size =
 		sizeof(struct op_msr) * model->num_virt_counters;
 	int i;
+
+	if (!has_mux())
+		return 1;
+
 	for_each_possible_cpu(i) {
 		per_cpu(cpu_msrs, i).multiplex =
 			kmalloc(multiplex_size, GFP_KERNEL);
 		if (!per_cpu(cpu_msrs, i).multiplex)
 			return 0;
 	}
+
 	return 1;
 }
 
@@ -158,6 +172,9 @@ static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
 	int i;
 	struct op_msr *multiplex = msrs->multiplex;
 
+	if (!has_mux())
+		return;
+
 	for (i = 0; i < model->num_virt_counters; ++i) {
 		if (counter_config[i].enabled) {
 			multiplex[i].saved = -(u64)counter_config[i].count;
@@ -229,7 +246,7 @@ static int nmi_multiplex_on(void)
 
 static int nmi_switch_event(void)
 {
-	if (!model->switch_ctrl)
+	if (!has_mux())
 		return -ENOSYS; /* not implemented */
 	if (nmi_multiplex_on() < 0)
 		return -EINVAL; /* not necessary */