author     Mike Travis <travis@sgi.com>          2008-03-25 18:06:59 -0400
committer  Ingo Molnar <mingo@elte.hu>           2008-04-19 13:44:58 -0400
commit     d18d00f5dbcd1a95811617e9812cf0560bd465ee (patch)
tree       2d76e6deaab5af91c0c58c90d5e9383edd88000c /arch/x86/oprofile/nmi_int.c
parent     6b6309b4c7f6da467c5d5b7d18fa8cb79730f381 (diff)
x86: oprofile: remove NR_CPUS arrays in arch/x86/oprofile/nmi_int.c
Change the following arrays sized by NR_CPUS to be PERCPU variables:

	static struct op_msrs cpu_msrs[NR_CPUS];
	static unsigned long saved_lvtpc[NR_CPUS];

Also some minor complaints from checkpatch.pl fixed.

Based on:
	git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
	git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git

All changes were transparent except for:

	static void nmi_shutdown(void)
	{
	+	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
		nmi_enabled = 0;
		on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
		unregister_die_notifier(&profile_exceptions_nb);
	-	model->shutdown(cpu_msrs);
	+	model->shutdown(msrs);
		free_msrs();
	}

The existing code passed a reference to cpu 0's instance of struct
op_msrs to model->shutdown, whilst the other functions are passed a
reference to <this cpu's> instance of a struct op_msrs.  This seemed
to be a bug to me even though as long as cpu 0 and <this cpu> are of
the same type it would have the same effect...?

Cc: Philippe Elie <phil.el@wanadoo.fr>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
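For readers unfamiliar with the per-CPU API this conversion relies on,
here is a minimal sketch of the pattern (illustrative only, not part
of the patch; the struct name demo_stats is hypothetical, and
__get_cpu_var() is the current-CPU accessor of this kernel era):

	#include <linux/percpu.h>
	#include <linux/smp.h>

	struct demo_stats {
		unsigned long count;
	};

	/* Before: one statically sized slot per possible CPU. */
	/* static struct demo_stats stats[NR_CPUS]; */

	/* After: a true per-CPU variable. */
	static DEFINE_PER_CPU(struct demo_stats, stats);

	static void demo(void)
	{
		int cpu = smp_processor_id();

		/* Index an explicit CPU's instance (replaces stats[cpu]). */
		per_cpu(stats, cpu).count++;

		/* Access this CPU's instance directly; safe only with
		 * preemption disabled, e.g. in IPI or NMI context. */
		__get_cpu_var(stats).count++;
	}

The practical benefit is that per-CPU storage is instantiated for the
CPUs that can actually exist, rather than reserving NR_CPUS slots at
compile time.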
Diffstat (limited to 'arch/x86/oprofile/nmi_int.c')
-rw-r--r--  arch/x86/oprofile/nmi_int.c  49
1 file changed, 27 insertions(+), 22 deletions(-)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 1f11cf0a307f..cc48d3fde545 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -23,8 +23,8 @@
 #include "op_x86_model.h"
 
 static struct op_x86_model_spec const *model;
-static struct op_msrs cpu_msrs[NR_CPUS];
-static unsigned long saved_lvtpc[NR_CPUS];
+static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
+static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
 static int nmi_start(void);
 static void nmi_stop(void);
@@ -89,7 +89,7 @@ static int profile_exceptions_notify(struct notifier_block *self,
 
 	switch (val) {
 	case DIE_NMI:
-		if (model->check_ctrs(args->regs, &cpu_msrs[cpu]))
+		if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)))
 			ret = NOTIFY_STOP;
 		break;
 	default:
@@ -126,7 +126,7 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
 static void nmi_save_registers(void *dummy)
 {
 	int cpu = smp_processor_id();
-	struct op_msrs *msrs = &cpu_msrs[cpu];
+	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
 	nmi_cpu_save_registers(msrs);
 }
 
@@ -134,10 +134,10 @@ static void free_msrs(void)
 {
 	int i;
 	for_each_possible_cpu(i) {
-		kfree(cpu_msrs[i].counters);
-		cpu_msrs[i].counters = NULL;
-		kfree(cpu_msrs[i].controls);
-		cpu_msrs[i].controls = NULL;
+		kfree(per_cpu(cpu_msrs, i).counters);
+		per_cpu(cpu_msrs, i).counters = NULL;
+		kfree(per_cpu(cpu_msrs, i).controls);
+		per_cpu(cpu_msrs, i).controls = NULL;
 	}
 }
 
@@ -149,13 +149,15 @@ static int allocate_msrs(void)
 
 	int i;
 	for_each_possible_cpu(i) {
-		cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
-		if (!cpu_msrs[i].counters) {
+		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
+							GFP_KERNEL);
+		if (!per_cpu(cpu_msrs, i).counters) {
 			success = 0;
 			break;
 		}
-		cpu_msrs[i].controls = kmalloc(controls_size, GFP_KERNEL);
-		if (!cpu_msrs[i].controls) {
+		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
+							GFP_KERNEL);
+		if (!per_cpu(cpu_msrs, i).controls) {
 			success = 0;
 			break;
 		}
@@ -170,11 +172,11 @@ static int allocate_msrs(void)
 static void nmi_cpu_setup(void *dummy)
 {
 	int cpu = smp_processor_id();
-	struct op_msrs *msrs = &cpu_msrs[cpu];
+	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
 	spin_lock(&oprofilefs_lock);
 	model->setup_ctrs(msrs);
 	spin_unlock(&oprofilefs_lock);
-	saved_lvtpc[cpu] = apic_read(APIC_LVTPC);
+	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
@@ -203,13 +205,15 @@ static int nmi_setup(void)
 	 */
 
 	/* Assume saved/restored counters are the same on all CPUs */
-	model->fill_in_addresses(&cpu_msrs[0]);
+	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
 	for_each_possible_cpu(cpu) {
 		if (cpu != 0) {
-			memcpy(cpu_msrs[cpu].counters, cpu_msrs[0].counters,
+			memcpy(per_cpu(cpu_msrs, cpu).counters,
+				per_cpu(cpu_msrs, 0).counters,
 				sizeof(struct op_msr) * model->num_counters);
 
-			memcpy(cpu_msrs[cpu].controls, cpu_msrs[0].controls,
+			memcpy(per_cpu(cpu_msrs, cpu).controls,
+				per_cpu(cpu_msrs, 0).controls,
 				sizeof(struct op_msr) * model->num_controls);
 		}
 
@@ -249,7 +253,7 @@ static void nmi_cpu_shutdown(void *dummy)
 {
 	unsigned int v;
 	int cpu = smp_processor_id();
-	struct op_msrs *msrs = &cpu_msrs[cpu];
+	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
 
 	/* restoring APIC_LVTPC can trigger an apic error because the delivery
 	 * mode and vector nr combination can be illegal. That's by design: on
@@ -258,23 +262,24 @@ static void nmi_cpu_shutdown(void *dummy)
 	 */
 	v = apic_read(APIC_LVTERR);
 	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
-	apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
+	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
 	apic_write(APIC_LVTERR, v);
 	nmi_restore_registers(msrs);
 }
 
 static void nmi_shutdown(void)
 {
+	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
 	nmi_enabled = 0;
 	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
 	unregister_die_notifier(&profile_exceptions_nb);
-	model->shutdown(cpu_msrs);
+	model->shutdown(msrs);
 	free_msrs();
 }
 
 static void nmi_cpu_start(void *dummy)
 {
-	struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
+	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
 	model->start(msrs);
 }
 
@@ -286,7 +291,7 @@ static int nmi_start(void)
 
 static void nmi_cpu_stop(void *dummy)
 {
-	struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
+	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
 	model->stop(msrs);
 }
 