author     Carlos R. Mafra <crmafra@gmail.com>   2008-01-30 07:32:33 -0500
committer  Ingo Molnar <mingo@elte.hu>           2008-01-30 07:32:33 -0500
commit     b75f53dba8a4a61fda1ff7e0fb0fe3b0d80e0c64 (patch)
tree       3bc8eb269669cc9946a538ca7c07b4c987fb9bc2 /arch/x86/oprofile
parent     b506a9d08bae9336ff9223c8a46a37bf27bd924a (diff)
x86: fix style errors in nmi_int.c
This patch fixes most errors detected by checkpatch.pl.

                                       errors   lines of code   errors/KLOC
arch/x86/oprofile/nmi_int.c  (after)        1             461           2.1
arch/x86/oprofile/nmi_int.c  (before)      60             477         125.7

No code changed.

size:
   text    data     bss     dec     hex filename
   2675     264     472    3411     d53 nmi_int.o.after
   2675     264     472    3411     d53 nmi_int.o.before

md5sum:
847aea0cc68fe1a2b5e7019439f3b4dd  nmi_int.o.after
847aea0cc68fe1a2b5e7019439f3b4dd  nmi_int.o.before

Signed-off-by: Carlos R. Mafra <crmafra@gmail.com>
Reviewed-by: Jesper Juhl <jesper.juhl@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
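For readers unfamiliar with the warning classes involved, the snippet below is a minimal, self-contained sketch (hypothetical names: struct foo, try_register, init_before, init_after; it is not taken from the patch) of the two patterns that account for most of the hunks that follow: the pointer asterisk bound to the type rather than the name, and an assignment buried inside an if () condition.

/*
 * Illustrative sketch only -- not part of the patch below.
 */
#include <stdio.h>

struct foo { int x; };
static struct foo * old_style;  /* checkpatch: "foo * bar" should be "foo *bar" */
static struct foo *new_style;   /* the accepted form */

static int try_register(void)
{
        return 0;               /* stand-in for a registration call, 0 on success */
}

static int init_before(void)    /* the style checkpatch.pl flags */
{
        int err;
        if (!(err = try_register()))
                err = try_register();
        return err;
}

static int init_after(void)     /* the style the patch converts the file to */
{
        int err;

        err = try_register();
        if (!err)
                err = try_register();
        return err;
}

int main(void)
{
        (void)old_style;
        (void)new_style;
        printf("%d %d\n", init_before(), init_after());
        return 0;
}

Both variants behave identically, which is the point of the cleanup: only the layout changes, as the unchanged size and md5sum output above confirms.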
Diffstat (limited to 'arch/x86/oprofile')
-rw-r--r--   arch/x86/oprofile/nmi_int.c   212
1 file changed, 98 insertions(+), 114 deletions(-)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index c8ab79ef4276..1f11cf0a307f 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -18,11 +18,11 @@
 #include <asm/nmi.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
 
 #include "op_counter.h"
 #include "op_x86_model.h"
 
-static struct op_x86_model_spec const * model;
+static struct op_x86_model_spec const *model;
 static struct op_msrs cpu_msrs[NR_CPUS];
 static unsigned long saved_lvtpc[NR_CPUS];
 
@@ -41,7 +41,6 @@ static int nmi_suspend(struct sys_device *dev, pm_message_t state)
         return 0;
 }
 
-
 static int nmi_resume(struct sys_device *dev)
 {
         if (nmi_enabled == 1)
@@ -49,29 +48,27 @@ static int nmi_resume(struct sys_device *dev)
         return 0;
 }
 
-
 static struct sysdev_class oprofile_sysclass = {
         .name = "oprofile",
         .resume = nmi_resume,
         .suspend = nmi_suspend,
 };
 
-
 static struct sys_device device_oprofile = {
         .id = 0,
         .cls = &oprofile_sysclass,
 };
 
-
 static int __init init_sysfs(void)
 {
         int error;
-        if (!(error = sysdev_class_register(&oprofile_sysclass)))
+
+        error = sysdev_class_register(&oprofile_sysclass);
+        if (!error)
                 error = sysdev_register(&device_oprofile);
         return error;
 }
 
-
 static void exit_sysfs(void)
 {
         sysdev_unregister(&device_oprofile);
@@ -90,7 +87,7 @@ static int profile_exceptions_notify(struct notifier_block *self,
         int ret = NOTIFY_DONE;
         int cpu = smp_processor_id();
 
-        switch(val) {
+        switch (val) {
         case DIE_NMI:
                 if (model->check_ctrs(args->regs, &cpu_msrs[cpu]))
                         ret = NOTIFY_STOP;
@@ -101,24 +98,24 @@ static int profile_exceptions_notify(struct notifier_block *self,
         return ret;
 }
 
-static void nmi_cpu_save_registers(struct op_msrs * msrs)
+static void nmi_cpu_save_registers(struct op_msrs *msrs)
 {
         unsigned int const nr_ctrs = model->num_counters;
         unsigned int const nr_ctrls = model->num_controls;
-        struct op_msr * counters = msrs->counters;
-        struct op_msr * controls = msrs->controls;
+        struct op_msr *counters = msrs->counters;
+        struct op_msr *controls = msrs->controls;
         unsigned int i;
 
         for (i = 0; i < nr_ctrs; ++i) {
-                if (counters[i].addr){
+                if (counters[i].addr) {
                         rdmsr(counters[i].addr,
                                 counters[i].saved.low,
                                 counters[i].saved.high);
                 }
         }
 
         for (i = 0; i < nr_ctrls; ++i) {
-                if (controls[i].addr){
+                if (controls[i].addr) {
                         rdmsr(controls[i].addr,
                                 controls[i].saved.low,
                                 controls[i].saved.high);
@@ -126,15 +123,13 @@ static void nmi_cpu_save_registers(struct op_msrs * msrs)
         }
 }
 
-
-static void nmi_save_registers(void * dummy)
+static void nmi_save_registers(void *dummy)
 {
         int cpu = smp_processor_id();
-        struct op_msrs * msrs = &cpu_msrs[cpu];
+        struct op_msrs *msrs = &cpu_msrs[cpu];
         nmi_cpu_save_registers(msrs);
 }
 
-
 static void free_msrs(void)
 {
         int i;
@@ -146,7 +141,6 @@ static void free_msrs(void)
         }
 }
 
-
 static int allocate_msrs(void)
 {
         int success = 1;
@@ -173,11 +167,10 @@ static int allocate_msrs(void)
         return success;
 }
 
-
-static void nmi_cpu_setup(void * dummy)
+static void nmi_cpu_setup(void *dummy)
 {
         int cpu = smp_processor_id();
-        struct op_msrs * msrs = &cpu_msrs[cpu];
+        struct op_msrs *msrs = &cpu_msrs[cpu];
         spin_lock(&oprofilefs_lock);
         model->setup_ctrs(msrs);
         spin_unlock(&oprofilefs_lock);
@@ -193,13 +186,14 @@ static struct notifier_block profile_exceptions_nb = {
 
 static int nmi_setup(void)
 {
-        int err=0;
+        int err = 0;
         int cpu;
 
         if (!allocate_msrs())
                 return -ENOMEM;
 
-        if ((err = register_die_notifier(&profile_exceptions_nb))){
+        err = register_die_notifier(&profile_exceptions_nb);
+        if (err) {
                 free_msrs();
                 return err;
         }
@@ -210,7 +204,7 @@ static int nmi_setup(void)
 
         /* Assume saved/restored counters are the same on all CPUs */
         model->fill_in_addresses(&cpu_msrs[0]);
-        for_each_possible_cpu (cpu) {
+        for_each_possible_cpu(cpu) {
                 if (cpu != 0) {
                         memcpy(cpu_msrs[cpu].counters, cpu_msrs[0].counters,
                                 sizeof(struct op_msr) * model->num_counters);
@@ -226,39 +220,37 @@ static int nmi_setup(void)
         return 0;
 }
 
-
-static void nmi_restore_registers(struct op_msrs * msrs)
+static void nmi_restore_registers(struct op_msrs *msrs)
 {
         unsigned int const nr_ctrs = model->num_counters;
         unsigned int const nr_ctrls = model->num_controls;
-        struct op_msr * counters = msrs->counters;
-        struct op_msr * controls = msrs->controls;
+        struct op_msr *counters = msrs->counters;
+        struct op_msr *controls = msrs->controls;
         unsigned int i;
 
         for (i = 0; i < nr_ctrls; ++i) {
-                if (controls[i].addr){
+                if (controls[i].addr) {
                         wrmsr(controls[i].addr,
                                 controls[i].saved.low,
                                 controls[i].saved.high);
                 }
         }
 
         for (i = 0; i < nr_ctrs; ++i) {
-                if (counters[i].addr){
+                if (counters[i].addr) {
                         wrmsr(counters[i].addr,
                                 counters[i].saved.low,
                                 counters[i].saved.high);
                 }
         }
 }
-
 
-static void nmi_cpu_shutdown(void * dummy)
+static void nmi_cpu_shutdown(void *dummy)
 {
         unsigned int v;
         int cpu = smp_processor_id();
-        struct op_msrs * msrs = &cpu_msrs[cpu];
+        struct op_msrs *msrs = &cpu_msrs[cpu];
 
         /* restoring APIC_LVTPC can trigger an apic error because the delivery
          * mode and vector nr combination can be illegal. That's by design: on
          * power on apic lvt contain a zero vector nr which are legal only for
@@ -271,7 +263,6 @@ static void nmi_cpu_shutdown(void * dummy)
         nmi_restore_registers(msrs);
 }
 
-
 static void nmi_shutdown(void)
 {
         nmi_enabled = 0;
@@ -281,45 +272,40 @@ static void nmi_shutdown(void)
         free_msrs();
 }
 
-
-static void nmi_cpu_start(void * dummy)
+static void nmi_cpu_start(void *dummy)
 {
-        struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
+        struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
         model->start(msrs);
 }
-
 
 static int nmi_start(void)
 {
         on_each_cpu(nmi_cpu_start, NULL, 0, 1);
         return 0;
 }
 
-
-static void nmi_cpu_stop(void * dummy)
+static void nmi_cpu_stop(void *dummy)
 {
-        struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
+        struct op_msrs const *msrs = &cpu_msrs[smp_processor_id()];
         model->stop(msrs);
 }
 
-
 static void nmi_stop(void)
 {
         on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
 }
 
-
 struct op_counter_config counter_config[OP_MAX_COUNTER];
 
-static int nmi_create_files(struct super_block * sb, struct dentry * root)
+static int nmi_create_files(struct super_block *sb, struct dentry *root)
 {
         unsigned int i;
 
         for (i = 0; i < model->num_counters; ++i) {
-                struct dentry * dir;
+                struct dentry *dir;
                 char buf[4];
 
                 /* quick little hack to _not_ expose a counter if it is not
                  * available for use. This should protect userspace app.
                  * NOTE: assumes 1:1 mapping here (that counters are organized
                  * sequentially in their struct assignment).
@@ -329,21 +315,21 @@ static int nmi_create_files(struct super_block * sb, struct dentry * root)
 
                 snprintf(buf, sizeof(buf), "%d", i);
                 dir = oprofilefs_mkdir(sb, root, buf);
                 oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
                 oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
                 oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
                 oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
                 oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
                 oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
         }
 
         return 0;
 }
 
 static int p4force;
 module_param(p4force, int, 0);
 
-static int __init p4_init(char ** cpu_type)
+static int __init p4_init(char **cpu_type)
 {
         __u8 cpu_model = boot_cpu_data.x86_model;
 
@@ -356,15 +342,15 @@ static int __init p4_init(char ** cpu_type)
         return 1;
 #else
         switch (smp_num_siblings) {
-                case 1:
-                        *cpu_type = "i386/p4";
-                        model = &op_p4_spec;
-                        return 1;
+        case 1:
+                *cpu_type = "i386/p4";
+                model = &op_p4_spec;
+                return 1;
 
-                case 2:
-                        *cpu_type = "i386/p4-ht";
-                        model = &op_p4_ht2_spec;
-                        return 1;
+        case 2:
+                *cpu_type = "i386/p4-ht";
+                model = &op_p4_ht2_spec;
+                return 1;
         }
 #endif
 
@@ -373,8 +359,7 @@ static int __init p4_init(char ** cpu_type)
         return 0;
 }
 
-
-static int __init ppro_init(char ** cpu_type)
+static int __init ppro_init(char **cpu_type)
 {
         __u8 cpu_model = boot_cpu_data.x86_model;
 
@@ -409,52 +394,52 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
         if (!cpu_has_apic)
                 return -ENODEV;
 
         switch (vendor) {
-                case X86_VENDOR_AMD:
-                        /* Needs to be at least an Athlon (or hammer in 32bit mode) */
-
-                        switch (family) {
-                        default:
-                                return -ENODEV;
-                        case 6:
-                                model = &op_athlon_spec;
-                                cpu_type = "i386/athlon";
-                                break;
-                        case 0xf:
-                                model = &op_athlon_spec;
-                                /* Actually it could be i386/hammer too, but give
-                                   user space an consistent name. */
-                                cpu_type = "x86-64/hammer";
-                                break;
-                        case 0x10:
-                                model = &op_athlon_spec;
-                                cpu_type = "x86-64/family10";
-                                break;
-                        }
-                        break;
-
-                case X86_VENDOR_INTEL:
-                        switch (family) {
-                                /* Pentium IV */
-                                case 0xf:
-                                        if (!p4_init(&cpu_type))
-                                                return -ENODEV;
-                                        break;
-
-                                /* A P6-class processor */
-                                case 6:
-                                        if (!ppro_init(&cpu_type))
-                                                return -ENODEV;
-                                        break;
-
-                                default:
-                                        return -ENODEV;
-                        }
-                        break;
-
-                default:
-                        return -ENODEV;
+        case X86_VENDOR_AMD:
+                /* Needs to be at least an Athlon (or hammer in 32bit mode) */
+
+                switch (family) {
+                default:
+                        return -ENODEV;
+                case 6:
+                        model = &op_athlon_spec;
+                        cpu_type = "i386/athlon";
+                        break;
+                case 0xf:
+                        model = &op_athlon_spec;
+                        /* Actually it could be i386/hammer too, but give
+                           user space an consistent name. */
+                        cpu_type = "x86-64/hammer";
+                        break;
+                case 0x10:
+                        model = &op_athlon_spec;
+                        cpu_type = "x86-64/family10";
+                        break;
+                }
+                break;
+
+        case X86_VENDOR_INTEL:
+                switch (family) {
+                        /* Pentium IV */
+                case 0xf:
+                        if (!p4_init(&cpu_type))
+                                return -ENODEV;
+                        break;
+
+                        /* A P6-class processor */
+                case 6:
+                        if (!ppro_init(&cpu_type))
+                                return -ENODEV;
+                        break;
+
+                default:
+                        return -ENODEV;
+                }
+                break;
+
+        default:
+                return -ENODEV;
         }
 
         init_sysfs();
@@ -469,7 +454,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
         return 0;
 }
 
-
 void op_nmi_exit(void)
 {
         if (using_nmi)