path: root/arch/x86/oprofile/nmi_int.c
author     Steven Rostedt <srostedt@redhat.com>  2010-05-21 11:49:57 -0400
committer  Steven Rostedt <rostedt@goodmis.org>  2010-05-21 11:49:57 -0400
commit     ff5f149b6aec8edbfa3698721667acd043009a33 (patch)
tree       d052553eb296dfee3f01b1cb2b717cb7ccf3127a /arch/x86/oprofile/nmi_int.c
parent     f0218b3e9974f06014b61be8987159f4a20e011e (diff)
parent     580d607cd666dfabfc1c7b0fb08c8ac690c7c87f (diff)
Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip into trace/tip/tracing/core-7
Conflicts:
	include/linux/ftrace_event.h
	include/trace/ftrace.h
	kernel/trace/trace_event_perf.c
	kernel/trace/trace_kprobe.c
	kernel/trace/trace_syscalls.c

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'arch/x86/oprofile/nmi_int.c')
-rw-r--r--  arch/x86/oprofile/nmi_int.c  199
1 file changed, 123 insertions(+), 76 deletions(-)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 2c505ee71014..b28d2f1253bb 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -31,8 +31,9 @@ static struct op_x86_model_spec *model;
 static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
 static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
-/* 0 == registered but off, 1 == registered and on */
-static int nmi_enabled = 0;
+/* must be protected with get_online_cpus()/put_online_cpus(): */
+static int nmi_enabled;
+static int ctr_running;
 
 struct op_counter_config counter_config[OP_MAX_COUNTER];
 
@@ -61,12 +62,16 @@ static int profile_exceptions_notify(struct notifier_block *self,
 {
         struct die_args *args = (struct die_args *)data;
         int ret = NOTIFY_DONE;
-        int cpu = smp_processor_id();
 
         switch (val) {
         case DIE_NMI:
         case DIE_NMI_IPI:
-                model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
+                if (ctr_running)
+                        model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
+                else if (!nmi_enabled)
+                        break;
+                else
+                        model->stop(&__get_cpu_var(cpu_msrs));
                 ret = NOTIFY_STOP;
                 break;
         default:
@@ -95,24 +100,36 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
 static void nmi_cpu_start(void *dummy)
 {
         struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
-        model->start(msrs);
+        if (!msrs->controls)
+                WARN_ON_ONCE(1);
+        else
+                model->start(msrs);
 }
 
 static int nmi_start(void)
 {
+        get_online_cpus();
         on_each_cpu(nmi_cpu_start, NULL, 1);
+        ctr_running = 1;
+        put_online_cpus();
         return 0;
 }
 
 static void nmi_cpu_stop(void *dummy)
 {
         struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
-        model->stop(msrs);
+        if (!msrs->controls)
+                WARN_ON_ONCE(1);
+        else
+                model->stop(msrs);
 }
 
 static void nmi_stop(void)
 {
+        get_online_cpus();
         on_each_cpu(nmi_cpu_stop, NULL, 1);
+        ctr_running = 0;
+        put_online_cpus();
 }
 
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
@@ -252,7 +269,10 @@ static int nmi_switch_event(void)
         if (nmi_multiplex_on() < 0)
                 return -EINVAL; /* not necessary */
 
-        on_each_cpu(nmi_cpu_switch, NULL, 1);
+        get_online_cpus();
+        if (ctr_running)
+                on_each_cpu(nmi_cpu_switch, NULL, 1);
+        put_online_cpus();
 
         return 0;
 }
@@ -295,6 +315,7 @@ static void free_msrs(void)
                 kfree(per_cpu(cpu_msrs, i).controls);
                 per_cpu(cpu_msrs, i).controls = NULL;
         }
+        nmi_shutdown_mux();
 }
 
 static int allocate_msrs(void)
@@ -307,14 +328,21 @@ static int allocate_msrs(void)
                 per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
                                                         GFP_KERNEL);
                 if (!per_cpu(cpu_msrs, i).counters)
-                        return 0;
+                        goto fail;
                 per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
                                                         GFP_KERNEL);
                 if (!per_cpu(cpu_msrs, i).controls)
-                        return 0;
+                        goto fail;
         }
 
+        if (!nmi_setup_mux())
+                goto fail;
+
         return 1;
+
+fail:
+        free_msrs();
+        return 0;
 }
 
 static void nmi_cpu_setup(void *dummy)
@@ -336,49 +364,6 @@ static struct notifier_block profile_exceptions_nb = {
         .priority = 2
 };
 
-static int nmi_setup(void)
-{
-        int err = 0;
-        int cpu;
-
-        if (!allocate_msrs())
-                err = -ENOMEM;
-        else if (!nmi_setup_mux())
-                err = -ENOMEM;
-        else
-                err = register_die_notifier(&profile_exceptions_nb);
-
-        if (err) {
-                free_msrs();
-                nmi_shutdown_mux();
-                return err;
-        }
-
-        /* We need to serialize save and setup for HT because the subset
-         * of msrs are distinct for save and setup operations
-         */
-
-        /* Assume saved/restored counters are the same on all CPUs */
-        model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
-        for_each_possible_cpu(cpu) {
-                if (!cpu)
-                        continue;
-
-                memcpy(per_cpu(cpu_msrs, cpu).counters,
-                       per_cpu(cpu_msrs, 0).counters,
-                       sizeof(struct op_msr) * model->num_counters);
-
-                memcpy(per_cpu(cpu_msrs, cpu).controls,
-                       per_cpu(cpu_msrs, 0).controls,
-                       sizeof(struct op_msr) * model->num_controls);
-
-                mux_clone(cpu);
-        }
-        on_each_cpu(nmi_cpu_setup, NULL, 1);
-        nmi_enabled = 1;
-        return 0;
-}
-
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
         struct op_msr *counters = msrs->counters;
@@ -412,20 +397,24 @@ static void nmi_cpu_shutdown(void *dummy)
         apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
         apic_write(APIC_LVTERR, v);
         nmi_cpu_restore_registers(msrs);
+        if (model->cpu_down)
+                model->cpu_down();
 }
 
-static void nmi_shutdown(void)
+static void nmi_cpu_up(void *dummy)
 {
-        struct op_msrs *msrs;
+        if (nmi_enabled)
+                nmi_cpu_setup(dummy);
+        if (ctr_running)
+                nmi_cpu_start(dummy);
+}
 
-        nmi_enabled = 0;
-        on_each_cpu(nmi_cpu_shutdown, NULL, 1);
-        unregister_die_notifier(&profile_exceptions_nb);
-        nmi_shutdown_mux();
-        msrs = &get_cpu_var(cpu_msrs);
-        model->shutdown(msrs);
-        free_msrs();
-        put_cpu_var(cpu_msrs);
+static void nmi_cpu_down(void *dummy)
+{
+        if (ctr_running)
+                nmi_cpu_stop(dummy);
+        if (nmi_enabled)
+                nmi_cpu_shutdown(dummy);
 }
 
 static int nmi_create_files(struct super_block *sb, struct dentry *root)
@@ -457,7 +446,6 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
         return 0;
 }
 
-#ifdef CONFIG_SMP
 static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
                                  void *data)
 {
@@ -465,10 +453,10 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
         switch (action) {
         case CPU_DOWN_FAILED:
         case CPU_ONLINE:
-                smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
+                smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
                 break;
         case CPU_DOWN_PREPARE:
-                smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
+                smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
                 break;
         }
         return NOTIFY_DONE;
@@ -477,7 +465,75 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
 static struct notifier_block oprofile_cpu_nb = {
         .notifier_call = oprofile_cpu_notifier
 };
-#endif
+
+static int nmi_setup(void)
+{
+        int err = 0;
+        int cpu;
+
+        if (!allocate_msrs())
+                return -ENOMEM;
+
+        /* We need to serialize save and setup for HT because the subset
+         * of msrs are distinct for save and setup operations
+         */
+
+        /* Assume saved/restored counters are the same on all CPUs */
+        err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
+        if (err)
+                goto fail;
+
+        for_each_possible_cpu(cpu) {
+                if (!cpu)
+                        continue;
+
+                memcpy(per_cpu(cpu_msrs, cpu).counters,
+                       per_cpu(cpu_msrs, 0).counters,
+                       sizeof(struct op_msr) * model->num_counters);
+
+                memcpy(per_cpu(cpu_msrs, cpu).controls,
+                       per_cpu(cpu_msrs, 0).controls,
+                       sizeof(struct op_msr) * model->num_controls);
+
+                mux_clone(cpu);
+        }
+
+        nmi_enabled = 0;
+        ctr_running = 0;
+        barrier();
+        err = register_die_notifier(&profile_exceptions_nb);
+        if (err)
+                goto fail;
+
+        get_online_cpus();
+        register_cpu_notifier(&oprofile_cpu_nb);
+        on_each_cpu(nmi_cpu_setup, NULL, 1);
+        nmi_enabled = 1;
+        put_online_cpus();
+
+        return 0;
+fail:
+        free_msrs();
+        return err;
+}
+
+static void nmi_shutdown(void)
+{
+        struct op_msrs *msrs;
+
+        get_online_cpus();
+        unregister_cpu_notifier(&oprofile_cpu_nb);
+        on_each_cpu(nmi_cpu_shutdown, NULL, 1);
+        nmi_enabled = 0;
+        ctr_running = 0;
+        put_online_cpus();
+        barrier();
+        unregister_die_notifier(&profile_exceptions_nb);
+        msrs = &get_cpu_var(cpu_msrs);
+        model->shutdown(msrs);
+        free_msrs();
+        put_cpu_var(cpu_msrs);
+}
 
 #ifdef CONFIG_PM
 
@@ -687,9 +743,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
                 return -ENODEV;
         }
 
-#ifdef CONFIG_SMP
-        register_cpu_notifier(&oprofile_cpu_nb);
-#endif
         /* default values, can be overwritten by model */
         ops->create_files = nmi_create_files;
         ops->setup = nmi_setup;
@@ -716,12 +769,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
 void op_nmi_exit(void)
 {
-        if (using_nmi) {
+        if (using_nmi)
                 exit_sysfs();
-#ifdef CONFIG_SMP
-                unregister_cpu_notifier(&oprofile_cpu_nb);
-#endif
-        }
-        if (model->exit)
-                model->exit();
 }