author     Ingo Molnar <mingo@elte.hu>  2010-05-10 07:13:40 -0400
committer  Ingo Molnar <mingo@elte.hu>  2010-05-10 07:13:40 -0400
commit     cc49b092d308f8ea8634134b0d95d831a88a674b (patch)
tree       4d557fe55c211b76ca2a2284b750f380b581f6e0
parent     7c224a03a79021ab12ce057964df9e679af5386d (diff)
parent     bae663bc635e2726c7c5228dbf0f2051e16d1c81 (diff)

Merge branch 'core' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile into oprofile
-rw-r--r--  arch/x86/oprofile/nmi_int.c      | 187
-rw-r--r--  arch/x86/oprofile/op_model_amd.c |  54
-rw-r--r--  arch/x86/oprofile/op_x86_model.h |   2
3 files changed, 130 insertions(+), 113 deletions(-)
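
The nmi_int.c changes below make counter start/stop aware of CPU hotplug: the on_each_cpu() calls now run under get_online_cpus()/put_online_cpus(), and the global state is recorded in nmi_enabled/ctr_running so a CPU that appears later can be brought to the same state. The following is a minimal user-space C sketch of that start/stop pattern only; get_online_cpus(), put_online_cpus() and on_each_cpu() are stub stand-ins here, not the real kernel API.

/* user-space sketch of the hotplug-safe start/stop pattern (stubs, not kernel code) */
#include <stdio.h>

static int ctr_running;			/* mirrors the new flag in nmi_int.c */

static void get_online_cpus(void) { printf("hotplug lock taken\n"); }
static void put_online_cpus(void) { printf("hotplug lock released\n"); }

/* stand-in: pretend to run fn on every online CPU */
static void on_each_cpu(void (*fn)(void *), void *arg, int wait)
{
	(void)wait;
	fn(arg);
}

static void nmi_cpu_start(void *dummy) { (void)dummy; printf("start counters on this CPU\n"); }
static void nmi_cpu_stop(void *dummy)  { (void)dummy; printf("stop counters on this CPU\n"); }

static int nmi_start(void)
{
	get_online_cpus();			/* no CPU may come or go ...          */
	on_each_cpu(nmi_cpu_start, NULL, 1);
	ctr_running = 1;			/* ... while flag and hardware change */
	put_online_cpus();
	return 0;
}

static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}

int main(void)
{
	nmi_start();
	printf("ctr_running=%d\n", ctr_running);
	nmi_stop();
	printf("ctr_running=%d\n", ctr_running);
	return 0;
}
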
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 9f001d904599..b28d2f1253bb 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -31,8 +31,9 @@ static struct op_x86_model_spec *model;
 static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
 static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
-/* 0 == registered but off, 1 == registered and on */
-static int nmi_enabled = 0;
+/* must be protected with get_online_cpus()/put_online_cpus(): */
+static int nmi_enabled;
+static int ctr_running;
 
 struct op_counter_config counter_config[OP_MAX_COUNTER];
 
@@ -61,12 +62,16 @@ static int profile_exceptions_notify(struct notifier_block *self,
 {
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
-	int cpu = smp_processor_id();
 
 	switch (val) {
 	case DIE_NMI:
 	case DIE_NMI_IPI:
-		model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
+		if (ctr_running)
+			model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
+		else if (!nmi_enabled)
+			break;
+		else
+			model->stop(&__get_cpu_var(cpu_msrs));
 		ret = NOTIFY_STOP;
 		break;
 	default:
@@ -95,24 +100,36 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
 static void nmi_cpu_start(void *dummy)
 {
 	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
-	model->start(msrs);
+	if (!msrs->controls)
+		WARN_ON_ONCE(1);
+	else
+		model->start(msrs);
 }
 
 static int nmi_start(void)
 {
+	get_online_cpus();
 	on_each_cpu(nmi_cpu_start, NULL, 1);
+	ctr_running = 1;
+	put_online_cpus();
 	return 0;
 }
 
 static void nmi_cpu_stop(void *dummy)
 {
 	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
-	model->stop(msrs);
+	if (!msrs->controls)
+		WARN_ON_ONCE(1);
+	else
+		model->stop(msrs);
 }
 
 static void nmi_stop(void)
 {
+	get_online_cpus();
 	on_each_cpu(nmi_cpu_stop, NULL, 1);
+	ctr_running = 0;
+	put_online_cpus();
 }
 
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
@@ -252,7 +269,10 @@ static int nmi_switch_event(void)
 	if (nmi_multiplex_on() < 0)
 		return -EINVAL;	/* not necessary */
 
-	on_each_cpu(nmi_cpu_switch, NULL, 1);
+	get_online_cpus();
+	if (ctr_running)
+		on_each_cpu(nmi_cpu_switch, NULL, 1);
+	put_online_cpus();
 
 	return 0;
 }
@@ -344,50 +364,6 @@ static struct notifier_block profile_exceptions_nb = {
 	.priority = 2
 };
 
-static int nmi_setup(void)
-{
-	int err = 0;
-	int cpu;
-
-	if (!allocate_msrs())
-		return -ENOMEM;
-
-	/* We need to serialize save and setup for HT because the subset
-	 * of msrs are distinct for save and setup operations
-	 */
-
-	/* Assume saved/restored counters are the same on all CPUs */
-	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
-	if (err)
-		goto fail;
-
-	for_each_possible_cpu(cpu) {
-		if (!cpu)
-			continue;
-
-		memcpy(per_cpu(cpu_msrs, cpu).counters,
-		       per_cpu(cpu_msrs, 0).counters,
-		       sizeof(struct op_msr) * model->num_counters);
-
-		memcpy(per_cpu(cpu_msrs, cpu).controls,
-		       per_cpu(cpu_msrs, 0).controls,
-		       sizeof(struct op_msr) * model->num_controls);
-
-		mux_clone(cpu);
-	}
-
-	err = register_die_notifier(&profile_exceptions_nb);
-	if (err)
-		goto fail;
-
-	on_each_cpu(nmi_cpu_setup, NULL, 1);
-	nmi_enabled = 1;
-	return 0;
-fail:
-	free_msrs();
-	return err;
-}
-
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
 	struct op_msr *counters = msrs->counters;
@@ -421,19 +397,24 @@ static void nmi_cpu_shutdown(void *dummy)
 	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
 	apic_write(APIC_LVTERR, v);
 	nmi_cpu_restore_registers(msrs);
+	if (model->cpu_down)
+		model->cpu_down();
 }
 
-static void nmi_shutdown(void)
+static void nmi_cpu_up(void *dummy)
 {
-	struct op_msrs *msrs;
+	if (nmi_enabled)
+		nmi_cpu_setup(dummy);
+	if (ctr_running)
+		nmi_cpu_start(dummy);
+}
 
-	nmi_enabled = 0;
-	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
-	unregister_die_notifier(&profile_exceptions_nb);
-	msrs = &get_cpu_var(cpu_msrs);
-	model->shutdown(msrs);
-	free_msrs();
-	put_cpu_var(cpu_msrs);
+static void nmi_cpu_down(void *dummy)
+{
+	if (ctr_running)
+		nmi_cpu_stop(dummy);
+	if (nmi_enabled)
+		nmi_cpu_shutdown(dummy);
 }
 
 static int nmi_create_files(struct super_block *sb, struct dentry *root)
@@ -465,7 +446,6 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
 	return 0;
 }
 
-#ifdef CONFIG_SMP
 static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
 				 void *data)
 {
@@ -473,10 +453,10 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
 	switch (action) {
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
+		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
 		break;
 	case CPU_DOWN_PREPARE:
-		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
+		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
 		break;
 	}
 	return NOTIFY_DONE;
@@ -485,7 +465,75 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
 static struct notifier_block oprofile_cpu_nb = {
 	.notifier_call = oprofile_cpu_notifier
 };
-#endif
+
+static int nmi_setup(void)
+{
+	int err = 0;
+	int cpu;
+
+	if (!allocate_msrs())
+		return -ENOMEM;
+
+	/* We need to serialize save and setup for HT because the subset
+	 * of msrs are distinct for save and setup operations
+	 */
+
+	/* Assume saved/restored counters are the same on all CPUs */
+	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
+	if (err)
+		goto fail;
+
+	for_each_possible_cpu(cpu) {
+		if (!cpu)
+			continue;
+
+		memcpy(per_cpu(cpu_msrs, cpu).counters,
+		       per_cpu(cpu_msrs, 0).counters,
+		       sizeof(struct op_msr) * model->num_counters);
+
+		memcpy(per_cpu(cpu_msrs, cpu).controls,
+		       per_cpu(cpu_msrs, 0).controls,
+		       sizeof(struct op_msr) * model->num_controls);
+
+		mux_clone(cpu);
+	}
+
+	nmi_enabled = 0;
+	ctr_running = 0;
+	barrier();
+	err = register_die_notifier(&profile_exceptions_nb);
+	if (err)
+		goto fail;
+
+	get_online_cpus();
+	register_cpu_notifier(&oprofile_cpu_nb);
+	on_each_cpu(nmi_cpu_setup, NULL, 1);
+	nmi_enabled = 1;
+	put_online_cpus();
+
+	return 0;
+fail:
+	free_msrs();
+	return err;
+}
+
+static void nmi_shutdown(void)
+{
+	struct op_msrs *msrs;
+
+	get_online_cpus();
+	unregister_cpu_notifier(&oprofile_cpu_nb);
+	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
+	nmi_enabled = 0;
+	ctr_running = 0;
+	put_online_cpus();
+	barrier();
+	unregister_die_notifier(&profile_exceptions_nb);
+	msrs = &get_cpu_var(cpu_msrs);
+	model->shutdown(msrs);
+	free_msrs();
+	put_cpu_var(cpu_msrs);
+}
 
 #ifdef CONFIG_PM
 
@@ -695,9 +743,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 		return -ENODEV;
 	}
 
-#ifdef CONFIG_SMP
-	register_cpu_notifier(&oprofile_cpu_nb);
-#endif
 	/* default values, can be overwritten by model */
 	ops->create_files = nmi_create_files;
 	ops->setup = nmi_setup;
@@ -724,12 +769,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
 void op_nmi_exit(void)
 {
-	if (using_nmi) {
+	if (using_nmi)
 		exit_sysfs();
-#ifdef CONFIG_SMP
-		unregister_cpu_notifier(&oprofile_cpu_nb);
-#endif
-	}
-	if (model->exit)
-		model->exit();
 }
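
For reference, the hotplug path added above reduces to two helpers that bring a CPU into line with the global flags: nmi_cpu_up() for CPU_ONLINE/CPU_DOWN_FAILED and nmi_cpu_down() for CPU_DOWN_PREPARE. The following compilable user-space reduction of that dispatch logic uses print stubs in place of the real per-CPU MSR/APIC code; it is a sketch, not the kernel implementation.

/* user-space reduction of the nmi_cpu_up()/nmi_cpu_down() dispatch (stubs, not kernel code) */
#include <stdio.h>

static int nmi_enabled;	/* MSRs are set up, die notifier registered */
static int ctr_running;	/* counters are currently counting */

static void nmi_cpu_setup(void *d)    { (void)d; puts("set up MSRs on this CPU"); }
static void nmi_cpu_start(void *d)    { (void)d; puts("start counters on this CPU"); }
static void nmi_cpu_stop(void *d)     { (void)d; puts("stop counters on this CPU"); }
static void nmi_cpu_shutdown(void *d) { (void)d; puts("restore MSRs/APIC on this CPU"); }

/* CPU_ONLINE / CPU_DOWN_FAILED: bring the CPU up to the global state */
static void nmi_cpu_up(void *dummy)
{
	if (nmi_enabled)
		nmi_cpu_setup(dummy);
	if (ctr_running)
		nmi_cpu_start(dummy);
}

/* CPU_DOWN_PREPARE: undo in the reverse order */
static void nmi_cpu_down(void *dummy)
{
	if (ctr_running)
		nmi_cpu_stop(dummy);
	if (nmi_enabled)
		nmi_cpu_shutdown(dummy);
}

int main(void)
{
	nmi_enabled = 1;
	ctr_running = 1;
	nmi_cpu_up(NULL);	/* a CPU comes online while profiling is active */
	nmi_cpu_down(NULL);	/* the same CPU is about to go offline */
	return 0;
}
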
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 384c52410480..b67a6b5aa8d4 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -374,6 +374,15 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 		val |= op_x86_get_ctrl(model, &counter_config[virt]);
 		wrmsrl(msrs->controls[i].addr, val);
 	}
+
+	if (ibs_caps)
+		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
+}
+
+static void op_amd_cpu_shutdown(void)
+{
+	if (ibs_caps)
+		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
 }
 
 static int op_amd_check_ctrs(struct pt_regs * const regs,
@@ -436,28 +445,16 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 	op_amd_stop_ibs();
 }
 
-static u8 ibs_eilvt_off;
-
-static inline void apic_init_ibs_nmi_per_cpu(void *arg)
-{
-	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
-}
-
-static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
-{
-	setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
-}
-
-static int init_ibs_nmi(void)
+static int __init_ibs_nmi(void)
 {
 #define IBSCTL_LVTOFFSETVAL (1 << 8)
 #define IBSCTL 0x1cc
 	struct pci_dev *cpu_cfg;
 	int nodes;
 	u32 value = 0;
+	u8 ibs_eilvt_off;
 
-	/* per CPU setup */
-	on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);
+	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
 
 	nodes = 0;
 	cpu_cfg = NULL;
@@ -487,21 +484,15 @@ static int init_ibs_nmi(void)
 	return 0;
 }
 
-/* uninitialize the APIC for the IBS interrupts if needed */
-static void clear_ibs_nmi(void)
-{
-	on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
-}
-
 /* initialize the APIC for the IBS interrupts if available */
-static void ibs_init(void)
+static void init_ibs(void)
 {
 	ibs_caps = get_ibs_caps();
 
 	if (!ibs_caps)
 		return;
 
-	if (init_ibs_nmi()) {
+	if (__init_ibs_nmi()) {
 		ibs_caps = 0;
 		return;
 	}
@@ -510,14 +501,6 @@ static void ibs_init(void)
 		(unsigned)ibs_caps);
 }
 
-static void ibs_exit(void)
-{
-	if (!ibs_caps)
-		return;
-
-	clear_ibs_nmi();
-}
-
 static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
 
 static int setup_ibs_files(struct super_block *sb, struct dentry *root)
@@ -566,17 +549,12 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
 
 static int op_amd_init(struct oprofile_operations *ops)
 {
-	ibs_init();
+	init_ibs();
 	create_arch_files = ops->create_files;
 	ops->create_files = setup_ibs_files;
 	return 0;
 }
 
-static void op_amd_exit(void)
-{
-	ibs_exit();
-}
-
 struct op_x86_model_spec op_amd_spec = {
 	.num_counters = NUM_COUNTERS,
 	.num_controls = NUM_COUNTERS,
@@ -584,9 +562,9 @@ struct op_x86_model_spec op_amd_spec = {
 	.reserved = MSR_AMD_EVENTSEL_RESERVED,
 	.event_mask = OP_EVENT_MASK,
 	.init = op_amd_init,
-	.exit = op_amd_exit,
 	.fill_in_addresses = &op_amd_fill_in_addresses,
 	.setup_ctrs = &op_amd_setup_ctrs,
+	.cpu_down = &op_amd_cpu_shutdown,
 	.check_ctrs = &op_amd_check_ctrs,
 	.start = &op_amd_start,
 	.stop = &op_amd_stop,
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 551401398fba..89017fa1fd63 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -40,10 +40,10 @@ struct op_x86_model_spec {
 	u64 reserved;
 	u16 event_mask;
 	int (*init)(struct oprofile_operations *ops);
-	void (*exit)(void);
 	int (*fill_in_addresses)(struct op_msrs * const msrs);
 	void (*setup_ctrs)(struct op_x86_model_spec const *model,
 			   struct op_msrs const * const msrs);
+	void (*cpu_down)(void);
 	int (*check_ctrs)(struct pt_regs * const regs,
 			  struct op_msrs const * const msrs);
 	void (*start)(struct op_msrs const * const msrs);
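
The header hunk above drops the global exit hook and adds an optional per-CPU cpu_down callback, which nmi_cpu_shutdown() only calls when the model provides it. A small, compilable C sketch of that optional-callback pattern follows; the struct and function names are reduced stand-ins for struct op_x86_model_spec and the AMD model, not the real kernel types.

/* sketch of an optional per-CPU teardown hook (stand-in types, not kernel code) */
#include <stdio.h>
#include <stddef.h>

struct model_spec {
	void (*cpu_down)(void);		/* optional per-CPU teardown hook */
};

static void amd_cpu_down(void) { puts("restore IBS APIC vector on this CPU"); }

static const struct model_spec amd_model   = { .cpu_down = amd_cpu_down };
static const struct model_spec plain_model = { .cpu_down = NULL };

static void cpu_shutdown(const struct model_spec *model)
{
	/* ... restore counter MSRs, LVTPC, etc. ... */
	if (model->cpu_down)		/* the hook may be absent */
		model->cpu_down();
}

int main(void)
{
	cpu_shutdown(&amd_model);	/* runs the model-specific teardown */
	cpu_shutdown(&plain_model);	/* skips the missing hook */
	return 0;
}
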