author	Don Zickus <dzickus@redhat.com>	2011-09-30 15:06:21 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-10-10 00:56:57 -0400
commit	9c48f1c629ecfa114850c03f875c6691003214de (patch)
tree	5e454da487c90fc8399ce366aa2437597c9ff5e5
parent	c9126b2ee8adb9235941cedbf558d39a9e65642d (diff)
x86, nmi: Wire up NMI handlers to new routines
Just convert all the files that have an NMI handler to the new routines.
Most of it is straightforward conversion. A couple of places needed some
tweaking, like kgdb, which separates the debug notifier from the NMI handler,
and mce, which removes a call to notify_die.

[Thanks to Ying for finding the history behind that mce call:
https://lkml.org/lkml/2010/5/27/114
and to Boris for responding that he would like to remove the call because of it:
https://lkml.org/lkml/2011/9/21/163]

The things that get converted are the registration/unregistration routines,
and the NMI handler itself has its arguments changed, along with the removal
of the code that checked which list it is on (most are on one NMI list,
except for kgdb, which has both an NMI routine and an NMI Unknown routine).

Signed-off-by: Don Zickus <dzickus@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Corey Minyard <minyard@acm.org>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Corey Minyard <minyard@acm.org>
Cc: Jack Steiner <steiner@sgi.com>
Link: http://lkml.kernel.org/r/1317409584-23662-4-git-send-email-dzickus@redhat.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
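The shape of a typical conversion, shown as a minimal sketch rather than code
from any one file in this patch (foo_handle() is a hypothetical stand-in for
the subsystem's actual NMI work):

/* Before: multiplexed through the die notifier chain. */
static int foo_nmi_notify(struct notifier_block *self,
                          unsigned long cmd, void *data)
{
        struct die_args *args = data;

        if (cmd != DIE_NMI)
                return NOTIFY_DONE;             /* not our event */
        if (!foo_handle(args->regs))            /* hypothetical helper */
                return NOTIFY_DONE;
        return NOTIFY_STOP;
}

static struct notifier_block foo_nmi_nb = {
        .notifier_call = foo_nmi_notify,
        .priority      = NMI_LOCAL_LOW_PRIOR,
};
/* register_die_notifier(&foo_nmi_nb); */

/* After: a dedicated NMI handler, registered by type and name. */
static int foo_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        if (!foo_handle(regs))                  /* hypothetical helper */
                return NMI_DONE;                /* pass to the next handler */
        return NMI_HANDLED;                     /* NMI claimed */
}
/* register_nmi_handler(NMI_LOCAL, foo_nmi_handler, 0, "foo"); */
/* unregister_nmi_handler(NMI_LOCAL, "foo"); */

The cmd/regs arguments replace the die_args indirection, and the type argument
(NMI_LOCAL, NMI_UNKNOWN) replaces each handler's check of which notifier event
fired.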
 arch/x86/include/asm/nmi.h              | 20
 arch/x86/include/asm/reboot.h           |  2
 arch/x86/kernel/apic/hw_nmi.c           | 27
 arch/x86/kernel/apic/x2apic_uv_x.c      | 20
 arch/x86/kernel/cpu/mcheck/mce-inject.c | 20
 arch/x86/kernel/cpu/mcheck/mce.c        |  3
 arch/x86/kernel/cpu/perf_event.c        | 69
 arch/x86/kernel/crash.c                 |  5
 arch/x86/kernel/kgdb.c                  | 60
 arch/x86/kernel/nmi.c                   | 11
 arch/x86/kernel/reboot.c                | 23
 arch/x86/oprofile/nmi_int.c             | 38
 arch/x86/oprofile/nmi_timer_int.c       | 28
 drivers/acpi/apei/ghes.c                | 22
 drivers/char/ipmi/ipmi_watchdog.c       | 33
 drivers/watchdog/hpwdt.c                | 24
 16 files changed, 124 insertions(+), 281 deletions(-)
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 480b69b75756..53610957feaf 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -22,26 +22,6 @@ void arch_trigger_all_cpu_backtrace(void);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
-/*
- * Define some priorities for the nmi notifier call chain.
- *
- * Create a local nmi bit that has a higher priority than
- * external nmis, because the local ones are more frequent.
- *
- * Also setup some default high/normal/low settings for
- * subsystems to registers with. Using 4 bits to separate
- * the priorities. This can go a lot higher if needed be.
- */
-
-#define NMI_LOCAL_SHIFT         16      /* randomly picked */
-#define NMI_LOCAL_BIT           (1ULL << NMI_LOCAL_SHIFT)
-#define NMI_HIGH_PRIOR          (1ULL << 8)
-#define NMI_NORMAL_PRIOR        (1ULL << 4)
-#define NMI_LOW_PRIOR           (1ULL << 0)
-#define NMI_LOCAL_HIGH_PRIOR    (NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
-#define NMI_LOCAL_NORMAL_PRIOR  (NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
-#define NMI_LOCAL_LOW_PRIOR     (NMI_LOCAL_BIT | NMI_LOW_PRIOR)
-
 #define NMI_FLAG_FIRST 1
 
 enum {
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 3250e3d605d9..92f297069e87 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -23,7 +23,7 @@ void machine_real_restart(unsigned int type);
 #define MRR_BIOS 0
 #define MRR_APM 1
 
-typedef void (*nmi_shootdown_cb)(int, struct die_args*);
+typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
 
 #endif /* _ASM_X86_REBOOT_H */
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index d5e57db0f7be..31cb9ae992b7 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -60,22 +60,10 @@ void arch_trigger_all_cpu_backtrace(void)
 }
 
 static int __kprobes
-arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
-                                       unsigned long cmd, void *__args)
+arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
 {
-        struct die_args *args = __args;
-        struct pt_regs *regs;
         int cpu;
 
-        switch (cmd) {
-        case DIE_NMI:
-                break;
-
-        default:
-                return NOTIFY_DONE;
-        }
-
-        regs = args->regs;
         cpu = smp_processor_id();
 
         if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
@@ -86,21 +74,16 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
                 show_regs(regs);
                 arch_spin_unlock(&lock);
                 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
-                return NOTIFY_STOP;
+                return NMI_HANDLED;
         }
 
-        return NOTIFY_DONE;
+        return NMI_DONE;
 }
 
-static __read_mostly struct notifier_block backtrace_notifier = {
-        .notifier_call = arch_trigger_all_cpu_backtrace_handler,
-        .next = NULL,
-        .priority = NMI_LOCAL_LOW_PRIOR,
-};
-
 static int __init register_trigger_all_cpu_backtrace(void)
 {
-        register_die_notifier(&backtrace_notifier);
+        register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
+                             0, "arch_bt");
         return 0;
 }
 early_initcall(register_trigger_all_cpu_backtrace);
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 34b18594e724..75be00ecfff2 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -672,18 +672,11 @@ void __cpuinit uv_cpu_init(void)
 /*
  * When NMI is received, print a stack trace.
  */
-int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
+int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 {
         unsigned long real_uv_nmi;
         int bid;
 
-        if (reason != DIE_NMIUNKNOWN)
-                return NOTIFY_OK;
-
-        if (in_crash_kexec)
-                /* do nothing if entering the crash kernel */
-                return NOTIFY_OK;
-
         /*
          * Each blade has an MMR that indicates when an NMI has been sent
          * to cpus on the blade. If an NMI is detected, atomically
@@ -704,7 +697,7 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
         }
 
         if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
-                return NOTIFY_DONE;
+                return NMI_DONE;
 
         __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
 
@@ -717,17 +710,12 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
         dump_stack();
         spin_unlock(&uv_nmi_lock);
 
-        return NOTIFY_STOP;
+        return NMI_HANDLED;
 }
 
-static struct notifier_block uv_dump_stack_nmi_nb = {
-        .notifier_call = uv_handle_nmi,
-        .priority = NMI_LOCAL_LOW_PRIOR - 1,
-};
-
 void uv_register_nmi_notifier(void)
 {
-        if (register_die_notifier(&uv_dump_stack_nmi_nb))
+        if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
                 printk(KERN_WARNING "UV NMI handler failed to register\n");
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 0ed633c5048b..6199232161cf 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -78,27 +78,20 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
 
 static cpumask_var_t mce_inject_cpumask;
 
-static int mce_raise_notify(struct notifier_block *self,
-                            unsigned long val, void *data)
+static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 {
-        struct die_args *args = (struct die_args *)data;
         int cpu = smp_processor_id();
         struct mce *m = &__get_cpu_var(injectm);
-        if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
-                return NOTIFY_DONE;
+        if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
+                return NMI_DONE;
         cpumask_clear_cpu(cpu, mce_inject_cpumask);
         if (m->inject_flags & MCJ_EXCEPTION)
-                raise_exception(m, args->regs);
+                raise_exception(m, regs);
         else if (m->status)
                 raise_poll(m);
-        return NOTIFY_STOP;
+        return NMI_HANDLED;
 }
 
-static struct notifier_block mce_raise_nb = {
-        .notifier_call = mce_raise_notify,
-        .priority = NMI_LOCAL_NORMAL_PRIOR,
-};
-
 /* Inject mce on current CPU */
 static int raise_local(void)
 {
@@ -216,7 +209,8 @@ static int inject_init(void)
                 return -ENOMEM;
         printk(KERN_INFO "Machine check injector initialized\n");
         mce_chrdev_ops.write = mce_write;
-        register_die_notifier(&mce_raise_nb);
+        register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
+                             "mce_notify");
         return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5b5cceec94c5..fce51ad1f362 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -908,9 +908,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
         percpu_inc(mce_exception_count);
 
-        if (notify_die(DIE_NMI, "machine check", regs, error_code,
-                       18, SIGKILL) == NOTIFY_STOP)
-                goto out;
         if (!banks)
                 goto out;
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8ab89112f93c..640891014b2a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1058,76 +1058,15 @@ void perf_events_lapic_init(void)
         apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
-struct pmu_nmi_state {
-        unsigned int marked;
-        int handled;
-};
-
-static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
-
 static int __kprobes
-perf_event_nmi_handler(struct notifier_block *self,
-                       unsigned long cmd, void *__args)
+perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
-        struct die_args *args = __args;
-        unsigned int this_nmi;
-        int handled;
-
         if (!atomic_read(&active_events))
-                return NOTIFY_DONE;
-
-        switch (cmd) {
-        case DIE_NMI:
-                break;
-        case DIE_NMIUNKNOWN:
-                this_nmi = percpu_read(irq_stat.__nmi_count);
-                if (this_nmi != __this_cpu_read(pmu_nmi.marked))
-                        /* let the kernel handle the unknown nmi */
-                        return NOTIFY_DONE;
-                /*
-                 * This one is a PMU back-to-back nmi. Two events
-                 * trigger 'simultaneously' raising two back-to-back
-                 * NMIs. If the first NMI handles both, the latter
-                 * will be empty and daze the CPU. So, we drop it to
-                 * avoid false-positive 'unknown nmi' messages.
-                 */
-                return NOTIFY_STOP;
-        default:
-                return NOTIFY_DONE;
-        }
-
-        handled = x86_pmu.handle_irq(args->regs);
-        if (!handled)
-                return NOTIFY_DONE;
+                return NMI_DONE;
 
-        this_nmi = percpu_read(irq_stat.__nmi_count);
-        if ((handled > 1) ||
-                /* the next nmi could be a back-to-back nmi */
-                ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
-                 (__this_cpu_read(pmu_nmi.handled) > 1))) {
-                /*
-                 * We could have two subsequent back-to-back nmis: The
-                 * first handles more than one counter, the 2nd
-                 * handles only one counter and the 3rd handles no
-                 * counter.
-                 *
-                 * This is the 2nd nmi because the previous was
-                 * handling more than one counter. We will mark the
-                 * next (3rd) and then drop it if unhandled.
-                 */
-                __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
-                __this_cpu_write(pmu_nmi.handled, handled);
-        }
-
-        return NOTIFY_STOP;
+        return x86_pmu.handle_irq(regs);
 }
 
-static __read_mostly struct notifier_block perf_event_nmi_notifier = {
-        .notifier_call = perf_event_nmi_handler,
-        .next = NULL,
-        .priority = NMI_LOCAL_LOW_PRIOR,
-};
-
 struct event_constraint emptyconstraint;
 struct event_constraint unconstrained;
 
@@ -1232,7 +1171,7 @@ static int __init init_hw_perf_events(void)
                 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
         perf_events_lapic_init();
-        register_die_notifier(&perf_event_nmi_notifier);
+        register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
 
         unconstrained = (struct event_constraint)
                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 764c7c2b1811..13ad89971d47 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -32,15 +32,12 @@ int in_crash_kexec;
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
-static void kdump_nmi_callback(int cpu, struct die_args *args)
+static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 {
-        struct pt_regs *regs;
 #ifdef CONFIG_X86_32
         struct pt_regs fixed_regs;
 #endif
 
-        regs = args->regs;
-
 #ifdef CONFIG_X86_32
         if (!user_mode_vm(regs)) {
                 crash_fixup_ss_esp(&fixed_regs, regs);
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 00354d4919a9..faba5771acad 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -511,28 +511,37 @@ single_step_cont(struct pt_regs *regs, struct die_args *args)
 
 static int was_in_debug_nmi[NR_CPUS];
 
-static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
-        struct pt_regs *regs = args->regs;
-
         switch (cmd) {
-        case DIE_NMI:
+        case NMI_LOCAL:
                 if (atomic_read(&kgdb_active) != -1) {
                         /* KGDB CPU roundup */
                         kgdb_nmicallback(raw_smp_processor_id(), regs);
                         was_in_debug_nmi[raw_smp_processor_id()] = 1;
                         touch_nmi_watchdog();
-                        return NOTIFY_STOP;
+                        return NMI_HANDLED;
                 }
-                return NOTIFY_DONE;
+                break;
 
-        case DIE_NMIUNKNOWN:
+        case NMI_UNKNOWN:
                 if (was_in_debug_nmi[raw_smp_processor_id()]) {
                         was_in_debug_nmi[raw_smp_processor_id()] = 0;
-                        return NOTIFY_STOP;
+                        return NMI_HANDLED;
                 }
-                return NOTIFY_DONE;
+                break;
+        default:
+                /* do nothing */
+                break;
+        }
+        return NMI_DONE;
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+        struct pt_regs *regs = args->regs;
 
+        switch (cmd) {
         case DIE_DEBUG:
                 if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
                         if (user_mode(regs))
@@ -590,11 +599,6 @@ kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
 
 static struct notifier_block kgdb_notifier = {
         .notifier_call = kgdb_notify,
-
-        /*
-         * Lowest-prio notifier priority, we want to be notified last:
-         */
-        .priority = NMI_LOCAL_LOW_PRIOR,
 };
 
 /**
@@ -605,7 +609,31 @@ static struct notifier_block kgdb_notifier = {
  */
 int kgdb_arch_init(void)
 {
-        return register_die_notifier(&kgdb_notifier);
+        int retval;
+
+        retval = register_die_notifier(&kgdb_notifier);
+        if (retval)
+                goto out;
+
+        retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
+                                      0, "kgdb");
+        if (retval)
+                goto out1;
+
+        retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
+                                      0, "kgdb");
+
+        if (retval)
+                goto out2;
+
+        return retval;
+
+out2:
+        unregister_nmi_handler(NMI_LOCAL, "kgdb");
+out1:
+        unregister_die_notifier(&kgdb_notifier);
+out:
+        return retval;
 }
 
 static void kgdb_hw_overflow_handler(struct perf_event *event,
@@ -673,6 +701,8 @@ void kgdb_arch_exit(void)
                         breakinfo[i].pev = NULL;
                 }
         }
+        unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
+        unregister_nmi_handler(NMI_LOCAL, "kgdb");
         unregister_die_notifier(&kgdb_notifier);
 }
 
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 327748d4f6b0..e20f5e790599 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 1991, 1992 Linus Torvalds
  * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ * Copyright (C) 2011 Don Zickus Red Hat, Inc.
  *
  * Pentium III FXSR, SSE support
  * Gareth Hughes <gareth@valinux.com>, May 2000
@@ -248,8 +249,10 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
 static notrace __kprobes void
 unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 {
-        if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
-                        NOTIFY_STOP)
+        int handled;
+
+        handled = nmi_handle(NMI_UNKNOWN, regs);
+        if (handled)
                 return;
 #ifdef CONFIG_MCA
         /*
@@ -274,13 +277,15 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
         unsigned char reason = 0;
+        int handled;
 
         /*
          * CPU-specific NMI must be processed before non-CPU-specific
          * NMI, otherwise we may lose it, because the CPU-specific
          * NMI can not be detected/processed on other CPUs.
          */
-        if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
+        handled = nmi_handle(NMI_LOCAL, regs);
+        if (handled)
                 return;
 
         /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 9242436e9937..e334be1182b9 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -464,7 +464,7 @@ static inline void kb_wait(void)
         }
 }
 
-static void vmxoff_nmi(int cpu, struct die_args *args)
+static void vmxoff_nmi(int cpu, struct pt_regs *regs)
 {
         cpu_emergency_vmxoff();
 }
@@ -736,14 +736,10 @@ static nmi_shootdown_cb shootdown_callback;
 
 static atomic_t waiting_for_crash_ipi;
 
-static int crash_nmi_callback(struct notifier_block *self,
-                              unsigned long val, void *data)
+static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
 {
         int cpu;
 
-        if (val != DIE_NMI)
-                return NOTIFY_OK;
-
         cpu = raw_smp_processor_id();
 
         /* Don't do anything if this handler is invoked on crashing cpu.
@@ -751,10 +747,10 @@ static int crash_nmi_callback(struct notifier_block *self,
          * an NMI if system was initially booted with nmi_watchdog parameter.
          */
         if (cpu == crashing_cpu)
-                return NOTIFY_STOP;
+                return NMI_HANDLED;
         local_irq_disable();
 
-        shootdown_callback(cpu, (struct die_args *)data);
+        shootdown_callback(cpu, regs);
 
         atomic_dec(&waiting_for_crash_ipi);
         /* Assume hlt works */
@@ -762,7 +758,7 @@ static int crash_nmi_callback(struct notifier_block *self,
         for (;;)
                 cpu_relax();
 
-        return 1;
+        return NMI_HANDLED;
 }
 
 static void smp_send_nmi_allbutself(void)
@@ -770,12 +766,6 @@ static void smp_send_nmi_allbutself(void)
         apic->send_IPI_allbutself(NMI_VECTOR);
 }
 
-static struct notifier_block crash_nmi_nb = {
-        .notifier_call = crash_nmi_callback,
-        /* we want to be the first one called */
-        .priority = NMI_LOCAL_HIGH_PRIOR+1,
-};
-
 /* Halt all other CPUs, calling the specified function on each of them
  *
  * This function can be used to halt all other CPUs on crash
@@ -794,7 +784,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
 
         atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
         /* Would it be better to replace the trap vector here? */
-        if (register_die_notifier(&crash_nmi_nb))
+        if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
+                                 NMI_FLAG_FIRST, "crash"))
                 return;         /* return what? */
         /* Ensure the new callback function is set before sending
          * out the NMI
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 68894fdc034b..adf8fb31aa7b 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -61,26 +61,15 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
 }
 
 
-static int profile_exceptions_notify(struct notifier_block *self,
-                                     unsigned long val, void *data)
+static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
 {
-        struct die_args *args = (struct die_args *)data;
-        int ret = NOTIFY_DONE;
-
-        switch (val) {
-        case DIE_NMI:
-                if (ctr_running)
-                        model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
-                else if (!nmi_enabled)
-                        break;
-                else
-                        model->stop(&__get_cpu_var(cpu_msrs));
-                ret = NOTIFY_STOP;
-                break;
-        default:
-                break;
-        }
-        return ret;
+        if (ctr_running)
+                model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
+        else if (!nmi_enabled)
+                return NMI_DONE;
+        else
+                model->stop(&__get_cpu_var(cpu_msrs));
+        return NMI_HANDLED;
 }
 
 static void nmi_cpu_save_registers(struct op_msrs *msrs)
@@ -363,12 +352,6 @@ static void nmi_cpu_setup(void *dummy)
         apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
-static struct notifier_block profile_exceptions_nb = {
-        .notifier_call = profile_exceptions_notify,
-        .next = NULL,
-        .priority = NMI_LOCAL_LOW_PRIOR,
-};
-
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
         struct op_msr *counters = msrs->counters;
@@ -508,7 +491,8 @@ static int nmi_setup(void)
         ctr_running = 0;
         /* make variables visible to the nmi handler: */
         smp_mb();
-        err = register_die_notifier(&profile_exceptions_nb);
+        err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
+                                   0, "oprofile");
         if (err)
                 goto fail;
 
@@ -538,7 +522,7 @@ static void nmi_shutdown(void)
         put_online_cpus();
         /* make variables visible to the nmi handler: */
         smp_mb();
-        unregister_die_notifier(&profile_exceptions_nb);
+        unregister_nmi_handler(NMI_LOCAL, "oprofile");
         msrs = &get_cpu_var(cpu_msrs);
         model->shutdown(msrs);
         free_msrs();
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
index 720bf5a53c51..7f8052cd6620 100644
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ b/arch/x86/oprofile/nmi_timer_int.c
@@ -18,32 +18,16 @@
 #include <asm/apic.h>
 #include <asm/ptrace.h>
 
-static int profile_timer_exceptions_notify(struct notifier_block *self,
-                                           unsigned long val, void *data)
+static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs)
 {
-        struct die_args *args = (struct die_args *)data;
-        int ret = NOTIFY_DONE;
-
-        switch (val) {
-        case DIE_NMI:
-                oprofile_add_sample(args->regs, 0);
-                ret = NOTIFY_STOP;
-                break;
-        default:
-                break;
-        }
-        return ret;
+        oprofile_add_sample(regs, 0);
+        return NMI_HANDLED;
 }
 
-static struct notifier_block profile_timer_exceptions_nb = {
-        .notifier_call = profile_timer_exceptions_notify,
-        .next = NULL,
-        .priority = NMI_LOW_PRIOR,
-};
-
 static int timer_start(void)
 {
-        if (register_die_notifier(&profile_timer_exceptions_nb))
+        if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify,
+                                 0, "oprofile-timer"))
                 return 1;
         return 0;
 }
@@ -51,7 +35,7 @@ static int timer_start(void)
 
 static void timer_stop(void)
 {
-        unregister_die_notifier(&profile_timer_exceptions_nb);
+        unregister_nmi_handler(NMI_LOCAL, "oprofile-timer");
         synchronize_sched();  /* Allow already-started NMIs to complete. */
 }
 
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0784f99a4665..b8e08cb67a18 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -50,6 +50,7 @@
 #include <acpi/hed.h>
 #include <asm/mce.h>
 #include <asm/tlbflush.h>
+#include <asm/nmi.h>
 
 #include "apei-internal.h"
 
@@ -749,15 +750,11 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
         }
 }
 
-static int ghes_notify_nmi(struct notifier_block *this,
-                           unsigned long cmd, void *data)
+static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 {
         struct ghes *ghes, *ghes_global = NULL;
         int sev, sev_global = -1;
-        int ret = NOTIFY_DONE;
-
-        if (cmd != DIE_NMI)
-                return ret;
+        int ret = NMI_DONE;
 
         raw_spin_lock(&ghes_nmi_lock);
         list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
@@ -770,10 +767,10 @@ static int ghes_notify_nmi(struct notifier_block *this,
                         sev_global = sev;
                         ghes_global = ghes;
                 }
-                ret = NOTIFY_STOP;
+                ret = NMI_HANDLED;
         }
 
-        if (ret == NOTIFY_DONE)
+        if (ret == NMI_DONE)
                 goto out;
 
         if (sev_global >= GHES_SEV_PANIC) {
@@ -825,10 +822,6 @@ static struct notifier_block ghes_notifier_sci = {
         .notifier_call = ghes_notify_sci,
 };
 
-static struct notifier_block ghes_notifier_nmi = {
-        .notifier_call = ghes_notify_nmi,
-};
-
 static unsigned long ghes_esource_prealloc_size(
         const struct acpi_hest_generic *generic)
 {
@@ -918,7 +911,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
                 ghes_estatus_pool_expand(len);
                 mutex_lock(&ghes_list_mutex);
                 if (list_empty(&ghes_nmi))
-                        register_die_notifier(&ghes_notifier_nmi);
+                        register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
+                                             "ghes");
                 list_add_rcu(&ghes->list, &ghes_nmi);
                 mutex_unlock(&ghes_list_mutex);
                 break;
@@ -964,7 +958,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
         mutex_lock(&ghes_list_mutex);
         list_del_rcu(&ghes->list);
         if (list_empty(&ghes_nmi))
-                unregister_die_notifier(&ghes_notifier_nmi);
+                unregister_nmi_handler(NMI_LOCAL, "ghes");
         mutex_unlock(&ghes_list_mutex);
         /*
          * To synchronize with NMI handler, ghes can only be
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 3302586655c4..c2917ffad2c2 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -65,6 +65,7 @@
  * mechanism for it at that time.
  */
 #include <asm/kdebug.h>
+#include <asm/nmi.h>
 #define HAVE_DIE_NMI
 #endif
 
@@ -1077,17 +1078,8 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
 
 #ifdef HAVE_DIE_NMI
 static int
-ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
+ipmi_nmi(unsigned int val, struct pt_regs *regs)
 {
-        struct die_args *args = data;
-
-        if (val != DIE_NMIUNKNOWN)
-                return NOTIFY_OK;
-
-        /* Hack, if it's a memory or I/O error, ignore it. */
-        if (args->err & 0xc0)
-                return NOTIFY_OK;
-
         /*
          * If we get here, it's an NMI that's not a memory or I/O
          * error. We can't truly tell if it's from IPMI or not
@@ -1097,15 +1089,15 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
 
         if (testing_nmi) {
                 testing_nmi = 2;
-                return NOTIFY_STOP;
+                return NMI_HANDLED;
         }
 
         /* If we are not expecting a timeout, ignore it. */
         if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
-                return NOTIFY_OK;
+                return NMI_DONE;
 
         if (preaction_val != WDOG_PRETIMEOUT_NMI)
-                return NOTIFY_OK;
+                return NMI_DONE;
 
         /*
          * If no one else handled the NMI, we assume it was the IPMI
@@ -1120,12 +1112,8 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
                 panic(PFX "pre-timeout");
         }
 
-        return NOTIFY_STOP;
+        return NMI_HANDLED;
 }
-
-static struct notifier_block ipmi_nmi_handler = {
-        .notifier_call = ipmi_nmi
-};
 #endif
 
 static int wdog_reboot_handler(struct notifier_block *this,
@@ -1290,7 +1278,8 @@ static void check_parms(void)
                 }
         }
         if (do_nmi && !nmi_handler_registered) {
-                rv = register_die_notifier(&ipmi_nmi_handler);
+                rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0,
+                                          "ipmi");
                 if (rv) {
                         printk(KERN_WARNING PFX
                                "Can't register nmi handler\n");
@@ -1298,7 +1287,7 @@ static void check_parms(void)
                 } else
                         nmi_handler_registered = 1;
         } else if (!do_nmi && nmi_handler_registered) {
-                unregister_die_notifier(&ipmi_nmi_handler);
+                unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
                 nmi_handler_registered = 0;
         }
 #endif
@@ -1336,7 +1325,7 @@ static int __init ipmi_wdog_init(void)
         if (rv) {
 #ifdef HAVE_DIE_NMI
                 if (nmi_handler_registered)
-                        unregister_die_notifier(&ipmi_nmi_handler);
+                        unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
 #endif
                 atomic_notifier_chain_unregister(&panic_notifier_list,
                                                  &wdog_panic_notifier);
@@ -1357,7 +1346,7 @@ static void __exit ipmi_wdog_exit(void)
 
 #ifdef HAVE_DIE_NMI
         if (nmi_handler_registered)
-                unregister_die_notifier(&ipmi_nmi_handler);
+                unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
 #endif
 
         atomic_notifier_chain_unregister(&panic_notifier_list,
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 809cbda03d7a..7e7feac05221 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -477,15 +477,11 @@ static int hpwdt_time_left(void)
477/* 477/*
478 * NMI Handler 478 * NMI Handler
479 */ 479 */
480static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason, 480static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
481 void *data)
482{ 481{
483 unsigned long rom_pl; 482 unsigned long rom_pl;
484 static int die_nmi_called; 483 static int die_nmi_called;
485 484
486 if (ulReason != DIE_NMIUNKNOWN)
487 goto out;
488
489 if (!hpwdt_nmi_decoding) 485 if (!hpwdt_nmi_decoding)
490 goto out; 486 goto out;
491 487
@@ -508,7 +504,7 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
508 "Management Log for details.\n"); 504 "Management Log for details.\n");
509 505
510out: 506out:
511 return NOTIFY_OK; 507 return NMI_DONE;
512} 508}
513#endif /* CONFIG_HPWDT_NMI_DECODING */ 509#endif /* CONFIG_HPWDT_NMI_DECODING */
514 510
@@ -648,13 +644,6 @@ static struct miscdevice hpwdt_miscdev = {
         .fops = &hpwdt_fops,
 };
 
-#ifdef CONFIG_HPWDT_NMI_DECODING
-static struct notifier_block die_notifier = {
-        .notifier_call = hpwdt_pretimeout,
-        .priority = 0,
-};
-#endif /* CONFIG_HPWDT_NMI_DECODING */
-
 /*
  * Init & Exit
  */
@@ -740,10 +729,9 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
          * die notify list to handle a critical NMI. The default is to
          * be last so other users of the NMI signal can function.
          */
-        if (priority)
-                die_notifier.priority = 0x7FFFFFFF;
-
-        retval = register_die_notifier(&die_notifier);
+        retval = register_nmi_handler(NMI_UNKNOWN, hpwdt_pretimeout,
+                                      (priority) ? NMI_FLAG_FIRST : 0,
+                                      "hpwdt");
         if (retval != 0) {
                 dev_warn(&dev->dev,
                          "Unable to register a die notifier (err=%d).\n",
@@ -763,7 +751,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
 
 static void hpwdt_exit_nmi_decoding(void)
 {
-        unregister_die_notifier(&die_notifier);
+        unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
         if (cru_rom_addr)
                 iounmap(cru_rom_addr);
 }