author		Don Zickus <dzickus@redhat.com>		2011-09-30 15:06:21 -0400
committer	Ingo Molnar <mingo@elte.hu>		2011-10-10 00:56:57 -0400
commit		9c48f1c629ecfa114850c03f875c6691003214de (patch)
tree		5e454da487c90fc8399ce366aa2437597c9ff5e5 /arch/x86/kernel/cpu/perf_event.c
parent		c9126b2ee8adb9235941cedbf558d39a9e65642d (diff)
x86, nmi: Wire up NMI handlers to new routines
Just convert all the files that have an nmi handler to the new routines.
Most of it is a straightforward conversion. A couple of places needed some
tweaking, like kgdb, which separates the debug notifier from the nmi handler,
and mce, which removes a call to notify_die.
[Thanks to Ying for digging up the history behind that mce call:
https://lkml.org/lkml/2010/5/27/114
and to Boris for responding that he would like to remove the call because of it:
https://lkml.org/lkml/2011/9/21/163]
The things that get converted are the registration/unregistration routines,
and the nmi handler itself has its arguments changed, along with removal of
the code that checks which list it is on (most handlers are on one NMI list,
except for kgdb, which has both an NMI routine and an NMI-unknown routine).
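For reference, the shape of the conversion as it shows up in the perf diff
below: a die-notifier handler takes (self, cmd, __args) and returns NOTIFY_*
codes, while a new-style handler takes (cmd, regs) directly, returns
NMI_HANDLED/NMI_DONE, and is registered on an explicit NMI list. A minimal
before/after sketch; the handler/notifier names and "mydev" are illustrative,
only the API calls and types come from this patch:

/* Old style: hung off the die notifier chain (names here are illustrative). */
static int my_nmi_handler_old(struct notifier_block *self,
			      unsigned long cmd, void *__args)
{
	struct die_args *args = __args;

	if (cmd != DIE_NMI)
		return NOTIFY_DONE;	/* not ours, keep walking the chain */
	/* ... handle the NMI using args->regs ... */
	return NOTIFY_STOP;		/* handled, stop the chain */
}

static __read_mostly struct notifier_block my_nmi_notifier = {
	.notifier_call	= my_nmi_handler_old,
	.priority	= NMI_LOCAL_LOW_PRIOR,
};
/* ... register_die_notifier(&my_nmi_notifier); ... */

/* New style: registered directly on one NMI list. */
static int my_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	/* ... handle the NMI using regs ... */
	return NMI_HANDLED;		/* NMI_DONE if it was not ours */
}
/* ... register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "mydev"); ... */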
Signed-off-by: Don Zickus <dzickus@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Corey Minyard <minyard@acm.org>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Corey Minyard <minyard@acm.org>
Cc: Jack Steiner <steiner@sgi.com>
Link: http://lkml.kernel.org/r/1317409584-23662-4-git-send-email-dzickus@redhat.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	69
1 file changed, 4 insertions(+), 65 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8ab89112f93c..640891014b2a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1058,76 +1058,15 @@ void perf_events_lapic_init(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
-struct pmu_nmi_state {
-	unsigned int	marked;
-	int		handled;
-};
-
-static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
-
 static int __kprobes
-perf_event_nmi_handler(struct notifier_block *self,
-			 unsigned long cmd, void *__args)
+perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
-	struct die_args *args = __args;
-	unsigned int this_nmi;
-	int handled;
-
 	if (!atomic_read(&active_events))
-		return NOTIFY_DONE;
-
-	switch (cmd) {
-	case DIE_NMI:
-		break;
-	case DIE_NMIUNKNOWN:
-		this_nmi = percpu_read(irq_stat.__nmi_count);
-		if (this_nmi != __this_cpu_read(pmu_nmi.marked))
-			/* let the kernel handle the unknown nmi */
-			return NOTIFY_DONE;
-		/*
-		 * This one is a PMU back-to-back nmi. Two events
-		 * trigger 'simultaneously' raising two back-to-back
-		 * NMIs. If the first NMI handles both, the latter
-		 * will be empty and daze the CPU. So, we drop it to
-		 * avoid false-positive 'unknown nmi' messages.
-		 */
-		return NOTIFY_STOP;
-	default:
-		return NOTIFY_DONE;
-	}
-
-	handled = x86_pmu.handle_irq(args->regs);
-	if (!handled)
-		return NOTIFY_DONE;
+		return NMI_DONE;
 
-	this_nmi = percpu_read(irq_stat.__nmi_count);
-	if ((handled > 1) ||
-		/* the next nmi could be a back-to-back nmi */
-	    ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
-	     (__this_cpu_read(pmu_nmi.handled) > 1))) {
-		/*
-		 * We could have two subsequent back-to-back nmis: The
-		 * first handles more than one counter, the 2nd
-		 * handles only one counter and the 3rd handles no
-		 * counter.
-		 *
-		 * This is the 2nd nmi because the previous was
-		 * handling more than one counter. We will mark the
-		 * next (3rd) and then drop it if unhandled.
-		 */
-		__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
-		__this_cpu_write(pmu_nmi.handled, handled);
-	}
-
-	return NOTIFY_STOP;
+	return x86_pmu.handle_irq(regs);
 }
 
-static __read_mostly struct notifier_block perf_event_nmi_notifier = {
-	.notifier_call		= perf_event_nmi_handler,
-	.next			= NULL,
-	.priority		= NMI_LOCAL_LOW_PRIOR,
-};
-
 struct event_constraint emptyconstraint;
 struct event_constraint unconstrained;
 
@@ -1232,7 +1171,7 @@ static int __init init_hw_perf_events(void)
 		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
 	perf_events_lapic_init();
-	register_die_notifier(&perf_event_nmi_notifier);
+	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
 
 	unconstrained = (struct event_constraint)
 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
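For completeness: the new registration is keyed by NMI type and name, so a
teardown counterpart would look roughly like the line below, assuming the
unregister_nmi_handler() helper introduced earlier in this series. Perf itself
never unregisters; its handler stays registered for the life of the system.

	/* hypothetical teardown, not part of this patch */
	unregister_nmi_handler(NMI_LOCAL, "PMI");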