diff options
author | Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> | 2009-08-06 17:51:57 -0400 |
---|---|---|
committer | Tony Luck <tony.luck@intel.com> | 2009-09-14 19:18:54 -0400 |
commit | 1726b0883dd08636705ea55d577eb0ec314ba427 (patch) | |
tree | 9fea054fe26a2849865017fb5163aa944315e1b9 /arch/ia64/kernel | |
parent | 68cb14c7c46d9204ba451a534f15a8bc12c88e28 (diff) |
[IA64] kdump: Mask INIT first in panic-kdump path
Summary:
Asserting INIT might block kdump if the system is already going to
start kdump via panic.
Description:
INIT can interrupt anywhere in the panic path, so it can interrupt in the
middle of a kdump kicked by panic. Therefore there is a race if kdump
is kicked concurrently, via panic and via INIT.
INIT could fail to invoke kdump if the system is already going to
start kdump via panic. It cannot restart kdump from the INIT handler
if some of the cpus are already playing dead with INIT masked. It also
means that INIT could block kdump's progress if no monarch enters
the INIT rendezvous.
Panic+INIT is a rare, but possible situation since it can be assumed
that the kernel or an internal agent decides to panic the unstable
system while another external agent decides to send an INIT to the
system at same time.
How to reproduce:
Assert INIT just after panic, before all other cpus have frozen
Expected results:
continue kdump invoked by panic, or restart kdump from INIT
Actual results:
might be hang, crashdump not retrieved
Proposed Fix:
This patch masks INIT first in panic path to take the initiative on
kdump, and reuse atomic value kdump_in_progress to make sure there is
only one initiator of kdump. All INITs asserted later should be used
only for freezing all other cpus.
This mask will be removed soon by rfi in relocate_kernel.S, before jumping
into the kdump kernel, after all cpus are frozen and the no-op INIT handler is
registered. So if an INIT arrives in the interval while it is masked, it will
pend on the system, be received just after the rfi, and be handled by
the no-op handler.
If there was an MCA event while psr.mc is 1, in theory the event will
pend on the system and will be received just after the rfi, the same as above.
The MCA handler is unregistered here at that time, so a received MCA will not
reach OS_MCA and will result in a warmboot by SAL.
Note that the code in this masked interval is relatively simpler than
that in the MCA/INIT handler, which is also executed with the mask. So it can
be said that the probability of error in this interval is presumably no
higher than that in the MCA/INIT handler.
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Haren Myneni <hbabu@us.ibm.com>
Cc: kexec@lists.infradead.org
Acked-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r-- | arch/ia64/kernel/crash.c | 47 | ||||
-rw-r--r-- | arch/ia64/kernel/relocate_kernel.S | 2 |
2 files changed, 42 insertions, 7 deletions
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index b2a8b3da8af3..9c851b73f276 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c | |||
@@ -23,6 +23,7 @@ | |||
23 | int kdump_status[NR_CPUS]; | 23 | int kdump_status[NR_CPUS]; |
24 | static atomic_t kdump_cpu_frozen; | 24 | static atomic_t kdump_cpu_frozen; |
25 | atomic_t kdump_in_progress; | 25 | atomic_t kdump_in_progress; |
26 | static int kdump_freeze_monarch; | ||
26 | static int kdump_on_init = 1; | 27 | static int kdump_on_init = 1; |
27 | static int kdump_on_fatal_mca = 1; | 28 | static int kdump_on_fatal_mca = 1; |
28 | 29 | ||
@@ -108,6 +109,33 @@ machine_crash_shutdown(struct pt_regs *pt) | |||
108 | */ | 109 | */ |
109 | kexec_disable_iosapic(); | 110 | kexec_disable_iosapic(); |
110 | #ifdef CONFIG_SMP | 111 | #ifdef CONFIG_SMP |
112 | /* | ||
113 | * If kdump_on_init is set and an INIT is asserted here, kdump will | ||
114 | * be started again via INIT monarch. | ||
115 | */ | ||
116 | local_irq_disable(); | ||
117 | ia64_set_psr_mc(); /* mask MCA/INIT */ | ||
118 | if (atomic_inc_return(&kdump_in_progress) != 1) | ||
119 | unw_init_running(kdump_cpu_freeze, NULL); | ||
120 | |||
121 | /* | ||
122 | * Now this cpu is ready for kdump. | ||
123 | * Stop all others by IPI or INIT. They could receive INIT from | ||
124 | * outside and might be INIT monarch, but only thing they have to | ||
125 | * do is falling into kdump_cpu_freeze(). | ||
126 | * | ||
127 | * If an INIT is asserted here: | ||
128 | * - All receivers might be slaves, since some of cpus could already | ||
129 | * be frozen and INIT might be masked on monarch. In this case, | ||
130 | * all slaves will park in while (monarch_cpu == -1) loop before | ||
131 | * DIE_INIT_SLAVE_ENTER that for waiting monarch enters. | ||
132 | * => TBD: freeze all slaves | ||
133 | * - One might be a monarch, but INIT rendezvous will fail since | ||
134 | * at least this cpu already have INIT masked so it never join | ||
135 | * to the rendezvous. In this case, all slaves and monarch will | ||
136 | * be frozen after timeout of the INIT rendezvous. | ||
137 | * => TBD: freeze them without waiting timeout | ||
138 | */ | ||
111 | kdump_smp_send_stop(); | 139 | kdump_smp_send_stop(); |
112 | /* not all cpu response to IPI, send INIT to freeze them */ | 140 | /* not all cpu response to IPI, send INIT to freeze them */ |
113 | if (kdump_wait_cpu_freeze() && kdump_on_init) { | 141 | if (kdump_wait_cpu_freeze() && kdump_on_init) { |
@@ -177,13 +205,18 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) | |||
177 | switch (val) { | 205 | switch (val) { |
178 | case DIE_INIT_MONARCH_PROCESS: | 206 | case DIE_INIT_MONARCH_PROCESS: |
179 | if (kdump_on_init) { | 207 | if (kdump_on_init) { |
180 | atomic_set(&kdump_in_progress, 1); | 208 | if (atomic_inc_return(&kdump_in_progress) != 1) |
209 | kdump_freeze_monarch = 1; | ||
181 | *(nd->monarch_cpu) = -1; | 210 | *(nd->monarch_cpu) = -1; |
182 | } | 211 | } |
183 | break; | 212 | break; |
184 | case DIE_INIT_MONARCH_LEAVE: | 213 | case DIE_INIT_MONARCH_LEAVE: |
185 | if (kdump_on_init) | 214 | if (kdump_on_init) { |
186 | machine_kdump_on_init(); | 215 | if (kdump_freeze_monarch) |
216 | unw_init_running(kdump_cpu_freeze, NULL); | ||
217 | else | ||
218 | machine_kdump_on_init(); | ||
219 | } | ||
187 | break; | 220 | break; |
188 | case DIE_INIT_SLAVE_LEAVE: | 221 | case DIE_INIT_SLAVE_LEAVE: |
189 | if (atomic_read(&kdump_in_progress)) | 222 | if (atomic_read(&kdump_in_progress)) |
@@ -196,9 +229,11 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) | |||
196 | case DIE_MCA_MONARCH_LEAVE: | 229 | case DIE_MCA_MONARCH_LEAVE: |
197 | /* *(nd->data) indicate if MCA is recoverable */ | 230 | /* *(nd->data) indicate if MCA is recoverable */ |
198 | if (kdump_on_fatal_mca && !(*(nd->data))) { | 231 | if (kdump_on_fatal_mca && !(*(nd->data))) { |
199 | atomic_set(&kdump_in_progress, 1); | 232 | if (atomic_inc_return(&kdump_in_progress) == 1) { |
200 | *(nd->monarch_cpu) = -1; | 233 | *(nd->monarch_cpu) = -1; |
201 | machine_kdump_on_init(); | 234 | machine_kdump_on_init(); |
235 | } | ||
236 | /* We got fatal MCA while kdump!? No way!! */ | ||
202 | } | 237 | } |
203 | break; | 238 | break; |
204 | } | 239 | } |
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S index 903babd22d62..32f6fc131fbe 100644 --- a/arch/ia64/kernel/relocate_kernel.S +++ b/arch/ia64/kernel/relocate_kernel.S | |||
@@ -52,7 +52,7 @@ GLOBAL_ENTRY(relocate_new_kernel) | |||
52 | srlz.i | 52 | srlz.i |
53 | ;; | 53 | ;; |
54 | mov ar.rnat=r18 | 54 | mov ar.rnat=r18 |
55 | rfi | 55 | rfi // note: this unmask MCA/INIT (psr.mc) |
56 | ;; | 56 | ;; |
57 | 1: | 57 | 1: |
58 | //physical mode code begin | 58 | //physical mode code begin |