Diffstat (limited to 'arch/ia64/kernel/mca.c')
 arch/ia64/kernel/mca.c | 234 ++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 208 insertions(+), 26 deletions(-)
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 2fbe4536fe18..bfbd8986153b 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -54,6 +54,9 @@
  *
  * 2005-10-07 Keith Owens <kaos@sgi.com>
  *	Add notify_die() hooks.
+ *
+ * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *	Add printing support for MCA/INIT.
  */
 #include <linux/types.h>
 #include <linux/init.h>
@@ -136,11 +139,175 @@ extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
 
 static int mca_init __initdata;
 
+/*
+ * limited & delayed printing support for MCA/INIT handler
+ */
+
+#define mprintk(fmt...) ia64_mca_printk(fmt)
+
+#define MLOGBUF_SIZE (512+256*NR_CPUS)
+#define MLOGBUF_MSGMAX 256
+static char mlogbuf[MLOGBUF_SIZE];
+static DEFINE_SPINLOCK(mlogbuf_wlock);	/* mca context only */
+static DEFINE_SPINLOCK(mlogbuf_rlock);	/* normal context only */
+static unsigned long mlogbuf_start;
+static unsigned long mlogbuf_end;
+static unsigned int mlogbuf_finished = 0;
+static unsigned long mlogbuf_timestamp = 0;
+
+static int loglevel_save = -1;
+#define BREAK_LOGLEVEL(__console_loglevel) \
+	oops_in_progress = 1; \
+	if (loglevel_save < 0) \
+		loglevel_save = __console_loglevel; \
+	__console_loglevel = 15;
+
+#define RESTORE_LOGLEVEL(__console_loglevel) \
+	if (loglevel_save >= 0) { \
+		__console_loglevel = loglevel_save; \
+		loglevel_save = -1; \
+	} \
+	mlogbuf_finished = 0; \
+	oops_in_progress = 0;
+
+/*
+ * Push messages into buffer, print them later if not urgent.
+ */
+void ia64_mca_printk(const char *fmt, ...)
+{
+	va_list args;
+	int printed_len;
+	char temp_buf[MLOGBUF_MSGMAX];
+	char *p;
+
+	va_start(args, fmt);
+	printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args);
+	va_end(args);
+
+	/* Copy the output into mlogbuf */
+	if (oops_in_progress) {
+		/* mlogbuf was abandoned, use printk directly instead. */
+		printk(temp_buf);
+	} else {
+		spin_lock(&mlogbuf_wlock);
+		for (p = temp_buf; *p; p++) {
+			unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
+			if (next != mlogbuf_start) {
+				mlogbuf[mlogbuf_end] = *p;
+				mlogbuf_end = next;
+			} else {
+				/* buffer full */
+				break;
+			}
+		}
+		mlogbuf[mlogbuf_end] = '\0';
+		spin_unlock(&mlogbuf_wlock);
+	}
+}
+EXPORT_SYMBOL(ia64_mca_printk);
+
+/*
+ * Print buffered messages.
+ *  NOTE: call this after returning normal context. (ex. from salinfod)
+ */
+void ia64_mlogbuf_dump(void)
+{
+	char temp_buf[MLOGBUF_MSGMAX];
+	char *p;
+	unsigned long index;
+	unsigned long flags;
+	unsigned int printed_len;
+
+	/* Get output from mlogbuf */
+	while (mlogbuf_start != mlogbuf_end) {
+		temp_buf[0] = '\0';
+		p = temp_buf;
+		printed_len = 0;
+
+		spin_lock_irqsave(&mlogbuf_rlock, flags);
+
+		index = mlogbuf_start;
+		while (index != mlogbuf_end) {
+			*p = mlogbuf[index];
+			index = (index + 1) % MLOGBUF_SIZE;
+			if (!*p)
+				break;
+			p++;
+			if (++printed_len >= MLOGBUF_MSGMAX - 1)
+				break;
+		}
+		*p = '\0';
+		if (temp_buf[0])
+			printk(temp_buf);
+		mlogbuf_start = index;
+
+		mlogbuf_timestamp = 0;
+		spin_unlock_irqrestore(&mlogbuf_rlock, flags);
+	}
+}
+EXPORT_SYMBOL(ia64_mlogbuf_dump);
+
+/*
+ * Call this if system is going to down or if immediate flushing messages to
+ * console is required. (ex. recovery was failed, crash dump is going to be
+ * invoked, long-wait rendezvous etc.)
+ *  NOTE: this should be called from monarch.
+ */
+static void ia64_mlogbuf_finish(int wait)
+{
+	BREAK_LOGLEVEL(console_loglevel);
+
+	spin_lock_init(&mlogbuf_rlock);
+	ia64_mlogbuf_dump();
+	printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
+		"MCA/INIT might be dodgy or fail.\n");
+
+	if (!wait)
+		return;
+
+	/* wait for console */
+	printk("Delaying for 5 seconds...\n");
+	udelay(5*1000000);
+
+	mlogbuf_finished = 1;
+}
+EXPORT_SYMBOL(ia64_mlogbuf_finish);
+
+/*
+ * Print buffered messages from INIT context.
+ */
+static void ia64_mlogbuf_dump_from_init(void)
+{
+	if (mlogbuf_finished)
+		return;
+
+	if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) {
+		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
+			" and the system seems to be messed up.\n");
+		ia64_mlogbuf_finish(0);
+		return;
+	}
+
+	if (!spin_trylock(&mlogbuf_rlock)) {
+		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. "
+			"Generated messages other than stack dump will be "
+			"buffered to mlogbuf and will be printed later.\n");
+		printk(KERN_ERR "INIT: If messages would not printed after "
+			"this INIT, wait 30sec and assert INIT again.\n");
+		if (!mlogbuf_timestamp)
+			mlogbuf_timestamp = jiffies;
+		return;
+	}
+	spin_unlock(&mlogbuf_rlock);
+	ia64_mlogbuf_dump();
+}
 
 static void inline
 ia64_mca_spin(const char *func)
 {
-	printk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
+	if (monarch_cpu == smp_processor_id())
+		ia64_mlogbuf_finish(0);
+	mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
 	while (1)
 		cpu_relax();
 }
@@ -344,9 +511,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
 
-	/* Get the CPE error record and log it */
-	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
-
 	spin_lock(&cpe_history_lock);
 	if (!cpe_poll_enabled && cpe_vector >= 0) {
 
@@ -375,7 +539,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
 
 			/* lock already released, get out now */
-			return IRQ_HANDLED;
+			goto out;
 		} else {
 			cpe_history[index++] = now;
 			if (index == CPE_HISTORY_LENGTH)
@@ -383,6 +547,10 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 		}
 	}
 	spin_unlock(&cpe_history_lock);
+out:
+	/* Get the CPE error record and log it */
+	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
+
 	return IRQ_HANDLED;
 }
 
@@ -988,18 +1156,22 @@ ia64_wait_for_slaves(int monarch, const char *type)
 	}
 	if (!missing)
 		goto all_in;
-	printk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
+	/*
+	 * Maybe slave(s) dead. Print buffered messages immediately.
+	 */
+	ia64_mlogbuf_finish(0);
+	mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
 	for_each_online_cpu(c) {
 		if (c == monarch)
 			continue;
 		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
-			printk(" %d", c);
+			mprintk(" %d", c);
 	}
-	printk("\n");
+	mprintk("\n");
 	return;
 
 all_in:
-	printk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
+	mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
 	return;
 }
 
@@ -1027,10 +1199,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
-	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
-	console_loglevel = 15;	/* make sure printks make it to console */
-	printk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d monarch=%ld\n",
-		sos->proc_state_param, cpu, sos->monarch);
+	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
+		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);
 
 	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
 	monarch_cpu = cpu;
@@ -1066,6 +1236,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		rh->severity = sal_log_severity_corrected;
 		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
 		sos->os_status = IA64_MCA_CORRECTED;
+	} else {
+		/* Dump buffered message to console */
+		ia64_mlogbuf_finish(1);
 	}
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
@@ -1106,9 +1279,6 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
 
-	/* Get the CMC error record and log it */
-	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
-
 	spin_lock(&cmc_history_lock);
 	if (!cmc_polling_enabled) {
 		int i, count = 1; /* we know 1 happened now */
@@ -1141,7 +1311,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
 
 			/* lock already released, get out now */
-			return IRQ_HANDLED;
+			goto out;
 		} else {
 			cmc_history[index++] = now;
 			if (index == CMC_HISTORY_LENGTH)
@@ -1149,6 +1319,10 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 		}
 	}
 	spin_unlock(&cmc_history_lock);
+out:
+	/* Get the CMC error record and log it */
+	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
+
 	return IRQ_HANDLED;
 }
 
@@ -1305,6 +1479,15 @@ default_monarch_init_process(struct notifier_block *self, unsigned long val, voi
 	struct task_struct *g, *t;
 	if (val != DIE_INIT_MONARCH_PROCESS)
 		return NOTIFY_DONE;
+
+	/*
+	 * FIXME: mlogbuf will brim over with INIT stack dumps.
+	 * To enable show_stack from INIT, we use oops_in_progress which should
+	 * be used in real oops. This would cause something wrong after INIT.
+	 */
+	BREAK_LOGLEVEL(console_loglevel);
+	ia64_mlogbuf_dump_from_init();
+
 	printk(KERN_ERR "Processes interrupted by INIT -");
 	for_each_online_cpu(c) {
 		struct ia64_sal_os_state *s;
@@ -1326,6 +1509,8 @@ default_monarch_init_process(struct notifier_block *self, unsigned long val, voi
 		} while_each_thread (g, t);
 		read_unlock(&tasklist_lock);
 	}
+	/* FIXME: This will not restore zapped printk locks. */
+	RESTORE_LOGLEVEL(console_loglevel);
 	return NOTIFY_DONE;
 }
 
@@ -1357,12 +1542,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
-	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
-	console_loglevel = 15;	/* make sure printks make it to console */
-
 	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
 
-	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
+	mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
 		sos->proc_state_param, cpu, sos->monarch);
 	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
 
@@ -1375,7 +1557,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * fix their proms and get their customers updated.
 	 */
 	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
-		printk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
+		mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
 			__FUNCTION__, cpu);
 		atomic_dec(&slaves);
 		sos->monarch = 1;
@@ -1387,7 +1569,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * fix their proms and get their customers updated.
 	 */
 	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
-		printk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
+		mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
 			__FUNCTION__, cpu);
 		atomic_dec(&monarchs);
 		sos->monarch = 0;
@@ -1408,7 +1590,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
 			ia64_mca_spin(__FUNCTION__);
-		printk("Slave on cpu %d returning to normal service.\n", cpu);
+		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
 		set_curr_task(cpu, previous_current);
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 		atomic_dec(&slaves);
@@ -1426,7 +1608,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * same serial line, the user will need some time to switch out of the BMC before
 	 * the dump begins.
 	 */
-	printk("Delaying for 5 seconds...\n");
+	mprintk("Delaying for 5 seconds...\n");
 	udelay(5*1000000);
 	ia64_wait_for_slaves(cpu, "INIT");
 	/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
@@ -1439,7 +1621,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
-	printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
+	mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
 	atomic_dec(&monarchs);
 	set_curr_task(cpu, previous_current);
 	monarch_cpu = -1;
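
Note (not part of the patch): the scheme above is a fixed-size character ring buffer that MCA/INIT context appends formatted messages into, with a reader that drains it to the console later from normal context. The stand-alone C sketch below only illustrates that data flow under simplifying assumptions; the demo_* names are invented for illustration, locking is omitted because the demo is single-threaded, and the real code additionally serializes writers and readers with mlogbuf_wlock/mlogbuf_rlock and sizes the buffer by NR_CPUS.

/*
 * Illustrative user-space sketch only -- NOT kernel code and NOT part of
 * the patch. Writers push formatted text into a ring buffer; a later
 * "dump" drains it to stdout, mimicking ia64_mca_printk/ia64_mlogbuf_dump.
 */
#include <stdarg.h>
#include <stdio.h>

#define DEMO_BUF_SIZE  512   /* ring capacity (hypothetical demo value) */
#define DEMO_MSG_MAX   256   /* max length of one formatted message */

static char demo_buf[DEMO_BUF_SIZE];
static unsigned long demo_start;   /* read index  */
static unsigned long demo_end;     /* write index */

/* Append one message; truncate silently if the ring is full. */
static void demo_mca_printk(const char *fmt, ...)
{
	char tmp[DEMO_MSG_MAX];
	va_list ap;
	const char *p;

	va_start(ap, fmt);
	vsnprintf(tmp, sizeof(tmp), fmt, ap);
	va_end(ap);

	for (p = tmp; *p; p++) {
		unsigned long next = (demo_end + 1) % DEMO_BUF_SIZE;
		if (next == demo_start)
			break;          /* buffer full */
		demo_buf[demo_end] = *p;
		demo_end = next;
	}
}

/* Drain the ring and print everything that was buffered. */
static void demo_mlogbuf_dump(void)
{
	while (demo_start != demo_end) {
		putchar(demo_buf[demo_start]);
		demo_start = (demo_start + 1) % DEMO_BUF_SIZE;
	}
	fflush(stdout);
}

int main(void)
{
	/* "MCA context": record messages without touching the console. */
	demo_mca_printk("machine check on cpu %d\n", 3);
	demo_mca_printk("recovery %s\n", "succeeded");

	/* "Normal context", later: flush the buffered messages. */
	demo_mlogbuf_dump();
	return 0;
}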