author		Robin Getz <robin.getz@analog.com>	2009-06-21 22:02:16 -0400
committer	Mike Frysinger <vapier@gentoo.org>	2009-09-16 21:28:28 -0400
commit		ae4f073c40bf677b03826262e6022b4a251fe437
tree		452c91be30a3970efbea5780d368945e7f63712c /arch
parent		d4b834c13940b5433d16ae3605794b3d74804348
Blackfin: make EVT3->EVT5 lowering more robust wrt IPEND[4]
We handle many exceptions at EVT5 (hardware error level) so that we can
catch exceptions in our exception handling code. Today, if the global
interrupt disable bit (IPEND[4]) is set (interrupts disabled), our trap
handling code goes into an infinite loop, since we need interrupts to be
on to defer things to EVT5.
Normal kernel code should not trigger this for any reason, as IPEND[4] gets
cleared early (when doing an interrupt context save) and the kernel stack
there should be sane (otherwise something much worse is happening in the
system). But this has happened a few times, so this change makes sure we
dump a proper crash message even when things have gone south.
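
For context (a sketch, not part of the patch itself): on the Blackfin, IPEND[4]
is set by hardware while RETI is in use and acts as the global interrupt
disable bit. Pushing RETI to the stack clears IPEND[4] (re-enabling
interrupts), and reloading RETI sets it again. That is the trick entry.S now
uses to lower to EVT5 even when the exception arrived with interrupts
disabled; roughly:

	[--sp] = reti;	/* push RETI: hardware clears IPEND[4], interrupts back on */
	sp += 4;	/* discard the saved value, only the side effect is wanted */
	raise 5;	/* the pending IVG5 (EVT5) interrupt can now be taken */
	...
	sp += -4;	/* later, if IPEND[4] was set at exception time: */
	reti = [sp++];	/* reload RETI: hardware sets IPEND[4] again (the value
			 * loaded here is fixed up by RESTORE_ALL_SYS) */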
Signed-off-by: Robin Getz <robin.getz@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Diffstat (limited to 'arch')
 arch/blackfin/include/asm/pda.h    |  1
 arch/blackfin/kernel/asm-offsets.c |  1
 arch/blackfin/kernel/traps.c       | 19
 arch/blackfin/mach-common/entry.S  | 86
 4 files changed, 66 insertions, 41 deletions
diff --git a/arch/blackfin/include/asm/pda.h b/arch/blackfin/include/asm/pda.h
index b42555c1431c..69b96b40c188 100644
--- a/arch/blackfin/include/asm/pda.h
+++ b/arch/blackfin/include/asm/pda.h
@@ -50,6 +50,7 @@ struct blackfin_pda {			/* Per-processor Data Area */
 	unsigned long ex_optr;
 	unsigned long ex_buf[4];
 	unsigned long ex_imask;		/* Saved imask from exception */
+	unsigned long ex_ipend;		/* Saved IPEND from exception */
 	unsigned long *ex_stack;	/* Exception stack space */
 
 #ifdef ANOMALY_05000261
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index b5df9459d6d5..8ad4f2c69961 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -145,6 +145,7 @@ int main(void)
 	DEFINE(PDA_EXBUF, offsetof(struct blackfin_pda, ex_buf));
 	DEFINE(PDA_EXIMASK, offsetof(struct blackfin_pda, ex_imask));
 	DEFINE(PDA_EXSTACK, offsetof(struct blackfin_pda, ex_stack));
+	DEFINE(PDA_EXIPEND, offsetof(struct blackfin_pda, ex_ipend));
 #ifdef ANOMALY_05000261
 	DEFINE(PDA_LFRETX, offsetof(struct blackfin_pda, last_cplb_fault_retx));
 #endif
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index bf2b2d1f8ae5..fccf741ed3b5 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -267,11 +267,6 @@ asmlinkage void trap_c(struct pt_regs *fp)
 	 * double faults if the stack has become corrupt
 	 */
 
-#ifndef CONFIG_KGDB
-	/* IPEND is skipped if KGDB isn't enabled (see entry code) */
-	fp->ipend = bfin_read_IPEND();
-#endif
-
 	/* trap_c() will be called for exceptions. During exceptions
 	 * processing, the pc value should be set with retx value.
 	 * With this change we can cleanup some code in signal.c - TODO
@@ -1116,10 +1111,16 @@ void show_regs(struct pt_regs *fp)
 
 	verbose_printk(KERN_NOTICE "%s", linux_banner);
 
-	verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n",
-		print_tainted());
-	verbose_printk(KERN_NOTICE " SEQSTAT: %08lx  IPEND: %04lx  SYSCFG: %04lx\n",
-		(long)fp->seqstat, fp->ipend, fp->syscfg);
+	verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
+	verbose_printk(KERN_NOTICE " SEQSTAT: %08lx  IPEND: %04lx  IMASK: %04lx  SYSCFG: %04lx\n",
+		(long)fp->seqstat, fp->ipend, cpu_pda[smp_processor_id()].ex_imask, fp->syscfg);
+	if (fp->ipend & EVT_IRPTEN)
+		verbose_printk(KERN_NOTICE "  Global Interrupts Disabled (IPEND[4])\n");
+	if (!(cpu_pda[smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
+			EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
+		verbose_printk(KERN_NOTICE "  Peripheral interrupts masked off\n");
+	if (!(cpu_pda[smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
+		verbose_printk(KERN_NOTICE "  Kernel interrupts masked off\n");
 	if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
 		verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n",
 			(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index fb1795d5be2a..4c07fcb356a2 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -301,25 +301,31 @@ ENTRY(_ex_replaceable)
 	nop;
 
 ENTRY(_ex_trap_c)
+	/* The only thing that has been saved in this context is
+	 * (R7:6,P5:4), ASTAT & SP - don't use anything else
+	 */
+
+	GET_PDA(p5, r6);
+
 	/* Make sure we are not in a double fault */
 	p4.l = lo(IPEND);
 	p4.h = hi(IPEND);
 	r7 = [p4];
 	CC = BITTST (r7, 5);
 	if CC jump _double_fault;
+	[p5 + PDA_EXIPEND] = r7;
 
 	/* Call C code (trap_c) to handle the exception, which most
 	 * likely involves sending a signal to the current process.
 	 * To avoid double faults, lower our priority to IRQ5 first.
 	 */
-	P5.h = _exception_to_level5;
-	P5.l = _exception_to_level5;
+	r7.h = _exception_to_level5;
+	r7.l = _exception_to_level5;
 	p4.l = lo(EVT5);
 	p4.h = hi(EVT5);
-	[p4] = p5;
+	[p4] = r7;
 	csync;
 
-	GET_PDA(p5, r6);
 #ifndef CONFIG_DEBUG_DOUBLEFAULT
 
 	/*
@@ -349,8 +355,7 @@ ENTRY(_ex_trap_c)
 	BITCLR(r6, SYSCFG_SSSTEP_P);
 	SYSCFG = r6;
 
-	/* Disable all interrupts, but make sure level 5 is enabled so
-	 * we can switch to that level.  Save the old mask. */
+	/* Save the current IMASK, since we change in order to jump to level 5 */
 	cli r6;
 	[p5 + PDA_EXIMASK] = r6;
 
@@ -358,9 +363,21 @@
 	p4.h = hi(SAFE_USER_INSTRUCTION);
 	retx = p4;
 
+	/* Disable all interrupts, but make sure level 5 is enabled so
+	 * we can switch to that level.
+	 */
 	r6 = 0x3f;
 	sti r6;
 
+	/* In case interrupts are disabled IPEND[4] (global interrupt disable bit)
+	 * clear it (re-enabling interrupts again) by the special sequence of pushing
+	 * RETI onto the stack.  This way we can lower ourselves to IVG5 even if the
+	 * exception was taken after the interrupt handler was called but before it
+	 * got a chance to enable global interrupts itself.
+	 */
+	[--sp] = reti;
+	sp += 4;
+
 	raise 5;
 	jump.s _bfin_return_from_exception;
 ENDPROC(_ex_trap_c)
@@ -420,47 +437,52 @@ ENDPROC(_double_fault)
 ENTRY(_exception_to_level5)
 	SAVE_ALL_SYS
 
-	GET_PDA(p4, r7);	/* Fetch current PDA */
-	r6 = [p4 + PDA_RETX];
+	GET_PDA(p5, r7);	/* Fetch current PDA */
+	r6 = [p5 + PDA_RETX];
 	[sp + PT_PC] = r6;
 
-	r6 = [p4 + PDA_SYSCFG];
+	r6 = [p5 + PDA_SYSCFG];
 	[sp + PT_SYSCFG] = r6;
 
-	/* Restore interrupt mask.  We haven't pushed RETI, so this
-	 * doesn't enable interrupts until we return from this handler. */
-	r6 = [p4 + PDA_EXIMASK];
-	sti r6;
-
 	/* Restore the hardware error vector.  */
-	P5.h = _evt_ivhw;
-	P5.l = _evt_ivhw;
+	r7.h = _evt_ivhw;
+	r7.l = _evt_ivhw;
 	p4.l = lo(EVT5);
 	p4.h = hi(EVT5);
-	[p4] = p5;
+	[p4] = r7;
 	csync;
 
-	p2.l = lo(IPEND);
-	p2.h = hi(IPEND);
-	csync;
-	r0 = [p2];	/* Read current IPEND */
-	[sp + PT_IPEND] = r0;	/* Store IPEND */
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+	/* Now that we have the hardware error vector programmed properly
+	 * we can re-enable interrupts (IPEND[4]), so if the _trap_c causes
+	 * another hardware error, we can catch it (self-nesting).
+	 */
+	[--sp] = reti;
+	sp += 4;
+#endif
+
+	r7 = [p5 + PDA_EXIPEND]	/* Read the IPEND from the Exception state */
+	[sp + PT_IPEND] = r7;	/* Store IPEND onto the stack */
 
 	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
 	SP += -12;
 	call _trap_c;
 	SP += 12;
 
-#ifdef CONFIG_DEBUG_DOUBLEFAULT
-	/* Grab ILAT */
-	p2.l = lo(ILAT);
-	p2.h = hi(ILAT);
-	r0 = [p2];
-	r1 = 0x20;	/* Did I just cause anther HW error? */
-	r0 = r0 & r1;
-	CC = R0 == R1;
-	if CC JUMP _double_fault;
-#endif
+	/* If interrupts were off during the exception (IPEND[4] = 1), turn them off
+	 * before we return.
+	 */
+	CC = BITTST(r7, EVT_IRPTEN_P)
+	if !CC jump 1f;
+	/* this will load a random value into the reti register - but that is OK,
+	 * since we do restore it to the correct value in the 'RESTORE_ALL_SYS' macro
+	 */
+	sp += -4;
+	reti = [sp++];
+1:
+	/* restore the interrupt mask (IMASK) */
+	r6 = [p5 + PDA_EXIMASK];
+	sti r6;
 
 	call _ret_from_exception;
 	RESTORE_ALL_SYS