author     Robin Getz <robin.getz@analog.com>          2009-06-21 22:02:16 -0400
committer  Mike Frysinger <vapier@gentoo.org>          2009-09-16 21:28:28 -0400
commit     ae4f073c40bf677b03826262e6022b4a251fe437 (patch)
tree       452c91be30a3970efbea5780d368945e7f63712c /arch/blackfin/mach-common
parent     d4b834c13940b5433d16ae3605794b3d74804348 (diff)
Blackfin: make EVT3->EVT5 lowering more robust wrt IPEND[4]
We handle many exceptions at EVT5 (hardware error level) so that we can
catch exceptions in our exception handling code. Today, if the global
interrupt disable bit (IPEND[4]) is set (interrupts disabled), our trap
handling code goes into an infinite loop, since we need interrupts to be
on to defer things to EVT5.

Normal kernel code should not trigger this for any reason, as IPEND[4]
gets cleared early (when doing an interrupt context save) and the kernel
stack there should be sane (otherwise something much worse is happening
in the system). But this has happened a few times, so this change makes
sure we dump a proper crash message even when things have gone south.
Signed-off-by: Robin Getz <robin.getz@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Diffstat (limited to 'arch/blackfin/mach-common')
-rw-r--r--  arch/blackfin/mach-common/entry.S  86
1 file changed, 54 insertions, 32 deletions
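The heart of the fix is a Blackfin-specific behavior that the patch's own comments describe: storing RETI to memory clears IPEND[4] (the global interrupt disable bit), while loading RETI sets it again. A minimal sketch of the sequence the patch adds to _ex_trap_c, assuming (as in the patch) that the scratch registers have already been saved and EVT5 already points at _exception_to_level5:

	/* Pushing RETI clears IPEND[4], re-enabling interrupts even if the
	 * exception hit while they were globally disabled.
	 */
	[--sp] = reti;
	/* Discard the saved value instead of popping it back into RETI,
	 * so IPEND[4] stays clear.
	 */
	sp += 4;
	/* With IPEND[4] clear, the raised IVG5 interrupt can actually be
	 * delivered once we return from the exception.
	 */
	raise 5;

The later hunk in _exception_to_level5 undoes this: if IPEND[4] was set when the exception was taken (checked via the saved PDA_EXIPEND value and EVT_IRPTEN_P), it reloads RETI from the stack (sp += -4; reti = [sp++];) to set IPEND[4] again before restoring IMASK and returning.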
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index fb1795d5be2a..4c07fcb356a2 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -301,25 +301,31 @@ ENTRY(_ex_replaceable)
 	nop;
 
 ENTRY(_ex_trap_c)
+	/* The only thing that has been saved in this context is
+	 * (R7:6,P5:4), ASTAT & SP - don't use anything else
+	 */
+
+	GET_PDA(p5, r6);
+
 	/* Make sure we are not in a double fault */
 	p4.l = lo(IPEND);
 	p4.h = hi(IPEND);
 	r7 = [p4];
 	CC = BITTST (r7, 5);
 	if CC jump _double_fault;
+	[p5 + PDA_EXIPEND] = r7;
 
 	/* Call C code (trap_c) to handle the exception, which most
 	 * likely involves sending a signal to the current process.
 	 * To avoid double faults, lower our priority to IRQ5 first.
 	 */
-	P5.h = _exception_to_level5;
-	P5.l = _exception_to_level5;
+	r7.h = _exception_to_level5;
+	r7.l = _exception_to_level5;
 	p4.l = lo(EVT5);
 	p4.h = hi(EVT5);
-	[p4] = p5;
+	[p4] = r7;
 	csync;
 
-	GET_PDA(p5, r6);
 #ifndef CONFIG_DEBUG_DOUBLEFAULT
 
 	/*
@@ -349,8 +355,7 @@ ENTRY(_ex_trap_c)
 	BITCLR(r6, SYSCFG_SSSTEP_P);
 	SYSCFG = r6;
 
-	/* Disable all interrupts, but make sure level 5 is enabled so
-	 * we can switch to that level. Save the old mask. */
+	/* Save the current IMASK, since we change in order to jump to level 5 */
 	cli r6;
 	[p5 + PDA_EXIMASK] = r6;
 
@@ -358,9 +363,21 @@ ENTRY(_ex_trap_c)
 	p4.h = hi(SAFE_USER_INSTRUCTION);
 	retx = p4;
 
+	/* Disable all interrupts, but make sure level 5 is enabled so
+	 * we can switch to that level.
+	 */
 	r6 = 0x3f;
 	sti r6;
 
+	/* In case interrupts are disabled IPEND[4] (global interrupt disable bit)
+	 * clear it (re-enabling interrupts again) by the special sequence of pushing
+	 * RETI onto the stack. This way we can lower ourselves to IVG5 even if the
+	 * exception was taken after the interrupt handler was called but before it
+	 * got a chance to enable global interrupts itself.
+	 */
+	[--sp] = reti;
+	sp += 4;
+
 	raise 5;
 	jump.s _bfin_return_from_exception;
 ENDPROC(_ex_trap_c)
@@ -420,47 +437,52 @@ ENDPROC(_double_fault)
 ENTRY(_exception_to_level5)
 	SAVE_ALL_SYS
 
-	GET_PDA(p4, r7);	/* Fetch current PDA */
-	r6 = [p4 + PDA_RETX];
+	GET_PDA(p5, r7);	/* Fetch current PDA */
+	r6 = [p5 + PDA_RETX];
 	[sp + PT_PC] = r6;
 
-	r6 = [p4 + PDA_SYSCFG];
+	r6 = [p5 + PDA_SYSCFG];
 	[sp + PT_SYSCFG] = r6;
 
-	/* Restore interrupt mask. We haven't pushed RETI, so this
-	 * doesn't enable interrupts until we return from this handler. */
-	r6 = [p4 + PDA_EXIMASK];
-	sti r6;
-
 	/* Restore the hardware error vector. */
-	P5.h = _evt_ivhw;
-	P5.l = _evt_ivhw;
+	r7.h = _evt_ivhw;
+	r7.l = _evt_ivhw;
 	p4.l = lo(EVT5);
 	p4.h = hi(EVT5);
-	[p4] = p5;
+	[p4] = r7;
 	csync;
 
-	p2.l = lo(IPEND);
-	p2.h = hi(IPEND);
-	csync;
-	r0 = [p2];		/* Read current IPEND */
-	[sp + PT_IPEND] = r0;	/* Store IPEND */
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+	/* Now that we have the hardware error vector programmed properly
+	 * we can re-enable interrupts (IPEND[4]), so if the _trap_c causes
+	 * another hardware error, we can catch it (self-nesting).
+	 */
+	[--sp] = reti;
+	sp += 4;
+#endif
+
+	r7 = [p5 + PDA_EXIPEND]	/* Read the IPEND from the Exception state */
+	[sp + PT_IPEND] = r7;	/* Store IPEND onto the stack */
 
 	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
 	SP += -12;
 	call _trap_c;
 	SP += 12;
 
-#ifdef CONFIG_DEBUG_DOUBLEFAULT
-	/* Grab ILAT */
-	p2.l = lo(ILAT);
-	p2.h = hi(ILAT);
-	r0 = [p2];
-	r1 = 0x20;	/* Did I just cause anther HW error? */
-	r0 = r0 & r1;
-	CC = R0 == R1;
-	if CC JUMP _double_fault;
-#endif
+	/* If interrupts were off during the exception (IPEND[4] = 1), turn them off
+	 * before we return.
+	 */
+	CC = BITTST(r7, EVT_IRPTEN_P)
+	if !CC jump 1f;
+	/* this will load a random value into the reti register - but that is OK,
+	 * since we do restore it to the correct value in the 'RESTORE_ALL_SYS' macro
+	 */
+	sp += -4;
+	reti = [sp++];
+1:
+	/* restore the interrupt mask (IMASK) */
+	r6 = [p5 + PDA_EXIMASK];
+	sti r6;
 
 	call _ret_from_exception;
 	RESTORE_ALL_SYS