Diffstat (limited to 'arch/ppc/kernel/traps.c')
-rw-r--r--   arch/ppc/kernel/traps.c   185
1 file changed, 50 insertions(+), 135 deletions(-)
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index c78568905c3b..a467a429c2fe 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -194,11 +194,7 @@ static inline int check_io_access(struct pt_regs *regs)
 /* On 4xx, the reason for the machine check or program exception
    is in the ESR. */
 #define get_reason(regs)	((regs)->dsisr)
-#ifndef CONFIG_FSL_BOOKE
 #define get_mc_reason(regs)	((regs)->dsisr)
-#else
-#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
-#endif
 #define REASON_FP		ESR_FP
 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
 #define REASON_PRIVILEGED	ESR_PPR
@@ -231,39 +227,25 @@ platform_machine_check(struct pt_regs *regs)
 {
 }
 
-void machine_check_exception(struct pt_regs *regs)
+#if defined(CONFIG_4xx)
+int machine_check_4xx(struct pt_regs *regs)
 {
 	unsigned long reason = get_mc_reason(regs);
 
-	if (user_mode(regs)) {
-		regs->msr |= MSR_RI;
-		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
-		return;
-	}
-
-#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
-	/* the qspan pci read routines can cause machine checks -- Cort */
-	bad_page_fault(regs, regs->dar, SIGBUS);
-	return;
-#endif
-
-	if (debugger_fault_handler) {
-		debugger_fault_handler(regs);
-		regs->msr |= MSR_RI;
-		return;
-	}
-
-	if (check_io_access(regs))
-		return;
-
-#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
 	if (reason & ESR_IMCP) {
 		printk("Instruction");
 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
 	} else
 		printk("Data");
 	printk(" machine check in kernel mode.\n");
-#elif defined(CONFIG_440A)
+
+	return 0;
+}
+
+int machine_check_440A(struct pt_regs *regs)
+{
+	unsigned long reason = get_mc_reason(regs);
+
 	printk("Machine check in kernel mode.\n");
 	if (reason & ESR_IMCP){
 		printk("Instruction Synchronous Machine Check exception\n");
@@ -293,55 +275,13 @@ void machine_check_exception(struct pt_regs *regs)
 		/* Clear MCSR */
 		mtspr(SPRN_MCSR, mcsr);
 	}
-#elif defined (CONFIG_E500)
-	printk("Machine check in kernel mode.\n");
-	printk("Caused by (from MCSR=%lx): ", reason);
-
-	if (reason & MCSR_MCP)
-		printk("Machine Check Signal\n");
-	if (reason & MCSR_ICPERR)
-		printk("Instruction Cache Parity Error\n");
-	if (reason & MCSR_DCP_PERR)
-		printk("Data Cache Push Parity Error\n");
-	if (reason & MCSR_DCPERR)
-		printk("Data Cache Parity Error\n");
-	if (reason & MCSR_GL_CI)
-		printk("Guarded Load or Cache-Inhibited stwcx.\n");
-	if (reason & MCSR_BUS_IAERR)
-		printk("Bus - Instruction Address Error\n");
-	if (reason & MCSR_BUS_RAERR)
-		printk("Bus - Read Address Error\n");
-	if (reason & MCSR_BUS_WAERR)
-		printk("Bus - Write Address Error\n");
-	if (reason & MCSR_BUS_IBERR)
-		printk("Bus - Instruction Data Error\n");
-	if (reason & MCSR_BUS_RBERR)
-		printk("Bus - Read Data Bus Error\n");
-	if (reason & MCSR_BUS_WBERR)
-		printk("Bus - Write Data Bus Error\n");
-	if (reason & MCSR_BUS_IPERR)
-		printk("Bus - Instruction Parity Error\n");
-	if (reason & MCSR_BUS_RPERR)
-		printk("Bus - Read Parity Error\n");
-#elif defined (CONFIG_E200)
-	printk("Machine check in kernel mode.\n");
-	printk("Caused by (from MCSR=%lx): ", reason);
-
-	if (reason & MCSR_MCP)
-		printk("Machine Check Signal\n");
-	if (reason & MCSR_CP_PERR)
-		printk("Cache Push Parity Error\n");
-	if (reason & MCSR_CPERR)
-		printk("Cache Parity Error\n");
-	if (reason & MCSR_EXCP_ERR)
-		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
-	if (reason & MCSR_BUS_IRERR)
-		printk("Bus - Read Bus Error on instruction fetch\n");
-	if (reason & MCSR_BUS_DRERR)
-		printk("Bus - Read Bus Error on data load\n");
-	if (reason & MCSR_BUS_WRERR)
-		printk("Bus - Write Bus Error on buffered store or cache line push\n");
-#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
+	return 0;
+}
+#else
+int machine_check_generic(struct pt_regs *regs)
+{
+	unsigned long reason = get_mc_reason(regs);
+
 	printk("Machine check in kernel mode.\n");
 	printk("Caused by (from SRR1=%lx): ", reason);
 	switch (reason & 0x601F0000) {
@@ -371,7 +311,39 @@ void machine_check_exception(struct pt_regs *regs)
 	default:
 		printk("Unknown values in msr\n");
 	}
-#endif /* CONFIG_4xx */
+	return 0;
+}
+#endif /* everything else */
+
+void machine_check_exception(struct pt_regs *regs)
+{
+	int recover = 0;
+
+	if (cur_cpu_spec->machine_check)
+		recover = cur_cpu_spec->machine_check(regs);
+	if (recover > 0)
+		return;
+
+	if (user_mode(regs)) {
+		regs->msr |= MSR_RI;
+		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
+		return;
+	}
+
+#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
+	/* the qspan pci read routines can cause machine checks -- Cort */
+	bad_page_fault(regs, regs->dar, SIGBUS);
+	return;
+#endif
+
+	if (debugger_fault_handler) {
+		debugger_fault_handler(regs);
+		regs->msr |= MSR_RI;
+		return;
+	}
+
+	if (check_io_access(regs))
+		return;
 
 	/*
 	 * Optional platform-provided routine to print out
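
The hunk above replaces the old #ifdef maze inside machine_check_exception() with per-family handlers (machine_check_4xx(), machine_check_440A(), machine_check_generic()) that are reached through a cur_cpu_spec->machine_check callback; a positive return value means the handler dealt with the event and the common path returns early. Below is a minimal, standalone sketch of that dispatch pattern only; struct cpu_spec, struct pt_regs and the wiring in main() are simplified stand-ins for illustration, not the kernel's real definitions.

#include <stdio.h>

struct pt_regs {
	unsigned long msr;	/* placeholder; the real struct is much larger */
};

struct cpu_spec {
	/* per-CPU-family hook; returns > 0 if the machine check was handled */
	int (*machine_check)(struct pt_regs *regs);
};

static int machine_check_generic(struct pt_regs *regs)
{
	(void)regs;
	printf("Machine check in kernel mode.\n");
	return 0;		/* decoded and reported, but not recovered */
}

static struct cpu_spec generic_spec = {
	.machine_check = machine_check_generic,
};
static struct cpu_spec *cur_cpu_spec = &generic_spec;

static void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;

	/* let the CPU-specific handler decode (and possibly fix up) the event */
	if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);
	if (recover > 0)
		return;

	/* ... common, non-recoverable handling would continue here ... */
}

int main(void)
{
	struct pt_regs regs = { 0 };

	machine_check_exception(&regs);
	return 0;
}
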
@@ -830,63 +802,6 @@ void altivec_assist_exception(struct pt_regs *regs)
 }
 #endif /* CONFIG_ALTIVEC */
 
-#ifdef CONFIG_E500
-void performance_monitor_exception(struct pt_regs *regs)
-{
-	perf_irq(regs);
-}
-#endif
-
-#ifdef CONFIG_FSL_BOOKE
-void CacheLockingException(struct pt_regs *regs, unsigned long address,
-			   unsigned long error_code)
-{
-	/* We treat cache locking instructions from the user
-	 * as priv ops, in the future we could try to do
-	 * something smarter
-	 */
-	if (error_code & (ESR_DLK|ESR_ILK))
-		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
-	return;
-}
-#endif /* CONFIG_FSL_BOOKE */
-
-#ifdef CONFIG_SPE
-void SPEFloatingPointException(struct pt_regs *regs)
-{
-	unsigned long spefscr;
-	int fpexc_mode;
-	int code = 0;
-
-	spefscr = current->thread.spefscr;
-	fpexc_mode = current->thread.fpexc_mode;
-
-	/* Hardware does not necessarily set sticky
-	 * underflow/overflow/invalid flags */
-	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
-		code = FPE_FLTOVF;
-		spefscr |= SPEFSCR_FOVFS;
-	}
-	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
-		code = FPE_FLTUND;
-		spefscr |= SPEFSCR_FUNFS;
-	}
-	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
-		code = FPE_FLTDIV;
-	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
-		code = FPE_FLTINV;
-		spefscr |= SPEFSCR_FINVS;
-	}
-	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
-		code = FPE_FLTRES;
-
-	current->thread.spefscr = spefscr;
-
-	_exception(SIGFPE, regs, code, regs->nip);
-	return;
-}
-#endif
-
 #ifdef CONFIG_BOOKE_WDT
 /*
  * Default handler for a Watchdog exception,