author | Paul Mackerras <paulus@samba.org> | 2005-09-26 02:04:21 -0400
committer | Paul Mackerras <paulus@samba.org> | 2005-09-26 02:04:21 -0400
commit | 14cf11af6cf608eb8c23e989ddb17a715ddce109 (patch)
tree | 271a97ce73e265f39c569cb159c195c5b4bb3f8c /arch/powerpc/kernel/traps.c
parent | e5baa396af7560382d2cf3f0871d616b61fc284c (diff)
powerpc: Merge enough to start building in arch/powerpc.
This creates the directory structure under arch/powerpc and a bunch
of Kconfig files. It does a first-cut merge of arch/powerpc/mm,
arch/powerpc/lib and arch/powerpc/platforms/powermac. This is enough
to build a 32-bit powermac kernel with ARCH=powerpc.
For now we are getting some unmerged files from arch/ppc/kernel and
arch/ppc/syslib, or arch/ppc64/kernel. This makes some minor changes
to files in those directories and files outside arch/powerpc.
The boot directory is still not merged. That's going to be interesting.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/traps.c')
-rw-r--r-- | arch/powerpc/kernel/traps.c | 1047
1 file changed, 1047 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
new file mode 100644
index 000000000000..c7afbbba0f36
--- /dev/null
+++ b/arch/powerpc/kernel/traps.c
@@ -0,0 +1,1047 @@
1 | /* | ||
2 | * arch/powerpc/kernel/traps.c | ||
3 | * | ||
4 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * Modified by Cort Dougan (cort@cs.nmt.edu) | ||
12 | * and Paul Mackerras (paulus@samba.org) | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * This file handles the architecture-dependent parts of hardware exceptions | ||
17 | */ | ||
18 | |||
19 | #include <linux/config.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/stddef.h> | ||
25 | #include <linux/unistd.h> | ||
26 | #include <linux/ptrace.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/user.h> | ||
29 | #include <linux/a.out.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/config.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/prctl.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/kprobes.h> | ||
37 | #include <asm/kdebug.h> | ||
38 | |||
39 | #include <asm/pgtable.h> | ||
40 | #include <asm/uaccess.h> | ||
41 | #include <asm/system.h> | ||
42 | #include <asm/io.h> | ||
43 | #include <asm/reg.h> | ||
44 | #include <asm/xmon.h> | ||
45 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
46 | #include <asm/backlight.h> | ||
47 | #endif | ||
48 | #include <asm/perfmon.h> | ||
49 | |||
50 | #ifdef CONFIG_DEBUGGER | ||
51 | int (*__debugger)(struct pt_regs *regs); | ||
52 | int (*__debugger_ipi)(struct pt_regs *regs); | ||
53 | int (*__debugger_bpt)(struct pt_regs *regs); | ||
54 | int (*__debugger_sstep)(struct pt_regs *regs); | ||
55 | int (*__debugger_iabr_match)(struct pt_regs *regs); | ||
56 | int (*__debugger_dabr_match)(struct pt_regs *regs); | ||
57 | int (*__debugger_fault_handler)(struct pt_regs *regs); | ||
58 | |||
59 | EXPORT_SYMBOL(__debugger); | ||
60 | EXPORT_SYMBOL(__debugger_ipi); | ||
61 | EXPORT_SYMBOL(__debugger_bpt); | ||
62 | EXPORT_SYMBOL(__debugger_sstep); | ||
63 | EXPORT_SYMBOL(__debugger_iabr_match); | ||
64 | EXPORT_SYMBOL(__debugger_dabr_match); | ||
65 | EXPORT_SYMBOL(__debugger_fault_handler); | ||
66 | #endif | ||
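A note on the `__debugger_*` hooks above: they are plain function pointers that a debugger back end fills in at init time; xmon, for example, wires itself up in a similar way. A minimal sketch of such a client, with hypothetical names (`my_debugger_entry`, `my_debugger_init`) that are not part of this patch:

```c
/*
 * Hypothetical debugger back end (not in this patch) hooking the
 * __debugger_* pointers declared above.  A non-zero return tells the
 * trap handlers that the event was consumed.
 */
#include <linux/init.h>
#include <asm/ptrace.h>

static int my_debugger_entry(struct pt_regs *regs)
{
	/* drop into the debugger, examine regs, etc. */
	return 1;
}

static void __init my_debugger_init(void)
{
	__debugger = my_debugger_entry;
	__debugger_bpt = my_debugger_entry;
	__debugger_sstep = my_debugger_entry;
	__debugger_fault_handler = my_debugger_entry;
}
```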
67 | |||
68 | struct notifier_block *powerpc_die_chain; | ||
69 | static DEFINE_SPINLOCK(die_notifier_lock); | ||
70 | |||
71 | int register_die_notifier(struct notifier_block *nb) | ||
72 | { | ||
73 | int err = 0; | ||
74 | unsigned long flags; | ||
75 | |||
76 | spin_lock_irqsave(&die_notifier_lock, flags); | ||
77 | err = notifier_chain_register(&powerpc_die_chain, nb); | ||
78 | spin_unlock_irqrestore(&die_notifier_lock, flags); | ||
79 | return err; | ||
80 | } | ||
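For completeness, a sketch of how a module would sit on this die chain. The callback signature comes from `struct notifier_block` in `<linux/notifier.h>`; the handler and variable names (`my_die_event`, `my_die_nb`) are made up for illustration:

```c
/*
 * Illustrative client (not in this patch) of register_die_notifier().
 * notify_die() walks powerpc_die_chain and invokes this callback.
 */
#include <linux/init.h>
#include <linux/notifier.h>

static int my_die_event(struct notifier_block *nb, unsigned long val,
			void *data)
{
	/* log or react to the die/trap event described by 'data' */
	return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
	.notifier_call = my_die_event,
};

static int __init my_die_init(void)
{
	return register_die_notifier(&my_die_nb);
}
```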
81 | |||
82 | /* | ||
83 | * Trap & Exception support | ||
84 | */ | ||
85 | |||
86 | static DEFINE_SPINLOCK(die_lock); | ||
87 | |||
88 | int die(const char *str, struct pt_regs *regs, long err) | ||
89 | { | ||
90 | static int die_counter; | ||
91 | int nl = 0; | ||
92 | |||
93 | if (debugger(regs)) | ||
94 | return 1; | ||
95 | |||
96 | console_verbose(); | ||
97 | spin_lock_irq(&die_lock); | ||
98 | bust_spinlocks(1); | ||
99 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
100 | if (_machine == _MACH_Pmac) { | ||
101 | set_backlight_enable(1); | ||
102 | set_backlight_level(BACKLIGHT_MAX); | ||
103 | } | ||
104 | #endif | ||
105 | printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); | ||
106 | #ifdef CONFIG_PREEMPT | ||
107 | printk("PREEMPT "); | ||
108 | nl = 1; | ||
109 | #endif | ||
110 | #ifdef CONFIG_SMP | ||
111 | printk("SMP NR_CPUS=%d ", NR_CPUS); | ||
112 | nl = 1; | ||
113 | #endif | ||
114 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
115 | printk("DEBUG_PAGEALLOC "); | ||
116 | nl = 1; | ||
117 | #endif | ||
118 | #ifdef CONFIG_NUMA | ||
119 | printk("NUMA "); | ||
120 | nl = 1; | ||
121 | #endif | ||
122 | #ifdef CONFIG_PPC64 | ||
123 | switch (systemcfg->platform) { | ||
124 | case PLATFORM_PSERIES: | ||
125 | printk("PSERIES "); | ||
126 | nl = 1; | ||
127 | break; | ||
128 | case PLATFORM_PSERIES_LPAR: | ||
129 | printk("PSERIES LPAR "); | ||
130 | nl = 1; | ||
131 | break; | ||
132 | case PLATFORM_ISERIES_LPAR: | ||
133 | printk("ISERIES LPAR "); | ||
134 | nl = 1; | ||
135 | break; | ||
136 | case PLATFORM_POWERMAC: | ||
137 | printk("POWERMAC "); | ||
138 | nl = 1; | ||
139 | break; | ||
140 | case PLATFORM_BPA: | ||
141 | printk("BPA "); | ||
142 | nl = 1; | ||
143 | break; | ||
144 | } | ||
145 | #endif | ||
146 | if (nl) | ||
147 | printk("\n"); | ||
148 | print_modules(); | ||
149 | show_regs(regs); | ||
150 | bust_spinlocks(0); | ||
151 | spin_unlock_irq(&die_lock); | ||
152 | |||
153 | if (in_interrupt()) | ||
154 | panic("Fatal exception in interrupt"); | ||
155 | |||
156 | if (panic_on_oops) { | ||
157 | panic("Fatal exception"); | ||
158 | } | ||
159 | do_exit(err); | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | ||
165 | { | ||
166 | siginfo_t info; | ||
167 | |||
168 | if (!user_mode(regs)) { | ||
169 | if (die("Exception in kernel mode", regs, signr)) | ||
170 | return; | ||
171 | } | ||
172 | |||
173 | memset(&info, 0, sizeof(info)); | ||
174 | info.si_signo = signr; | ||
175 | info.si_code = code; | ||
176 | info.si_addr = (void __user *) addr; | ||
177 | force_sig_info(signr, &info, current); | ||
178 | |||
179 | /* | ||
180 | * Init gets no signals that it doesn't have a handler for. | ||
181 | * That's all very well, but if it has caused a synchronous | ||
182 | * exception and we ignore the resulting signal, it will just | ||
183 | * generate the same exception over and over again and we get | ||
184 | * nowhere. Better to kill it and let the kernel panic. | ||
185 | */ | ||
186 | if (current->pid == 1) { | ||
187 | __sighandler_t handler; | ||
188 | |||
189 | spin_lock_irq(¤t->sighand->siglock); | ||
190 | handler = current->sighand->action[signr-1].sa.sa_handler; | ||
191 | spin_unlock_irq(¤t->sighand->siglock); | ||
192 | if (handler == SIG_DFL) { | ||
193 | /* init has generated a synchronous exception | ||
194 | and it doesn't have a handler for the signal */ | ||
195 | printk(KERN_CRIT "init has generated signal %d " | ||
196 | "but has no handler for it\n", signr); | ||
197 | do_exit(signr); | ||
198 | } | ||
199 | } | ||
200 | } | ||
201 | |||
202 | #ifdef CONFIG_PPC64 | ||
203 | void system_reset_exception(struct pt_regs *regs) | ||
204 | { | ||
205 | /* See if any machine dependent calls */ | ||
206 | if (ppc_md.system_reset_exception) | ||
207 | ppc_md.system_reset_exception(regs); | ||
208 | |||
209 | die("System Reset", regs, SIGABRT); | ||
210 | |||
211 | /* Must die if the interrupt is not recoverable */ | ||
212 | if (!(regs->msr & MSR_RI)) | ||
213 | panic("Unrecoverable System Reset"); | ||
214 | |||
215 | /* What should we do here? We could issue a shutdown or hard reset. */ | ||
216 | } | ||
217 | #endif | ||
218 | |||
219 | /* | ||
220 | * I/O accesses can cause machine checks on powermacs. | ||
221 | * Check if the NIP corresponds to the address of a sync | ||
222 | * instruction for which there is an entry in the exception | ||
223 | * table. | ||
224 | * Note that the 601 only takes a machine check on TEA | ||
225 | * (transfer error ack) signal assertion, and does not | ||
226 | * set any of the top 16 bits of SRR1. | ||
227 | * -- paulus. | ||
228 | */ | ||
229 | static inline int check_io_access(struct pt_regs *regs) | ||
230 | { | ||
231 | #ifdef CONFIG_PPC_PMAC | ||
232 | unsigned long msr = regs->msr; | ||
233 | const struct exception_table_entry *entry; | ||
234 | unsigned int *nip = (unsigned int *)regs->nip; | ||
235 | |||
236 | if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) | ||
237 | && (entry = search_exception_tables(regs->nip)) != NULL) { | ||
238 | /* | ||
239 | * Check that it's a sync instruction, or somewhere | ||
240 | * in the twi; isync; nop sequence that inb/inw/inl uses. | ||
241 | * As the address is in the exception table | ||
242 | * we should be able to read the instr there. | ||
243 | * For the debug message, we look at the preceding | ||
244 | * load or store. | ||
245 | */ | ||
246 | if (*nip == 0x60000000) /* nop */ | ||
247 | nip -= 2; | ||
248 | else if (*nip == 0x4c00012c) /* isync */ | ||
249 | --nip; | ||
250 | if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { | ||
251 | /* sync or twi */ | ||
252 | unsigned int rb; | ||
253 | |||
254 | --nip; | ||
255 | rb = (*nip >> 11) & 0x1f; | ||
256 | printk(KERN_DEBUG "%s bad port %lx at %p\n", | ||
257 | (*nip & 0x100)? "OUT to": "IN from", | ||
258 | regs->gpr[rb] - _IO_BASE, nip); | ||
259 | regs->msr |= MSR_RI; | ||
260 | regs->nip = entry->fixup; | ||
261 | return 1; | ||
262 | } | ||
263 | } | ||
264 | #endif /* CONFIG_PPC_PMAC */ | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | ||
269 | /* On 4xx, the reason for the machine check or program exception | ||
270 | is in the ESR. */ | ||
271 | #define get_reason(regs) ((regs)->dsisr) | ||
272 | #ifndef CONFIG_FSL_BOOKE | ||
273 | #define get_mc_reason(regs) ((regs)->dsisr) | ||
274 | #else | ||
275 | #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) | ||
276 | #endif | ||
277 | #define REASON_FP ESR_FP | ||
278 | #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) | ||
279 | #define REASON_PRIVILEGED ESR_PPR | ||
280 | #define REASON_TRAP ESR_PTR | ||
281 | |||
282 | /* single-step stuff */ | ||
283 | #define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC) | ||
284 | #define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC) | ||
285 | |||
286 | #else | ||
287 | /* On non-4xx, the reason for the machine check or program | ||
288 | exception is in the MSR. */ | ||
289 | #define get_reason(regs) ((regs)->msr) | ||
290 | #define get_mc_reason(regs) ((regs)->msr) | ||
291 | #define REASON_FP 0x100000 | ||
292 | #define REASON_ILLEGAL 0x80000 | ||
293 | #define REASON_PRIVILEGED 0x40000 | ||
294 | #define REASON_TRAP 0x20000 | ||
295 | |||
296 | #define single_stepping(regs) ((regs)->msr & MSR_SE) | ||
297 | #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) | ||
298 | #endif | ||
299 | |||
300 | /* | ||
301 | * This is "fall-back" implementation for configurations | ||
302 | * which don't provide platform-specific machine check info | ||
303 | */ | ||
304 | void __attribute__ ((weak)) | ||
305 | platform_machine_check(struct pt_regs *regs) | ||
306 | { | ||
307 | } | ||
308 | |||
309 | void MachineCheckException(struct pt_regs *regs) | ||
310 | { | ||
311 | #ifdef CONFIG_PPC64 | ||
312 | int recover = 0; | ||
313 | |||
314 | /* See if any machine dependent calls */ | ||
315 | if (ppc_md.machine_check_exception) | ||
316 | recover = ppc_md.machine_check_exception(regs); | ||
317 | |||
318 | if (recover) | ||
319 | return; | ||
320 | #else | ||
321 | unsigned long reason = get_mc_reason(regs); | ||
322 | |||
323 | if (user_mode(regs)) { | ||
324 | regs->msr |= MSR_RI; | ||
325 | _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); | ||
326 | return; | ||
327 | } | ||
328 | |||
329 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) | ||
330 | /* the qspan pci read routines can cause machine checks -- Cort */ | ||
331 | bad_page_fault(regs, regs->dar, SIGBUS); | ||
332 | return; | ||
333 | #endif | ||
334 | |||
335 | if (debugger_fault_handler(regs)) { | ||
336 | regs->msr |= MSR_RI; | ||
337 | return; | ||
338 | } | ||
339 | |||
340 | if (check_io_access(regs)) | ||
341 | return; | ||
342 | |||
343 | #if defined(CONFIG_4xx) && !defined(CONFIG_440A) | ||
344 | if (reason & ESR_IMCP) { | ||
345 | printk("Instruction"); | ||
346 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); | ||
347 | } else | ||
348 | printk("Data"); | ||
349 | printk(" machine check in kernel mode.\n"); | ||
350 | #elif defined(CONFIG_440A) | ||
351 | printk("Machine check in kernel mode.\n"); | ||
352 | if (reason & ESR_IMCP){ | ||
353 | printk("Instruction Synchronous Machine Check exception\n"); | ||
354 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); | ||
355 | } | ||
356 | else { | ||
357 | u32 mcsr = mfspr(SPRN_MCSR); | ||
358 | if (mcsr & MCSR_IB) | ||
359 | printk("Instruction Read PLB Error\n"); | ||
360 | if (mcsr & MCSR_DRB) | ||
361 | printk("Data Read PLB Error\n"); | ||
362 | if (mcsr & MCSR_DWB) | ||
363 | printk("Data Write PLB Error\n"); | ||
364 | if (mcsr & MCSR_TLBP) | ||
365 | printk("TLB Parity Error\n"); | ||
366 | if (mcsr & MCSR_ICP){ | ||
367 | flush_instruction_cache(); | ||
368 | printk("I-Cache Parity Error\n"); | ||
369 | } | ||
370 | if (mcsr & MCSR_DCSP) | ||
371 | printk("D-Cache Search Parity Error\n"); | ||
372 | if (mcsr & MCSR_DCFP) | ||
373 | printk("D-Cache Flush Parity Error\n"); | ||
374 | if (mcsr & MCSR_IMPE) | ||
375 | printk("Machine Check exception is imprecise\n"); | ||
376 | |||
377 | /* Clear MCSR */ | ||
378 | mtspr(SPRN_MCSR, mcsr); | ||
379 | } | ||
380 | #elif defined (CONFIG_E500) | ||
381 | printk("Machine check in kernel mode.\n"); | ||
382 | printk("Caused by (from MCSR=%lx): ", reason); | ||
383 | |||
384 | if (reason & MCSR_MCP) | ||
385 | printk("Machine Check Signal\n"); | ||
386 | if (reason & MCSR_ICPERR) | ||
387 | printk("Instruction Cache Parity Error\n"); | ||
388 | if (reason & MCSR_DCP_PERR) | ||
389 | printk("Data Cache Push Parity Error\n"); | ||
390 | if (reason & MCSR_DCPERR) | ||
391 | printk("Data Cache Parity Error\n"); | ||
392 | if (reason & MCSR_GL_CI) | ||
393 | printk("Guarded Load or Cache-Inhibited stwcx.\n"); | ||
394 | if (reason & MCSR_BUS_IAERR) | ||
395 | printk("Bus - Instruction Address Error\n"); | ||
396 | if (reason & MCSR_BUS_RAERR) | ||
397 | printk("Bus - Read Address Error\n"); | ||
398 | if (reason & MCSR_BUS_WAERR) | ||
399 | printk("Bus - Write Address Error\n"); | ||
400 | if (reason & MCSR_BUS_IBERR) | ||
401 | printk("Bus - Instruction Data Error\n"); | ||
402 | if (reason & MCSR_BUS_RBERR) | ||
403 | printk("Bus - Read Data Bus Error\n"); | ||
404 | if (reason & MCSR_BUS_WBERR) | ||
405 | printk("Bus - Read Data Bus Error\n"); | ||
406 | if (reason & MCSR_BUS_IPERR) | ||
407 | printk("Bus - Instruction Parity Error\n"); | ||
408 | if (reason & MCSR_BUS_RPERR) | ||
409 | printk("Bus - Read Parity Error\n"); | ||
410 | #elif defined (CONFIG_E200) | ||
411 | printk("Machine check in kernel mode.\n"); | ||
412 | printk("Caused by (from MCSR=%lx): ", reason); | ||
413 | |||
414 | if (reason & MCSR_MCP) | ||
415 | printk("Machine Check Signal\n"); | ||
416 | if (reason & MCSR_CP_PERR) | ||
417 | printk("Cache Push Parity Error\n"); | ||
418 | if (reason & MCSR_CPERR) | ||
419 | printk("Cache Parity Error\n"); | ||
420 | if (reason & MCSR_EXCP_ERR) | ||
421 | printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); | ||
422 | if (reason & MCSR_BUS_IRERR) | ||
423 | printk("Bus - Read Bus Error on instruction fetch\n"); | ||
424 | if (reason & MCSR_BUS_DRERR) | ||
425 | printk("Bus - Read Bus Error on data load\n"); | ||
426 | if (reason & MCSR_BUS_WRERR) | ||
427 | printk("Bus - Write Bus Error on buffered store or cache line push\n"); | ||
428 | #else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */ | ||
429 | printk("Machine check in kernel mode.\n"); | ||
430 | printk("Caused by (from SRR1=%lx): ", reason); | ||
431 | switch (reason & 0x601F0000) { | ||
432 | case 0x80000: | ||
433 | printk("Machine check signal\n"); | ||
434 | break; | ||
435 | case 0: /* for 601 */ | ||
436 | case 0x40000: | ||
437 | case 0x140000: /* 7450 MSS error and TEA */ | ||
438 | printk("Transfer error ack signal\n"); | ||
439 | break; | ||
440 | case 0x20000: | ||
441 | printk("Data parity error signal\n"); | ||
442 | break; | ||
443 | case 0x10000: | ||
444 | printk("Address parity error signal\n"); | ||
445 | break; | ||
446 | case 0x20000000: | ||
447 | printk("L1 Data Cache error\n"); | ||
448 | break; | ||
449 | case 0x40000000: | ||
450 | printk("L1 Instruction Cache error\n"); | ||
451 | break; | ||
452 | case 0x00100000: | ||
453 | printk("L2 data cache parity error\n"); | ||
454 | break; | ||
455 | default: | ||
456 | printk("Unknown values in msr\n"); | ||
457 | } | ||
458 | #endif /* CONFIG_4xx */ | ||
459 | |||
460 | /* | ||
461 | * Optional platform-provided routine to print out | ||
462 | * additional info, e.g. bus error registers. | ||
463 | */ | ||
464 | platform_machine_check(regs); | ||
465 | #endif /* CONFIG_PPC64 */ | ||
466 | |||
467 | if (debugger_fault_handler(regs)) | ||
468 | return; | ||
469 | die("Machine check", regs, SIGBUS); | ||
470 | |||
471 | /* Must die if the interrupt is not recoverable */ | ||
472 | if (!(regs->msr & MSR_RI)) | ||
473 | panic("Unrecoverable Machine check"); | ||
474 | } | ||
475 | |||
476 | void SMIException(struct pt_regs *regs) | ||
477 | { | ||
478 | die("System Management Interrupt", regs, SIGABRT); | ||
479 | } | ||
480 | |||
481 | void UnknownException(struct pt_regs *regs) | ||
482 | { | ||
483 | printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", | ||
484 | regs->nip, regs->msr, regs->trap); | ||
485 | |||
486 | _exception(SIGTRAP, regs, 0, 0); | ||
487 | } | ||
488 | |||
489 | void InstructionBreakpoint(struct pt_regs *regs) | ||
490 | { | ||
491 | if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, | ||
492 | 5, SIGTRAP) == NOTIFY_STOP) | ||
493 | return; | ||
494 | if (debugger_iabr_match(regs)) | ||
495 | return; | ||
496 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); | ||
497 | } | ||
498 | |||
499 | void RunModeException(struct pt_regs *regs) | ||
500 | { | ||
501 | _exception(SIGTRAP, regs, 0, 0); | ||
502 | } | ||
503 | |||
504 | void SingleStepException(struct pt_regs *regs) | ||
505 | { | ||
506 | regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */ | ||
507 | |||
508 | if (notify_die(DIE_SSTEP, "single_step", regs, 5, | ||
509 | 5, SIGTRAP) == NOTIFY_STOP) | ||
510 | return; | ||
511 | if (debugger_sstep(regs)) | ||
512 | return; | ||
513 | |||
514 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); | ||
515 | } | ||
516 | |||
517 | /* | ||
518 | * After we have successfully emulated an instruction, we have to | ||
519 | * check if the instruction was being single-stepped, and if so, | ||
520 | * pretend we got a single-step exception. This was pointed out | ||
521 | * by Kumar Gala. -- paulus | ||
522 | */ | ||
523 | static void emulate_single_step(struct pt_regs *regs) | ||
524 | { | ||
525 | if (single_stepping(regs)) { | ||
526 | clear_single_step(regs); | ||
527 | _exception(SIGTRAP, regs, TRAP_TRACE, 0); | ||
528 | } | ||
529 | } | ||
530 | |||
531 | /* Illegal instruction emulation support. Originally written to | ||
532 | * provide the PVR to user applications using the mfspr rd, PVR. | ||
533 | * Return non-zero if we can't emulate, or -EFAULT if the associated | ||
534 | * memory access caused an access fault. Return zero on success. | ||
535 | * | ||
536 | * There are a couple of ways to do this, either "decode" the instruction | ||
537 | * or directly match lots of bits. In this case, matching lots of | ||
538 | * bits is faster and easier. | ||
539 | * | ||
540 | */ | ||
541 | #define INST_MFSPR_PVR 0x7c1f42a6 | ||
542 | #define INST_MFSPR_PVR_MASK 0xfc1fffff | ||
543 | |||
544 | #define INST_DCBA 0x7c0005ec | ||
545 | #define INST_DCBA_MASK 0x7c0007fe | ||
546 | |||
547 | #define INST_MCRXR 0x7c000400 | ||
548 | #define INST_MCRXR_MASK 0x7c0007fe | ||
549 | |||
550 | #define INST_STRING 0x7c00042a | ||
551 | #define INST_STRING_MASK 0x7c0007fe | ||
552 | #define INST_STRING_GEN_MASK 0x7c00067e | ||
553 | #define INST_LSWI 0x7c0004aa | ||
554 | #define INST_LSWX 0x7c00042a | ||
555 | #define INST_STSWI 0x7c0005aa | ||
556 | #define INST_STSWX 0x7c00052a | ||
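As a standalone illustration of the match-under-mask idea described in the comment above: the constants are the ones defined here, while the rD = 3 example word is constructed for the demo and does not appear in the patch.

```c
/*
 * Userspace sketch of the mask/match decode used by
 * emulate_instruction(): INST_MFSPR_PVR encodes "mfspr r0,PVR", and the
 * mask ignores only the rD field (bits 21-25 of the word), so any rD
 * matches and can then be extracted with a shift.
 */
#include <stdio.h>
#include <stdint.h>

#define INST_MFSPR_PVR		0x7c1f42a6
#define INST_MFSPR_PVR_MASK	0xfc1fffff

int main(void)
{
	uint32_t instword = INST_MFSPR_PVR | (3u << 21);	/* mfspr r3,PVR */

	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
		uint32_t rd = (instword >> 21) & 0x1f;
		printf("mfspr r%u,PVR\n", (unsigned)rd);	/* -> r3 */
	}
	return 0;
}
```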
557 | |||
558 | static int emulate_string_inst(struct pt_regs *regs, u32 instword) | ||
559 | { | ||
560 | u8 rT = (instword >> 21) & 0x1f; | ||
561 | u8 rA = (instword >> 16) & 0x1f; | ||
562 | u8 NB_RB = (instword >> 11) & 0x1f; | ||
563 | u32 num_bytes; | ||
564 | unsigned long EA; | ||
565 | int pos = 0; | ||
566 | |||
567 | /* Early out if we are an invalid form of lswx */ | ||
568 | if ((instword & INST_STRING_MASK) == INST_LSWX) | ||
569 | if ((rT == rA) || (rT == NB_RB)) | ||
570 | return -EINVAL; | ||
571 | |||
572 | EA = (rA == 0) ? 0 : regs->gpr[rA]; | ||
573 | |||
574 | switch (instword & INST_STRING_MASK) { | ||
575 | case INST_LSWX: | ||
576 | case INST_STSWX: | ||
577 | EA += NB_RB; | ||
578 | num_bytes = regs->xer & 0x7f; | ||
579 | break; | ||
580 | case INST_LSWI: | ||
581 | case INST_STSWI: | ||
582 | num_bytes = (NB_RB == 0) ? 32 : NB_RB; | ||
583 | break; | ||
584 | default: | ||
585 | return -EINVAL; | ||
586 | } | ||
587 | |||
588 | while (num_bytes != 0) | ||
589 | { | ||
590 | u8 val; | ||
591 | u32 shift = 8 * (3 - (pos & 0x3)); | ||
592 | |||
593 | switch ((instword & INST_STRING_MASK)) { | ||
594 | case INST_LSWX: | ||
595 | case INST_LSWI: | ||
596 | if (get_user(val, (u8 __user *)EA)) | ||
597 | return -EFAULT; | ||
598 | /* first time updating this reg, | ||
599 | * zero it out */ | ||
600 | if (pos == 0) | ||
601 | regs->gpr[rT] = 0; | ||
602 | regs->gpr[rT] |= val << shift; | ||
603 | break; | ||
604 | case INST_STSWI: | ||
605 | case INST_STSWX: | ||
606 | val = regs->gpr[rT] >> shift; | ||
607 | if (put_user(val, (u8 __user *)EA)) | ||
608 | return -EFAULT; | ||
609 | break; | ||
610 | } | ||
611 | /* move EA to next address */ | ||
612 | EA += 1; | ||
613 | num_bytes--; | ||
614 | |||
615 | /* manage our position within the register */ | ||
616 | if (++pos == 4) { | ||
617 | pos = 0; | ||
618 | if (++rT == 32) | ||
619 | rT = 0; | ||
620 | } | ||
621 | } | ||
622 | |||
623 | return 0; | ||
624 | } | ||
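The byte-packing rule inside that loop, shift = 8 * (3 - (pos & 0x3)), fills each register most-significant byte first. A tiny userspace sketch of just that rule, with made-up data:

```c
/*
 * Standalone demonstration (not in this patch) of the big-endian
 * byte packing performed by emulate_string_inst() for lswi/lswx.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t src[4] = { 0xde, 0xad, 0xbe, 0xef };
	uint32_t reg = 0;

	for (int pos = 0; pos < 4; pos++) {
		uint32_t shift = 8 * (3 - (pos & 0x3));
		reg |= (uint32_t)src[pos] << shift;
	}
	printf("reg = 0x%08x\n", (unsigned)reg);	/* 0xdeadbeef */
	return 0;
}
```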
625 | |||
626 | static int emulate_instruction(struct pt_regs *regs) | ||
627 | { | ||
628 | u32 instword; | ||
629 | u32 rd; | ||
630 | |||
631 | if (!user_mode(regs)) | ||
632 | return -EINVAL; | ||
633 | CHECK_FULL_REGS(regs); | ||
634 | |||
635 | if (get_user(instword, (u32 __user *)(regs->nip))) | ||
636 | return -EFAULT; | ||
637 | |||
638 | /* Emulate the mfspr rD, PVR. */ | ||
639 | if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) { | ||
640 | rd = (instword >> 21) & 0x1f; | ||
641 | regs->gpr[rd] = mfspr(SPRN_PVR); | ||
642 | return 0; | ||
643 | } | ||
644 | |||
645 | /* Emulating the dcba insn is just a no-op. */ | ||
646 | if ((instword & INST_DCBA_MASK) == INST_DCBA) | ||
647 | return 0; | ||
648 | |||
649 | /* Emulate the mcrxr insn. */ | ||
650 | if ((instword & INST_MCRXR_MASK) == INST_MCRXR) { | ||
651 | int shift = (instword >> 21) & 0x1c; | ||
652 | unsigned long msk = 0xf0000000UL >> shift; | ||
653 | |||
654 | regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); | ||
655 | regs->xer &= ~0xf0000000UL; | ||
656 | return 0; | ||
657 | } | ||
658 | |||
659 | /* Emulate load/store string insn. */ | ||
660 | if ((instword & INST_STRING_GEN_MASK) == INST_STRING) | ||
661 | return emulate_string_inst(regs, instword); | ||
662 | |||
663 | return -EINVAL; | ||
664 | } | ||
665 | |||
666 | /* | ||
667 | * Look through the list of trap instructions that are used for BUG(), | ||
668 | * BUG_ON() and WARN_ON() and see if we hit one. At this point we know | ||
669 | * that the exception was caused by a trap instruction of some kind. | ||
670 | * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0 | ||
671 | * otherwise. | ||
672 | */ | ||
673 | extern struct bug_entry __start___bug_table[], __stop___bug_table[]; | ||
674 | |||
675 | #ifndef CONFIG_MODULES | ||
676 | #define module_find_bug(x) NULL | ||
677 | #endif | ||
678 | |||
679 | struct bug_entry *find_bug(unsigned long bugaddr) | ||
680 | { | ||
681 | struct bug_entry *bug; | ||
682 | |||
683 | for (bug = __start___bug_table; bug < __stop___bug_table; ++bug) | ||
684 | if (bugaddr == bug->bug_addr) | ||
685 | return bug; | ||
686 | return module_find_bug(bugaddr); | ||
687 | } | ||
688 | |||
689 | int check_bug_trap(struct pt_regs *regs) | ||
690 | { | ||
691 | struct bug_entry *bug; | ||
692 | unsigned long addr; | ||
693 | |||
694 | if (regs->msr & MSR_PR) | ||
695 | return 0; /* not in kernel */ | ||
696 | addr = regs->nip; /* address of trap instruction */ | ||
697 | if (addr < PAGE_OFFSET) | ||
698 | return 0; | ||
699 | bug = find_bug(regs->nip); | ||
700 | if (bug == NULL) | ||
701 | return 0; | ||
702 | if (bug->line & BUG_WARNING_TRAP) { | ||
703 | /* this is a WARN_ON rather than BUG/BUG_ON */ | ||
704 | #ifdef CONFIG_XMON | ||
705 | xmon_printf(KERN_ERR "Badness in %s at %s:%d\n", | ||
706 | bug->function, bug->file, | ||
707 | bug->line & ~BUG_WARNING_TRAP); | ||
708 | #endif /* CONFIG_XMON */ | ||
709 | printk(KERN_ERR "Badness in %s at %s:%d\n", | ||
710 | bug->function, bug->file, | ||
711 | bug->line & ~BUG_WARNING_TRAP); | ||
712 | dump_stack(); | ||
713 | return 1; | ||
714 | } | ||
715 | #ifdef CONFIG_XMON | ||
716 | xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n", | ||
717 | bug->function, bug->file, bug->line); | ||
718 | xmon(regs); | ||
719 | #endif /* CONFIG_XMON */ | ||
720 | printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n", | ||
721 | bug->function, bug->file, bug->line); | ||
722 | |||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | void ProgramCheckException(struct pt_regs *regs) | ||
727 | { | ||
728 | unsigned int reason = get_reason(regs); | ||
729 | extern int do_mathemu(struct pt_regs *regs); | ||
730 | |||
731 | #ifdef CONFIG_MATH_EMULATION | ||
732 | /* (reason & REASON_ILLEGAL) would be the obvious thing here, | ||
733 | * but there seems to be a hardware bug on the 405GP (RevD) | ||
734 | * that means ESR is sometimes set incorrectly - either to | ||
735 | * ESR_DST (!?) or 0. In the process of chasing this with the | ||
736 | * hardware people - not sure if it can happen on any illegal | ||
737 | * instruction or only on FP instructions, whether there is a | ||
738 | * pattern to occurrences etc. -dgibson 31/Mar/2003 */ ||
739 | if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) { | ||
740 | emulate_single_step(regs); | ||
741 | return; | ||
742 | } | ||
743 | #endif /* CONFIG_MATH_EMULATION */ | ||
744 | |||
745 | if (reason & REASON_FP) { | ||
746 | /* IEEE FP exception */ | ||
747 | int code = 0; | ||
748 | u32 fpscr; | ||
749 | |||
750 | /* We must make sure the FP state is consistent with | ||
751 | * our MSR_FP in regs | ||
752 | */ | ||
753 | preempt_disable(); | ||
754 | if (regs->msr & MSR_FP) | ||
755 | giveup_fpu(current); | ||
756 | preempt_enable(); | ||
757 | |||
758 | fpscr = current->thread.fpscr; | ||
759 | fpscr &= fpscr << 22; /* mask summary bits with enables */ | ||
760 | if (fpscr & FPSCR_VX) | ||
761 | code = FPE_FLTINV; | ||
762 | else if (fpscr & FPSCR_OX) | ||
763 | code = FPE_FLTOVF; | ||
764 | else if (fpscr & FPSCR_UX) | ||
765 | code = FPE_FLTUND; | ||
766 | else if (fpscr & FPSCR_ZX) | ||
767 | code = FPE_FLTDIV; | ||
768 | else if (fpscr & FPSCR_XX) | ||
769 | code = FPE_FLTRES; | ||
770 | _exception(SIGFPE, regs, code, regs->nip); | ||
771 | return; | ||
772 | } | ||
773 | |||
774 | if (reason & REASON_TRAP) { | ||
775 | /* trap exception */ | ||
776 | if (debugger_bpt(regs)) | ||
777 | return; | ||
778 | if (check_bug_trap(regs)) { | ||
779 | regs->nip += 4; | ||
780 | return; | ||
781 | } | ||
782 | _exception(SIGTRAP, regs, TRAP_BRKPT, 0); | ||
783 | return; | ||
784 | } | ||
785 | |||
786 | /* Try to emulate it if we should. */ | ||
787 | if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { | ||
788 | switch (emulate_instruction(regs)) { | ||
789 | case 0: | ||
790 | regs->nip += 4; | ||
791 | emulate_single_step(regs); | ||
792 | return; | ||
793 | case -EFAULT: | ||
794 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); | ||
795 | return; | ||
796 | } | ||
797 | } | ||
798 | |||
799 | if (reason & REASON_PRIVILEGED) | ||
800 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); | ||
801 | else | ||
802 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | ||
803 | } | ||
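The `fpscr &= fpscr << 22` line above works because each FPSCR exception-enable bit sits 22 bit positions below its corresponding status bit, so shifting the enables up turns them into a mask over the status bits. A small sketch under that assumption, with the two relevant bit masks written out locally rather than taken from kernel headers:

```c
/*
 * Sketch (not in this patch) of the FPSCR enable/status alignment that
 * "fpscr &= fpscr << 22" relies on: OE (0x40) shifted left by 22 lands
 * exactly on OX (0x10000000), and likewise for the other pairs.
 */
#include <stdio.h>
#include <stdint.h>

#define MY_FPSCR_OX	0x10000000u	/* overflow exception status */
#define MY_FPSCR_OE	0x00000040u	/* overflow exception enable */

int main(void)
{
	uint32_t fpscr = MY_FPSCR_OX | MY_FPSCR_OE;	/* overflow occurred, enabled */
	uint32_t pending = fpscr & (fpscr << 22);

	if (pending & MY_FPSCR_OX)
		printf("enabled overflow -> FPE_FLTOVF\n");
	return 0;
}
```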
804 | |||
805 | void AlignmentException(struct pt_regs *regs) | ||
806 | { | ||
807 | int fixed; | ||
808 | |||
809 | fixed = fix_alignment(regs); | ||
810 | |||
811 | if (fixed == 1) { | ||
812 | regs->nip += 4; /* skip over emulated instruction */ | ||
813 | emulate_single_step(regs); | ||
814 | return; | ||
815 | } | ||
816 | |||
817 | /* Operand address was bad */ | ||
818 | if (fixed == -EFAULT) { | ||
819 | if (user_mode(regs)) | ||
820 | _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar); | ||
821 | else | ||
822 | /* Search exception table */ | ||
823 | bad_page_fault(regs, regs->dar, SIGSEGV); | ||
824 | return; | ||
825 | } | ||
826 | _exception(SIGBUS, regs, BUS_ADRALN, regs->dar); | ||
827 | } | ||
828 | |||
829 | void StackOverflow(struct pt_regs *regs) | ||
830 | { | ||
831 | printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", | ||
832 | current, regs->gpr[1]); | ||
833 | debugger(regs); | ||
834 | show_regs(regs); | ||
835 | panic("kernel stack overflow"); | ||
836 | } | ||
837 | |||
838 | void nonrecoverable_exception(struct pt_regs *regs) | ||
839 | { | ||
840 | printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", | ||
841 | regs->nip, regs->msr); | ||
842 | debugger(regs); | ||
843 | die("nonrecoverable exception", regs, SIGKILL); | ||
844 | } | ||
845 | |||
846 | void trace_syscall(struct pt_regs *regs) | ||
847 | { | ||
848 | printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", | ||
849 | current, current->pid, regs->nip, regs->link, regs->gpr[0], | ||
850 | regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); | ||
851 | } | ||
852 | |||
853 | #ifdef CONFIG_8xx | ||
854 | void SoftwareEmulation(struct pt_regs *regs) | ||
855 | { | ||
856 | extern int do_mathemu(struct pt_regs *); | ||
857 | extern int Soft_emulate_8xx(struct pt_regs *); | ||
858 | int errcode; | ||
859 | |||
860 | CHECK_FULL_REGS(regs); | ||
861 | |||
862 | if (!user_mode(regs)) { | ||
863 | debugger(regs); | ||
864 | die("Kernel Mode Software FPU Emulation", regs, SIGFPE); | ||
865 | } | ||
866 | |||
867 | #ifdef CONFIG_MATH_EMULATION | ||
868 | errcode = do_mathemu(regs); | ||
869 | #else | ||
870 | errcode = Soft_emulate_8xx(regs); | ||
871 | #endif | ||
872 | if (errcode) { | ||
873 | if (errcode > 0) | ||
874 | _exception(SIGFPE, regs, 0, 0); | ||
875 | else if (errcode == -EFAULT) | ||
876 | _exception(SIGSEGV, regs, 0, 0); | ||
877 | else | ||
878 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | ||
879 | } else | ||
880 | emulate_single_step(regs); | ||
881 | } | ||
882 | #endif /* CONFIG_8xx */ | ||
883 | |||
884 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | ||
885 | |||
886 | void DebugException(struct pt_regs *regs, unsigned long debug_status) | ||
887 | { | ||
888 | if (debug_status & DBSR_IC) { /* instruction completion */ | ||
889 | regs->msr &= ~MSR_DE; | ||
890 | if (user_mode(regs)) { | ||
891 | current->thread.dbcr0 &= ~DBCR0_IC; | ||
892 | } else { | ||
893 | /* Disable instruction completion */ | ||
894 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); | ||
895 | /* Clear the instruction completion event */ | ||
896 | mtspr(SPRN_DBSR, DBSR_IC); | ||
897 | if (debugger_sstep(regs)) | ||
898 | return; | ||
899 | } | ||
900 | _exception(SIGTRAP, regs, TRAP_TRACE, 0); | ||
901 | } | ||
902 | } | ||
903 | #endif /* CONFIG_40x || CONFIG_BOOKE */ ||
904 | |||
905 | #if !defined(CONFIG_TAU_INT) | ||
906 | void TAUException(struct pt_regs *regs) | ||
907 | { | ||
908 | printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", | ||
909 | regs->nip, regs->msr, regs->trap, print_tainted()); | ||
910 | } | ||
911 | #endif /* CONFIG_TAU_INT */ ||
912 | |||
913 | void AltivecUnavailException(struct pt_regs *regs) | ||
914 | { | ||
915 | static int kernel_altivec_count; | ||
916 | |||
917 | #ifndef CONFIG_ALTIVEC | ||
918 | if (user_mode(regs)) { | ||
919 | /* A user program has executed an altivec instruction, | ||
920 | but this kernel doesn't support altivec. */ | ||
921 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | ||
922 | return; | ||
923 | } | ||
924 | #endif | ||
925 | /* The kernel has executed an altivec instruction without | ||
926 | first enabling altivec. Whinge but let it do it. */ | ||
927 | if (++kernel_altivec_count < 10) | ||
928 | printk(KERN_ERR "AltiVec used in kernel (task=%p, pc=%lx)\n", | ||
929 | current, regs->nip); | ||
930 | regs->msr |= MSR_VEC; | ||
931 | } | ||
932 | |||
933 | #ifdef CONFIG_ALTIVEC | ||
934 | void AltivecAssistException(struct pt_regs *regs) | ||
935 | { | ||
936 | int err; | ||
937 | |||
938 | preempt_disable(); | ||
939 | if (regs->msr & MSR_VEC) | ||
940 | giveup_altivec(current); | ||
941 | preempt_enable(); | ||
942 | if (!user_mode(regs)) { | ||
943 | printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" | ||
944 | " at %lx\n", regs->nip); | ||
945 | die("Kernel Altivec assist exception", regs, SIGILL); | ||
946 | } | ||
947 | |||
948 | err = emulate_altivec(regs); | ||
949 | if (err == 0) { | ||
950 | regs->nip += 4; /* skip emulated instruction */ | ||
951 | emulate_single_step(regs); | ||
952 | return; | ||
953 | } | ||
954 | |||
955 | if (err == -EFAULT) { | ||
956 | /* got an error reading the instruction */ | ||
957 | _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); | ||
958 | } else { | ||
959 | /* didn't recognize the instruction */ | ||
960 | /* XXX quick hack for now: set the non-Java bit in the VSCR */ | ||
961 | if (printk_ratelimit()) | ||
962 | printk(KERN_ERR "Unrecognized altivec instruction " | ||
963 | "in %s at %lx\n", current->comm, regs->nip); | ||
964 | current->thread.vscr.u[3] |= 0x10000; | ||
965 | } | ||
966 | } | ||
967 | #endif /* CONFIG_ALTIVEC */ | ||
968 | |||
969 | #ifdef CONFIG_E500 | ||
970 | void PerformanceMonitorException(struct pt_regs *regs) | ||
971 | { | ||
972 | perf_irq(regs); | ||
973 | } | ||
974 | #endif | ||
975 | |||
976 | #ifdef CONFIG_FSL_BOOKE | ||
977 | void CacheLockingException(struct pt_regs *regs, unsigned long address, | ||
978 | unsigned long error_code) | ||
979 | { | ||
980 | /* We treat cache locking instructions from the user | ||
981 | * as priv ops; in the future we could try to do ||
982 | * something smarter | ||
983 | */ | ||
984 | if (error_code & (ESR_DLK|ESR_ILK)) | ||
985 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); | ||
986 | return; | ||
987 | } | ||
988 | #endif /* CONFIG_FSL_BOOKE */ | ||
989 | |||
990 | #ifdef CONFIG_SPE | ||
991 | void SPEFloatingPointException(struct pt_regs *regs) | ||
992 | { | ||
993 | unsigned long spefscr; | ||
994 | int fpexc_mode; | ||
995 | int code = 0; | ||
996 | |||
997 | spefscr = current->thread.spefscr; | ||
998 | fpexc_mode = current->thread.fpexc_mode; | ||
999 | |||
1000 | /* Hardware does not necessarily set sticky ||
1001 | * underflow/overflow/invalid flags */ | ||
1002 | if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { | ||
1003 | code = FPE_FLTOVF; | ||
1004 | spefscr |= SPEFSCR_FOVFS; | ||
1005 | } | ||
1006 | else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { | ||
1007 | code = FPE_FLTUND; | ||
1008 | spefscr |= SPEFSCR_FUNFS; | ||
1009 | } | ||
1010 | else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) | ||
1011 | code = FPE_FLTDIV; | ||
1012 | else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { | ||
1013 | code = FPE_FLTINV; | ||
1014 | spefscr |= SPEFSCR_FINVS; | ||
1015 | } | ||
1016 | else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) | ||
1017 | code = FPE_FLTRES; | ||
1018 | |||
1019 | current->thread.spefscr = spefscr; | ||
1020 | |||
1021 | _exception(SIGFPE, regs, code, regs->nip); | ||
1022 | return; | ||
1023 | } | ||
1024 | #endif | ||
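The PR_FP_EXC_* bits tested in that handler are the ones a task selects with prctl(PR_SET_FPEXC, ...). A hedged userspace sketch of requesting them; the exact set of mode flags a given platform honours may differ:

```c
/*
 * Userspace sketch (not part of this patch): request SIGFPE delivery for
 * overflow and divide-by-zero, which sets the PR_FP_EXC_OVF /
 * PR_FP_EXC_DIV bits consulted by SPEFloatingPointException() above.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	if (prctl(PR_SET_FPEXC,
		  PR_FP_EXC_PRECISE | PR_FP_EXC_OVF | PR_FP_EXC_DIV) != 0)
		perror("prctl(PR_SET_FPEXC)");
	return 0;
}
```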
1025 | |||
1026 | #ifdef CONFIG_BOOKE_WDT | ||
1027 | /* | ||
1028 | * Default handler for a Watchdog exception, | ||
1029 | * spins until a reboot occurs | ||
1030 | */ | ||
1031 | void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) | ||
1032 | { | ||
1033 | /* Generic WatchdogHandler, implement your own */ | ||
1034 | mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); | ||
1035 | return; | ||
1036 | } | ||
1037 | |||
1038 | void WatchdogException(struct pt_regs *regs) | ||
1039 | { | ||
1040 | printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); | ||
1041 | WatchdogHandler(regs); | ||
1042 | } | ||
1043 | #endif | ||
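Since WatchdogHandler() above is declared weak, a platform overrides it simply by providing a strong definition. A hypothetical board-file sketch, assuming a Book-E build where SPRN_TCR and TCR_WIE are available (as in the fallback above); the recovery action shown is illustrative:

```c
/*
 * Hypothetical board code (not in this patch) overriding the weak
 * WatchdogHandler() above.  The strong definition wins at link time, so
 * WatchdogException() ends up calling this version instead.
 */
#include <asm/ptrace.h>
#include <asm/reg.h>

void WatchdogHandler(struct pt_regs *regs)
{
	/* board-specific recovery; here just mask further watchdog
	 * interrupts, like the generic fallback does */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
}
```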
1044 | |||
1045 | void __init trap_init(void) | ||
1046 | { | ||
1047 | } | ||