Diffstat (limited to 'arch/i386/kernel/traps.c')

 arch/i386/kernel/traps.c | 1084 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 1084 insertions(+), 0 deletions(-)
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
new file mode 100644
index 000000000000..6c0e383915b6
--- /dev/null
+++ b/arch/i386/kernel/traps.c
@@ -0,0 +1,1084 @@
/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>

#include <asm/smp.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>

#include <linux/irq.h>
#include <linux/module.h>

#include "mach_traps.h"

asmlinkage int system_call(void);

struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
                { 0, 0 }, { 0, 0 } };

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

static int kstack_depth_to_print = 24;
struct notifier_block *i386die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
        int err = 0;
        unsigned long flags;
        spin_lock_irqsave(&die_notifier_lock, flags);
        err = notifier_chain_register(&i386die_chain, nb);
        spin_unlock_irqrestore(&die_notifier_lock, flags);
        return err;
}
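
/*
 * Illustrative sketch (hypothetical names, not part of the original
 * file) of how a client such as kprobes or a kernel debugger hooks
 * this chain; struct die_args is declared in asm/kdebug.h:
 *
 *      static int my_die_handler(struct notifier_block *self,
 *                                unsigned long val, void *data)
 *      {
 *              struct die_args *args = data;
 *              // inspect args->regs, args->trapnr, args->signr ...
 *              return NOTIFY_DONE;     // NOTIFY_STOP claims the event
 *      }
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_die_handler,
 *      };
 *      register_die_notifier(&my_nb);
 */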

static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
        return  p > (void *)tinfo &&
                p < (void *)tinfo + THREAD_SIZE - 3;
}

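/*
 * Frame layout assumed by the CONFIG_FRAME_POINTER walk below (the
 * standard gcc i386 prologue): *(ebp + 4) is the return address into
 * the caller and *ebp is the caller's saved %ebp, i.e. the next frame
 * in the chain. valid_stack_ptr() bounds the walk to the current
 * thread's stack; the "- 3" keeps the 4-byte reads from running past
 * its end.
 */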
static inline unsigned long print_context_stack(struct thread_info *tinfo,
                unsigned long *stack, unsigned long ebp)
{
        unsigned long addr;

#ifdef  CONFIG_FRAME_POINTER
        while (valid_stack_ptr(tinfo, (void *)ebp)) {
                addr = *(unsigned long *)(ebp + 4);
                printk(" [<%08lx>] ", addr);
                print_symbol("%s", addr);
                printk("\n");
                ebp = *(unsigned long *)ebp;
        }
#else
        while (valid_stack_ptr(tinfo, stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr)) {
                        printk(" [<%08lx>]", addr);
                        print_symbol(" %s", addr);
                        printk("\n");
                }
        }
#endif
        return ebp;
}

void show_trace(struct task_struct *task, unsigned long * stack)
{
        unsigned long ebp;

        if (!task)
                task = current;

        if (task == current) {
                /* Grab ebp right from our regs */
                asm ("movl %%ebp, %0" : "=r" (ebp) : );
        } else {
                /* ebp is the last reg pushed by switch_to */
                ebp = *(unsigned long *) task->thread.esp;
        }

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                ebp = print_context_stack(context, stack, ebp);
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
                printk(" =======================\n");
        }
}

void show_stack(struct task_struct *task, unsigned long *esp)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long*)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        for(i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n       ");
                printk("%08lx ", *stack++);
        }
        printk("\nCall Trace:\n");
        show_trace(task, esp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss;

        esp = (unsigned long) (&regs->esp);
        ss = __KERNEL_DS;
        if (regs->xcs & 3) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }
        print_modules();
        printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\nEFLAGS: %08lx"
                        "   (%s) \n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags, system_utsname.release);
        print_symbol("EIP is at %s\n", regs->eip);
        printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk("ds: %04x   es: %04x   ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, ss);
        printk("Process %s (pid: %d, threadinfo=%p task=%p)",
                current->comm, current->pid, current_thread_info(), current);
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                u8 *eip;

                printk("\nStack: ");
                show_stack(NULL, (unsigned long*)esp);

                printk("Code: ");

                eip = (u8 *)regs->eip - 43;
                for (i = 0; i < 64; i++, eip++) {
                        unsigned char c;

                        if (eip < (u8 *)PAGE_OFFSET || __get_user(c, eip)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (eip == (u8 *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}

static void handle_BUG(struct pt_regs *regs)
{
        unsigned short ud2;
        unsigned short line;
        char *file;
        char c;
        unsigned long eip;

        if (regs->xcs & 3)
                goto no_bug;            /* Not in kernel */

        eip = regs->eip;

        if (eip < PAGE_OFFSET)
                goto no_bug;
        if (__get_user(ud2, (unsigned short *)eip))
                goto no_bug;
        if (ud2 != 0x0b0f)
                goto no_bug;
        if (__get_user(line, (unsigned short *)(eip + 2)))
                goto bug;
        if (__get_user(file, (char **)(eip + 4)) ||
                (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
                file = "<bad filename>";

        printk("------------[ cut here ]------------\n");
        printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);

no_bug:
        return;

        /* Here we know it was a BUG but file-n-line is unavailable */
bug:
        printk("Kernel BUG\n");
}
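
/*
 * For reference, the byte layout handle_BUG() probes for is what the
 * i386 BUG() macro emits (with CONFIG_DEBUG_BUGVERBOSE), roughly:
 *
 *      ud2                     ; 0x0f 0x0b -- faults as an invalid opcode
 *      .word __LINE__          ; 16-bit line number
 *      .long __FILE__          ; pointer to the file name string
 *
 * hence the checks above for 0x0b0f at eip, the line at eip + 2 and
 * the file pointer at eip + 4.
 */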

void die(const char * str, struct pt_regs * regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock =                 SPIN_LOCK_UNLOCKED,
                .lock_owner =           -1,
                .lock_owner_depth =     0
        };
        static int die_counter;

        if (die.lock_owner != _smp_processor_id()) {
                console_verbose();
                spin_lock_irq(&die.lock);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        }

        if (++die.lock_owner_depth < 3) {
                int nl = 0;
                handle_BUG(regs);
                printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
                printk("PREEMPT ");
                nl = 1;
#endif
#ifdef CONFIG_SMP
                printk("SMP ");
                nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                printk("DEBUG_PAGEALLOC");
                nl = 1;
#endif
                if (nl)
                        printk("\n");
                notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
                show_registers(regs);
        } else
                printk(KERN_ERR "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        spin_unlock_irq(&die.lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }
        do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
                die(str, regs, err);
}

static void do_trap(int trapnr, int signr, char *str, int vm86,
                           struct pt_regs * regs, long error_code, siginfo_t *info)
{
        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!(regs->xcs & 3))
                goto kernel_trap;

        trap_signal: {
                struct task_struct *tsk = current;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
                if (ret) goto trap_signal;
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
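
/*
 * As an illustration, DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
 * below expands to a handler equivalent to:
 *
 *      fastcall void do_overflow(struct pt_regs *regs, long error_code)
 *      {
 *              if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *                              4, SIGSEGV) == NOTIFY_STOP)
 *                      return;
 *              do_trap(4, SIGSEGV, "overflow", 1, regs, error_code, NULL);
 *      }
 */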

DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

fastcall void do_general_protection(struct pt_regs * regs, long error_code)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;

        /*
         * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
         * invalid offset set (the LAZY one) and the faulting thread has
         * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
         * and we set the offset field correctly. Then we let the CPU
         * restart the faulting instruction.
         */
        if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map was extending to higher ports
                 * than the current one, pad extra space with 0xff (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max)
                        memset((char *) tss->io_bitmap +
                                thread->io_bitmap_max, 0xff,
                                tss->io_bitmap_max - thread->io_bitmap_max);
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
                put_cpu();
                return;
        }
        put_cpu();

        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!(regs->xcs & 3))
                goto gp_in_kernel;

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        if (!fixup_exception(regs)) {
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}

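/*
 * The NMI reason decoding below follows the legacy PC/AT system
 * control port B at I/O port 0x61: on a read, bit 7 reports a memory
 * parity error and bit 6 an I/O channel check (hence the 0xc0 test in
 * default_do_nmi()); on a write, setting bit 2 or bit 3 disables and
 * thereby clears the parity and IOCHK latches respectively.
 * get_nmi_reason() and clear_mem_error() from "mach_traps.h" wrap
 * these port accesses.
 */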
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}

static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
        unsigned long i;

        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        i = 2000;
        while (--i) udelay(1000);
        reason &= ~8;
        outb(reason, 0x61);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is. */
        if( MCA_bus ) {
                mca_handle_nmi();
                return;
        }
#endif
        printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                reason, smp_processor_id());
        printk("Dazed and confused, but trying to continue\n");
        printk("Do you have a strange power saving mode enabled?\n");
}

static DEFINE_SPINLOCK(nmi_print_lock);

void die_nmi (struct pt_regs *regs, const char *msg)
{
        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        bust_spinlocks(1);
        printk(msg);
        printk(" on CPU%d, eip %08lx, registers:\n",
                smp_processor_id(), regs->eip);
        show_registers(regs);
        printk("console shuts up ...\n");
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);
        do_exit(SIGSEGV);
}

static void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = 0;

        /* Only the BSP gets external NMIs from the system.  */
        if (!smp_processor_id())
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
                                                        == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog) {
                        nmi_watchdog_tick(regs);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
                return;
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}

static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
        return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

fastcall void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();
        ++nmi_count(cpu);

        if (!nmi_callback(regs, cpu))
                default_do_nmi(regs);

        nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
        nmi_callback = callback;
}

void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}
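
/*
 * The callback hook above is how a profiling client (e.g. oprofile's
 * NMI-based driver) takes over NMI handling: a nonzero return from the
 * registered callback suppresses default_do_nmi() for that NMI.
 */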

#ifdef CONFIG_KPROBES
fastcall int do_int3(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return 1;
        /* This is an interrupt gate, because kprobes wants interrupts
           disabled.  Normal trap handlers don't. */
        restore_interrupts(regs);
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
        return 0;
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;

        __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                        SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow irq's after DR6 has been saved */
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                /*
                 * We already checked v86 mode above, so we can
                 * check for kernel mode by just checking the CPL
                 * of CS.
                 */
                if ((regs->xcs & 3) == 0)
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        __asm__("movl %0,%%db7"
                : /* no output */
                : "r" (0));
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        return;
}
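
/*
 * DR6/DR7 usage above, for reference: DR6 latches why the trap fired
 * (DR_TRAP0..DR_TRAP3 for the four hardware breakpoints, DR_STEP for
 * single-step), while DR7 arms the breakpoints. Because DR7 is set
 * lazily, a hardware-breakpoint bit in DR6 while thread.debugreg[7]
 * is zero must be stale, which is what the clear_dr7 path handles.
 */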

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status.  0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit.  We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                case 0x041: /* Stack Fault */
                case 0x241: /* Stack Fault | Direction */
                        info.si_code = FPE_FLTINV;
                        /* Should we clear the SF or let user space do it ???? */
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
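
/*
 * A concrete decode, as a sketch: a user program that executes fdiv on
 * 0.0/0.0 with the invalid-operation exception unmasked (IM clear in
 * the control word) arrives here with IE set in the status word, so
 * ((~cwd) & swd & 0x3f) == 0x001 and the task gets SIGFPE with
 * si_code == FPE_FLTINV.
 */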

fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->eip);
}

static void simd_math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register.  Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}

fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
                                          long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases.  This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                die_if_kernel("cache flush denied", regs, error_code);
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                force_sig(SIGSEGV, current);
        }
}

fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
                                          long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

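/*
 * The two helpers below back the "espfix" fixup in entry.S. An iret to
 * user space with a 16-bit stack segment restores only the low 16 bits
 * of %esp, which would leak the high bits of the kernel stack address.
 * The exit path therefore bounces the iret frame through a per-cpu
 * 16-bit stack (cpu_16bit_stack, loaded via __ESPFIX_SS) so the leaked
 * bits are harmless; fixup_x86_bogus_stack() copies state back to the
 * normal 32-bit stack if a fault hits while running on the small stack.
 */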
fastcall void setup_x86_bogus_stack(unsigned char * stk)
{
        unsigned long *switch16_ptr, *switch32_ptr;
        struct pt_regs *regs;
        unsigned long stack_top, stack_bot;
        unsigned short iret_frame16_off;
        int cpu = smp_processor_id();
        /* reserve the space on 32bit stack for the magic switch16 pointer */
        memmove(stk, stk + 8, sizeof(struct pt_regs));
        switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
        regs = (struct pt_regs *)stk;
        /* now the switch32 on 16bit stack */
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
        /* copy iret frame on 16bit stack */
        memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
        /* fill in the switch pointers */
        switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
        switch16_ptr[1] = __ESPFIX_SS;
        switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
                8 - CPU_16BIT_STACK_SIZE;
        switch32_ptr[1] = __KERNEL_DS;
}

fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
{
        unsigned long *switch32_ptr;
        unsigned char *stack16, *stack32;
        unsigned long stack_top, stack_bot;
        int len;
        int cpu = smp_processor_id();
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        /* copy the data from 16bit stack to 32bit stack */
        len = CPU_16BIT_STACK_SIZE - 8 - sp;
        stack16 = (unsigned char *)(stack_bot + sp);
        stack32 = (unsigned char *)
                (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
        memcpy(stack32, stack16, len);
        return stack32;
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        clts();         /* Allow maths ops (or we recurse) */
        if (!tsk_used_math(tsk))
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
}

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk("math-emulation not enabled and no coprocessor found.\n");
        printk("killing %s.\n",current->comm);
        force_sig(SIGFPE,current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */

#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
}
#endif
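
/*
 * Background on the F0 0F workaround above: on affected Pentiums a
 * locked cmpxchg8b with a register operand (bytes f0 0f c7 c8, hence
 * the name) raises an invalid-opcode exception while the bus is still
 * locked, hanging the machine. Locked cycles are read-modify-write, so
 * with the IDT mapped read-only the locked IDT access takes a page
 * fault instead; the i386 page fault handler spots a fault at IDT
 * entry 6 and redirects it to do_invalid_op().
 */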

#define _set_gate(gate_addr,type,dpl,addr,seg) \
do { \
        int __d0, __d1; \
        __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
                "movw %4,%%dx\n\t" \
                "movl %%eax,%0\n\t" \
                "movl %%edx,%1" \
                :"=m" (*((long *) (gate_addr))), \
                 "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
                :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
                 "3" ((char *) (addr)),"2" ((seg) << 16)); \
} while (0)
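
/*
 * For reference, _set_gate() assembles an i386 gate descriptor as two
 * 32-bit words in %edx:%eax and stores them:
 *
 *      word 0: segment selector (bits 31..16) | handler offset 15..0
 *      word 1: handler offset 31..16 | P (0x8000) | DPL (dpl<<13) | type<<8
 *
 * Type 14 is a 32-bit interrupt gate (IF cleared on entry), type 15 a
 * 32-bit trap gate (IF left alone) and type 5 a task gate, matching
 * the set_*_gate() helpers below.
 */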

/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n, 14, 3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
        _set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
}


void __init trap_init(void)
{
#ifdef CONFIG_EISA
        void __iomem *p = ioremap(0x0FFFD9, 4);
        if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
                EISA_bus = 1;
        }
        iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        init_apic_mappings();
#endif

        set_trap_gate(0,&divide_error);
        set_intr_gate(1,&debug);
        set_intr_gate(2,&nmi);
        set_system_intr_gate(3, &int3); /* int3-5 can be called from all */
        set_system_gate(4,&overflow);
        set_system_gate(5,&bounds);
        set_trap_gate(6,&invalid_op);
        set_trap_gate(7,&device_not_available);
        set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
        set_trap_gate(9,&coprocessor_segment_overrun);
        set_trap_gate(10,&invalid_TSS);
        set_trap_gate(11,&segment_not_present);
        set_trap_gate(12,&stack_segment);
        set_trap_gate(13,&general_protection);
        set_intr_gate(14,&page_fault);
        set_trap_gate(15,&spurious_interrupt_bug);
        set_trap_gate(16,&coprocessor_error);
        set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
        set_trap_gate(18,&machine_check);
#endif
        set_trap_gate(19,&simd_coprocessor_error);

        set_system_gate(SYSCALL_VECTOR,&system_call);

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();

        trap_init_hook();
}

static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("kstack=", kstack_setup);