Diffstat (limited to 'arch/x86_64/kernel/traps_64.c')
-rw-r--r-- | arch/x86_64/kernel/traps_64.c | 1138
1 file changed, 0 insertions, 1138 deletions
diff --git a/arch/x86_64/kernel/traps_64.c b/arch/x86_64/kernel/traps_64.c
deleted file mode 100644
index 03888420775d..000000000000
--- a/arch/x86_64/kernel/traps_64.c
+++ /dev/null
@@ -1,1138 +0,0 @@
/*
 *  linux/arch/x86-64/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/bug.h>
#include <linux/kdebug.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);

static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        preempt_disable();
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_disable();
        /* Make sure to not schedule here because we could be running
           on an exception stack. */
        preempt_enable_no_resched();
}

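/* How many words of each kernel stack to dump; overridable at boot
   with the "kstack=" parameter handled at the bottom of this file. */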
int kstack_depth_to_print = 12;

#ifdef CONFIG_KALLSYMS
void printk_address(unsigned long address)
{
        unsigned long offset = 0, symsize;
        const char *symname;
        char *modname;
        char *delim = ":";
        char namebuf[128];

        symname = kallsyms_lookup(address, &symsize, &offset,
                                        &modname, namebuf);
        if (!symname) {
                printk(" [<%016lx>]\n", address);
                return;
        }
        if (!modname)
                modname = delim = "";
        printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
                address, delim, modname, delim, symname, offset, symsize);
}
#else
void printk_address(unsigned long address)
{
        printk(" [<%016lx>]\n", address);
}
#endif

static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
                                        unsigned *usedp, char **idp)
{
        static char ids[][8] = {
                [DEBUG_STACK - 1] = "#DB",
                [NMI_STACK - 1] = "NMI",
                [DOUBLEFAULT_STACK - 1] = "#DF",
                [STACKFAULT_STACK - 1] = "#SS",
                [MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
        };
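        /* The '?' in the "#DB[?]" entries above is patched in below:
           when a large debug stack is split into EXCEPTION_STKSZ chunks,
           ids[j][4] is overwritten with the chunk index. */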
        unsigned k;

        /*
         * Iterate over all exception stacks, and figure out whether
         * 'stack' is in one of them:
         */
        for (k = 0; k < N_EXCEPTION_STACKS; k++) {
                unsigned long end = per_cpu(orig_ist, cpu).ist[k];
                /*
                 * Is 'stack' above this exception frame's end?
                 * If yes then skip to the next frame.
                 */
                if (stack >= end)
                        continue;
                /*
                 * Is 'stack' above this exception frame's start address?
                 * If yes then we found the right frame.
                 */
                if (stack >= end - EXCEPTION_STKSZ) {
                        /*
                         * Make sure we only iterate through an exception
                         * stack once. If it comes up for the second time
                         * then there's something wrong going on - just
                         * break out and return NULL:
                         */
                        if (*usedp & (1U << k))
                                break;
                        *usedp |= 1U << k;
                        *idp = ids[k];
                        return (unsigned long *)end;
                }
                /*
                 * If this is a debug stack, and if it has a larger size than
                 * the usual exception stacks, then 'stack' might still
                 * be within the lower portion of the debug stack:
                 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
                        unsigned j = N_EXCEPTION_STACKS - 1;

                        /*
                         * Black magic. A large debug stack is composed of
                         * multiple exception stack entries, which we
                         * iterate through now. Don't look:
                         */
                        do {
                                ++j;
                                end -= EXCEPTION_STKSZ;
                                ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
                        } while (stack < end - EXCEPTION_STKSZ);
                        if (*usedp & (1U << j))
                                break;
                        *usedp |= 1U << j;
                        *idp = ids[j];
                        return (unsigned long *)end;
                }
#endif
        }
        return NULL;
}

#define MSG(txt) ops->warning(data, txt)

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
        void *t = (void *)tinfo;
        return p > t && p < t + THREAD_SIZE - 3;
}

void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
                unsigned long *stack,
                struct stacktrace_ops *ops, void *data)
{
        const unsigned cpu = get_cpu();
        unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
        unsigned used = 0;
        struct thread_info *tinfo;

        if (!tsk)
                tsk = current;

        if (!stack) {
                unsigned long dummy;
                stack = &dummy;
                if (tsk && tsk != current)
                        stack = (unsigned long *)tsk->thread.rsp;
        }

        /*
         * Print function call entries within a stack. 'cond' is the
         * "end of stackframe" condition, that the 'stack++'
         * iteration will eventually trigger.
         */
#define HANDLE_STACK(cond) \
        do while (cond) { \
                unsigned long addr = *stack++; \
                /* Use unlocked access here because except for NMIs \
                   we should be already protected against module unloads */ \
                if (__kernel_text_address(addr)) { \
                        /* \
                         * If the address is either in the text segment of the \
                         * kernel, or in the region which contains vmalloc'ed \
                         * memory, it *may* be the address of a calling \
                         * routine; if so, print it so that someone tracing \
                         * down the cause of the crash will be able to figure \
                         * out the call path that was taken. \
                         */ \
                        ops->address(data, addr); \
                } \
        } while (0)
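        /*
         * HANDLE_STACK is a macro rather than a function so it can be
         * instantiated three times below with a different 'cond' each
         * time: once per exception stack, once for the IRQ stack and
         * once for the process stack.
         */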

        /*
         * Print function call entries in all stacks, starting at the
         * current stack address. If the stacks consist of nested
         * exceptions, we follow the links from each stack to the next.
         */
        for (;;) {
                char *id;
                unsigned long *estack_end;
                estack_end = in_exception_stack(cpu, (unsigned long)stack,
                                                &used, &id);

                if (estack_end) {
                        if (ops->stack(data, id) < 0)
                                break;
                        HANDLE_STACK (stack < estack_end);
                        ops->stack(data, "<EOE>");
                        /*
                         * We link to the next stack via the
                         * second-to-last pointer (index -2 to end) in the
                         * exception stack:
                         */
                        stack = (unsigned long *) estack_end[-2];
                        continue;
                }
                if (irqstack_end) {
                        unsigned long *irqstack;
                        irqstack = irqstack_end -
                                (IRQSTACKSIZE - 64) / sizeof(*irqstack);

                        if (stack >= irqstack && stack < irqstack_end) {
                                if (ops->stack(data, "IRQ") < 0)
                                        break;
                                HANDLE_STACK (stack < irqstack_end);
                                /*
                                 * We link to the next stack (which would be
                                 * the process stack normally) via the last
                                 * pointer (index -1 to end) in the IRQ stack:
                                 */
                                stack = (unsigned long *) (irqstack_end[-1]);
                                irqstack_end = NULL;
                                ops->stack(data, "EOI");
                                continue;
                        }
                }
                break;
        }

        /*
         * This handles the process stack:
         */
        tinfo = task_thread_info(tsk);
        HANDLE_STACK (valid_stack_ptr(tinfo, stack));
#undef HANDLE_STACK
        put_cpu();
}
EXPORT_SYMBOL(dump_trace);

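/*
 * Default stacktrace_ops callbacks used by show_trace() below: every
 * stack switch and every kernel text address that dump_trace() finds
 * is simply printk()ed.
 */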
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        print_symbol(msg, symbol);
        printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
        printk("%s\n", msg);
}

static int print_trace_stack(void *data, char *name)
{
        printk(" <%s> ", name);
        return 0;
}

static void print_trace_address(void *data, unsigned long addr)
{
        touch_nmi_watchdog();
        printk_address(addr);
}

static struct stacktrace_ops print_trace_ops = {
        .warning = print_trace_warning,
        .warning_symbol = print_trace_warning_symbol,
        .stack = print_trace_stack,
        .address = print_trace_address,
};

void
show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
{
        printk("\nCall Trace:\n");
        dump_trace(tsk, regs, stack, &print_trace_ops, NULL);
        printk("\n");
}

static void
_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
{
        unsigned long *stack;
        int i;
        const int cpu = smp_processor_id();
        unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
        unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

        // debugging aid: "show_stack(NULL, NULL);" prints the
        // back trace for this cpu.

        if (rsp == NULL) {
                if (tsk)
                        rsp = (unsigned long *)tsk->thread.rsp;
                else
                        rsp = (unsigned long *)&rsp;
        }

        stack = rsp;
        for(i=0; i < kstack_depth_to_print; i++) {
                if (stack >= irqstack && stack <= irqstack_end) {
                        if (stack == irqstack_end) {
                                stack = (unsigned long *) (irqstack_end[-1]);
                                printk(" <EOI> ");
                        }
                } else {
                        if (((long) stack & (THREAD_SIZE-1)) == 0)
                                break;
                }
                if (i && ((i % 4) == 0))
                        printk("\n");
                printk(" %016lx", *stack++);
                touch_nmi_watchdog();
        }
        show_trace(tsk, regs, rsp);
}

void show_stack(struct task_struct *tsk, unsigned long * rsp)
{
        _show_stack(tsk, NULL, rsp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long dummy;
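        /* The address of a local variable necessarily points into the
           current stack frame, so the trace starts right here. */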
        show_trace(NULL, NULL, &dummy);
}

EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = !user_mode(regs);
        unsigned long rsp;
        const int cpu = smp_processor_id();
        struct task_struct *cur = cpu_pda(cpu)->pcurrent;

        rsp = regs->rsp;
        printk("CPU %d ", cpu);
        __show_regs(regs);
        printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
                cur->comm, cur->pid, task_thread_info(cur), cur);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                printk("Stack: ");
                _show_stack(NULL, regs, (unsigned long*)rsp);

                printk("\nCode: ");
                if (regs->rip < PAGE_OFFSET)
                        goto bad;

                for (i=0; i<20; i++) {
                        unsigned char c;
                        if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
bad:
                                printk(" Bad RIP value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}

int is_valid_bugaddr(unsigned long rip)
{
        unsigned short ud2;

        if (__copy_from_user(&ud2, (const void __user *) rip, sizeof(ud2)))
                return 0;

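        /* BUG() compiles to the two-byte ud2 instruction, opcode 0f 0b;
           read into a little-endian u16 that is 0x0b0f. */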
        return ud2 == 0x0b0f;
}

#ifdef CONFIG_BUG
void out_of_line_bug(void)
{
        BUG();
}
EXPORT_SYMBOL(out_of_line_bug);
#endif

static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        local_irq_save(flags);
        cpu = smp_processor_id();
        if (!spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}

void __kprobes oops_end(unsigned long flags)
{
        die_owner = -1;
        bust_spinlocks(0);
        die_nest_count--;
        if (die_nest_count)
                /* We still own the lock */
                local_irq_restore(flags);
        else
                /* Nest count reaches zero, release the lock. */
                spin_unlock_irqrestore(&die_lock, flags);
        if (panic_on_oops)
                panic("Fatal exception");
        oops_exit();
}

void __kprobes __die(const char * str, struct pt_regs * regs, long err)
{
        static int die_counter;
        printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
        show_registers(regs);
        add_taint(TAINT_DIE);
        /* Executive summary in case the oops scrolled away */
        printk(KERN_ALERT "RIP ");
        printk_address(regs->rip);
        printk(" RSP <%016lx>\n", regs->rsp);
        if (kexec_should_crash(current))
                crash_kexec(regs);
}

void die(const char * str, struct pt_regs * regs, long err)
{
        unsigned long flags = oops_begin();

        if (!user_mode(regs))
                report_bug(regs->rip, regs);

        __die(str, regs, err);
        oops_end(flags);
        do_exit(SIGSEGV);
}

void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
        unsigned long flags = oops_begin();

        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        printk(str, smp_processor_id());
        show_registers(regs);
        if (kexec_should_crash(current))
                crash_kexec(regs);
        if (do_panic || panic_on_oops)
                panic("Non maskable interrupt");
        oops_end(flags);
        nmi_exit();
        local_irq_enable();
        do_exit(SIGSEGV);
}

static void __kprobes do_trap(int trapnr, int signr, char *str,
                        struct pt_regs * regs, long error_code,
                        siginfo_t *info)
{
        struct task_struct *tsk = current;

        if (user_mode(regs)) {
                /*
                 * We want error_code and trap_no set for userspace
                 * faults and kernelspace faults which result in
                 * die(), but not kernelspace faults which are fixed
                 * up. die() gives the process no chance to handle
                 * the signal and notice the kernel fault information,
                 * so that won't result in polluting the information
                 * about previously queued, but not yet delivered,
                 * faults. See also do_general_protection below.
                 */
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;

                if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
                    printk_ratelimit())
                        printk(KERN_INFO
                               "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
                               tsk->comm, tsk->pid, str,
                               regs->rip, regs->rsp, error_code);

                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        /* kernel trap */
        {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->rip);
                if (fixup)
                        regs->rip = fixup->fixup;
                else {
                        tsk->thread.error_code = error_code;
                        tsk->thread.trap_no = trapnr;
                        die(str, regs, error_code);
                }
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return; \
        conditional_sti(regs); \
        do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return; \
        conditional_sti(regs); \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}
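
/*
 * The do_##name token paste above stamps out one handler per line
 * below, e.g. DO_ERROR_INFO(0, ...) expands to do_divide_error() and
 * DO_ERROR(10, ...) to do_invalid_TSS().
 */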
DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)

/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
                        12, SIGBUS) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        /* Return not checked because double faults cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 8;

        /* This is always a kernel trap and never fixable (and thus must
           never return). */
        for (;;)
                die(str, regs, error_code);
}

asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
                                                long error_code)
{
        struct task_struct *tsk = current;

        conditional_sti(regs);

        if (user_mode(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = 13;

                if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                    printk_ratelimit())
                        printk(KERN_INFO
                       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
                               tsk->comm, tsk->pid,
                               regs->rip, regs->rsp, error_code);

                force_sig(SIGSEGV, tsk);
                return;
        }

        /* kernel gp */
        {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->rip);
                if (fixup) {
                        regs->rip = fixup->fixup;
                        return;
                }

                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = 13;
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}

static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
                reason);
        printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
        if(edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

        /* Clear and disable the memory parity error line. */
        reason = (reason & 0xf) | 4;
        outb(reason, 0x61);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs * regs)
{
        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        mdelay(2000);
        reason &= ~8;
        outb(reason, 0x61);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
                reason);
        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int cpu;

        cpu = smp_processor_id();

        /* Only the BSP gets external NMIs from the system. */
        if (!cpu)
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                                == NOTIFY_STOP)
                        return;
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog_tick(regs,reason))
                        return;
                if (!do_nmi_callback(regs,cpu))
                        unknown_nmi_error(reason, regs);

                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* AK: following checks seem to be broken on modern chipsets. FIXME */

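        /* In the NMI status read from port 0x61, bit 7 signals a memory
           or PCI SERR# parity error and bit 6 an I/O channel check. */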
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
}

/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
                return;
        }
        preempt_conditional_sti(regs);
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
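        /* Three cases: the pt_regs already live on the process stack,
           the exception came from user space, or it came from the kernel
           with interrupts enabled - in which case we carve out room for
           a copy just below the interrupted kernel stack. */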
        struct pt_regs *regs = eregs;
        /* Did already sync */
        if (eregs == (struct pt_regs *)eregs->rsp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /* Exception from kernel and interrupts are enabled. Move to
           kernel process stack. */
        else if (eregs->eflags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}

/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs * regs,
                                   unsigned long error_code)
{
        unsigned long condition;
        struct task_struct *tsk = current;
        siginfo_t info;

        get_debugreg(condition, 6);

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                                SIGTRAP) == NOTIFY_STOP)
                return;

        preempt_conditional_sti(regs);

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg7) {
                        goto clear_dr7;
                }
        }

        tsk->thread.debugreg6 = condition;

        /* Mask out spurious TF errors due to lazy TF clearing */
        if (condition & DR_STEP) {
                /*
                 * The TF error should be masked out only if the current
                 * process is not traced and if the TRAP flag has been set
                 * previously by a tracing process (condition detected by
                 * the PT_DTRACE flag); remember that the i386 TRAP flag
                 * can be modified by the process itself in user mode,
                 * allowing programs to debug themselves without the ptrace()
                 * interface.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
                /*
                 * Was the TF flag set by a debugger? If so, clear it now,
                 * so that register information is correct.
                 */
                if (tsk->ptrace & PT_DTRACE) {
                        regs->eflags &= ~TF_MASK;
                        tsk->ptrace &= ~PT_DTRACE;
                }
        }

        /* Ok, finally something we can handle */
        tsk->thread.trap_no = 1;
        tsk->thread.error_code = error_code;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
        force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
        set_debugreg(0UL, 7);
        preempt_conditional_cli(regs);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        preempt_conditional_cli(regs);
}

static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
        const struct exception_table_entry *fixup;
        fixup = search_exception_tables(regs->rip);
        if (fixup) {
                regs->rip = fixup->fixup;
                return 1;
        }
        notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
        /* Illegal floating point operation in the kernel */
        current->thread.trap_no = trapnr;
        die(str, regs, 0);
        return 0;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
        void __user *rip = (void __user *)(regs->rip);
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel x87 math error", 16))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = rip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
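        /* Exception bits in both the x87 control and status words:
           0x01 invalid op, 0x02 denormal, 0x04 divide-by-zero,
           0x08 overflow, 0x10 underflow, 0x20 precision. */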
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

asmlinkage void bad_intr(void)
{
        printk("bad interrupt");
}

asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
        void __user *rip = (void __user *)(regs->rip);
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel simd math error", 19))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = rip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
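        /* Example: with the invalid-op mask bit (0x80) clear and the
           invalid-op flag (0x01) set, (mxcsr & 0x1f80) >> 7 has bit 0
           clear, so the expression below evaluates to 0x001 and we
           pick FPE_FLTINV. */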
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
        struct task_struct *me = current;
        clts();                 /* Allow maths ops (or we recurse) */

        if (!used_math())
                init_fpu(me);
        restore_fpu_checking(&me->thread.i387.fxsave);
        task_thread_info(me)->status |= TS_USEDFPU;
        me->fpu_counter++;
}

void __init trap_init(void)
{
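        /* Interrupt gates are DPL 0; system gates are DPL 3 so userspace
           can raise the vector itself (int3, into). The _ist variants run
           the handler on a dedicated per-CPU IST stack. */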
        set_intr_gate(0,&divide_error);
        set_intr_gate_ist(1,&debug,DEBUG_STACK);
        set_intr_gate_ist(2,&nmi,NMI_STACK);
        set_system_gate_ist(3,&int3,DEBUG_STACK); /* int3 can be called from all */
        set_system_gate(4,&overflow);   /* int4 can be called from all */
        set_intr_gate(5,&bounds);
        set_intr_gate(6,&invalid_op);
        set_intr_gate(7,&device_not_available);
        set_intr_gate_ist(8,&double_fault, DOUBLEFAULT_STACK);
        set_intr_gate(9,&coprocessor_segment_overrun);
        set_intr_gate(10,&invalid_TSS);
        set_intr_gate(11,&segment_not_present);
        set_intr_gate_ist(12,&stack_segment,STACKFAULT_STACK);
        set_intr_gate(13,&general_protection);
        set_intr_gate(14,&page_fault);
        set_intr_gate(15,&spurious_interrupt_bug);
        set_intr_gate(16,&coprocessor_error);
        set_intr_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(18,&machine_check, MCE_STACK);
#endif
        set_intr_gate(19,&simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
        set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

static int __init oops_setup(char *s)
{
        if (!s)
                return -EINVAL;
        if (!strcmp(s, "panic"))
                panic_on_oops = 1;
        return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
        if (!s)
                return -EINVAL;
        kstack_depth_to_print = simple_strtoul(s,NULL,0);
        return 0;
}
early_param("kstack", kstack_setup);