author		Alexander van Heukelum <heukelum@mailshack.com>	2008-07-01 19:29:44 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-09 01:43:28 -0400
commit		a8c1be9d2e78d8608892c86791837acf12da4bf6
tree		274fc038d6bff0535f8535e9b63bb7d4e10f12ed /arch/x86/kernel
parent		e93ef949fd9a3f237aedfb8e64414b28980530b8
x86: initial changes to unify traps_32.c and traps_64.c
This patch does not change the generated object files.
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/traps_32.c	 78
-rw-r--r--	arch/x86/kernel/traps_64.c	309
2 files changed, 191 insertions, 196 deletions
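Most of the churn below is whitespace and argument re-alignment inside the DO_ERROR()/DO_ERROR_INFO() handler-generating macros, which both files carry in near-identical form. As a reading aid only (not part of the patch), this is essentially the 64-bit DO_ERROR() from the hunks below; the 32-bit variant additionally threads a vm86 flag into do_trap(), and the _INFO variants also fill in a siginfo_t:

/*
 * Reading aid only -- not part of this patch, and not buildable
 * outside the kernel (pt_regs, notify_die(), conditional_sti() and
 * do_trap() are kernel internals).  Each DO_ERROR() use stamps out
 * one exception handler: it lets the die-notifier chain veto the
 * trap, re-enables interrupts if the trapped context had them on,
 * then hands off to the common do_trap() signal delivery.
 */
#define DO_ERROR(trapnr, signr, str, name)				\
asmlinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
			== NOTIFY_STOP)					\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

/* Example expansion, as listed below: */
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)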
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index d7cc292691ff..92439698e489 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 1991, 1992 Linus Torvalds | 2 | * Copyright (C) 1991, 1992 Linus Torvalds |
3 | * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs | ||
3 | * | 4 | * |
4 | * Pentium III FXSR, SSE support | 5 | * Pentium III FXSR, SSE support |
5 | * Gareth Hughes <gareth@valinux.com>, May 2000 | 6 | * Gareth Hughes <gareth@valinux.com>, May 2000 |
@@ -130,7 +131,8 @@ void printk_address(unsigned long address, int reliable) | |||
130 | #endif | 131 | #endif |
131 | } | 132 | } |
132 | 133 | ||
133 | static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned size) | 134 | static inline int valid_stack_ptr(struct thread_info *tinfo, |
135 | void *p, unsigned int size) | ||
134 | { | 136 | { |
135 | return p > (void *)tinfo && | 137 | return p > (void *)tinfo && |
136 | p <= (void *)tinfo + THREAD_SIZE - size; | 138 | p <= (void *)tinfo + THREAD_SIZE - size; |
@@ -138,14 +140,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned s | |||
138 | 140 | ||
139 | /* The form of the top of the frame on the stack */ | 141 | /* The form of the top of the frame on the stack */ |
140 | struct stack_frame { | 142 | struct stack_frame { |
141 | struct stack_frame *next_frame; | 143 | struct stack_frame *next_frame; |
142 | unsigned long return_address; | 144 | unsigned long return_address; |
143 | }; | 145 | }; |
144 | 146 | ||
145 | static inline unsigned long | 147 | static inline unsigned long |
146 | print_context_stack(struct thread_info *tinfo, | 148 | print_context_stack(struct thread_info *tinfo, |
147 | unsigned long *stack, unsigned long bp, | 149 | unsigned long *stack, unsigned long bp, |
148 | const struct stacktrace_ops *ops, void *data) | 150 | const struct stacktrace_ops *ops, void *data) |
149 | { | 151 | { |
150 | struct stack_frame *frame = (struct stack_frame *)bp; | 152 | struct stack_frame *frame = (struct stack_frame *)bp; |
151 | 153 | ||
@@ -167,8 +169,6 @@ print_context_stack(struct thread_info *tinfo, | |||
167 | return bp; | 169 | return bp; |
168 | } | 170 | } |
169 | 171 | ||
170 | #define MSG(msg) ops->warning(data, msg) | ||
171 | |||
172 | void dump_trace(struct task_struct *task, struct pt_regs *regs, | 172 | void dump_trace(struct task_struct *task, struct pt_regs *regs, |
173 | unsigned long *stack, unsigned long bp, | 173 | unsigned long *stack, unsigned long bp, |
174 | const struct stacktrace_ops *ops, void *data) | 174 | const struct stacktrace_ops *ops, void *data) |
@@ -178,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
178 | 178 | ||
179 | if (!stack) { | 179 | if (!stack) { |
180 | unsigned long dummy; | 180 | unsigned long dummy; |
181 | |||
182 | stack = &dummy; | 181 | stack = &dummy; |
183 | if (task != current) | 182 | if (task != current) |
184 | stack = (unsigned long *)task->thread.sp; | 183 | stack = (unsigned long *)task->thread.sp; |
@@ -196,7 +195,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
196 | } | 195 | } |
197 | #endif | 196 | #endif |
198 | 197 | ||
199 | while (1) { | 198 | for (;;) { |
200 | struct thread_info *context; | 199 | struct thread_info *context; |
201 | 200 | ||
202 | context = (struct thread_info *) | 201 | context = (struct thread_info *) |
@@ -248,10 +247,10 @@ static void print_trace_address(void *data, unsigned long addr, int reliable) | |||
248 | } | 247 | } |
249 | 248 | ||
250 | static const struct stacktrace_ops print_trace_ops = { | 249 | static const struct stacktrace_ops print_trace_ops = { |
251 | .warning = print_trace_warning, | 250 | .warning = print_trace_warning, |
252 | .warning_symbol = print_trace_warning_symbol, | 251 | .warning_symbol = print_trace_warning_symbol, |
253 | .stack = print_trace_stack, | 252 | .stack = print_trace_stack, |
254 | .address = print_trace_address, | 253 | .address = print_trace_address, |
255 | }; | 254 | }; |
256 | 255 | ||
257 | static void | 256 | static void |
@@ -351,15 +350,14 @@ void show_registers(struct pt_regs *regs) | |||
351 | printk(KERN_EMERG "Code: "); | 350 | printk(KERN_EMERG "Code: "); |
352 | 351 | ||
353 | ip = (u8 *)regs->ip - code_prologue; | 352 | ip = (u8 *)regs->ip - code_prologue; |
354 | if (ip < (u8 *)PAGE_OFFSET || | 353 | if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { |
355 | probe_kernel_address(ip, c)) { | ||
356 | /* try starting at EIP */ | 354 | /* try starting at EIP */ |
357 | ip = (u8 *)regs->ip; | 355 | ip = (u8 *)regs->ip; |
358 | code_len = code_len - code_prologue + 1; | 356 | code_len = code_len - code_prologue + 1; |
359 | } | 357 | } |
360 | for (i = 0; i < code_len; i++, ip++) { | 358 | for (i = 0; i < code_len; i++, ip++) { |
361 | if (ip < (u8 *)PAGE_OFFSET || | 359 | if (ip < (u8 *)PAGE_OFFSET || |
362 | probe_kernel_address(ip, c)) { | 360 | probe_kernel_address(ip, c)) { |
363 | printk(" Bad EIP value."); | 361 | printk(" Bad EIP value."); |
364 | break; | 362 | break; |
365 | } | 363 | } |
@@ -546,7 +544,7 @@ void do_##name(struct pt_regs *regs, long error_code) \ | |||
546 | { \ | 544 | { \ |
547 | trace_hardirqs_fixup(); \ | 545 | trace_hardirqs_fixup(); \ |
548 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 546 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
549 | == NOTIFY_STOP) \ | 547 | == NOTIFY_STOP) \ |
550 | return; \ | 548 | return; \ |
551 | do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \ | 549 | do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \ |
552 | } | 550 | } |
@@ -562,7 +560,7 @@ void do_##name(struct pt_regs *regs, long error_code) \ | |||
562 | info.si_code = sicode; \ | 560 | info.si_code = sicode; \ |
563 | info.si_addr = (void __user *)siaddr; \ | 561 | info.si_addr = (void __user *)siaddr; \ |
564 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 562 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
565 | == NOTIFY_STOP) \ | 563 | == NOTIFY_STOP) \ |
566 | return; \ | 564 | return; \ |
567 | do_trap(trapnr, signr, str, 0, regs, error_code, &info); \ | 565 | do_trap(trapnr, signr, str, 0, regs, error_code, &info); \ |
568 | } | 566 | } |
@@ -571,7 +569,7 @@ void do_##name(struct pt_regs *regs, long error_code) \ | |||
571 | void do_##name(struct pt_regs *regs, long error_code) \ | 569 | void do_##name(struct pt_regs *regs, long error_code) \ |
572 | { \ | 570 | { \ |
573 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 571 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
574 | == NOTIFY_STOP) \ | 572 | == NOTIFY_STOP) \ |
575 | return; \ | 573 | return; \ |
576 | do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \ | 574 | do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \ |
577 | } | 575 | } |
@@ -586,22 +584,22 @@ void do_##name(struct pt_regs *regs, long error_code) \ | |||
586 | info.si_addr = (void __user *)siaddr; \ | 584 | info.si_addr = (void __user *)siaddr; \ |
587 | trace_hardirqs_fixup(); \ | 585 | trace_hardirqs_fixup(); \ |
588 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 586 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
589 | == NOTIFY_STOP) \ | 587 | == NOTIFY_STOP) \ |
590 | return; \ | 588 | return; \ |
591 | do_trap(trapnr, signr, str, 1, regs, error_code, &info); \ | 589 | do_trap(trapnr, signr, str, 1, regs, error_code, &info); \ |
592 | } | 590 | } |
593 | 591 | ||
594 | DO_VM86_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) | 592 | DO_VM86_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) |
595 | #ifndef CONFIG_KPROBES | 593 | #ifndef CONFIG_KPROBES |
596 | DO_VM86_ERROR(3, SIGTRAP, "int3", int3) | 594 | DO_VM86_ERROR(3, SIGTRAP, "int3", int3) |
597 | #endif | 595 | #endif |
598 | DO_VM86_ERROR(4, SIGSEGV, "overflow", overflow) | 596 | DO_VM86_ERROR(4, SIGSEGV, "overflow", overflow) |
599 | DO_VM86_ERROR(5, SIGSEGV, "bounds", bounds) | 597 | DO_VM86_ERROR(5, SIGSEGV, "bounds", bounds) |
600 | DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0) | 598 | DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0) |
601 | DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) | 599 | DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) |
602 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) | 600 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) |
603 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) | 601 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) |
604 | DO_ERROR(12, SIGBUS, "stack segment", stack_segment) | 602 | DO_ERROR(12, SIGBUS, "stack segment", stack_segment) |
605 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0) | 603 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0) |
606 | DO_ERROR_INFO(32, SIGILL, "iret exception", iret_error, ILL_BADSTK, 0, 1) | 604 | DO_ERROR_INFO(32, SIGILL, "iret exception", iret_error, ILL_BADSTK, 0, 1) |
607 | 605 | ||
@@ -799,7 +797,7 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs) | |||
799 | 797 | ||
800 | if (!(reason & 0xc0)) { | 798 | if (!(reason & 0xc0)) { |
801 | if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) | 799 | if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) |
802 | == NOTIFY_STOP) | 800 | == NOTIFY_STOP) |
803 | return; | 801 | return; |
804 | #ifdef CONFIG_X86_LOCAL_APIC | 802 | #ifdef CONFIG_X86_LOCAL_APIC |
805 | /* | 803 | /* |
@@ -818,6 +816,8 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs) | |||
818 | } | 816 | } |
819 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) | 817 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) |
820 | return; | 818 | return; |
819 | |||
820 | /* AK: following checks seem to be broken on modern chipsets. FIXME */ | ||
821 | if (reason & 0x80) | 821 | if (reason & 0x80) |
822 | mem_parity_error(reason, regs); | 822 | mem_parity_error(reason, regs); |
823 | if (reason & 0x40) | 823 | if (reason & 0x40) |
@@ -915,7 +915,7 @@ void __kprobes do_debug(struct pt_regs *regs, long error_code) | |||
915 | tsk->thread.debugctlmsr = 0; | 915 | tsk->thread.debugctlmsr = 0; |
916 | 916 | ||
917 | if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, | 917 | if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, |
918 | SIGTRAP) == NOTIFY_STOP) | 918 | SIGTRAP) == NOTIFY_STOP) |
919 | return; | 919 | return; |
920 | /* It's safe to allow irq's after DR6 has been saved */ | 920 | /* It's safe to allow irq's after DR6 has been saved */ |
921 | if (regs->flags & X86_EFLAGS_IF) | 921 | if (regs->flags & X86_EFLAGS_IF) |
@@ -997,7 +997,7 @@ void math_error(void __user *ip) | |||
997 | * C1 reg you need in case of a stack fault, 0x040 is the stack | 997 | * C1 reg you need in case of a stack fault, 0x040 is the stack |
998 | * fault bit. We should only be taking one exception at a time, | 998 | * fault bit. We should only be taking one exception at a time, |
999 | * so if this combination doesn't produce any single exception, | 999 | * so if this combination doesn't produce any single exception, |
1000 | * then we have a bad program that isn't syncronizing its FPU usage | 1000 | * then we have a bad program that isn't synchronizing its FPU usage |
1001 | * and it will suffer the consequences since we won't be able to | 1001 | * and it will suffer the consequences since we won't be able to |
1002 | * fully reproduce the context of the exception | 1002 | * fully reproduce the context of the exception |
1003 | */ | 1003 | */ |
@@ -1006,7 +1006,7 @@ void math_error(void __user *ip) | |||
1006 | switch (swd & ~cwd & 0x3f) { | 1006 | switch (swd & ~cwd & 0x3f) { |
1007 | case 0x000: /* No unmasked exception */ | 1007 | case 0x000: /* No unmasked exception */ |
1008 | return; | 1008 | return; |
1009 | default: /* Multiple exceptions */ | 1009 | default: /* Multiple exceptions */ |
1010 | break; | 1010 | break; |
1011 | case 0x001: /* Invalid Op */ | 1011 | case 0x001: /* Invalid Op */ |
1012 | /* | 1012 | /* |
@@ -1198,16 +1198,16 @@ void __init trap_init(void) | |||
1198 | early_iounmap(p, 4); | 1198 | early_iounmap(p, 4); |
1199 | #endif | 1199 | #endif |
1200 | 1200 | ||
1201 | set_trap_gate(0, &divide_error); | 1201 | set_trap_gate(0, &divide_error); |
1202 | set_intr_gate(1, &debug); | 1202 | set_intr_gate(1, &debug); |
1203 | set_intr_gate(2, &nmi); | 1203 | set_intr_gate(2, &nmi); |
1204 | set_system_intr_gate(3, &int3); /* int3/4 can be called from all */ | 1204 | set_system_intr_gate(3, &int3); /* int3 can be called from all */ |
1205 | set_system_gate(4, &overflow); | 1205 | set_system_gate(4, &overflow); /* int4 can be called from all */ |
1206 | set_trap_gate(5, &bounds); | 1206 | set_trap_gate(5, &bounds); |
1207 | set_trap_gate(6, &invalid_op); | 1207 | set_trap_gate(6, &invalid_op); |
1208 | set_trap_gate(7, &device_not_available); | 1208 | set_trap_gate(7, &device_not_available); |
1209 | set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS); | 1209 | set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS); |
1210 | set_trap_gate(9, &coprocessor_segment_overrun); | 1210 | set_trap_gate(9, &coprocessor_segment_overrun); |
1211 | set_trap_gate(10, &invalid_TSS); | 1211 | set_trap_gate(10, &invalid_TSS); |
1212 | set_trap_gate(11, &segment_not_present); | 1212 | set_trap_gate(11, &segment_not_present); |
1213 | set_trap_gate(12, &stack_segment); | 1213 | set_trap_gate(12, &stack_segment); |
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 80ba6d37bfe0..686074e6caf9 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -205,8 +205,6 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | |||
205 | return NULL; | 205 | return NULL; |
206 | } | 206 | } |
207 | 207 | ||
208 | #define MSG(txt) ops->warning(data, txt) | ||
209 | |||
210 | /* | 208 | /* |
211 | * x86-64 can have up to three kernel stacks: | 209 | * x86-64 can have up to three kernel stacks: |
212 | * process stack | 210 | * process stack |
@@ -233,11 +231,11 @@ struct stack_frame { | |||
233 | unsigned long return_address; | 231 | unsigned long return_address; |
234 | }; | 232 | }; |
235 | 233 | ||
236 | 234 | static inline unsigned long | |
237 | static inline unsigned long print_context_stack(struct thread_info *tinfo, | 235 | print_context_stack(struct thread_info *tinfo, |
238 | unsigned long *stack, unsigned long bp, | 236 | unsigned long *stack, unsigned long bp, |
239 | const struct stacktrace_ops *ops, void *data, | 237 | const struct stacktrace_ops *ops, void *data, |
240 | unsigned long *end) | 238 | unsigned long *end) |
241 | { | 239 | { |
242 | struct stack_frame *frame = (struct stack_frame *)bp; | 240 | struct stack_frame *frame = (struct stack_frame *)bp; |
243 | 241 | ||
@@ -259,7 +257,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, | |||
259 | return bp; | 257 | return bp; |
260 | } | 258 | } |
261 | 259 | ||
262 | void dump_trace(struct task_struct *tsk, struct pt_regs *regs, | 260 | void dump_trace(struct task_struct *task, struct pt_regs *regs, |
263 | unsigned long *stack, unsigned long bp, | 261 | unsigned long *stack, unsigned long bp, |
264 | const struct stacktrace_ops *ops, void *data) | 262 | const struct stacktrace_ops *ops, void *data) |
265 | { | 263 | { |
@@ -268,31 +266,29 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, | |||
268 | unsigned used = 0; | 266 | unsigned used = 0; |
269 | struct thread_info *tinfo; | 267 | struct thread_info *tinfo; |
270 | 268 | ||
271 | if (!tsk) | 269 | if (!task) |
272 | tsk = current; | 270 | task = current; |
273 | tinfo = task_thread_info(tsk); | 271 | tinfo = task_thread_info(task); |
274 | 272 | ||
275 | if (!stack) { | 273 | if (!stack) { |
276 | unsigned long dummy; | 274 | unsigned long dummy; |
277 | stack = &dummy; | 275 | stack = &dummy; |
278 | if (tsk && tsk != current) | 276 | if (task && task != current) |
279 | stack = (unsigned long *)tsk->thread.sp; | 277 | stack = (unsigned long *)task->thread.sp; |
280 | } | 278 | } |
281 | 279 | ||
282 | #ifdef CONFIG_FRAME_POINTER | 280 | #ifdef CONFIG_FRAME_POINTER |
283 | if (!bp) { | 281 | if (!bp) { |
284 | if (tsk == current) { | 282 | if (task == current) { |
285 | /* Grab bp right from our regs */ | 283 | /* Grab bp right from our regs */ |
286 | asm("movq %%rbp, %0" : "=r" (bp):); | 284 | asm("movq %%rbp, %0" : "=r" (bp) :); |
287 | } else { | 285 | } else { |
288 | /* bp is the last reg pushed by switch_to */ | 286 | /* bp is the last reg pushed by switch_to */ |
289 | bp = *(unsigned long *) tsk->thread.sp; | 287 | bp = *(unsigned long *) task->thread.sp; |
290 | } | 288 | } |
291 | } | 289 | } |
292 | #endif | 290 | #endif |
293 | 291 | ||
294 | |||
295 | |||
296 | /* | 292 | /* |
297 | * Print function call entries in all stacks, starting at the | 293 | * Print function call entries in all stacks, starting at the |
298 | * current stack address. If the stacks consist of nested | 294 | * current stack address. If the stacks consist of nested |
@@ -382,18 +378,17 @@ static const struct stacktrace_ops print_trace_ops = { | |||
382 | .address = print_trace_address, | 378 | .address = print_trace_address, |
383 | }; | 379 | }; |
384 | 380 | ||
385 | void | 381 | void show_trace(struct task_struct *task, struct pt_regs *regs, |
386 | show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack, | 382 | unsigned long *stack, unsigned long bp) |
387 | unsigned long bp) | ||
388 | { | 383 | { |
389 | printk("\nCall Trace:\n"); | 384 | printk("\nCall Trace:\n"); |
390 | dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL); | 385 | dump_trace(task, regs, stack, bp, &print_trace_ops, NULL); |
391 | printk("\n"); | 386 | printk("\n"); |
392 | } | 387 | } |
393 | 388 | ||
394 | static void | 389 | static void |
395 | _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp, | 390 | _show_stack(struct task_struct *task, struct pt_regs *regs, |
396 | unsigned long bp) | 391 | unsigned long *sp, unsigned long bp) |
397 | { | 392 | { |
398 | unsigned long *stack; | 393 | unsigned long *stack; |
399 | int i; | 394 | int i; |
@@ -405,14 +400,14 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp, | |||
405 | // back trace for this cpu. | 400 | // back trace for this cpu. |
406 | 401 | ||
407 | if (sp == NULL) { | 402 | if (sp == NULL) { |
408 | if (tsk) | 403 | if (task) |
409 | sp = (unsigned long *)tsk->thread.sp; | 404 | sp = (unsigned long *)task->thread.sp; |
410 | else | 405 | else |
411 | sp = (unsigned long *)&sp; | 406 | sp = (unsigned long *)&sp; |
412 | } | 407 | } |
413 | 408 | ||
414 | stack = sp; | 409 | stack = sp; |
415 | for(i=0; i < kstack_depth_to_print; i++) { | 410 | for (i = 0; i < kstack_depth_to_print; i++) { |
416 | if (stack >= irqstack && stack <= irqstack_end) { | 411 | if (stack >= irqstack && stack <= irqstack_end) { |
417 | if (stack == irqstack_end) { | 412 | if (stack == irqstack_end) { |
418 | stack = (unsigned long *) (irqstack_end[-1]); | 413 | stack = (unsigned long *) (irqstack_end[-1]); |
@@ -427,12 +422,12 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp, | |||
427 | printk(" %016lx", *stack++); | 422 | printk(" %016lx", *stack++); |
428 | touch_nmi_watchdog(); | 423 | touch_nmi_watchdog(); |
429 | } | 424 | } |
430 | show_trace(tsk, regs, sp, bp); | 425 | show_trace(task, regs, sp, bp); |
431 | } | 426 | } |
432 | 427 | ||
433 | void show_stack(struct task_struct *tsk, unsigned long * sp) | 428 | void show_stack(struct task_struct *task, unsigned long *sp) |
434 | { | 429 | { |
435 | _show_stack(tsk, NULL, sp, 0); | 430 | _show_stack(task, NULL, sp, 0); |
436 | } | 431 | } |
437 | 432 | ||
438 | /* | 433 | /* |
@@ -440,7 +435,7 @@ void show_stack(struct task_struct *tsk, unsigned long * sp) | |||
440 | */ | 435 | */ |
441 | void dump_stack(void) | 436 | void dump_stack(void) |
442 | { | 437 | { |
443 | unsigned long dummy; | 438 | unsigned long stack; |
444 | unsigned long bp = 0; | 439 | unsigned long bp = 0; |
445 | 440 | ||
446 | #ifdef CONFIG_FRAME_POINTER | 441 | #ifdef CONFIG_FRAME_POINTER |
@@ -453,7 +448,7 @@ void dump_stack(void) | |||
453 | init_utsname()->release, | 448 | init_utsname()->release, |
454 | (int)strcspn(init_utsname()->version, " "), | 449 | (int)strcspn(init_utsname()->version, " "), |
455 | init_utsname()->version); | 450 | init_utsname()->version); |
456 | show_trace(NULL, NULL, &dummy, bp); | 451 | show_trace(NULL, NULL, &stack, bp); |
457 | } | 452 | } |
458 | 453 | ||
459 | EXPORT_SYMBOL(dump_stack); | 454 | EXPORT_SYMBOL(dump_stack); |
@@ -488,7 +483,7 @@ void show_registers(struct pt_regs *regs) | |||
488 | printk(KERN_EMERG "Code: "); | 483 | printk(KERN_EMERG "Code: "); |
489 | if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { | 484 | if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { |
490 | /* try starting at RIP */ | 485 | /* try starting at RIP */ |
491 | ip = (u8 *) regs->ip; | 486 | ip = (u8 *)regs->ip; |
492 | code_len = code_len - code_prologue + 1; | 487 | code_len = code_len - code_prologue + 1; |
493 | } | 488 | } |
494 | for (i = 0; i < code_len; i++, ip++) { | 489 | for (i = 0; i < code_len; i++, ip++) { |
@@ -504,7 +499,7 @@ void show_registers(struct pt_regs *regs) | |||
504 | } | 499 | } |
505 | } | 500 | } |
506 | printk("\n"); | 501 | printk("\n"); |
507 | } | 502 | } |
508 | 503 | ||
509 | int is_valid_bugaddr(unsigned long ip) | 504 | int is_valid_bugaddr(unsigned long ip) |
510 | { | 505 | { |
@@ -576,8 +571,10 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err) | |||
576 | printk("DEBUG_PAGEALLOC"); | 571 | printk("DEBUG_PAGEALLOC"); |
577 | #endif | 572 | #endif |
578 | printk("\n"); | 573 | printk("\n"); |
579 | if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) | 574 | if (notify_die(DIE_OOPS, str, regs, err, |
575 | current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) | ||
580 | return 1; | 576 | return 1; |
577 | |||
581 | show_registers(regs); | 578 | show_registers(regs); |
582 | add_taint(TAINT_DIE); | 579 | add_taint(TAINT_DIE); |
583 | /* Executive summary in case the oops scrolled away */ | 580 | /* Executive summary in case the oops scrolled away */ |
@@ -589,7 +586,7 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err) | |||
589 | return 0; | 586 | return 0; |
590 | } | 587 | } |
591 | 588 | ||
592 | void die(const char * str, struct pt_regs * regs, long err) | 589 | void die(const char * str, struct pt_regs *regs, long err) |
593 | { | 590 | { |
594 | unsigned long flags = oops_begin(); | 591 | unsigned long flags = oops_begin(); |
595 | 592 | ||
@@ -606,8 +603,7 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic) | |||
606 | { | 603 | { |
607 | unsigned long flags; | 604 | unsigned long flags; |
608 | 605 | ||
609 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == | 606 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) |
610 | NOTIFY_STOP) | ||
611 | return; | 607 | return; |
612 | 608 | ||
613 | flags = oops_begin(); | 609 | flags = oops_begin(); |
@@ -629,9 +625,9 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic) | |||
629 | do_exit(SIGBUS); | 625 | do_exit(SIGBUS); |
630 | } | 626 | } |
631 | 627 | ||
632 | static void __kprobes do_trap(int trapnr, int signr, char *str, | 628 | static void __kprobes |
633 | struct pt_regs * regs, long error_code, | 629 | do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, |
634 | siginfo_t *info) | 630 | long error_code, siginfo_t *info) |
635 | { | 631 | { |
636 | struct task_struct *tsk = current; | 632 | struct task_struct *tsk = current; |
637 | 633 | ||
@@ -676,38 +672,38 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, | |||
676 | } | 672 | } |
677 | 673 | ||
678 | #define DO_ERROR(trapnr, signr, str, name) \ | 674 | #define DO_ERROR(trapnr, signr, str, name) \ |
679 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 675 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ |
680 | { \ | 676 | { \ |
681 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 677 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
682 | == NOTIFY_STOP) \ | 678 | == NOTIFY_STOP) \ |
683 | return; \ | 679 | return; \ |
684 | conditional_sti(regs); \ | 680 | conditional_sti(regs); \ |
685 | do_trap(trapnr, signr, str, regs, error_code, NULL); \ | 681 | do_trap(trapnr, signr, str, regs, error_code, NULL); \ |
686 | } | 682 | } |
687 | 683 | ||
688 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ | 684 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ |
689 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 685 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ |
690 | { \ | 686 | { \ |
691 | siginfo_t info; \ | 687 | siginfo_t info; \ |
692 | info.si_signo = signr; \ | 688 | info.si_signo = signr; \ |
693 | info.si_errno = 0; \ | 689 | info.si_errno = 0; \ |
694 | info.si_code = sicode; \ | 690 | info.si_code = sicode; \ |
695 | info.si_addr = (void __user *)siaddr; \ | 691 | info.si_addr = (void __user *)siaddr; \ |
696 | trace_hardirqs_fixup(); \ | 692 | trace_hardirqs_fixup(); \ |
697 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 693 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
698 | == NOTIFY_STOP) \ | 694 | == NOTIFY_STOP) \ |
699 | return; \ | 695 | return; \ |
700 | conditional_sti(regs); \ | 696 | conditional_sti(regs); \ |
701 | do_trap(trapnr, signr, str, regs, error_code, &info); \ | 697 | do_trap(trapnr, signr, str, regs, error_code, &info); \ |
702 | } | 698 | } |
703 | 699 | ||
704 | DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) | 700 | DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) |
705 | DO_ERROR( 4, SIGSEGV, "overflow", overflow) | 701 | DO_ERROR(4, SIGSEGV, "overflow", overflow) |
706 | DO_ERROR( 5, SIGSEGV, "bounds", bounds) | 702 | DO_ERROR(5, SIGSEGV, "bounds", bounds) |
707 | DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) | 703 | DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) |
708 | DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) | 704 | DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) |
709 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) | 705 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) |
710 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) | 706 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) |
711 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) | 707 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) |
712 | 708 | ||
713 | /* Runs on IST stack */ | 709 | /* Runs on IST stack */ |
@@ -775,14 +771,14 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs, | |||
775 | } | 771 | } |
776 | 772 | ||
777 | static notrace __kprobes void | 773 | static notrace __kprobes void |
778 | mem_parity_error(unsigned char reason, struct pt_regs * regs) | 774 | mem_parity_error(unsigned char reason, struct pt_regs *regs) |
779 | { | 775 | { |
780 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", | 776 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", |
781 | reason); | 777 | reason); |
782 | printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n"); | 778 | printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n"); |
783 | 779 | ||
784 | #if defined(CONFIG_EDAC) | 780 | #if defined(CONFIG_EDAC) |
785 | if(edac_handler_set()) { | 781 | if (edac_handler_set()) { |
786 | edac_atomic_assert_error(); | 782 | edac_atomic_assert_error(); |
787 | return; | 783 | return; |
788 | } | 784 | } |
@@ -799,7 +795,7 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs) | |||
799 | } | 795 | } |
800 | 796 | ||
801 | static notrace __kprobes void | 797 | static notrace __kprobes void |
802 | io_check_error(unsigned char reason, struct pt_regs * regs) | 798 | io_check_error(unsigned char reason, struct pt_regs *regs) |
803 | { | 799 | { |
804 | printk("NMI: IOCK error (debug interrupt?)\n"); | 800 | printk("NMI: IOCK error (debug interrupt?)\n"); |
805 | show_registers(regs); | 801 | show_registers(regs); |
@@ -836,7 +832,7 @@ asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs) | |||
836 | 832 | ||
837 | cpu = smp_processor_id(); | 833 | cpu = smp_processor_id(); |
838 | 834 | ||
839 | /* Only the BSP gets external NMIs from the system. */ | 835 | /* Only the BSP gets external NMIs from the system. */ |
840 | if (!cpu) | 836 | if (!cpu) |
841 | reason = get_nmi_reason(); | 837 | reason = get_nmi_reason(); |
842 | 838 | ||
@@ -848,18 +844,17 @@ asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs) | |||
848 | * Ok, so this is none of the documented NMI sources, | 844 | * Ok, so this is none of the documented NMI sources, |
849 | * so it must be the NMI watchdog. | 845 | * so it must be the NMI watchdog. |
850 | */ | 846 | */ |
851 | if (nmi_watchdog_tick(regs,reason)) | 847 | if (nmi_watchdog_tick(regs, reason)) |
852 | return; | 848 | return; |
853 | if (!do_nmi_callback(regs,cpu)) | 849 | if (!do_nmi_callback(regs, cpu)) |
854 | unknown_nmi_error(reason, regs); | 850 | unknown_nmi_error(reason, regs); |
855 | 851 | ||
856 | return; | 852 | return; |
857 | } | 853 | } |
858 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) | 854 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) |
859 | return; | 855 | return; |
860 | 856 | ||
861 | /* AK: following checks seem to be broken on modern chipsets. FIXME */ | 857 | /* AK: following checks seem to be broken on modern chipsets. FIXME */ |
862 | |||
863 | if (reason & 0x80) | 858 | if (reason & 0x80) |
864 | mem_parity_error(reason, regs); | 859 | mem_parity_error(reason, regs); |
865 | if (reason & 0x40) | 860 | if (reason & 0x40) |
@@ -870,9 +865,12 @@ asmlinkage notrace __kprobes void | |||
870 | do_nmi(struct pt_regs *regs, long error_code) | 865 | do_nmi(struct pt_regs *regs, long error_code) |
871 | { | 866 | { |
872 | nmi_enter(); | 867 | nmi_enter(); |
868 | |||
873 | add_pda(__nmi_count, 1); | 869 | add_pda(__nmi_count, 1); |
870 | |||
874 | if (!ignore_nmis) | 871 | if (!ignore_nmis) |
875 | default_do_nmi(regs); | 872 | default_do_nmi(regs); |
873 | |||
876 | nmi_exit(); | 874 | nmi_exit(); |
877 | } | 875 | } |
878 | 876 | ||
@@ -889,13 +887,14 @@ void restart_nmi(void) | |||
889 | } | 887 | } |
890 | 888 | ||
891 | /* runs on IST stack. */ | 889 | /* runs on IST stack. */ |
892 | asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code) | 890 | asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) |
893 | { | 891 | { |
894 | trace_hardirqs_fixup(); | 892 | trace_hardirqs_fixup(); |
895 | 893 | ||
896 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) { | 894 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) |
895 | == NOTIFY_STOP) | ||
897 | return; | 896 | return; |
898 | } | 897 | |
899 | preempt_conditional_sti(regs); | 898 | preempt_conditional_sti(regs); |
900 | do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); | 899 | do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); |
901 | preempt_conditional_cli(regs); | 900 | preempt_conditional_cli(regs); |
@@ -948,21 +947,19 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs, | |||
948 | 947 | ||
949 | /* Mask out spurious debug traps due to lazy DR7 setting */ | 948 | /* Mask out spurious debug traps due to lazy DR7 setting */ |
950 | if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { | 949 | if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { |
951 | if (!tsk->thread.debugreg7) { | 950 | if (!tsk->thread.debugreg7) |
952 | goto clear_dr7; | 951 | goto clear_dr7; |
953 | } | ||
954 | } | 952 | } |
955 | 953 | ||
956 | tsk->thread.debugreg6 = condition; | 954 | tsk->thread.debugreg6 = condition; |
957 | 955 | ||
958 | |||
959 | /* | 956 | /* |
960 | * Single-stepping through TF: make sure we ignore any events in | 957 | * Single-stepping through TF: make sure we ignore any events in |
961 | * kernel space (but re-enable TF when returning to user mode). | 958 | * kernel space (but re-enable TF when returning to user mode). |
962 | */ | 959 | */ |
963 | if (condition & DR_STEP) { | 960 | if (condition & DR_STEP) { |
964 | if (!user_mode(regs)) | 961 | if (!user_mode(regs)) |
965 | goto clear_TF_reenable; | 962 | goto clear_TF_reenable; |
966 | } | 963 | } |
967 | 964 | ||
968 | /* Ok, finally something we can handle */ | 965 | /* Ok, finally something we can handle */ |
@@ -975,7 +972,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs, | |||
975 | force_sig_info(SIGTRAP, &info, tsk); | 972 | force_sig_info(SIGTRAP, &info, tsk); |
976 | 973 | ||
977 | clear_dr7: | 974 | clear_dr7: |
978 | set_debugreg(0UL, 7); | 975 | set_debugreg(0, 7); |
979 | preempt_conditional_cli(regs); | 976 | preempt_conditional_cli(regs); |
980 | return; | 977 | return; |
981 | 978 | ||
@@ -983,6 +980,7 @@ clear_TF_reenable: | |||
983 | set_tsk_thread_flag(tsk, TIF_SINGLESTEP); | 980 | set_tsk_thread_flag(tsk, TIF_SINGLESTEP); |
984 | regs->flags &= ~X86_EFLAGS_TF; | 981 | regs->flags &= ~X86_EFLAGS_TF; |
985 | preempt_conditional_cli(regs); | 982 | preempt_conditional_cli(regs); |
983 | return; | ||
986 | } | 984 | } |
987 | 985 | ||
988 | static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) | 986 | static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) |
@@ -1005,7 +1003,7 @@ static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) | |||
1005 | asmlinkage void do_coprocessor_error(struct pt_regs *regs) | 1003 | asmlinkage void do_coprocessor_error(struct pt_regs *regs) |
1006 | { | 1004 | { |
1007 | void __user *ip = (void __user *)(regs->ip); | 1005 | void __user *ip = (void __user *)(regs->ip); |
1008 | struct task_struct * task; | 1006 | struct task_struct *task; |
1009 | siginfo_t info; | 1007 | siginfo_t info; |
1010 | unsigned short cwd, swd; | 1008 | unsigned short cwd, swd; |
1011 | 1009 | ||
@@ -1038,30 +1036,30 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs) | |||
1038 | cwd = get_fpu_cwd(task); | 1036 | cwd = get_fpu_cwd(task); |
1039 | swd = get_fpu_swd(task); | 1037 | swd = get_fpu_swd(task); |
1040 | switch (swd & ~cwd & 0x3f) { | 1038 | switch (swd & ~cwd & 0x3f) { |
1041 | case 0x000: | 1039 | case 0x000: /* No unmasked exception */ |
1042 | default: | 1040 | default: /* Multiple exceptions */ |
1043 | break; | 1041 | break; |
1044 | case 0x001: /* Invalid Op */ | 1042 | case 0x001: /* Invalid Op */ |
1045 | /* | 1043 | /* |
1046 | * swd & 0x240 == 0x040: Stack Underflow | 1044 | * swd & 0x240 == 0x040: Stack Underflow |
1047 | * swd & 0x240 == 0x240: Stack Overflow | 1045 | * swd & 0x240 == 0x240: Stack Overflow |
1048 | * User must clear the SF bit (0x40) if set | 1046 | * User must clear the SF bit (0x40) if set |
1049 | */ | 1047 | */ |
1050 | info.si_code = FPE_FLTINV; | 1048 | info.si_code = FPE_FLTINV; |
1051 | break; | 1049 | break; |
1052 | case 0x002: /* Denormalize */ | 1050 | case 0x002: /* Denormalize */ |
1053 | case 0x010: /* Underflow */ | 1051 | case 0x010: /* Underflow */ |
1054 | info.si_code = FPE_FLTUND; | 1052 | info.si_code = FPE_FLTUND; |
1055 | break; | 1053 | break; |
1056 | case 0x004: /* Zero Divide */ | 1054 | case 0x004: /* Zero Divide */ |
1057 | info.si_code = FPE_FLTDIV; | 1055 | info.si_code = FPE_FLTDIV; |
1058 | break; | 1056 | break; |
1059 | case 0x008: /* Overflow */ | 1057 | case 0x008: /* Overflow */ |
1060 | info.si_code = FPE_FLTOVF; | 1058 | info.si_code = FPE_FLTOVF; |
1061 | break; | 1059 | break; |
1062 | case 0x020: /* Precision */ | 1060 | case 0x020: /* Precision */ |
1063 | info.si_code = FPE_FLTRES; | 1061 | info.si_code = FPE_FLTRES; |
1064 | break; | 1062 | break; |
1065 | } | 1063 | } |
1066 | force_sig_info(SIGFPE, &info, task); | 1064 | force_sig_info(SIGFPE, &info, task); |
1067 | } | 1065 | } |
@@ -1074,7 +1072,7 @@ asmlinkage void bad_intr(void) | |||
1074 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | 1072 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) |
1075 | { | 1073 | { |
1076 | void __user *ip = (void __user *)(regs->ip); | 1074 | void __user *ip = (void __user *)(regs->ip); |
1077 | struct task_struct * task; | 1075 | struct task_struct *task; |
1078 | siginfo_t info; | 1076 | siginfo_t info; |
1079 | unsigned short mxcsr; | 1077 | unsigned short mxcsr; |
1080 | 1078 | ||
@@ -1102,25 +1100,25 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | |||
1102 | */ | 1100 | */ |
1103 | mxcsr = get_fpu_mxcsr(task); | 1101 | mxcsr = get_fpu_mxcsr(task); |
1104 | switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { | 1102 | switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { |
1105 | case 0x000: | 1103 | case 0x000: |
1106 | default: | 1104 | default: |
1107 | break; | 1105 | break; |
1108 | case 0x001: /* Invalid Op */ | 1106 | case 0x001: /* Invalid Op */ |
1109 | info.si_code = FPE_FLTINV; | 1107 | info.si_code = FPE_FLTINV; |
1110 | break; | 1108 | break; |
1111 | case 0x002: /* Denormalize */ | 1109 | case 0x002: /* Denormalize */ |
1112 | case 0x010: /* Underflow */ | 1110 | case 0x010: /* Underflow */ |
1113 | info.si_code = FPE_FLTUND; | 1111 | info.si_code = FPE_FLTUND; |
1114 | break; | 1112 | break; |
1115 | case 0x004: /* Zero Divide */ | 1113 | case 0x004: /* Zero Divide */ |
1116 | info.si_code = FPE_FLTDIV; | 1114 | info.si_code = FPE_FLTDIV; |
1117 | break; | 1115 | break; |
1118 | case 0x008: /* Overflow */ | 1116 | case 0x008: /* Overflow */ |
1119 | info.si_code = FPE_FLTOVF; | 1117 | info.si_code = FPE_FLTOVF; |
1120 | break; | 1118 | break; |
1121 | case 0x020: /* Precision */ | 1119 | case 0x020: /* Precision */ |
1122 | info.si_code = FPE_FLTRES; | 1120 | info.si_code = FPE_FLTRES; |
1123 | break; | 1121 | break; |
1124 | } | 1122 | } |
1125 | force_sig_info(SIGFPE, &info, task); | 1123 | force_sig_info(SIGFPE, &info, task); |
1126 | } | 1124 | } |
@@ -1138,7 +1136,7 @@ asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void) | |||
1138 | } | 1136 | } |
1139 | 1137 | ||
1140 | /* | 1138 | /* |
1141 | * 'math_state_restore()' saves the current math information in the | 1139 | * 'math_state_restore()' saves the current math information in the |
1142 | * old math state array, and gets the new ones from the current task | 1140 | * old math state array, and gets the new ones from the current task |
1143 | * | 1141 | * |
1144 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. | 1142 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. |
@@ -1163,7 +1161,7 @@ asmlinkage void math_state_restore(void) | |||
1163 | local_irq_disable(); | 1161 | local_irq_disable(); |
1164 | } | 1162 | } |
1165 | 1163 | ||
1166 | clts(); /* Allow maths ops (or we recurse) */ | 1164 | clts(); /* Allow maths ops (or we recurse) */ |
1167 | restore_fpu_checking(&me->thread.xstate->fxsave); | 1165 | restore_fpu_checking(&me->thread.xstate->fxsave); |
1168 | task_thread_info(me)->status |= TS_USEDFPU; | 1166 | task_thread_info(me)->status |= TS_USEDFPU; |
1169 | me->fpu_counter++; | 1167 | me->fpu_counter++; |
@@ -1172,64 +1170,61 @@ EXPORT_SYMBOL_GPL(math_state_restore); | |||
1172 | 1170 | ||
1173 | void __init trap_init(void) | 1171 | void __init trap_init(void) |
1174 | { | 1172 | { |
1175 | set_intr_gate(0,&divide_error); | 1173 | set_intr_gate(0, &divide_error); |
1176 | set_intr_gate_ist(1,&debug,DEBUG_STACK); | 1174 | set_intr_gate_ist(1, &debug, DEBUG_STACK); |
1177 | set_intr_gate_ist(2,&nmi,NMI_STACK); | 1175 | set_intr_gate_ist(2, &nmi, NMI_STACK); |
1178 | set_system_gate_ist(3,&int3,DEBUG_STACK); /* int3 can be called from all */ | 1176 | set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */ |
1179 | set_system_gate(4,&overflow); /* int4 can be called from all */ | 1177 | set_system_gate(4, &overflow); /* int4 can be called from all */ |
1180 | set_intr_gate(5,&bounds); | 1178 | set_intr_gate(5, &bounds); |
1181 | set_intr_gate(6,&invalid_op); | 1179 | set_intr_gate(6, &invalid_op); |
1182 | set_intr_gate(7,&device_not_available); | 1180 | set_intr_gate(7, &device_not_available); |
1183 | set_intr_gate_ist(8,&double_fault, DOUBLEFAULT_STACK); | 1181 | set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK); |
1184 | set_intr_gate(9,&coprocessor_segment_overrun); | 1182 | set_intr_gate(9, &coprocessor_segment_overrun); |
1185 | set_intr_gate(10,&invalid_TSS); | 1183 | set_intr_gate(10, &invalid_TSS); |
1186 | set_intr_gate(11,&segment_not_present); | 1184 | set_intr_gate(11, &segment_not_present); |
1187 | set_intr_gate_ist(12,&stack_segment,STACKFAULT_STACK); | 1185 | set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK); |
1188 | set_intr_gate(13,&general_protection); | 1186 | set_intr_gate(13, &general_protection); |
1189 | set_intr_gate(14,&page_fault); | 1187 | set_intr_gate(14, &page_fault); |
1190 | set_intr_gate(15,&spurious_interrupt_bug); | 1188 | set_intr_gate(15, &spurious_interrupt_bug); |
1191 | set_intr_gate(16,&coprocessor_error); | 1189 | set_intr_gate(16, &coprocessor_error); |
1192 | set_intr_gate(17,&alignment_check); | 1190 | set_intr_gate(17, &alignment_check); |
1193 | #ifdef CONFIG_X86_MCE | 1191 | #ifdef CONFIG_X86_MCE |
1194 | set_intr_gate_ist(18,&machine_check, MCE_STACK); | 1192 | set_intr_gate_ist(18, &machine_check, MCE_STACK); |
1195 | #endif | 1193 | #endif |
1196 | set_intr_gate(19,&simd_coprocessor_error); | 1194 | set_intr_gate(19, &simd_coprocessor_error); |
1197 | 1195 | ||
1198 | #ifdef CONFIG_IA32_EMULATION | 1196 | #ifdef CONFIG_IA32_EMULATION |
1199 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); | 1197 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); |
1200 | #endif | 1198 | #endif |
1201 | |||
1202 | /* | 1199 | /* |
1203 | * initialize the per thread extended state: | 1200 | * initialize the per thread extended state: |
1204 | */ | 1201 | */ |
1205 | init_thread_xstate(); | 1202 | init_thread_xstate(); |
1206 | /* | 1203 | /* |
1207 | * Should be a barrier for any external CPU state. | 1204 | * Should be a barrier for any external CPU state: |
1208 | */ | 1205 | */ |
1209 | cpu_init(); | 1206 | cpu_init(); |
1210 | } | 1207 | } |
1211 | 1208 | ||
1212 | |||
1213 | static int __init oops_setup(char *s) | 1209 | static int __init oops_setup(char *s) |
1214 | { | 1210 | { |
1215 | if (!s) | 1211 | if (!s) |
1216 | return -EINVAL; | 1212 | return -EINVAL; |
1217 | if (!strcmp(s, "panic")) | 1213 | if (!strcmp(s, "panic")) |
1218 | panic_on_oops = 1; | 1214 | panic_on_oops = 1; |
1219 | return 0; | 1215 | return 0; |
1220 | } | 1216 | } |
1221 | early_param("oops", oops_setup); | 1217 | early_param("oops", oops_setup); |
1222 | 1218 | ||
1223 | static int __init kstack_setup(char *s) | 1219 | static int __init kstack_setup(char *s) |
1224 | { | 1220 | { |
1225 | if (!s) | 1221 | if (!s) |
1226 | return -EINVAL; | 1222 | return -EINVAL; |
1227 | kstack_depth_to_print = simple_strtoul(s,NULL,0); | 1223 | kstack_depth_to_print = simple_strtoul(s, NULL, 0); |
1228 | return 0; | 1224 | return 0; |
1229 | } | 1225 | } |
1230 | early_param("kstack", kstack_setup); | 1226 | early_param("kstack", kstack_setup); |
1231 | 1227 | ||
1232 | |||
1233 | static int __init code_bytes_setup(char *s) | 1228 | static int __init code_bytes_setup(char *s) |
1234 | { | 1229 | { |
1235 | code_bytes = simple_strtoul(s, NULL, 0); | 1230 | code_bytes = simple_strtoul(s, NULL, 0); |