author    H. Peter Anvin <hpa@zytor.com>          2012-04-18 20:16:49 -0400
committer H. Peter Anvin <hpa@linux.intel.com>    2012-04-19 18:42:45 -0400
commit    9900aa2f95844eb81428c1d3d202c01b7f3ac77a
tree      4c93c0eebb8e645e0de8aa767913f6c4f14f1b75 /arch/x86/kernel
parent    6a1ea279c210e7dc05de86dc29c0d4f577f484fb
x86-64: Handle exception table entries during early boot
If we get an exception during early boot, walk the exception table to
see if we should intercept it.  The main use case for this is to allow
rdmsr_safe()/wrmsr_safe() during CPU initialization.

Since the exception table is currently sorted at runtime, and fairly
late in startup, this code walks the exception table linearly.  We
obviously don't need to worry about modules, however: none have been
loaded at this point.

[ v2: Use early_fixup_exception() instead of linear search ]

Link: http://lkml.kernel.org/r/1334794610-5546-5-git-send-email-hpa@zytor.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
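[ Note: the assembly below relies on early_fixup_exception(), which is
  introduced by the parent commit and therefore does not appear in this
  diff.  Based on the call site (a pointer to the saved %rip is passed
  in %rdi, and a non-zero return in %eax means "fixed up"), a minimal C
  sketch of what the helper is expected to do is shown here; the body is
  an assumption drawn from the commit description, not the exact
  implementation. ]

	/* Sketch only: the real helper lives in arch/x86/mm/extable.c. */
	#include <linux/module.h>	/* search_exception_tables() */
	#include <asm/uaccess.h>	/* struct exception_table_entry */

	int __init early_fixup_exception(unsigned long *ip)
	{
		const struct exception_table_entry *fixup;

		/*
		 * Only the core kernel's __ex_table matters here: no
		 * modules have been loaded this early in boot.
		 */
		fixup = search_exception_tables(*ip);
		if (!fixup)
			return 0;	/* no entry: caller falls through to the panic path */

		*ip = fixup->fixup;	/* resume execution at the fixup code */
		return 1;		/* non-zero %eax: the asm caller restores regs and IRETs */
	}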
Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/head_64.S | 76 ++++++++++++++++++++++++++-------
 1 file changed, 57 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d1e112c8b577..7a40f2447321 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -19,6 +19,7 @@
 #include <asm/cache.h>
 #include <asm/processor-flags.h>
 #include <asm/percpu.h>
+#include <asm/nops.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -26,6 +27,7 @@
 #define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
 #else
 #define GET_CR2_INTO(reg) movq %cr2, reg
+#define INTERRUPT_RETURN iretq
 #endif
 
 /* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
@@ -271,35 +273,56 @@ bad_address:
 	jmp bad_address
 
 	.section ".init.text","ax"
-#ifdef CONFIG_EARLY_PRINTK
 	.globl early_idt_handlers
 early_idt_handlers:
+	# 104(%rsp) %rflags
+	#  96(%rsp) %cs
+	#  88(%rsp) %rip
+	#  80(%rsp) error code
 	i = 0
 	.rept NUM_EXCEPTION_VECTORS
-	movl $i, %esi
+	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
+	ASM_NOP2
+	.else
+	pushq $0		# Dummy error code, to make stack frame uniform
+	.endif
+	pushq $i		# 72(%rsp) Vector number
 	jmp early_idt_handler
 	i = i + 1
 	.endr
-#endif
 
 ENTRY(early_idt_handler)
-#ifdef CONFIG_EARLY_PRINTK
+	cld
+
 	cmpl $2,early_recursion_flag(%rip)
 	jz 1f
 	incl early_recursion_flag(%rip)
-	GET_CR2_INTO(%r9)
-	xorl %r8d,%r8d		# zero for error code
-	movl %esi,%ecx		# get vector number
-	# Test %ecx against mask of vectors that push error code.
-	cmpl $31,%ecx
-	ja 0f
-	movl $1,%eax
-	salq %cl,%rax
-	testl $EXCEPTION_ERRCODE_MASK,%eax
-	je 0f
-	popq %r8		# get error code
-0:	movq 0(%rsp),%rcx	# get ip
-	movq 8(%rsp),%rdx	# get cs
+
+	pushq %rax		# 64(%rsp)
+	pushq %rcx		# 56(%rsp)
+	pushq %rdx		# 48(%rsp)
+	pushq %rsi		# 40(%rsp)
+	pushq %rdi		# 32(%rsp)
+	pushq %r8		# 24(%rsp)
+	pushq %r9		# 16(%rsp)
+	pushq %r10		#  8(%rsp)
+	pushq %r11		#  0(%rsp)
+
+	cmpl $__KERNEL_CS,96(%rsp)
+	jne 10f
+
+	leaq 88(%rsp),%rdi	# Pointer to %rip
+	call early_fixup_exception
+	andl %eax,%eax
+	jnz 20f			# Found an exception entry
+
+10:
+#ifdef CONFIG_EARLY_PRINTK
+	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
+	movl 80(%rsp),%r8d	# error code
+	movl 72(%rsp),%esi	# vector number
+	movl 96(%rsp),%edx	# %cs
+	movq 88(%rsp),%rcx	# %rip
 	xorl %eax,%eax
 	leaq early_idt_msg(%rip),%rdi
 	call early_printk
@@ -308,17 +331,32 @@ ENTRY(early_idt_handler)
 	call dump_stack
 #ifdef CONFIG_KALLSYMS
 	leaq early_idt_ripmsg(%rip),%rdi
-	movq 0(%rsp),%rsi	# get rip again
+	movq 40(%rsp),%rsi	# %rip again
 	call __print_symbol
 #endif
 #endif /* EARLY_PRINTK */
 1:	hlt
 	jmp 1b
 
-#ifdef CONFIG_EARLY_PRINTK
+20:	# Exception table entry found
+	popq %r11
+	popq %r10
+	popq %r9
+	popq %r8
+	popq %rdi
+	popq %rsi
+	popq %rdx
+	popq %rcx
+	popq %rax
+	addq $16,%rsp		# drop vector number and error code
+	decl early_recursion_flag(%rip)
+	INTERRUPT_RETURN
+
+	.balign 4
 early_recursion_flag:
 	.long 0
 
+#ifdef CONFIG_EARLY_PRINTK
 early_idt_msg:
 	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
 early_idt_ripmsg:
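
[ Note: as a usage illustration of the rdmsr_safe()/wrmsr_safe() case the
  commit message mentions: the *_safe MSR accessors attach an exception-
  table entry to the RDMSR/WRMSR instruction, so with this patch a #GP
  taken before the real IDT exists is caught by early_idt_handler, fixed
  up via the table, and reported as an error return instead of an early
  panic.  The function and the MSR chosen below are illustrative only and
  are not part of this patch. ]

	#include <linux/init.h>
	#include <asm/msr.h>

	/* Hypothetical early-boot probe; any MSR that may be absent works. */
	static void __init probe_some_msr(void)
	{
		u32 lo, hi;

		/*
		 * Returns non-zero if the RDMSR faulted (MSR not
		 * implemented); the fault is absorbed through the
		 * exception table instead of killing early boot.
		 */
		if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi))
			return;

		/* ... use lo/hi ... */
	}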