Diffstat (limited to 'arch/i386/kernel/entry.S')
-rw-r--r--	arch/i386/kernel/entry.S | 39 ++++++++++++++++++++++++++-------------
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index d274612e05cd..de34b7fed3c1 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -53,6 +53,19 @@
 #include <asm/dwarf2.h>
 #include "irq_vectors.h"
 
+/*
+ * We use macros for low-level operations which need to be overridden
+ * for paravirtualization.  The following will never clobber any registers:
+ *	INTERRUPT_RETURN (aka. "iret")
+ *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
+ *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
+ *
+ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
+ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
+ * Allowing a register to be clobbered can shrink the paravirt replacement
+ * enough to patch inline, increasing performance.
+ */
+
 #define nr_syscalls ((syscall_table_size)/4)
 
 CF_MASK		= 0x00000001
@@ -63,9 +76,9 @@ NT_MASK	= 0x00004000
 VM_MASK	= 0x00020000
 
 #ifdef CONFIG_PREEMPT
-#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
+#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
-#define preempt_stop
+#define preempt_stop(clobbers)
 #define resume_kernel		restore_nocheck
 #endif
 
@@ -226,7 +239,7 @@ ENTRY(ret_from_fork)
 	ALIGN
 	RING0_PTREGS_FRAME
 ret_from_exception:
-	preempt_stop
+	preempt_stop(CLBR_ANY)
 ret_from_intr:
 	GET_THREAD_INFO(%ebp)
 check_userspace:
@@ -237,7 +250,7 @@ check_userspace:
 	jb resume_kernel		# not returning to v8086 or userspace
 
 ENTRY(resume_userspace)
-	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	movl TI_flags(%ebp), %ecx
@@ -248,7 +261,7 @@ ENTRY(resume_userspace)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-	DISABLE_INTERRUPTS
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
 	jnz restore_nocheck
 need_resched:
@@ -277,7 +290,7 @@ sysenter_past_esp:
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs and here we enable it straight after entry:
 	 */
-	ENABLE_INTERRUPTS
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushl $(__USER_DS)
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET ss, 0*/
@@ -322,7 +335,7 @@ sysenter_past_esp:
 	jae syscall_badsys
 	call *sys_call_table(,%eax,4)
 	movl %eax,PT_EAX(%esp)
-	DISABLE_INTERRUPTS
+	DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX)
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
@@ -364,7 +377,7 @@ syscall_call:
 	call *sys_call_table(,%eax,4)
 	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
-	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -393,7 +406,7 @@ restore_nocheck_notrace:
 .section .fixup,"ax"
 iret_exc:
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushl $0			# no error code
 	pushl $do_iret_error
 	jmp error_code
@@ -436,7 +449,7 @@ ldt_ss:
 	CFI_ADJUST_CFA_OFFSET 4
 	pushl %eax
 	CFI_ADJUST_CFA_OFFSET 4
-	DISABLE_INTERRUPTS
+	DISABLE_INTERRUPTS(CLBR_EAX)
 	TRACE_IRQS_OFF
 	lss (%esp), %esp
 	CFI_ADJUST_CFA_OFFSET -8
@@ -451,7 +464,7 @@ work_pending:
 	jz work_notifysig
 work_resched:
 	call schedule
-	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -509,7 +522,7 @@ syscall_exit_work:
 	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
 	jz work_pending
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
+	ENABLE_INTERRUPTS(CLBR_ANY)	# could let do_syscall_trace() call
 					# schedule() instead
 	movl %esp, %eax
 	movl $1, %edx
@@ -693,7 +706,7 @@ ENTRY(device_not_available)
 	GET_CR0_INTO_EAX
 	testl $0x4, %eax		# EM (math emulation bit)
 	jne device_not_available_emulate
-	preempt_stop
+	preempt_stop(CLBR_ANY)
 	call math_state_restore
 	jmp ret_from_exception
 device_not_available_emulate:
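
The comment block added at the top of the file documents the calling convention but not the macros themselves, which live in a header outside this diff. For context, here is a minimal sketch of the native fallbacks these macros are assumed to expand to when CONFIG_PARAVIRT is off, modeled on the include/asm-i386/irqflags.h definitions of this era; the exact clobber-flag values shown are illustrative assumptions, not quoted from this commit:

/*
 * Sketch, assuming the native (!CONFIG_PARAVIRT) case: each macro is a
 * thin alias for the raw instruction, and the clobbers argument is
 * simply ignored.  Under CONFIG_PARAVIRT these instead become patchable
 * call sites, and the clobber list tells the patcher which registers
 * the paravirt replacement is allowed to scratch.
 */
#ifndef CONFIG_PARAVIRT
#define DISABLE_INTERRUPTS(clobbers)	cli
#define ENABLE_INTERRUPTS(clobbers)	sti
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define INTERRUPT_RETURN		iret
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif

/*
 * Plausible clobber encoding (assumed, not from this patch): a bitmask,
 * so annotations can be combined as in DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX).
 */
#define CLBR_NONE	0x0
#define CLBR_EAX	0x1
#define CLBR_ECX	0x2
#define CLBR_EDX	0x4
#define CLBR_ANY	0x7

The per-site annotations in the patch follow from register liveness at each call site: the sysenter exit path can grant CLBR_ECX|CLBR_EDX because %ecx is visibly reloaded on the very next line (movl TI_flags(%ebp), %ecx), ldt_ss can grant CLBR_EAX because %eax was pushed just before, and paths that fall through to a full register restore can afford CLBR_ANY. Only the paths where flags or all registers must survive (iret_exc, the sysenter entry) insist on CLBR_NONE.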