author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-04-02 13:54:11 -0400
committer	Ingo Molnar <mingo@elte.hu>		2008-04-24 17:57:33 -0400
commit		b77797fb2bf31bf076e6b69736119bc6a077525b (patch)
tree		4d0bfcb5bcc96988ef421c807837d7236fdb0e07
parent		2bd50036b5dfc929390ddc48be7f6314447b2be3 (diff)
xen: fold xen_sysexit into xen_iret
xen_sysexit and xen_iret were doing essentially the same thing. Rather
than having a separate implementation for xen_sysexit, we can just strip
the stack back to an iret frame and jump into xen_iret. This removes
a lot of code and complexity - specifically, another critical region.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
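[Note: the fold works because the 32-bit pt_regs frame already ends in a
hardware iret frame: its top five words are eip, cs, eflags, esp and ss.
A sketch of the resulting exit path (the PT_* offsets are the asm-offsets
constants the patch itself uses; the layout comment is illustrative, not
part of the patch):

	/* pt_regs frame on the kernel stack at xen_sysexit:
	 *
	 *	PT_EBX(%esp)	saved general registers ...
	 *	PT_EAX(%esp)	user %eax (syscall return value)
	 *	...		saved segments, orig_eax
	 *	PT_EIP(%esp)	user eip    <- start of a 5-word iret frame
	 *			user cs
	 *	PT_EFLAGS(%esp)	user eflags
	 *			user esp
	 *			user ss
	 */
	movl	PT_EAX(%esp), %eax		/* reload user return value */
	orl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	/* sysexit would re-enable
						   interrupts; have iret do it */
	lea	PT_EIP(%esp), %esp		/* strip down to the iret frame */
	jmp	xen_iret			/* shared pv-aware iret path */

Since xen_iret already handles the event-unmask critical section, the
separate xen_sysexit critical region and its fixup become unnecessary.]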
-rw-r--r--	arch/x86/kernel/entry_32.S	 9
-rw-r--r--	arch/x86/xen/xen-asm.S		70
2 files changed, 15 insertions, 64 deletions
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 209c334bb920..2a609dc3271c 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1044,15 +1044,8 @@ ENTRY(xen_hypervisor_callback)
 
 	jmp xen_iret_crit_fixup
 
-1:	cmpl $xen_sysexit_start_crit,%eax
-	jb 2f
-	cmpl $xen_sysexit_end_crit,%eax
-	jae 2f
-
-	jmp xen_sysexit_crit_fixup
-
 ENTRY(xen_do_upcall)
-2:	mov %esp, %eax
+1:	mov %esp, %eax
 	call xen_evtchn_do_upcall
 	jmp ret_from_intr
 	CFI_ENDPROC
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 53cae923e148..2497a30f41de 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -108,6 +108,20 @@ ENDPATCH(xen_restore_fl_direct)
 	RELOC(xen_restore_fl_direct, 2b+1)
 
 /*
+	We can't use sysexit directly, because we're not running in ring0.
+	But we can easily fake it up using iret.  Assuming xen_sysexit
+	is jumped to with a standard stack frame, we can just strip it
+	back to a standard iret frame and use iret.
+ */
+ENTRY(xen_sysexit)
+	movl PT_EAX(%esp), %eax		/* Shouldn't be necessary? */
+	orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
+	lea PT_EIP(%esp), %esp
+
+	jmp xen_iret
+ENDPROC(xen_sysexit)
+
+/*
 	This is run where a normal iret would be run, with the same stack setup:
 	  8: eflags
 	  4: cs
@@ -276,62 +290,6 @@ ENTRY(xen_iret_crit_fixup)
 2:	jmp xen_do_upcall
 
 
-ENTRY(xen_sysexit)
-	/* Store vcpu_info pointer for easy access.  Do it this
-	   way to avoid having to reload %fs */
-#ifdef CONFIG_SMP
-	GET_THREAD_INFO(%eax)
-	movl TI_cpu(%eax),%eax
-	movl __per_cpu_offset(,%eax,4),%eax
-	mov per_cpu__xen_vcpu(%eax),%eax
-#else
-	movl per_cpu__xen_vcpu, %eax
-#endif
-
-	/* We can't actually use sysexit in a pv guest,
-	   so fake it up with iret */
-	pushl $__USER_DS		/* user stack segment */
-	pushl %ecx			/* user esp */
-	pushl PT_EFLAGS+2*4(%esp)	/* user eflags */
-	pushl $__USER_CS		/* user code segment */
-	pushl %edx			/* user eip */
-
-xen_sysexit_start_crit:
-	/* Unmask events... */
-	movb $0, XEN_vcpu_info_mask(%eax)
-	/* ...and test for pending.
-	   There's a preempt window here, but it doesn't
-	   matter because we're within the critical section. */
-	testb $0xff, XEN_vcpu_info_pending(%eax)
-
-	/* If there's something pending, mask events again so we
-	   can directly inject it back into the kernel. */
-	jnz 1f
-
-	movl PT_EAX+5*4(%esp),%eax
-2:	iret
-1:	movb $1, XEN_vcpu_info_mask(%eax)
-xen_sysexit_end_crit:
-	addl $5*4, %esp			/* remove iret frame */
-	/* no need to re-save regs, but need to restore kernel %fs */
-	mov $__KERNEL_PERCPU, %eax
-	mov %eax, %fs
-	jmp xen_do_upcall
-.section __ex_table,"a"
-	.align 4
-	.long 2b,iret_exc
-.previous
-
-	.globl xen_sysexit_start_crit, xen_sysexit_end_crit
-/*
-	sysexit fixup is easy, since the old frame is still sitting there
-	on the stack.  We just need to remove the new recursive
-	interrupt and return.
- */
-ENTRY(xen_sysexit_crit_fixup)
-	addl $PT_OLDESP+5*4, %esp	/* remove frame+iret */
-	jmp xen_do_upcall
-
 /*
 	Force an event check by making a hypercall,
 	but preserve regs before making the call.