aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/xen
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy@goop.org>2008-04-02 13:54:11 -0400
committerIngo Molnar <mingo@elte.hu>2008-04-24 17:57:33 -0400
commitb77797fb2bf31bf076e6b69736119bc6a077525b (patch)
tree4d0bfcb5bcc96988ef421c807837d7236fdb0e07 /arch/x86/xen
parent2bd50036b5dfc929390ddc48be7f6314447b2be3 (diff)
xen: fold xen_sysexit into xen_iret
xen_sysexit and xen_iret were doing essentially the same thing. Rather than having a separate implementation for xen_sysexit, we can just strip the stack back to an iret frame and jump into xen_iret. This removes a lot of code and complexity - specifically, another critical region. Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--arch/x86/xen/xen-asm.S70
1 file changed, 14 insertions(+), 56 deletions(-)
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 53cae923e148..2497a30f41de 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -108,6 +108,20 @@ ENDPATCH(xen_restore_fl_direct)
108	RELOC(xen_restore_fl_direct, 2b+1)
109
110	/*
111 We can't use sysexit directly, because we're not running in ring0.
112 But we can easily fake it up using iret. Assuming xen_sysexit
113 is jumped to with a standard stack frame, we can just strip it
114 back to a standard iret frame and use iret.
115 */
116ENTRY(xen_sysexit)
117 movl PT_EAX(%esp), %eax /* Shouldn't be necessary? */
118 orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
119 lea PT_EIP(%esp), %esp
120
121 jmp xen_iret
122ENDPROC(xen_sysexit)
123
124/*
125	   This is run where a normal iret would be run, with the same stack setup:
126	      8: eflags
127	      4: cs
@@ -276,62 +290,6 @@ ENTRY(xen_iret_crit_fixup)
290	2:	jmp xen_do_upcall
291
292
279ENTRY(xen_sysexit)
280 /* Store vcpu_info pointer for easy access. Do it this
281 way to avoid having to reload %fs */
282#ifdef CONFIG_SMP
283 GET_THREAD_INFO(%eax)
284 movl TI_cpu(%eax),%eax
285 movl __per_cpu_offset(,%eax,4),%eax
286 mov per_cpu__xen_vcpu(%eax),%eax
287#else
288 movl per_cpu__xen_vcpu, %eax
289#endif
290
291 /* We can't actually use sysexit in a pv guest,
292 so fake it up with iret */
293 pushl $__USER_DS /* user stack segment */
294 pushl %ecx /* user esp */
295 pushl PT_EFLAGS+2*4(%esp) /* user eflags */
296 pushl $__USER_CS /* user code segment */
297 pushl %edx /* user eip */
298
299xen_sysexit_start_crit:
300 /* Unmask events... */
301 movb $0, XEN_vcpu_info_mask(%eax)
302 /* ...and test for pending.
303 There's a preempt window here, but it doesn't
304 matter because we're within the critical section. */
305 testb $0xff, XEN_vcpu_info_pending(%eax)
306
307 /* If there's something pending, mask events again so we
308 can directly inject it back into the kernel. */
309 jnz 1f
310
311 movl PT_EAX+5*4(%esp),%eax
3122: iret
3131: movb $1, XEN_vcpu_info_mask(%eax)
314xen_sysexit_end_crit:
315 addl $5*4, %esp /* remove iret frame */
316 /* no need to re-save regs, but need to restore kernel %fs */
317 mov $__KERNEL_PERCPU, %eax
318 mov %eax, %fs
319 jmp xen_do_upcall
320.section __ex_table,"a"
321 .align 4
322 .long 2b,iret_exc
323.previous
324
325 .globl xen_sysexit_start_crit, xen_sysexit_end_crit
326/*
327 sysexit fixup is easy, since the old frame is still sitting there
328 on the stack. We just need to remove the new recursive
329 interrupt and return.
330 */
331ENTRY(xen_sysexit_crit_fixup)
332 addl $PT_OLDESP+5*4, %esp /* remove frame+iret */
333 jmp xen_do_upcall
334
293	/*
294	   Force an event check by making a hypercall,
295	   but preserve regs before making the call.