diff options
author | Jeremy Fitzhardinge <jeremy@goop.org> | 2008-03-17 19:37:17 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-04-24 17:57:31 -0400 |
commit | e2a81baf6604a2e08e10c7405b0349106f77c8af (patch) | |
tree | 3eaf386316be1f499d92fae213493ec3d6b5b576 /arch/x86/xen/xen-asm.S | |
parent | aa380c82b83252754a8c11bfc92359bd87cbf710 (diff) |
xen: support sysenter/sysexit if hypervisor does
64-bit Xen supports sysenter for 32-bit guests, so support its
use. (sysenter is faster than int $0x80 in 32-on-64.)
sysexit is still not supported, so we fake it up using iret.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/xen/xen-asm.S')
-rw-r--r-- | arch/x86/xen/xen-asm.S | 56 |
1 file changed, 56 insertions, 0 deletions
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 99223cc323be..1ac08082a4b4 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S | |||
@@ -280,6 +280,62 @@ ENTRY(xen_iret_crit_fixup) | |||
280 | 2: ret | 280 | 2: ret |
281 | 281 | ||
282 | 282 | ||
283 | ENTRY(xen_sysexit) | ||
284 | /* Store vcpu_info pointer for easy access. Do it this | ||
285 | way to avoid having to reload %fs */ | ||
286 | #ifdef CONFIG_SMP | ||
287 | GET_THREAD_INFO(%eax) | ||
288 | movl TI_cpu(%eax),%eax | ||
289 | movl __per_cpu_offset(,%eax,4),%eax | ||
290 | mov per_cpu__xen_vcpu(%eax),%eax | ||
291 | #else | ||
292 | movl per_cpu__xen_vcpu, %eax | ||
293 | #endif | ||
294 | |||
295 | /* We can't actually use sysexit in a pv guest, | ||
296 | so fake it up with iret */ | ||
297 | pushl $__USER_DS /* user stack segment */ | ||
298 | pushl %ecx /* user esp */ | ||
299 | pushl PT_EFLAGS+2*4(%esp) /* user eflags */ | ||
300 | pushl $__USER_CS /* user code segment */ | ||
301 | pushl %edx /* user eip */ | ||
302 | |||
303 | xen_sysexit_start_crit: | ||
304 | /* Unmask events... */ | ||
305 | movb $0, XEN_vcpu_info_mask(%eax) | ||
306 | /* ...and test for pending. | ||
307 | There's a preempt window here, but it doesn't | ||
308 | matter because we're within the critical section. */ | ||
309 | testb $0xff, XEN_vcpu_info_pending(%eax) | ||
310 | |||
311 | /* If there's something pending, mask events again so we | ||
312 | can directly inject it back into the kernel. */ | ||
313 | jnz 1f | ||
314 | |||
315 | movl PT_EAX+5*4(%esp),%eax | ||
316 | 2: iret | ||
317 | 1: movb $1, XEN_vcpu_info_mask(%eax) | ||
318 | xen_sysexit_end_crit: | ||
319 | addl $5*4, %esp /* remove iret frame */ | ||
320 | /* no need to re-save regs, but need to restore kernel %fs */ | ||
321 | mov $__KERNEL_PERCPU, %eax | ||
322 | mov %eax, %fs | ||
323 | jmp xen_do_upcall | ||
324 | .section __ex_table,"a" | ||
325 | .align 4 | ||
326 | .long 2b,iret_exc | ||
327 | .previous | ||
328 | |||
329 | .globl xen_sysexit_start_crit, xen_sysexit_end_crit | ||
330 | /* | ||
331 | sysexit fixup is easy, since the old frame is still sitting there | ||
332 | on the stack. We just need to remove the new recursive | ||
333 | interrupt and return. | ||
334 | */ | ||
335 | ENTRY(xen_sysexit_crit_fixup) | ||
336 | addl $PT_OLDESP+5*4, %esp /* remove frame+iret */ | ||
337 | jmp xen_do_upcall | ||
338 | |||
283 | /* | 339 | /* |
284 | Force an event check by making a hypercall, | 340 | Force an event check by making a hypercall, |
285 | but preserve regs before making the call. | 341 | but preserve regs before making the call. |