Diffstat (limited to 'arch/x86/xen/xen-asm.S')
-rw-r--r--	arch/x86/xen/xen-asm.S	56
1 files changed, 56 insertions(+), 0 deletions(-)
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 99223cc323be..1ac08082a4b4 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -280,6 +280,62 @@ ENTRY(xen_iret_crit_fixup)
 2:	ret
 
 
+ENTRY(xen_sysexit)
+	/* Store vcpu_info pointer for easy access.  Do it this
+	   way to avoid having to reload %fs */
+#ifdef CONFIG_SMP
+	GET_THREAD_INFO(%eax)
+	movl TI_cpu(%eax),%eax
+	movl __per_cpu_offset(,%eax,4),%eax
+	mov per_cpu__xen_vcpu(%eax),%eax
+#else
+	movl per_cpu__xen_vcpu, %eax
+#endif
+
+	/* We can't actually use sysexit in a pv guest,
+	   so fake it up with iret */
+	pushl $__USER_DS		/* user stack segment */
+	pushl %ecx			/* user esp */
+	pushl PT_EFLAGS+2*4(%esp)	/* user eflags */
+	pushl $__USER_CS		/* user code segment */
+	pushl %edx			/* user eip */
+
+xen_sysexit_start_crit:
+	/* Unmask events... */
+	movb $0, XEN_vcpu_info_mask(%eax)
+	/* ...and test for pending.
+	   There's a preempt window here, but it doesn't
+	   matter because we're within the critical section. */
+	testb $0xff, XEN_vcpu_info_pending(%eax)
+
+	/* If there's something pending, mask events again so we
+	   can directly inject it back into the kernel. */
+	jnz 1f
+
+	movl PT_EAX+5*4(%esp),%eax
+2:	iret
+1:	movb $1, XEN_vcpu_info_mask(%eax)
+xen_sysexit_end_crit:
+	addl $5*4, %esp			/* remove iret frame */
+	/* no need to re-save regs, but need to restore kernel %fs */
+	mov $__KERNEL_PERCPU, %eax
+	mov %eax, %fs
+	jmp xen_do_upcall
+.section __ex_table,"a"
+	.align 4
+	.long 2b,iret_exc
+.previous
+
+	.globl xen_sysexit_start_crit, xen_sysexit_end_crit
+/*
+   sysexit fixup is easy, since the old frame is still sitting there
+   on the stack.  We just need to remove the new recursive
+   interrupt and return.
+ */
+ENTRY(xen_sysexit_crit_fixup)
+	addl $PT_OLDESP+5*4, %esp	/* remove frame+iret */
+	jmp xen_do_upcall
+
 /*
    Force an event check by making a hypercall,
    but preserve regs before making the call.