path: root/arch/x86/xen
author    Ingo Molnar <mingo@elte.hu>    2009-02-13 03:45:09 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-02-13 03:45:09 -0500
commit    ab639f3593f0b5e4439d549831442c18c3baf989 (patch)
tree      118743e94e5dc86c835dbc1f1d3bf1612f4ae740 /arch/x86/xen
parent    f8a6b2b9cee298a9663cbe38ce1eb5240987cb62 (diff)
parent    58105ef1857112a186696c9b8957020090226a28 (diff)
Merge branch 'core/percpu' into x86/core
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/enlighten.c  |  17
-rw-r--r--  arch/x86/xen/xen-asm.S    |  78
-rw-r--r--  arch/x86/xen/xen-asm_32.S | 238
-rw-r--r--  arch/x86/xen/xen-asm_64.S | 107
4 files changed, 228 insertions, 212 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 37230342c2c4..95ff6a0e942a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -323,13 +323,14 @@ static void load_TLS_descriptor(struct thread_struct *t,
 static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 {
 	/*
-	 * XXX sleazy hack: If we're being called in a lazy-cpu zone,
-	 * it means we're in a context switch, and %gs has just been
-	 * saved. This means we can zero it out to prevent faults on
-	 * exit from the hypervisor if the next process has no %gs.
-	 * Either way, it has been saved, and the new value will get
-	 * loaded properly. This will go away as soon as Xen has been
-	 * modified to not save/restore %gs for normal hypercalls.
+	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
+	 * and lazy gs handling is enabled, it means we're in a
+	 * context switch, and %gs has just been saved. This means we
+	 * can zero it out to prevent faults on exit from the
+	 * hypervisor if the next process has no %gs. Either way, it
+	 * has been saved, and the new value will get loaded properly.
+	 * This will go away as soon as Xen has been modified to not
+	 * save/restore %gs for normal hypercalls.
 	 *
 	 * On x86_64, this hack is not used for %gs, because gs points
 	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
@@ -341,7 +342,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 	 */
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
 #ifdef CONFIG_X86_32
-		loadsegment(gs, 0);
+		lazy_load_gs(0);
 #else
 		loadsegment(fs, 0);
 #endif
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 4c6f96799131..79d7362ad6d1 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -1,14 +1,14 @@
 /*
- Asm versions of Xen pv-ops, suitable for either direct use or inlining.
- The inline versions are the same as the direct-use versions, with the
- pre- and post-amble chopped off.
-
- This code is encoded for size rather than absolute efficiency,
- with a view to being able to inline as much as possible.
-
- We only bother with direct forms (ie, vcpu in percpu data) of
- the operations here; the indirect forms are better handled in
- C, since they're generally too large to inline anyway.
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining. The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in percpu data) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
  */
 
 #include <asm/asm-offsets.h>
@@ -18,17 +18,19 @@
 #include "xen-asm.h"
 
 /*
- Enable events. This clears the event mask and tests the pending
- event status with one and operation. If there are pending
- events, then enter the hypervisor to get them handled.
+ * Enable events. This clears the event mask and tests the pending
+ * event status with one and operation. If there are pending events,
+ * then enter the hypervisor to get them handled.
  */
 ENTRY(xen_irq_enable_direct)
 	/* Unmask events */
 	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts. The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
+	/*
+	 * Preempt here doesn't matter because that will deal with any
+	 * pending interrupts. The pending check may end up being run
+	 * on the wrong CPU, but that doesn't hurt.
+	 */
 
 	/* Test for pending */
 	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
@@ -43,8 +45,8 @@ ENDPATCH(xen_irq_enable_direct)
 
 
 /*
- Disabling events is simply a matter of making the event mask
- non-zero.
+ * Disabling events is simply a matter of making the event mask
+ * non-zero.
  */
 ENTRY(xen_irq_disable_direct)
 	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
@@ -54,18 +56,18 @@ ENDPATCH(xen_irq_disable_direct)
 	RELOC(xen_irq_disable_direct, 0)
 
 /*
- (xen_)save_fl is used to get the current interrupt enable status.
- Callers expect the status to be in X86_EFLAGS_IF, and other bits
- may be set in the return value. We take advantage of this by
- making sure that X86_EFLAGS_IF has the right value (and other bits
- in that byte are 0), but other bits in the return value are
- undefined. We need to toggle the state of the bit, because
- Xen and x86 use opposite senses (mask vs enable).
+ * (xen_)save_fl is used to get the current interrupt enable status.
+ * Callers expect the status to be in X86_EFLAGS_IF, and other bits
+ * may be set in the return value. We take advantage of this by
+ * making sure that X86_EFLAGS_IF has the right value (and other bits
+ * in that byte are 0), but other bits in the return value are
+ * undefined. We need to toggle the state of the bit, because Xen and
+ * x86 use opposite senses (mask vs enable).
  */
 ENTRY(xen_save_fl_direct)
 	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 	setz %ah
-	addb %ah,%ah
+	addb %ah, %ah
 ENDPATCH(xen_save_fl_direct)
 	ret
 	ENDPROC(xen_save_fl_direct)
@@ -73,12 +75,11 @@ ENDPATCH(xen_save_fl_direct)
 
 
 /*
- In principle the caller should be passing us a value return
- from xen_save_fl_direct, but for robustness sake we test only
- the X86_EFLAGS_IF flag rather than the whole byte. After
- setting the interrupt mask state, it checks for unmasked
- pending events and enters the hypervisor to get them delivered
- if so.
+ * In principle the caller should be passing us a value return from
+ * xen_save_fl_direct, but for robustness sake we test only the
+ * X86_EFLAGS_IF flag rather than the whole byte. After setting the
+ * interrupt mask state, it checks for unmasked pending events and
+ * enters the hypervisor to get them delivered if so.
  */
 ENTRY(xen_restore_fl_direct)
 #ifdef CONFIG_X86_64
@@ -87,9 +88,11 @@ ENTRY(xen_restore_fl_direct)
 	testb $X86_EFLAGS_IF>>8, %ah
 #endif
 	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts. The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
+	/*
+	 * Preempt here doesn't matter because that will deal with any
+	 * pending interrupts. The pending check may end up being run
+	 * on the wrong CPU, but that doesn't hurt.
+	 */
 
 	/* check for unmasked and pending */
 	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
@@ -103,8 +106,8 @@ ENDPATCH(xen_restore_fl_direct)
 
 
 /*
- Force an event check by making a hypercall,
- but preserve regs before making the call.
+ * Force an event check by making a hypercall, but preserve regs
+ * before making the call.
  */
 check_events:
 #ifdef CONFIG_X86_32
@@ -137,4 +140,3 @@ check_events:
 	pop %rax
 #endif
 	ret
-
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 082d173caaf3..88e15deb8b82 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -1,17 +1,16 @@
 /*
- Asm versions of Xen pv-ops, suitable for either direct use or inlining.
- The inline versions are the same as the direct-use versions, with the
- pre- and post-amble chopped off.
-
- This code is encoded for size rather than absolute efficiency,
- with a view to being able to inline as much as possible.
-
- We only bother with direct forms (ie, vcpu in pda) of the operations
- here; the indirect forms are better handled in C, since they're
- generally too large to inline anyway.
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining. The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in pda) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
  */
 
-//#include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/processor-flags.h>
 #include <asm/segment.h>
@@ -21,8 +20,8 @@
 #include "xen-asm.h"
 
 /*
- Force an event check by making a hypercall,
- but preserve regs before making the call.
+ * Force an event check by making a hypercall, but preserve regs
+ * before making the call.
  */
 check_events:
 	push %eax
@@ -35,10 +34,10 @@ check_events:
 	ret
 
 /*
- We can't use sysexit directly, because we're not running in ring0.
- But we can easily fake it up using iret. Assuming xen_sysexit
- is jumped to with a standard stack frame, we can just strip it
- back to a standard iret frame and use iret.
+ * We can't use sysexit directly, because we're not running in ring0.
+ * But we can easily fake it up using iret. Assuming xen_sysexit is
+ * jumped to with a standard stack frame, we can just strip it back to
+ * a standard iret frame and use iret.
  */
 ENTRY(xen_sysexit)
 	movl PT_EAX(%esp), %eax		/* Shouldn't be necessary? */
@@ -49,33 +48,31 @@ ENTRY(xen_sysexit)
 ENDPROC(xen_sysexit)
 
 /*
- This is run where a normal iret would be run, with the same stack setup:
-	8: eflags
-	4: cs
-	esp-> 0: eip
-
- This attempts to make sure that any pending events are dealt
- with on return to usermode, but there is a small window in
- which an event can happen just before entering usermode. If
- the nested interrupt ends up setting one of the TIF_WORK_MASK
- pending work flags, they will not be tested again before
- returning to usermode. This means that a process can end up
- with pending work, which will be unprocessed until the process
- enters and leaves the kernel again, which could be an
- unbounded amount of time. This means that a pending signal or
- reschedule event could be indefinitely delayed.
-
- The fix is to notice a nested interrupt in the critical
- window, and if one occurs, then fold the nested interrupt into
- the current interrupt stack frame, and re-process it
- iteratively rather than recursively. This means that it will
- exit via the normal path, and all pending work will be dealt
- with appropriately.
-
- Because the nested interrupt handler needs to deal with the
- current stack state in whatever form its in, we keep things
- simple by only using a single register which is pushed/popped
- on the stack.
+ * This is run where a normal iret would be run, with the same stack setup:
+ *	8: eflags
+ *	4: cs
+ *	esp-> 0: eip
+ *
+ * This attempts to make sure that any pending events are dealt with
+ * on return to usermode, but there is a small window in which an
+ * event can happen just before entering usermode. If the nested
+ * interrupt ends up setting one of the TIF_WORK_MASK pending work
+ * flags, they will not be tested again before returning to
+ * usermode. This means that a process can end up with pending work,
+ * which will be unprocessed until the process enters and leaves the
+ * kernel again, which could be an unbounded amount of time. This
+ * means that a pending signal or reschedule event could be
+ * indefinitely delayed.
+ *
+ * The fix is to notice a nested interrupt in the critical window, and
+ * if one occurs, then fold the nested interrupt into the current
+ * interrupt stack frame, and re-process it iteratively rather than
+ * recursively. This means that it will exit via the normal path, and
+ * all pending work will be dealt with appropriately.
+ *
+ * Because the nested interrupt handler needs to deal with the current
+ * stack state in whatever form its in, we keep things simple by only
+ * using a single register which is pushed/popped on the stack.
  */
 ENTRY(xen_iret)
 	/* test eflags for special cases */
@@ -85,13 +82,15 @@ ENTRY(xen_iret)
 	push %eax
 	ESP_OFFSET=4	# bytes pushed onto stack
 
-	/* Store vcpu_info pointer for easy access. Do it this
-	   way to avoid having to reload %fs */
+	/*
+	 * Store vcpu_info pointer for easy access. Do it this way to
+	 * avoid having to reload %fs
+	 */
 #ifdef CONFIG_SMP
 	GET_THREAD_INFO(%eax)
-	movl TI_cpu(%eax),%eax
-	movl __per_cpu_offset(,%eax,4),%eax
-	mov per_cpu__xen_vcpu(%eax),%eax
+	movl TI_cpu(%eax), %eax
+	movl __per_cpu_offset(,%eax,4), %eax
+	mov per_cpu__xen_vcpu(%eax), %eax
 #else
 	movl per_cpu__xen_vcpu, %eax
 #endif
@@ -99,37 +98,46 @@ ENTRY(xen_iret)
 	/* check IF state we're restoring */
 	testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)
 
-	/* Maybe enable events. Once this happens we could get a
-	   recursive event, so the critical region starts immediately
-	   afterwards. However, if that happens we don't end up
-	   resuming the code, so we don't have to be worried about
-	   being preempted to another CPU. */
+	/*
+	 * Maybe enable events. Once this happens we could get a
+	 * recursive event, so the critical region starts immediately
+	 * afterwards. However, if that happens we don't end up
+	 * resuming the code, so we don't have to be worried about
+	 * being preempted to another CPU.
+	 */
 	setz XEN_vcpu_info_mask(%eax)
 xen_iret_start_crit:
 
 	/* check for unmasked and pending */
 	cmpw $0x0001, XEN_vcpu_info_pending(%eax)
 
-	/* If there's something pending, mask events again so we
-	   can jump back into xen_hypervisor_callback */
+	/*
+	 * If there's something pending, mask events again so we can
+	 * jump back into xen_hypervisor_callback
+	 */
 	sete XEN_vcpu_info_mask(%eax)
 
 	popl %eax
 
-	/* From this point on the registers are restored and the stack
-	   updated, so we don't need to worry about it if we're preempted */
+	/*
+	 * From this point on the registers are restored and the stack
+	 * updated, so we don't need to worry about it if we're
+	 * preempted
+	 */
 iret_restore_end:
 
-	/* Jump to hypervisor_callback after fixing up the stack.
-	   Events are masked, so jumping out of the critical
-	   region is OK. */
+	/*
+	 * Jump to hypervisor_callback after fixing up the stack.
+	 * Events are masked, so jumping out of the critical region is
+	 * OK.
+	 */
 	je xen_hypervisor_callback
 
 1:	iret
 xen_iret_end_crit:
-.section __ex_table,"a"
+.section __ex_table, "a"
 	.align 4
-	.long 1b,iret_exc
+	.long 1b, iret_exc
 .previous
 
 hyper_iret:
@@ -139,55 +147,55 @@ hyper_iret:
 	.globl xen_iret_start_crit, xen_iret_end_crit
 
 /*
- This is called by xen_hypervisor_callback in entry.S when it sees
- that the EIP at the time of interrupt was between xen_iret_start_crit
- and xen_iret_end_crit. We're passed the EIP in %eax so we can do
- a more refined determination of what to do.
-
- The stack format at this point is:
-	----------------
-	ss		: (ss/esp may be present if we came from usermode)
-	esp		:
-	eflags		} outer exception info
-	cs		}
-	eip		}
-	---------------- <- edi (copy dest)
-	eax		: outer eax if it hasn't been restored
-	----------------
-	eflags		} nested exception info
-	cs		} (no ss/esp because we're nested
-	eip		} from the same ring)
-	orig_eax	}<- esi (copy src)
-	- - - - - - - -
-	fs		}
-	es		}
-	ds		} SAVE_ALL state
-	eax		}
-	  :		:
-	ebx		}<- esp
-	----------------
-
- In order to deliver the nested exception properly, we need to shift
- everything from the return addr up to the error code so it
- sits just under the outer exception info. This means that when we
- handle the exception, we do it in the context of the outer exception
- rather than starting a new one.
-
- The only caveat is that if the outer eax hasn't been
- restored yet (ie, it's still on stack), we need to insert
- its value into the SAVE_ALL state before going on, since
- it's usermode state which we eventually need to restore.
+ * This is called by xen_hypervisor_callback in entry.S when it sees
+ * that the EIP at the time of interrupt was between
+ * xen_iret_start_crit and xen_iret_end_crit. We're passed the EIP in
+ * %eax so we can do a more refined determination of what to do.
+ *
+ * The stack format at this point is:
+ *	----------------
+ *	ss		: (ss/esp may be present if we came from usermode)
+ *	esp		:
+ *	eflags		} outer exception info
+ *	cs		}
+ *	eip		}
+ *	---------------- <- edi (copy dest)
+ *	eax		: outer eax if it hasn't been restored
+ *	----------------
+ *	eflags		} nested exception info
+ *	cs		} (no ss/esp because we're nested
+ *	eip		} from the same ring)
+ *	orig_eax	}<- esi (copy src)
+ *	- - - - - - - -
+ *	fs		}
+ *	es		}
+ *	ds		} SAVE_ALL state
+ *	eax		}
+ *	  :		:
+ *	ebx		}<- esp
+ *	----------------
+ *
+ * In order to deliver the nested exception properly, we need to shift
+ * everything from the return addr up to the error code so it sits
+ * just under the outer exception info. This means that when we
+ * handle the exception, we do it in the context of the outer
+ * exception rather than starting a new one.
+ *
+ * The only caveat is that if the outer eax hasn't been restored yet
+ * (ie, it's still on stack), we need to insert its value into the
+ * SAVE_ALL state before going on, since it's usermode state which we
+ * eventually need to restore.
  */
 ENTRY(xen_iret_crit_fixup)
 	/*
-	   Paranoia: Make sure we're really coming from kernel space.
-	   One could imagine a case where userspace jumps into the
-	   critical range address, but just before the CPU delivers a GP,
-	   it decides to deliver an interrupt instead. Unlikely?
-	   Definitely. Easy to avoid? Yes. The Intel documents
-	   explicitly say that the reported EIP for a bad jump is the
-	   jump instruction itself, not the destination, but some virtual
-	   environments get this wrong.
+	 * Paranoia: Make sure we're really coming from kernel space.
+	 * One could imagine a case where userspace jumps into the
+	 * critical range address, but just before the CPU delivers a
+	 * GP, it decides to deliver an interrupt instead. Unlikely?
+	 * Definitely. Easy to avoid? Yes. The Intel documents
+	 * explicitly say that the reported EIP for a bad jump is the
+	 * jump instruction itself, not the destination, but some
+	 * virtual environments get this wrong.
 	 */
 	movl PT_CS(%esp), %ecx
 	andl $SEGMENT_RPL_MASK, %ecx
@@ -197,15 +205,17 @@ ENTRY(xen_iret_crit_fixup)
 	lea PT_ORIG_EAX(%esp), %esi
 	lea PT_EFLAGS(%esp), %edi
 
-	/* If eip is before iret_restore_end then stack
-	   hasn't been restored yet. */
+	/*
+	 * If eip is before iret_restore_end then stack
+	 * hasn't been restored yet.
+	 */
 	cmp $iret_restore_end, %eax
 	jae 1f
 
-	movl 0+4(%edi),%eax		/* copy EAX (just above top of frame) */
+	movl 0+4(%edi), %eax		/* copy EAX (just above top of frame) */
 	movl %eax, PT_EAX(%esp)
 
-	lea ESP_OFFSET(%edi),%edi	/* move dest up over saved regs */
+	lea ESP_OFFSET(%edi), %edi	/* move dest up over saved regs */
 
 	/* set up the copy */
 1:	std
@@ -213,6 +223,6 @@ ENTRY(xen_iret_crit_fixup)
 	rep movsl
 	cld
 
-	lea 4(%edi),%esp		/* point esp to new frame */
+	lea 4(%edi), %esp		/* point esp to new frame */
 2:	jmp xen_do_upcall
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index d205a283efe0..02f496a8dbaa 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -1,14 +1,14 @@
 /*
- Asm versions of Xen pv-ops, suitable for either direct use or inlining.
- The inline versions are the same as the direct-use versions, with the
- pre- and post-amble chopped off.
-
- This code is encoded for size rather than absolute efficiency,
- with a view to being able to inline as much as possible.
-
- We only bother with direct forms (ie, vcpu in pda) of the operations
- here; the indirect forms are better handled in C, since they're
- generally too large to inline anyway.
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining. The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in pda) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
  */
 
 #include <asm/errno.h>
@@ -21,25 +21,25 @@
 #include "xen-asm.h"
 
 ENTRY(xen_adjust_exception_frame)
-	mov 8+0(%rsp),%rcx
-	mov 8+8(%rsp),%r11
+	mov 8+0(%rsp), %rcx
+	mov 8+8(%rsp), %r11
 	ret $16
 
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
- Xen64 iret frame:
-
-	ss
-	rsp
-	rflags
-	cs
-	rip		<-- standard iret frame
-
-	flags
-
-	rcx		}
-	r11		}<-- pushed by hypercall page
- rsp -> rax		}
+ * Xen64 iret frame:
+ *
+ *	ss
+ *	rsp
+ *	rflags
+ *	cs
+ *	rip <-- standard iret frame
+ *
+ *	flags
+ *
+ *	rcx		}
+ *	r11		}<-- pushed by hypercall page
+ * rsp->rax		}
  */
 ENTRY(xen_iret)
 	pushq $0
@@ -48,8 +48,8 @@ ENDPATCH(xen_iret)
 RELOC(xen_iret, 1b+1)
 
 /*
- sysexit is not used for 64-bit processes, so it's
- only ever used to return to 32-bit compat userspace.
+ * sysexit is not used for 64-bit processes, so it's only ever used to
+ * return to 32-bit compat userspace.
  */
 ENTRY(xen_sysexit)
 	pushq $__USER32_DS
@@ -64,10 +64,12 @@ ENDPATCH(xen_sysexit)
 RELOC(xen_sysexit, 1b+1)
 
 ENTRY(xen_sysret64)
-	/* We're already on the usermode stack at this point, but still
-	   with the kernel gs, so we can easily switch back */
+	/*
+	 * We're already on the usermode stack at this point, but
+	 * still with the kernel gs, so we can easily switch back
+	 */
 	movq %rsp, PER_CPU_VAR(old_rsp)
-	movq PER_CPU_VAR(kernel_stack),%rsp
+	movq PER_CPU_VAR(kernel_stack), %rsp
 
 	pushq $__USER_DS
 	pushq PER_CPU_VAR(old_rsp)
@@ -81,8 +83,10 @@ ENDPATCH(xen_sysret64)
 RELOC(xen_sysret64, 1b+1)
 
 ENTRY(xen_sysret32)
-	/* We're already on the usermode stack at this point, but still
-	   with the kernel gs, so we can easily switch back */
+	/*
+	 * We're already on the usermode stack at this point, but
+	 * still with the kernel gs, so we can easily switch back
+	 */
 	movq %rsp, PER_CPU_VAR(old_rsp)
 	movq PER_CPU_VAR(kernel_stack), %rsp
 
@@ -98,28 +102,27 @@ ENDPATCH(xen_sysret32)
 RELOC(xen_sysret32, 1b+1)
 
 /*
- Xen handles syscall callbacks much like ordinary exceptions,
- which means we have:
-  - kernel gs
-  - kernel rsp
-  - an iret-like stack frame on the stack (including rcx and r11):
-	ss
-	rsp
-	rflags
-	cs
-	rip
-	r11
- rsp->	rcx
-
- In all the entrypoints, we undo all that to make it look
- like a CPU-generated syscall/sysenter and jump to the normal
- entrypoint.
+ * Xen handles syscall callbacks much like ordinary exceptions, which
+ * means we have:
+ *  - kernel gs
+ *  - kernel rsp
+ *  - an iret-like stack frame on the stack (including rcx and r11):
+ *	ss
+ *	rsp
+ *	rflags
+ *	cs
+ *	rip
+ *	r11
+ * rsp->rcx
+ *
+ * In all the entrypoints, we undo all that to make it look like a
+ * CPU-generated syscall/sysenter and jump to the normal entrypoint.
  */
 
 .macro undo_xen_syscall
-	mov 0*8(%rsp),%rcx
-	mov 1*8(%rsp),%r11
-	mov 5*8(%rsp),%rsp
+	mov 0*8(%rsp), %rcx
+	mov 1*8(%rsp), %r11
+	mov 5*8(%rsp), %rsp
 .endm
 
 /* Normal 64-bit system call target */
@@ -146,7 +149,7 @@ ENDPROC(xen_sysenter_target)
 
 ENTRY(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
-	lea 16(%rsp), %rsp	/* strip %rcx,%r11 */
+	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
 	mov $-ENOSYS, %rax
 	pushq $VGCF_in_syscall
 	jmp hypercall_iret