Diffstat (limited to 'arch/x86/xen/xen-asm.S')
-rw-r--r--   arch/x86/xen/xen-asm.S   42
1 file changed, 28 insertions, 14 deletions
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index fe161ed4b01e..2497a30f41de 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -108,6 +108,20 @@ ENDPATCH(xen_restore_fl_direct)
         RELOC(xen_restore_fl_direct, 2b+1)
 
 /*
+        We can't use sysexit directly, because we're not running in ring0.
+        But we can easily fake it up using iret.  Assuming xen_sysexit
+        is jumped to with a standard stack frame, we can just strip it
+        back to a standard iret frame and use iret.
+ */
+ENTRY(xen_sysexit)
+        movl PT_EAX(%esp), %eax         /* Shouldn't be necessary? */
+        orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
+        lea PT_EIP(%esp), %esp
+
+        jmp xen_iret
+ENDPROC(xen_sysexit)
+
+/*
         This is run where a normal iret would be run, with the same stack setup:
            8: eflags
            4: cs
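A note on the new xen_sysexit above: PT_EAX, PT_EFLAGS and PT_EIP are asm-offsets into the 32-bit struct pt_regs, so "lea PT_EIP(%esp), %esp" discards everything below the saved eip and leaves exactly the eip/cs/eflags frame that iret consumes, which is what the comment means by stripping back to a standard iret frame. A rough, compilable C sketch of that layout (field order follows the 32-bit pt_regs of this era; the exact set of segment slots is an assumption, not taken from the patch):

    #include <stddef.h>
    #include <stdio.h>

    /* Approximate 32-bit pt_regs layout; the real PT_* constants are generated
       by asm-offsets from the kernel's own struct. */
    struct pt_regs_sketch {
            unsigned long bx, cx, dx, si, di, bp, ax;   /* pushed by SAVE_ALL */
            unsigned long ds, es, fs;                   /* segment slots (version-dependent) */
            unsigned long orig_ax;                      /* syscall nr / error code */
            unsigned long ip, cs, flags, sp, ss;        /* hardware frame begins at ip */
    };

    int main(void)
    {
            /* lea PT_EIP(%esp), %esp strips everything below 'ip',
               leaving an iret-shaped frame on the stack. */
            printf("PT_EAX    ~ %zu\n", offsetof(struct pt_regs_sketch, ax));
            printf("PT_EFLAGS ~ %zu\n", offsetof(struct pt_regs_sketch, flags));
            printf("PT_EIP    ~ %zu\n", offsetof(struct pt_regs_sketch, ip));
            return 0;
    }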
@@ -184,8 +198,12 @@ iret_restore_end:
            region is OK. */
         je xen_hypervisor_callback
 
-        iret
+1:      iret
 xen_iret_end_crit:
+.section __ex_table,"a"
+        .align 4
+        .long 1b,iret_exc
+.previous
 
 hyper_iret:
         /* put this out of line since its very rarely used */
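The __ex_table entry added above hooks the (now labelled) iret into the kernel's exception-fixup machinery: if that iret faults, the fault handler looks the faulting address up in the table and resumes at iret_exc instead of oopsing. A minimal sketch of how such a table is consulted, assuming the classic two-word entry layout of this era (the lookup helper is illustrative, not the kernel's actual routine):

    /* One entry per instruction that is allowed to fault:
       address of the instruction -> address to resume at. */
    struct exception_table_entry {
            unsigned long insn;     /* the "1:" label in the hunk above */
            unsigned long fixup;    /* iret_exc in the hunk above */
    };

    /* Illustrative linear lookup; the kernel keeps the table sorted and bisects it. */
    unsigned long find_fixup(const struct exception_table_entry *tbl,
                             unsigned long nentries, unsigned long faulting_ip)
    {
            for (unsigned long i = 0; i < nentries; i++)
                    if (tbl[i].insn == faulting_ip)
                            return tbl[i].fixup;
            return 0;       /* no entry: the fault is fatal */
    }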
@@ -219,9 +237,7 @@ hyper_iret:
          ds             }  SAVE_ALL state
          eax            }
           :             :
-         ebx            }
-        ----------------
-        return addr      <- esp
+         ebx            }<- esp
         ----------------
 
         In order to deliver the nested exception properly, we need to shift
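The shift described in this comment block is, at heart, an overlapping copy: the words saved by SAVE_ALL are slid toward higher addresses so they sit just under the outer exception info, and %esp is then repointed at the new bottom of the block. Because source and destination overlap, the copy has to run from the top down, which is what the later std; rep movsl sequence does by hand. A toy C model of that slide (array size and distances invented purely for illustration):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* Pretend stack: higher indices = higher addresses, as in the diagram above. */
            unsigned long stack[16] = { 0 };
            unsigned long *saved = &stack[2];       /* bottom of the SAVE_ALL block */
            size_t nwords = 8;                      /* words to slide (illustrative) */
            size_t slide  = 3;                      /* distance of the shift, in words */

            /* dest > src and the regions overlap, so the copy must go high-to-low;
               memmove handles that, std; rep movsl does it explicitly. */
            memmove(saved + slide, saved, nwords * sizeof(*saved));

            unsigned long *new_esp = saved + slide; /* analogue of "lea 4(%edi),%esp" */
            printf("new frame starts at word %td of the pretend stack\n", new_esp - stack);
            return 0;
    }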
@@ -236,10 +252,8 @@ hyper_iret:
         it's usermode state which we eventually need to restore.
  */
 ENTRY(xen_iret_crit_fixup)
-        /* offsets +4 for return address */
-
         /*
-           Paranoia: Make sure we're really coming from userspace.
+           Paranoia: Make sure we're really coming from kernel space.
            One could imagine a case where userspace jumps into the
            critical range address, but just before the CPU delivers a GP,
            it decides to deliver an interrupt instead.  Unlikely?
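The paranoia check this comment introduces (the CS test at the top of the next hunk) is just a privilege test on the saved code segment: the low two bits of a selector are its RPL, and RPL 3 means the event really did interrupt userspace, in which case the shifting below is skipped entirely. In C terms, roughly (constants as defined in the x86 segment headers):

    #define SEGMENT_RPL_MASK 0x3    /* low two bits of a selector: requested privilege level */
    #define USER_RPL         0x3    /* ring 3 */

    /* Mirrors the movl/andl/cmpl/je sequence on PT_CS(%esp). */
    int interrupted_userspace(unsigned long saved_cs)
    {
            return (saved_cs & SEGMENT_RPL_MASK) == USER_RPL;
    }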
@@ -248,32 +262,32 @@ ENTRY(xen_iret_crit_fixup)
            jump instruction itself, not the destination, but some virtual
            environments get this wrong.
          */
-        movl PT_CS+4(%esp), %ecx
+        movl PT_CS(%esp), %ecx
         andl $SEGMENT_RPL_MASK, %ecx
         cmpl $USER_RPL, %ecx
         je 2f
 
-        lea PT_ORIG_EAX+4(%esp), %esi
-        lea PT_EFLAGS+4(%esp), %edi
+        lea PT_ORIG_EAX(%esp), %esi
+        lea PT_EFLAGS(%esp), %edi
 
         /* If eip is before iret_restore_end then stack
            hasn't been restored yet. */
         cmp $iret_restore_end, %eax
         jae 1f
 
-        movl 0+4(%edi),%eax             /* copy EAX */
-        movl %eax, PT_EAX+4(%esp)
+        movl 0+4(%edi),%eax             /* copy EAX (just above top of frame) */
+        movl %eax, PT_EAX(%esp)
 
         lea ESP_OFFSET(%edi),%edi       /* move dest up over saved regs */
 
         /* set up the copy */
 1:      std
-        mov $(PT_EIP+4) / 4, %ecx       /* copy ret+saved regs up to orig_eax */
+        mov $PT_EIP / 4, %ecx           /* saved regs up to orig_eax */
         rep movsl
         cld
 
         lea 4(%edi),%esp                /* point esp to new frame */
-2:      ret
+2:      jmp xen_do_upcall
 
 
 /*
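Two things fall out of this last hunk. First, the routine is no longer called and returned from: it is branched to and leaves with jmp xen_do_upcall, which is why every PT_* offset loses its old +4 return-address bias. Second, the copy length becomes simply PT_EIP / 4 longwords, i.e. every saved word strictly below the eip slot, ending with orig_eax. A small C check of that word count (the PT_EIP value is an assumed example, not taken from the patch):

    #include <stdio.h>

    #define PT_EIP 44   /* illustrative byte offset of the saved eip; the real value comes from asm-offsets */

    int main(void)
    {
            /* mov $PT_EIP / 4, %ecx ; rep movsl  ==  copy PT_EIP/4 longwords,
               covering byte offsets 0 .. PT_EIP-4, i.e. up to and including orig_eax. */
            unsigned int words = PT_EIP / 4;
            printf("rep movsl copies %u longwords\n", words);
            return 0;
    }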