author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-03-17 19:37:22 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-24 17:57:32 -0400
commit		0f2c87695219b1129ccf93e0f58acdcdd49724b9 (patch)
tree		8c0d50e19a880f10d0fc5c92666b043d0257eb2d /arch/x86/xen
parent		dbe9e994c99ac9ac12d2b66ea42f44558f54fa52 (diff)
xen: jump to iret fixup
Use jmp rather than call for the iret fixup, so it's consistent with
the sysexit fixup, and it simplifies the stack (which is already
complex).
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
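As a rough illustration (not part of the commit): the stack seen by
xen_iret_crit_fixup depends on how it is entered. With a call, a return
address sits on top of the saved registers, which is why the fixup
previously biased every PT_* access by 4 and finished with ret; with a
jmp there is no return address, so the plain PT_* offsets apply and the
fixup can hand control straight to xen_do_upcall, matching the sysexit
fixup. A sketch of the two entry conventions, assuming the saved-register
frame described in the comment above hyper_iret:

/*
 * Illustration only, not part of the patched file.
 *
 * Entered via "call xen_iret_crit_fixup":
 *	 (%esp)  -> return address pushed by the call
 *	4(%esp)  -> start of the saved register frame, so every access
 *		    needs a +4 bias, e.g. PT_CS+4(%esp), and the fixup
 *		    must finish with "ret"
 *
 * Entered via "jmp xen_iret_crit_fixup":
 *	 (%esp)  -> start of the saved register frame, so plain
 *		    PT_CS(%esp) etc. work directly, and the fixup
 *		    finishes with "jmp xen_do_upcall"
 */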
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/xen-asm.S	22
1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 1ac08082a4b4..53cae923e148 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -223,9 +223,7 @@ hyper_iret:
 	 ds		}  SAVE_ALL state
 	 eax		}
 	  :		:
-	 ebx		}
-	----------------
-	return addr	 <- esp
+	 ebx		}<- esp
 	----------------
 
 	In order to deliver the nested exception properly, we need to shift
@@ -240,10 +238,8 @@ hyper_iret:
 	it's usermode state which we eventually need to restore.
  */
 ENTRY(xen_iret_crit_fixup)
-	/* offsets +4 for return address */
-
 	/*
-	   Paranoia: Make sure we're really coming from userspace.
+	   Paranoia: Make sure we're really coming from kernel space.
 	   One could imagine a case where userspace jumps into the
 	   critical range address, but just before the CPU delivers a GP,
 	   it decides to deliver an interrupt instead.  Unlikely?
@@ -252,32 +248,32 @@ ENTRY(xen_iret_crit_fixup)
 	   jump instruction itself, not the destination, but some virtual
 	   environments get this wrong.
 	 */
-	movl PT_CS+4(%esp), %ecx
+	movl PT_CS(%esp), %ecx
 	andl $SEGMENT_RPL_MASK, %ecx
 	cmpl $USER_RPL, %ecx
 	je 2f
 
-	lea PT_ORIG_EAX+4(%esp), %esi
-	lea PT_EFLAGS+4(%esp), %edi
+	lea PT_ORIG_EAX(%esp), %esi
+	lea PT_EFLAGS(%esp), %edi
 
 	/* If eip is before iret_restore_end then stack
 	   hasn't been restored yet. */
 	cmp $iret_restore_end, %eax
 	jae 1f
 
-	movl 0+4(%edi),%eax		/* copy EAX */
-	movl %eax, PT_EAX+4(%esp)
+	movl 0+4(%edi),%eax		/* copy EAX (just above top of frame) */
+	movl %eax, PT_EAX(%esp)
 
 	lea ESP_OFFSET(%edi),%edi	/* move dest up over saved regs */
 
 	/* set up the copy */
 1:	std
-	mov $(PT_EIP+4) / 4, %ecx	/* copy ret+saved regs up to orig_eax */
+	mov $PT_EIP / 4, %ecx		/* saved regs up to orig_eax */
 	rep movsl
 	cld
 
 	lea 4(%edi),%esp		/* point esp to new frame */
-2:	ret
+2:	jmp xen_do_upcall
 
 
 ENTRY(xen_sysexit)