Diffstat (limited to 'drivers/lguest/lguest_asm.S')
-rw-r--r--   drivers/lguest/lguest_asm.S | 57
1 file changed, 41 insertions, 16 deletions
diff --git a/drivers/lguest/lguest_asm.S b/drivers/lguest/lguest_asm.S
index a3dbf22ee365..3126ae923cc0 100644
--- a/drivers/lguest/lguest_asm.S
+++ b/drivers/lguest/lguest_asm.S
@@ -4,15 +4,15 @@
 #include <asm/thread_info.h>
 #include <asm/processor-flags.h>
 
-/*
- * This is where we begin: we have a magic signature which the launcher looks
- * for. The plan is that the Linux boot protocol will be extended with a
+/*G:020 This is where we begin: we have a magic signature which the launcher
+ * looks for. The plan is that the Linux boot protocol will be extended with a
  * "platform type" field which will guide us here from the normal entry point,
- * but for the moment this suffices. We pass the virtual address of the boot
- * info to lguest_init().
+ * but for the moment this suffices. The normal boot code uses %esi for the
+ * boot header, so we do too. We convert it to a virtual address by adding
+ * PAGE_OFFSET, and hand it to lguest_init() as its argument (ie. %eax).
  *
- * We put it in .init.text will be discarded after boot.
- */
+ * The .section line puts this code in .init.text so it will be discarded after
+ * boot. */
 .section .init.text, "ax", @progbits
 .ascii "GenuineLguest"
 	/* Set up initial stack. */
@@ -21,7 +21,9 @@
 	addl $__PAGE_OFFSET, %eax
 	jmp lguest_init
 
-/* The templates for inline patching. */
+/*G:055 We create a macro which puts the assembler code between lgstart_ and
+ * lgend_ markers. These templates end up in the .init.text section, so they
+ * are discarded after boot. */
 #define LGUEST_PATCH(name, insns...)			\
 	lgstart_##name:	insns; lgend_##name:;		\
 	.globl lgstart_##name; .globl lgend_##name
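
To see what one of these templates looks like in the end, take the cli case
(its instantiation is visible as the context line on the next hunk header)
and expand the macro by hand:

    lgstart_cli: movl $0, lguest_data+LGUEST_DATA_irq_enabled; lgend_cli:;
    .globl lgstart_cli; .globl lgend_cli

The paired globals are the point: the patching code can compute the
template's size as lgend_cli - lgstart_cli and copy exactly those bytes over
the corresponding paravirt call site, turning an indirect call into this one
movl.
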
@@ -30,24 +32,47 @@ LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
 LGUEST_PATCH(sti, movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled)
 LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
 LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
+/*:*/
 
 .text
 /* These demark the EIP range where host should never deliver interrupts. */
 .global lguest_noirq_start
 .global lguest_noirq_end
 
-/*
- * We move eflags word to lguest_data.irq_enabled to restore interrupt state.
- * For page faults, gpfs and virtual interrupts, the hypervisor has saved
- * eflags manually, otherwise it was delivered directly and so eflags reflects
- * the real machine IF state, ie. interrupts on. Since the kernel always dies
- * if it takes such a trap with interrupts disabled anyway, turning interrupts
- * back on unconditionally here is OK.
- */
+/*G:045 There is one final paravirt_op that the Guest implements, and glancing
+ * at it you can see why I left it to last. It's *cool*! It's in *assembler*!
+ *
+ * The "iret" instruction is used to return from an interrupt or trap. The
+ * stack looks like this:
+ *   old address
+ *   old code segment & privilege level
+ *   old processor flags ("eflags")
+ *
+ * The "iret" instruction pops those values off the stack and restores them all
+ * at once. The only problem is that eflags includes the Interrupt Flag which
+ * the Guest can't change: the CPU will simply ignore it when we do an "iret".
+ * So we have to copy eflags from the stack to lguest_data.irq_enabled before
+ * we do the "iret".
+ *
+ * There are two problems with this: firstly, we need to use a register to do
+ * the copy and secondly, the whole thing needs to be atomic. The first
+ * problem is easy to solve: push %eax on the stack so we can use it, and then
+ * restore it at the end just before the real "iret".
+ *
+ * The second is harder: copying eflags to lguest_data.irq_enabled will turn
+ * interrupts on before we're finished, so we could be interrupted before we
+ * return to userspace or wherever. Our solution to this is to surround the
+ * code with lguest_noirq_start: and lguest_noirq_end: labels. We tell the
+ * Host that it is *never* to interrupt us there, even if interrupts seem to be
+ * enabled. */
 ENTRY(lguest_iret)
 	pushl %eax
 	movl 12(%esp), %eax
 lguest_noirq_start:
+	/* Note the %ss: segment prefix here. Normal data accesses use the
+	 * "ds" segment, but that will have already been restored for whatever
+	 * we're returning to (such as userspace): we can't trust it. The %ss:
+	 * prefix makes sure we use the stack segment, which is still valid. */
 	movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled
 	popl %eax
 	iret
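
One detail the new comment glosses over is why the saved eflags word is found
at 12(%esp). Before lguest_iret runs, the trap frame that "iret" will consume
sits at the top of the stack; annotating the first two instructions with that
layout (again a reading aid, not part of the patch) makes the arithmetic
explicit:

    /* on entry: (%esp) = old EIP, 4(%esp) = old CS, 8(%esp) = old eflags */
    pushl %eax                  /* frame shifts down one word (4 bytes)... */
    movl 12(%esp), %eax         /* ...so old eflags is now at 8 + 4 = 12 */

From there the copy into lguest_data.irq_enabled, the popl and the final iret
all sit between the lguest_noirq_start and lguest_noirq_end labels the comment
describes, so the Host will not inject an interrupt while the frame is being
restored.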