author	Rusty Russell <rusty@rustcorp.com.au>	2009-06-13 00:27:03 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2009-06-12 08:57:03 -0400
commit	61f4bc83fea248a3092beb7ba43daa5629615513 (patch)
tree	5ce12fc0676f93a49f743dab1c60f8e1ca991ec3 /arch
parent	a32a8813d0173163ba44d8f9556e0d89fdc4fb46 (diff)
lguest: optimize by coding restore_flags and irq_enable in assembler.
The downside of the last patch, which made restore_flags and irq_enable
check for pending interrupts, is that they are now too big to be patched
directly into the callsites, so the C versions are always used.

But the C versions go via PV_CALLEE_SAVE_REGS_THUNK, which saves all the
caller-clobbered registers. In fact, we don't need any registers at all
in the fast path, so we can do better than this if we code the routines
directly in assembler.

The results are in the noise, but since it's about the same amount of
code, it's worth applying.
1GB Guest->Host: input(suppressed), output(suppressed)

Before:
	Seconds: 0:16.53
	Packets: 377268,753673
	Interrupts: 22461,24297
	Notifications: 1(5245),21303(732370)
	Net IRQs triggered: 377023(245),42578(711095)

After:
	Seconds: 0:16.48
	Packets: 377289,753673
	Interrupts: 22281,24465
	Notifications: 1(5245),21296(732377)
	Net IRQs triggered: 377060(229),42564(711109)
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
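The fast path described above is short enough to restate in C. The sketch below is a reader's aid, not part of the commit: it shows the guest/host protocol the new assembler implements, using the same logic as the irq_enable this patch deletes from boot.c. Names suffixed _sketch are illustrative stand-ins, not real kernel declarations.

/* The Guest and Host share a page containing struct lguest_data;
 * "interrupts enabled" is a word in that page, not the real EFLAGS.IF. */
struct lguest_data_sketch {
	unsigned int irq_enabled;	/* guest: X86_EFLAGS_IF (0x200) or 0 */
	unsigned int irq_pending;	/* Host: X86_EFLAGS_IF when irqs wait */
};

static void irq_enable_sketch(struct lguest_data_sketch *d)
{
	d->irq_enabled = X86_EFLAGS_IF;
	/* If the Host queued interrupts while we were "off", the null
	 * LHCALL_SEND_INTERRUPTS hypercall forces delivery now. */
	if (d->irq_pending)
		kvm_hypercall0(LHCALL_SEND_INTERRUPTS);
}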
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/asm-offsets_32.c	1
-rw-r--r--	arch/x86/lguest/boot.c	45
-rw-r--r--	arch/x86/lguest/i386_head.S	58
3 files changed, 74 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 1a830cbd7015..dfdbf6403895 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -126,6 +126,7 @@ void foo(void)
 #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
 	BLANK();
 	OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
+	OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending);
 	OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir);
 
 	BLANK();
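The OFFSET() line added above is how the assembler in i386_head.S gets to name a C struct member. A hedged sketch of the mechanism, simplified from include/linux/kbuild.h (the struct here is a stand-in for the real struct lguest_data):

#include <stddef.h>

struct lguest_data_sketch {
	unsigned int irq_enabled;
	unsigned int irq_pending;
};

/* asm-offsets_32.c is only ever compiled to assembly; kbuild scrapes the
 * "->SYM VAL" markers out of the generated .s file into asm-offsets.h,
 * where LGUEST_DATA_irq_pending becomes a plain #define usable from .S. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

void foo_sketch(void)
{
	OFFSET(LGUEST_DATA_irq_pending, lguest_data_sketch, irq_pending);
}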
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 37b8c1d3e022..514f4d0d2bfa 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -179,7 +179,7 @@ static void lguest_end_context_switch(struct task_struct *next)
 	paravirt_end_context_switch(next);
 }
 
-/*G:033
+/*G:032
  * After that diversion we return to our first native-instruction
  * replacements: four functions for interrupt control.
  *
@@ -199,41 +199,28 @@ static unsigned long save_fl(void)
 {
 	return lguest_data.irq_enabled;
 }
-PV_CALLEE_SAVE_REGS_THUNK(save_fl);
-
-/* restore_flags() just sets the flags back to the value given. */
-static void restore_fl(unsigned long flags)
-{
-	lguest_data.irq_enabled = flags;
-	mb();
-	/* Null hcall forces interrupt delivery now, if irq_pending is
-	 * set to X86_EFLAGS_IF (ie. an interrupt is pending, and flags
-	 * enables interrupts). */
-	if (flags & lguest_data.irq_pending)
-		kvm_hypercall0(LHCALL_SEND_INTERRUPTS);
-}
-PV_CALLEE_SAVE_REGS_THUNK(restore_fl);
 
 /* Interrupts go off... */
 static void irq_disable(void)
 {
 	lguest_data.irq_enabled = 0;
 }
-PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
 
-/* Interrupts go on... */
-static void irq_enable(void)
-{
-	lguest_data.irq_enabled = X86_EFLAGS_IF;
-	mb();
-	/* Null hcall forces interrupt delivery now. */
-	if (lguest_data.irq_pending)
-		kvm_hypercall0(LHCALL_SEND_INTERRUPTS);
-
-}
-PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
+/* Let's pause a moment. Remember how I said these are called so often?
+ * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to
+ * break some rules. In particular, these functions are assumed to save their
+ * own registers if they need to: normal C functions assume they can trash the
+ * eax register. To use normal C functions, we use
+ * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
+ * C function, then restores it. */
+PV_CALLEE_SAVE_REGS_THUNK(save_fl);
+PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
+/*:*/
 
-}
-PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
+/* These are in i386_head.S */
+extern void lg_irq_enable(void);
+extern void lg_restore_fl(unsigned long flags);
 
-/*:*/
 /*M:003 Note that we don't check for outstanding interrupts when we re-enable
  * them (or when we unmask an interrupt). This seems to work for the moment,
  * since interrupts are rare and we'll just get the interrupt on the next timer
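A note on the thunks kept above: pv_ops callers assume these functions preserve every register, while a normal C function may clobber the caller-saved ones. PV_CALLEE_SAVE_REGS_THUNK() bridges that gap. Roughly, and only as a hedged sketch (the authoritative register list is PV_SAVE_ALL_CALLER_REGS in arch/x86/include/asm/paravirt.h):

/* Illustrative expansion: an asm stub that saves the registers a C
 * function may trash, calls it, then restores them. */
#define CALLEE_SAVE_THUNK_SKETCH(func)				\
	asm(".pushsection .text;"				\
	    "__raw_callee_save_" #func ":"			\
	    "pushl %ecx; pushl %edx;"				\
	    "call " #func ";"					\
	    "popl %edx; popl %ecx;"				\
	    "ret;"						\
	    ".popsection")

It is exactly this push/call/pop detour that the hand-coded lg_irq_enable and lg_restore_fl below avoid on their fast paths.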
@@ -1041,9 +1028,9 @@ __init void lguest_init(void)
 	/* interrupt-related operations */
 	pv_irq_ops.init_IRQ = lguest_init_IRQ;
 	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
-	pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl);
+	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
 	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
-	pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable);
+	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
 	pv_irq_ops.safe_halt = lguest_safe_halt;
 
 	/* init-time operations */
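These assignments are the entire hook-up: every interrupt-flag operation in the guest kernel dispatches through pv_irq_ops, so swapping in lg_restore_fl and lg_irq_enable reroutes them all. __PV_IS_CALLEE_SAVE() wraps a raw function pointer while asserting that the function already preserves all registers itself, which is precisely what the hand-written assembler guarantees. A simplified sketch of the table's shape (illustrative; the real struct pv_irq_ops wraps these pointers in struct paravirt_callee_save):

struct pv_irq_ops_sketch {
	unsigned long (*save_fl)(void);		/* "pushf" replacement */
	void (*restore_fl)(unsigned long flags);	/* "popf" replacement */
	void (*irq_disable)(void);		/* "cli" replacement */
	void (*irq_enable)(void);		/* "sti" replacement */
	void (*safe_halt)(void);		/* "sti; hlt" replacement */
};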
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 3e0c5545d59c..a9c8cfe61cd4 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -47,7 +47,63 @@ ENTRY(lguest_entry)
 
 LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
 LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
-/*:*/
+
+/*G:033 But using those wrappers is inefficient (we'll see why that doesn't
+ * matter for save_fl and irq_disable later). If we write our routines
+ * carefully in assembler, we can avoid clobbering any registers and avoid
+ * jumping through the wrapper functions.
+ *
+ * I skipped over our first piece of assembler, but this one is worth studying
+ * in a bit more detail so I'll describe it in easy stages. First, the routine
+ * to enable interrupts: */
+ENTRY(lg_irq_enable)
+	/* The reverse of irq_disable, this sets lguest_data.irq_enabled to
+	 * X86_EFLAGS_IF (ie. "Interrupts enabled"). */
+	movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled
+	/* But now we need to check if the Host wants to know: there might have
+	 * been interrupts waiting to be delivered, in which case it will have
+	 * set lguest_data.irq_pending to X86_EFLAGS_IF. If it's not zero, we
+	 * jump to send_interrupts, otherwise we're done. */
+	testl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_pending
+	jnz send_interrupts
+	/* One cool thing about x86 is that you can do many things without using
+	 * a register. In this case, the normal path hasn't needed to save or
+	 * restore any registers at all! */
+	ret
+send_interrupts:
+	/* OK, now we need a register: eax is used for the hypercall number,
+	 * which is LHCALL_SEND_INTERRUPTS.
+	 *
+	 * We used not to bother with this pending detection at all, which was
+	 * much simpler. Sooner or later the Host would realize it had to
+	 * send us an interrupt. But that turns out to make performance 7
+	 * times worse on a simple tcp benchmark. So now we do this the hard
+	 * way. */
+	pushl %eax
+	movl $LHCALL_SEND_INTERRUPTS, %eax
+	/* This is a vmcall instruction (same thing that KVM uses). Older
+	 * assembler versions might not know the "vmcall" instruction, so we
+	 * create one manually here. */
+	.byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
+	popl %eax
+	ret
+
+/* Finally, the "popf" or "restore flags" routine. The %eax register holds the
+ * flags (in practice, either X86_EFLAGS_IF or 0): if it's X86_EFLAGS_IF we're
+ * enabling interrupts again, if it's 0 we're leaving them off. */
+ENTRY(lg_restore_fl)
+	/* This is just "lguest_data.irq_enabled = flags;" */
+	movl %eax, lguest_data+LGUEST_DATA_irq_enabled
+	/* Now, if the %eax value has enabled interrupts and
+	 * lguest_data.irq_pending is set, we want to tell the Host so it can
+	 * deliver any outstanding interrupts. Fortunately, both values will
+	 * be X86_EFLAGS_IF (ie. 512) in that case, and the "testl"
+	 * instruction will AND them together for us. If both are set, we
+	 * jump to send_interrupts. */
+	testl lguest_data+LGUEST_DATA_irq_pending, %eax
+	jnz send_interrupts
+	/* Again, the normal path has used no extra registers. Clever, huh? */
+	ret
 
 /* These demark the EIP range where host should never deliver interrupts. */
 .global lguest_noirq_start
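Closing the loop: the two routines above are a straight back-translation of the C this patch deleted from boot.c, minus the mb(). Illustratively:

/* What lg_restore_fl does, in the C it replaced (sketch). */
void lg_restore_fl_sketch(unsigned long flags)	/* flags arrives in %eax */
{
	lguest_data.irq_enabled = flags;		/* movl %eax, ... */
	/* The "testl mem,%eax" is this AND: both operands equal
	 * X86_EFLAGS_IF when an interrupt is pending and we are
	 * re-enabling, so the jnz fires exactly then. */
	if (flags & lguest_data.irq_pending)		/* testl; jnz */
		kvm_hypercall0(LHCALL_SEND_INTERRUPTS);	/* the vmcall */
}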