about · summary · refs · log · tree · commit · diff · stats
path: root/arch
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2009-06-13 00:27:02 -0400
committerRusty Russell <rusty@rustcorp.com.au>2009-06-12 08:57:03 -0400
commita32a8813d0173163ba44d8f9556e0d89fdc4fb46 (patch)
treefddb6742338047d0219e8c2536cd39b04e643b16 /arch
parentabd41f037e1a64543000ed73b42f616d04d92700 (diff)
lguest: improve interrupt handling, speed up stream networking
lguest never checked for pending interrupts when enabling interrupts, and
things still worked.  However, it makes a significant difference to TCP
performance, so it's time we fixed it by introducing a pending_irq flag
and checking it on irq_restore and irq_enable.

These two routines are now too big to patch into the 8/10 bytes patch
space, so we drop that code.

Note: The high latency on interrupt delivery had a very curious effect:
once everything else was optimized, networking without GSO was faster
than networking with GSO, since more interrupts were sent and hence a
greater chance of one getting through to the Guest!

Note2: (Almost) Closing the same loophole for iret doesn't have any
measurable effect, so I'm leaving that patch for the moment.

Before:
	1GB tcpblast Guest->Host:		30.7 seconds
	1GB tcpblast Guest->Host (no GSO):	76.0 seconds

After:
	1GB tcpblast Guest->Host:		6.8 seconds
	1GB tcpblast Guest->Host (no GSO):	27.8 seconds

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/lguest_hcall.h1
-rw-r--r--arch/x86/lguest/boot.c21
-rw-r--r--arch/x86/lguest/i386_head.S2
3 files changed, 16 insertions, 8 deletions
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index faae1996487b..f9a9f7811248 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -17,6 +17,7 @@
 #define LHCALL_LOAD_TLS		16
 #define LHCALL_NOTIFY		17
 #define LHCALL_LOAD_GDT_ENTRY	18
+#define LHCALL_SEND_INTERRUPTS	19
 
 #define LGUEST_TRAP_ENTRY	0x1F
 
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 2392a7a171c2..37b8c1d3e022 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -205,6 +205,12 @@ PV_CALLEE_SAVE_REGS_THUNK(save_fl);
 static void restore_fl(unsigned long flags)
 {
 	lguest_data.irq_enabled = flags;
+	mb();
+	/* Null hcall forces interrupt delivery now, if irq_pending is
+	 * set to X86_EFLAGS_IF (ie. an interrupt is pending, and flags
+	 * enables interrupts. */
+	if (flags & lguest_data.irq_pending)
+		kvm_hypercall0(LHCALL_SEND_INTERRUPTS);
 }
 PV_CALLEE_SAVE_REGS_THUNK(restore_fl);
 
@@ -219,6 +225,11 @@ PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
 static void irq_enable(void)
 {
 	lguest_data.irq_enabled = X86_EFLAGS_IF;
+	mb();
+	/* Null hcall forces interrupt delivery now. */
+	if (lguest_data.irq_pending)
+		kvm_hypercall0(LHCALL_SEND_INTERRUPTS);
+
 }
 PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
 
@@ -972,10 +983,10 @@ static void lguest_restart(char *reason)
  *
  * Our current solution is to allow the paravirt back end to optionally patch
  * over the indirect calls to replace them with something more efficient.  We
- * patch the four most commonly called functions: disable interrupts, enable
- * interrupts, restore interrupts and save interrupts.  We usually have 6 or 10
- * bytes to patch into: the Guest versions of these operations are small enough
- * that we can fit comfortably.
+ * patch two of the simplest of the most commonly called functions: disable
+ * interrupts and save interrupts.  We usually have 6 or 10 bytes to patch
+ * into: the Guest versions of these operations are small enough that we can
+ * fit comfortably.
  *
  * First we need assembly templates of each of the patchable Guest operations,
  * and these are in i386_head.S. */
@@ -986,8 +997,6 @@ static const struct lguest_insns
 	const char *start, *end;
 } lguest_insns[] = {
 	[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
-	[PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti },
-	[PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf },
 	[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
 };
 
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index f79541989471..3e0c5545d59c 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -46,8 +46,6 @@ ENTRY(lguest_entry)
 .globl lgstart_##name; .globl lgend_##name
 
 LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
-LGUEST_PATCH(sti, movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled)
-LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
 LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
 /*:*/
 