author    Jeremy Fitzhardinge <jeremy@xensource.com>  2007-10-16 14:51:29 -0400
committer Jeremy Fitzhardinge <jeremy@goop.org>       2007-10-16 14:51:29 -0400
commit    8965c1c0950d459d99b8b81dfc1ab02e3d2cfb08 (patch)
tree      50ad53453562c7dc50afa866d52345bd6e820bef /drivers/lguest
parent    93b1eab3d29e7ea32ee583de3362da84db06ded8 (diff)
paravirt: clean up lazy mode handling
Currently, the set_lazy_mode pv_op is overloaded with 5 functions:
 1. enter lazy cpu mode
 2. leave lazy cpu mode
 3. enter lazy mmu mode
 4. leave lazy mmu mode
 5. flush pending batched operations

This complicates each paravirt backend, since it needs to deal with all
the possible state transitions, handling flushing, etc.  In particular,
flushing is quite distinct from the other 4 functions, and seems to just
cause complication.

This patch removes the set_lazy_mode operation, and adds "enter" and
"leave" lazy mode operations on mmu_ops and cpu_ops.  All the logic
associated with entering and leaving lazy states is now in common code
(basically BUG_ONs to make sure that no mode is current when entering a
lazy mode, and that the mode is current when leaving).  Flushing is also
handled in a common way, by simply leaving and re-entering the lazy mode.

The result is that the Xen, lguest and VMI lazy mode implementations are
much simpler.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Zach Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: "Glauber de Oliveira Costa" <glommer@gmail.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
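To make the description concrete, here is a rough sketch of the common-code pattern it
describes: one lazy-mode state per CPU, BUG_ON checks when entering and leaving, and a
flush done by simply leaving and re-entering.  Only paravirt_get_lazy_mode(),
paravirt_enter_lazy_mmu(), paravirt_enter_lazy_cpu() and paravirt_leave_lazy() are
visible in the lguest hunks below; the enter_lazy() helper, the flush_lazy_mmu()
wrapper and the exact bodies are paraphrased from this message, not taken from the
drivers/lguest diff.

/* Sketch only: the common lazy-mode bookkeeping described above. */
static enum paravirt_lazy_mode lazy_mode = PARAVIRT_LAZY_NONE;	/* per-CPU in reality */

static void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(lazy_mode != PARAVIRT_LAZY_NONE);	/* no mode may already be active */
	lazy_mode = mode;
}

void paravirt_enter_lazy_mmu(void) { enter_lazy(PARAVIRT_LAZY_MMU); }
void paravirt_enter_lazy_cpu(void) { enter_lazy(PARAVIRT_LAZY_CPU); }

void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(lazy_mode != mode);			/* must leave the mode we entered */
	lazy_mode = PARAVIRT_LAZY_NONE;
}

enum paravirt_lazy_mode paravirt_get_lazy_mode(void) { return lazy_mode; }

/* Flushing is no longer a separate op: leave and re-enter the current mode. */
static void flush_lazy_mmu(void)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		pv_mmu_ops.lazy_mode.leave();
		pv_mmu_ops.lazy_mode.enter();
	}
}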
Diffstat (limited to 'drivers/lguest')
-rw-r--r--  drivers/lguest/lguest.c | 32
1 file changed, 11 insertions, 21 deletions
diff --git a/drivers/lguest/lguest.c b/drivers/lguest/lguest.c
index ca9b844f37c2..c302629e0895 100644
--- a/drivers/lguest/lguest.c
+++ b/drivers/lguest/lguest.c
@@ -97,29 +97,17 @@ static cycle_t clock_base;
  * them as a batch when lazy_mode is eventually turned off. Because hypercalls
  * are reasonably expensive, batching them up makes sense. For example, a
  * large mmap might update dozens of page table entries: that code calls
- * lguest_lazy_mode(PARAVIRT_LAZY_MMU), does the dozen updates, then calls
- * lguest_lazy_mode(PARAVIRT_LAZY_NONE).
+ * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
+ * lguest_leave_lazy_mode().
  *
  * So, when we're in lazy mode, we call async_hypercall() to store the call for
  * future processing. When lazy mode is turned off we issue a hypercall to
  * flush the stored calls.
- *
- * There's also a hack where "mode" is set to "PARAVIRT_LAZY_FLUSH" which
- * indicates we're to flush any outstanding calls immediately. This is used
- * when an interrupt handler does a kmap_atomic(): the page table changes must
- * happen immediately even if we're in the middle of a batch. Usually we're
- * not, though, so there's nothing to do. */
-static enum paravirt_lazy_mode lazy_mode; /* Note: not SMP-safe! */
-static void lguest_lazy_mode(enum paravirt_lazy_mode mode)
+ */
+static void lguest_leave_lazy_mode(void)
 {
-	if (mode == PARAVIRT_LAZY_FLUSH) {
-		if (unlikely(lazy_mode != PARAVIRT_LAZY_NONE))
-			hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-	} else {
-		lazy_mode = mode;
-		if (mode == PARAVIRT_LAZY_NONE)
-			hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-	}
+	paravirt_leave_lazy(paravirt_get_lazy_mode());
+	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
 }
 
 static void lazy_hcall(unsigned long call,
@@ -127,7 +115,7 @@ static void lazy_hcall(unsigned long call,
 		       unsigned long arg2,
 		       unsigned long arg3)
 {
-	if (lazy_mode == PARAVIRT_LAZY_NONE)
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
 		hcall(call, arg1, arg2, arg3);
 	else
 		async_hcall(call, arg1, arg2, arg3);
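Seen from a caller, the two hunks above combine like this: generic page-table code
brackets a batch of updates with the lazy-mode hooks, each update funnels through
lazy_hcall(), and the queued hypercalls are flushed when the leave hook runs.  The
helper below is illustrative only, not code from this patch; it assumes the generic
arch_enter/leave_lazy_mmu_mode() wrappers dispatch to the new pv_mmu_ops.lazy_mode ops.

/* Illustrative caller, not part of this patch. */
static void update_ptes(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t *ptes, int npages)
{
	int i;

	arch_enter_lazy_mmu_mode();		/* pv_mmu_ops.lazy_mode.enter */
	for (i = 0; i < npages; i++)
		set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, ptes[i]);
						/* each update reaches lazy_hcall(),
						 * which queues it via async_hcall() */
	arch_leave_lazy_mmu_mode();		/* pv_mmu_ops.lazy_mode.leave ->
						 * lguest_leave_lazy_mode() flushes */
}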
@@ -1011,6 +999,8 @@ __init void lguest_init(void *boot)
 	pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
 	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
 	pv_cpu_ops.wbinvd = lguest_wbinvd;
+	pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
+	pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
 
 	/* pagetable management */
 	pv_mmu_ops.write_cr3 = lguest_write_cr3;
@@ -1022,6 +1012,8 @@ __init void lguest_init(void *boot)
 	pv_mmu_ops.set_pmd = lguest_set_pmd;
 	pv_mmu_ops.read_cr2 = lguest_read_cr2;
 	pv_mmu_ops.read_cr3 = lguest_read_cr3;
+	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
+	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	/* apic read/write intercepts */
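The four added assignments above target the lazy_mode sub-structure that this series
adds to pv_cpu_ops and pv_mmu_ops.  Its layout is roughly the following (paraphrased;
the structure itself is defined outside this drivers/lguest diff):

struct pv_lazy_ops {
	void (*enter)(void);	/* start batching (paravirt_enter_lazy_cpu/mmu) */
	void (*leave)(void);	/* stop batching and flush (lguest_leave_lazy_mode) */
};
/* embedded as pv_cpu_ops.lazy_mode and pv_mmu_ops.lazy_mode */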
@@ -1034,8 +1026,6 @@ __init void lguest_init(void *boot)
 	pv_time_ops.get_wallclock = lguest_get_wallclock;
 	pv_time_ops.time_init = lguest_time_init;
 
-	pv_misc_ops.set_lazy_mode = lguest_lazy_mode;
-
 	/* Now is a good time to look at the implementations of these functions
 	 * before returning to the rest of lguest_init(). */
 