path: root/arch/x86/lguest
author	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-02-18 02:46:21 -0500
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-03-30 02:36:00 -0400
commit	b407fc57b815b2016186220baabc76cc8264206e (patch)
tree	29949f727af8b6c19fd689fa10f123ea84c03cb0 /arch/x86/lguest
parent	7fd7d83d49914f03aefffba6aee09032fcd54cce (diff)
x86/paravirt: flush pending mmu updates on context switch
Impact: allow preemption during lazy mmu updates

If we're in lazy mmu mode when context switching, leave lazy mmu mode,
but remember the task's state in TIF_LAZY_MMU_UPDATES. When we resume
the task, check this flag and re-enter lazy mmu mode if it is set.

This sets things up to allow lazy mmu mode while preemptible, though
that won't actually be active until the next change.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
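[Note: the flag handling described above lands in the generic paravirt code, outside this lguest-only diffstat. The following is a minimal illustrative sketch of that context-switch logic, assuming the kernel's arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode() helpers, paravirt_get_lazy_mode(), and the thread-info flag accessors; the example_* function names are hypothetical, not the literal hunks of this patch.]

/*
 * Illustrative sketch only: the flush-on-context-switch idea from the
 * commit message, not the exact code added by this patch.
 */
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <asm/paravirt.h>

static void example_start_context_switch(struct task_struct *prev)
{
	/* If the outgoing task was batching mmu updates, flush them now... */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		/* ...but remember it wants lazy mmu mode back when resumed. */
		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
	}
}

static void example_end_context_switch(struct task_struct *next)
{
	/* Re-enter lazy mmu mode if the incoming task was interrupted in it. */
	if (test_and_clear_ti_thread_flag(task_thread_info(next),
					  TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}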
Diffstat (limited to 'arch/x86/lguest')
-rw-r--r--	arch/x86/lguest/boot.c	14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 9fe4ddaa8f6f..41a5562e710e 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -147,10 +147,16 @@ static void lazy_hcall(unsigned long call,
 
 /* When lazy mode is turned off reset the per-cpu lazy mode variable and then
  * issue the do-nothing hypercall to flush any stored calls. */
-static void lguest_leave_lazy_mode(void)
+static void lguest_leave_lazy_mmu_mode(void)
 {
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
 	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+	paravirt_leave_lazy_mmu();
+}
+
+static void lguest_leave_lazy_cpu_mode(void)
+{
+	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+	paravirt_leave_lazy_cpu();
 }
 
 /*G:033
@@ -1026,7 +1032,7 @@ __init void lguest_init(void)
 	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
 	pv_cpu_ops.wbinvd = lguest_wbinvd;
 	pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
-	pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
+	pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_cpu_mode;
 
 	/* pagetable management */
 	pv_mmu_ops.write_cr3 = lguest_write_cr3;
@@ -1039,7 +1045,7 @@ __init void lguest_init(void)
 	pv_mmu_ops.read_cr2 = lguest_read_cr2;
 	pv_mmu_ops.read_cr3 = lguest_read_cr3;
 	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
-	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
+	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	/* apic read/write intercepts */