author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-02-18 02:46:21 -0500
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-03-30 02:36:00 -0400
commit		b407fc57b815b2016186220baabc76cc8264206e (patch)
tree		29949f727af8b6c19fd689fa10f123ea84c03cb0 /arch
parent		7fd7d83d49914f03aefffba6aee09032fcd54cce (diff)
x86/paravirt: flush pending mmu updates on context switch
Impact: allow preemption during lazy mmu updates
If we're in lazy mmu mode when context switching, leave
lazy mmu mode, but remember the task's state in
TIF_LAZY_MMU_UPDATES.  When we resume the task, check this
flag and re-enter lazy mmu mode if it's set.
This sets things up for allowing lazy mmu mode while preemptible,
though that won't actually be active until the next change.
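In outline, the new context-switch handling in
arch/x86/kernel/paravirt.c works like this (a commented sketch of the
logic added in the paravirt.c hunk below; percpu_read, set_thread_flag
and friends are the existing kernel helpers the patch relies on):

	void paravirt_enter_lazy_cpu(void)
	{
		/* Starting a context switch: if the outgoing task was
		 * batching mmu updates, flush them now and remember
		 * that fact on the task itself. */
		if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
			arch_leave_lazy_mmu_mode();
			set_thread_flag(TIF_LAZY_MMU_UPDATES);
		}
		enter_lazy(PARAVIRT_LAZY_CPU);
	}

	void paravirt_leave_lazy_cpu(void)
	{
		leave_lazy(PARAVIRT_LAZY_CPU);

		/* Context switch finished: if the incoming task was in
		 * lazy mmu mode when it was scheduled out, re-enter it. */
		if (test_and_clear_thread_flag(TIF_LAZY_MMU_UPDATES))
			arch_enter_lazy_mmu_mode();
	}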
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/include/asm/paravirt.h    |  1
-rw-r--r--   arch/x86/include/asm/thread_info.h |  2
-rw-r--r--   arch/x86/kernel/kvm.c              |  2
-rw-r--r--   arch/x86/kernel/paravirt.c         | 13
-rw-r--r--   arch/x86/kernel/vmi_32.c           | 14
-rw-r--r--   arch/x86/lguest/boot.c             | 14
-rw-r--r--   arch/x86/xen/enlighten.c           |  6
-rw-r--r--   arch/x86/xen/mmu.c                 |  7
-rw-r--r--   arch/x86/xen/xen-ops.h             |  1
9 files changed, 42 insertions, 18 deletions
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 7b28abac323f..58d2481b01a6 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1418,7 +1418,6 @@ void paravirt_enter_lazy_cpu(void);
 void paravirt_leave_lazy_cpu(void);
 void paravirt_enter_lazy_mmu(void);
 void paravirt_leave_lazy_mmu(void);
-void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
 
 #define __HAVE_ARCH_START_CONTEXT_SWITCH
 static inline void arch_start_context_switch(void)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index df9d5f78385e..2f34d643b567 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -94,6 +94,7 @@ struct thread_info {
 #define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
 #define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
 #define TIF_DS_AREA_MSR		26	/* uses thread_struct.ds_area_msr */
+#define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -115,6 +116,7 @@ struct thread_info {
 #define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
 #define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
 #define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
+#define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
 
 /* work to do in syscall_trace_enter() */
 #define _TIF_WORK_SYSCALL_ENTRY	\
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 478bca986eca..5d7f6e76b5dc 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -201,7 +201,7 @@ static void kvm_leave_lazy_mmu(void)
 	struct kvm_para_state *state = kvm_para_state();
 
 	mmu_queue_flush(state);
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
+	paravirt_leave_lazy_mmu();
 	state->mode = paravirt_get_lazy_mode();
 }
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 5eea9548216b..430a0e30577b 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -252,7 +252,7 @@ static inline void enter_lazy(enum paravirt_lazy_mode mode)
 	__get_cpu_var(paravirt_lazy_mode) = mode;
 }
 
-void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
+static void leave_lazy(enum paravirt_lazy_mode mode)
 {
 	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != mode);
 	BUG_ON(preemptible());
@@ -267,17 +267,24 @@ void paravirt_enter_lazy_mmu(void)
 
 void paravirt_leave_lazy_mmu(void)
 {
-	paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
+	leave_lazy(PARAVIRT_LAZY_MMU);
 }
 
 void paravirt_enter_lazy_cpu(void)
 {
+	if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
+		arch_leave_lazy_mmu_mode();
+		set_thread_flag(TIF_LAZY_MMU_UPDATES);
+	}
 	enter_lazy(PARAVIRT_LAZY_CPU);
 }
 
 void paravirt_leave_lazy_cpu(void)
 {
-	paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
+	leave_lazy(PARAVIRT_LAZY_CPU);
+
+	if (test_and_clear_thread_flag(TIF_LAZY_MMU_UPDATES))
+		arch_enter_lazy_mmu_mode();
 }
 
 enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 2cc4a90e2cb3..950929c607d3 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -473,16 +473,22 @@ static void vmi_enter_lazy_cpu(void)
 	vmi_ops.set_lazy_mode(2);
 }
 
+static void vmi_leave_lazy_cpu(void)
+{
+	vmi_ops.set_lazy_mode(0);
+	paravirt_leave_lazy_cpu();
+}
+
 static void vmi_enter_lazy_mmu(void)
 {
 	paravirt_enter_lazy_mmu();
 	vmi_ops.set_lazy_mode(1);
 }
 
-static void vmi_leave_lazy(void)
+static void vmi_leave_lazy_mmu(void)
 {
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
 	vmi_ops.set_lazy_mode(0);
+	paravirt_leave_lazy_mmu();
 }
 
 static inline int __init check_vmi_rom(struct vrom_header *rom)
@@ -718,12 +724,12 @@ static inline int __init activate_vmi(void)
 
 	para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
 		  set_lazy_mode, SetLazyMode);
-	para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
+	para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy_cpu,
 		  set_lazy_mode, SetLazyMode);
 
 	para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
 		  set_lazy_mode, SetLazyMode);
-	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
+	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu,
 		  set_lazy_mode, SetLazyMode);
 
 	/* user and kernel flush are just handled with different flags to FlushTLB */
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 9fe4ddaa8f6f..41a5562e710e 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -147,10 +147,16 @@ static void lazy_hcall(unsigned long call,
 
 /* When lazy mode is turned off reset the per-cpu lazy mode variable and then
  * issue the do-nothing hypercall to flush any stored calls. */
-static void lguest_leave_lazy_mode(void)
+static void lguest_leave_lazy_mmu_mode(void)
 {
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
 	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+	paravirt_leave_lazy_mmu();
+}
+
+static void lguest_leave_lazy_cpu_mode(void)
+{
+	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+	paravirt_leave_lazy_cpu();
 }
 
 /*G:033
@@ -1026,7 +1032,7 @@ __init void lguest_init(void)
 	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
 	pv_cpu_ops.wbinvd = lguest_wbinvd;
 	pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
-	pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
+	pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_cpu_mode;
 
 	/* pagetable management */
 	pv_mmu_ops.write_cr3 = lguest_write_cr3;
@@ -1039,7 +1045,7 @@ __init void lguest_init(void)
 	pv_mmu_ops.read_cr2 = lguest_read_cr2;
 	pv_mmu_ops.read_cr3 = lguest_read_cr3;
 	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
-	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
+	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	/* apic read/write intercepts */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 82cd39a6cbd3..f586e63b9a63 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -203,10 +203,10 @@ static unsigned long xen_get_debugreg(int reg)
 	return HYPERVISOR_get_debugreg(reg);
 }
 
-void xen_leave_lazy(void)
+static void xen_leave_lazy_cpu(void)
 {
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
 	xen_mc_flush();
+	paravirt_leave_lazy_cpu();
 }
 
 static unsigned long xen_store_tr(void)
@@ -819,7 +819,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 
 	.lazy_mode = {
 		.enter = paravirt_enter_lazy_cpu,
-		.leave = xen_leave_lazy,
+		.leave = xen_leave_lazy_cpu,
 	},
 };
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 6b98f87232ac..f5f8faa4f76c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1816,6 +1816,11 @@ __init void xen_post_allocator_init(void)
 	xen_mark_init_mm_pinned();
 }
 
+static void xen_leave_lazy_mmu(void)
+{
+	xen_mc_flush();
+	paravirt_leave_lazy_mmu();
+}
 
 const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.pagetable_setup_start = xen_pagetable_setup_start,
@@ -1891,7 +1896,7 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
 
 	.lazy_mode = {
 		.enter = paravirt_enter_lazy_mmu,
-		.leave = xen_leave_lazy,
+		.leave = xen_leave_lazy_mmu,
 	},
 
 	.set_fixmap = xen_set_fixmap,
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 2f5ef2632ea2..f897cdffccb6 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -30,7 +30,6 @@ pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
 void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
 
-void xen_leave_lazy(void);
 void xen_post_allocator_init(void);
 
 char * __init xen_memory_setup(void);