author		Jeremy Fitzhardinge <jeremy@xensource.com>	2007-10-16 14:51:29 -0400
committer	Jeremy Fitzhardinge <jeremy@goop.org>	2007-10-16 14:51:29 -0400
commit		8965c1c0950d459d99b8b81dfc1ab02e3d2cfb08 (patch)
tree		50ad53453562c7dc50afa866d52345bd6e820bef /arch/x86/xen/enlighten.c
parent		93b1eab3d29e7ea32ee583de3362da84db06ded8 (diff)
paravirt: clean up lazy mode handling
Currently, the set_lazy_mode pv_op is overloaded with 5 functions
(see the sketch after this list):
1. enter lazy cpu mode
2. leave lazy cpu mode
3. enter lazy mmu mode
4. leave lazy mmu mode
5. flush pending batched operations
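All five cases were funneled through a single hook taking a mode
argument. The following is a sketch reconstructed from the removed
Xen code in the diff below; the exact header declaration may differ:

enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,	/* no batching active */
	PARAVIRT_LAZY_MMU,	/* batching mmu updates */
	PARAVIRT_LAZY_CPU,	/* batching cpu state updates */
	PARAVIRT_LAZY_FLUSH,	/* pseudo-mode: flush any pending batch */
};

/* one hook, five meanings; every backend had to decode them itself */
struct pv_misc_ops {
	void (*set_lazy_mode)(enum paravirt_lazy_mode mode);
};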
This complicates each paravirt backend, since every one must handle
all the possible state transitions and do its own flushing. In
particular, flushing is quite distinct from the other 4 functions,
and is a disproportionate source of complication.
This patch removes the set_lazy_mode operation and adds "enter" and
"leave" lazy mode operations to mmu_ops and cpu_ops. All the logic
associated with entering and leaving lazy states is now in common
code: essentially BUG_ONs to make sure that no mode is current when
entering a lazy mode, and that the expected mode is current when
leaving, as sketched below.
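The common code ends up roughly like this. This is a sketch assembled
from the helper names visible in the diff (paravirt_enter_lazy_cpu,
paravirt_enter_lazy_mmu, paravirt_leave_lazy, paravirt_get_lazy_mode);
the name of the shared per-cpu variable is assumed:

/* assumed name for the shared per-cpu lazy mode state */
static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode);

static void enter_lazy(enum paravirt_lazy_mode mode)
{
	/* no mode may be current when entering a lazy mode */
	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
	BUG_ON(preemptible());

	x86_write_percpu(paravirt_lazy_mode, mode);
}

void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
{
	/* the mode being left must be the current one */
	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
	BUG_ON(preemptible());

	x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
	enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_enter_lazy_cpu(void)
{
	enter_lazy(PARAVIRT_LAZY_CPU);
}

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
	return x86_read_percpu(paravirt_lazy_mode);
}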
Flushing is likewise handled in a common way: by simply leaving and
re-entering the lazy mode, as the sketch below shows.
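That is, a flush request reduces to leave-then-enter. A sketch,
assuming arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode()
wrappers that dispatch to the new lazy_mode.enter/.leave ops (those
wrapper names are not shown in this diff):

static inline void arch_flush_lazy_mmu_mode(void)
{
	/* flush pending batched operations by briefly dropping out
	 * of lazy mmu mode and immediately re-entering it */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}
}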
The result is that the Xen, lguest and VMI lazy mode implementations
are much simpler.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Zach Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: "Glauber de Oliveira Costa" <glommer@gmail.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Diffstat (limited to 'arch/x86/xen/enlighten.c')
-rw-r--r--	arch/x86/xen/enlighten.c	| 40
1 file changed, 12 insertions, 28 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 3d3bf05dec7f..7171a0736071 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -52,8 +52,6 @@
 
 EXPORT_SYMBOL_GPL(hypercall_page);
 
-DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
-
 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 DEFINE_PER_CPU(unsigned long, xen_cr3);
@@ -249,29 +247,10 @@ static void xen_halt(void)
 		xen_safe_halt();
 }
 
-static void xen_set_lazy_mode(enum paravirt_lazy_mode mode)
+static void xen_leave_lazy(void)
 {
-	BUG_ON(preemptible());
-
-	switch (mode) {
-	case PARAVIRT_LAZY_NONE:
-		BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
-		break;
-
-	case PARAVIRT_LAZY_MMU:
-	case PARAVIRT_LAZY_CPU:
-		BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE);
-		break;
-
-	case PARAVIRT_LAZY_FLUSH:
-		/* flush if necessary, but don't change state */
-		if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE)
-			xen_mc_flush();
-		return;
-	}
-
+	paravirt_leave_lazy(paravirt_get_lazy_mode());
 	xen_mc_flush();
-	x86_write_percpu(xen_lazy_mode, mode);
 }
 
 static unsigned long xen_store_tr(void)
@@ -358,7 +337,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 	 * loaded properly.  This will go away as soon as Xen has been
 	 * modified to not save/restore %gs for normal hypercalls.
 	 */
-	if (xen_get_lazy_mode() == PARAVIRT_LAZY_CPU)
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)
 		loadsegment(gs, 0);
 }
 
@@ -962,6 +941,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 
 	.set_iopl_mask = xen_set_iopl_mask,
 	.io_delay = xen_io_delay,
+
+	.lazy_mode = {
+		.enter = paravirt_enter_lazy_cpu,
+		.leave = xen_leave_lazy,
+	},
 };
 
 static const struct pv_irq_ops xen_irq_ops __initdata = {
@@ -1037,10 +1021,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.activate_mm = xen_activate_mm,
 	.dup_mmap = xen_dup_mmap,
 	.exit_mmap = xen_exit_mmap,
-};
 
-static const struct pv_misc_ops xen_misc_ops __initdata = {
-	.set_lazy_mode = xen_set_lazy_mode,
+	.lazy_mode = {
+		.enter = paravirt_enter_lazy_mmu,
+		.leave = xen_leave_lazy,
+	},
 };
 
 #ifdef CONFIG_SMP
@@ -1114,7 +1099,6 @@ asmlinkage void __init xen_start_kernel(void)
 	pv_irq_ops = xen_irq_ops;
 	pv_apic_ops = xen_apic_ops;
 	pv_mmu_ops = xen_mmu_ops;
-	pv_misc_ops = xen_misc_ops;
 
 	machine_ops = xen_machine_ops;
 