path: root/arch/x86/kernel/paravirt_32.c
author	Jeremy Fitzhardinge <jeremy@xensource.com>	2007-10-16 14:51:29 -0400
committer	Jeremy Fitzhardinge <jeremy@goop.org>	2007-10-16 14:51:29 -0400
commit	8965c1c0950d459d99b8b81dfc1ab02e3d2cfb08 (patch)
tree	50ad53453562c7dc50afa866d52345bd6e820bef	/arch/x86/kernel/paravirt_32.c
parent	93b1eab3d29e7ea32ee583de3362da84db06ded8 (diff)
paravirt: clean up lazy mode handling
Currently, the set_lazy_mode pv_op is overloaded with 5 functions:
 1. enter lazy cpu mode
 2. leave lazy cpu mode
 3. enter lazy mmu mode
 4. leave lazy mmu mode
 5. flush pending batched operations

This complicates each paravirt backend, since it needs to deal with all
the possible state transitions, handling flushing, etc.  In particular,
flushing is quite distinct from the other 4 functions, and seems to just
cause complication.

This patch removes the set_lazy_mode operation, and adds "enter" and
"leave" lazy mode operations on mmu_ops and cpu_ops.  All the logic
associated with entering and leaving lazy states is now in common code
(basically BUG_ONs to make sure that no mode is current when entering a
lazy mode, and to make sure that the mode is current when leaving).
Also, flush is handled in a common way, by simply leaving and
re-entering the lazy mode.

The result is that the Xen, lguest and VMI lazy mode implementations
are much simpler.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Zach Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Anthony Liguory <aliguori@us.ibm.com>
Cc: "Glauber de Oliveira Costa" <glommer@gmail.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
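Editor's note: as the message says, flushing pending batched operations is no longer a separate pv_op; common code simply leaves and immediately re-enters the current lazy mode, which forces the backend to flush its queue. A minimal sketch of that idea, assuming the arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode() wrappers in paravirt.h dispatch to the new pv_mmu_ops.lazy_mode hooks (the flush_lazy_mmu() helper name here is purely illustrative, not part of this file):

/*
 * Sketch only: flush pending batched MMU updates by leaving and
 * immediately re-entering lazy MMU mode.  The arch_*_lazy_mmu_mode()
 * wrappers are assumed to call pv_mmu_ops.lazy_mode.leave/.enter.
 */
static void flush_lazy_mmu(void)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();	/* backend flushes its queue */
		arch_enter_lazy_mmu_mode();	/* resume batching */
	}
}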
Diffstat (limited to 'arch/x86/kernel/paravirt_32.c')
-rw-r--r--	arch/x86/kernel/paravirt_32.c	58
1 file changed, 53 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c
index fa412515af79..6a80d67c2121 100644
--- a/arch/x86/kernel/paravirt_32.c
+++ b/arch/x86/kernel/paravirt_32.c
@@ -164,7 +164,6 @@ static void *get_call_destination(u8 type)
 {
 	struct paravirt_patch_template tmpl = {
 		.pv_init_ops = pv_init_ops,
-		.pv_misc_ops = pv_misc_ops,
 		.pv_time_ops = pv_time_ops,
 		.pv_cpu_ops = pv_cpu_ops,
 		.pv_irq_ops = pv_irq_ops,
@@ -282,6 +281,49 @@ int paravirt_disable_iospace(void)
 	return ret;
 }
 
+static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
+
+static inline void enter_lazy(enum paravirt_lazy_mode mode)
+{
+	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
+	BUG_ON(preemptible());
+
+	x86_write_percpu(paravirt_lazy_mode, mode);
+}
+
+void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
+{
+	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
+	BUG_ON(preemptible());
+
+	x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
+}
+
+void paravirt_enter_lazy_mmu(void)
+{
+	enter_lazy(PARAVIRT_LAZY_MMU);
+}
+
+void paravirt_leave_lazy_mmu(void)
+{
+	paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
+}
+
+void paravirt_enter_lazy_cpu(void)
+{
+	enter_lazy(PARAVIRT_LAZY_CPU);
+}
+
+void paravirt_leave_lazy_cpu(void)
+{
+	paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
+}
+
+enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+{
+	return x86_read_percpu(paravirt_lazy_mode);
+}
+
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -347,6 +389,11 @@ struct pv_cpu_ops pv_cpu_ops = {
 
 	.set_iopl_mask = native_set_iopl_mask,
 	.io_delay = native_io_delay,
+
+	.lazy_mode = {
+		.enter = paravirt_nop,
+		.leave = paravirt_nop,
+	},
 };
 
 struct pv_apic_ops pv_apic_ops = {
@@ -360,10 +407,6 @@ struct pv_apic_ops pv_apic_ops = {
 #endif
 };
 
-struct pv_misc_ops pv_misc_ops = {
-	.set_lazy_mode = paravirt_nop,
-};
-
 struct pv_mmu_ops pv_mmu_ops = {
 	.pagetable_setup_start = native_pagetable_setup_start,
 	.pagetable_setup_done = native_pagetable_setup_done,
@@ -414,6 +457,11 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.dup_mmap = paravirt_nop,
 	.exit_mmap = paravirt_nop,
 	.activate_mm = paravirt_nop,
+
+	.lazy_mode = {
+		.enter = paravirt_nop,
+		.leave = paravirt_nop,
+	},
 };
 
 EXPORT_SYMBOL_GPL(pv_time_ops);
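Editor's note: with the common enter/leave bookkeeping above, a backend only has to supply plain hooks rather than a multi-way set_lazy_mode handler. A hypothetical sketch of how a guest implementation might wire itself into the new lazy_mode fields (the my_* names are illustrative and not taken from this patch):

/* Hypothetical backend: batch MMU updates while in lazy MMU mode. */
static void my_enter_lazy_mmu(void)
{
	paravirt_enter_lazy_mmu();	/* common BUG_ON checks + per-cpu state */
	/* start batching MMU updates here */
}

static void my_leave_lazy_mmu(void)
{
	/* flush any batched MMU updates here */
	paravirt_leave_lazy_mmu();	/* common BUG_ON checks + per-cpu state */
}

static void __init my_init_paravirt(void)
{
	pv_mmu_ops.lazy_mode.enter = my_enter_lazy_mmu;
	pv_mmu_ops.lazy_mode.leave = my_leave_lazy_mmu;
}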