 arch/x86/kernel/paravirt_32.c | 58
 arch/x86/kernel/vmi_32.c      | 39
 arch/x86/xen/enlighten.c      | 40
 arch/x86/xen/mmu.c            |  2
 arch/x86/xen/multicalls.h     |  2
 arch/x86/xen/xen-ops.h        |  7
 drivers/lguest/lguest.c       | 32
 include/asm-x86/paravirt.h    | 52
 8 files changed, 134 insertions(+), 98 deletions(-)
diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c
index fa412515af79..6a80d67c2121 100644
--- a/arch/x86/kernel/paravirt_32.c
+++ b/arch/x86/kernel/paravirt_32.c
@@ -164,7 +164,6 @@ static void *get_call_destination(u8 type)
 {
 	struct paravirt_patch_template tmpl = {
 		.pv_init_ops = pv_init_ops,
-		.pv_misc_ops = pv_misc_ops,
 		.pv_time_ops = pv_time_ops,
 		.pv_cpu_ops = pv_cpu_ops,
 		.pv_irq_ops = pv_irq_ops,
@@ -282,6 +281,49 @@ int paravirt_disable_iospace(void)
 	return ret;
 }
 
+static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
+
+static inline void enter_lazy(enum paravirt_lazy_mode mode)
+{
+	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
+	BUG_ON(preemptible());
+
+	x86_write_percpu(paravirt_lazy_mode, mode);
+}
+
+void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
+{
+	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
+	BUG_ON(preemptible());
+
+	x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
+}
+
+void paravirt_enter_lazy_mmu(void)
+{
+	enter_lazy(PARAVIRT_LAZY_MMU);
+}
+
+void paravirt_leave_lazy_mmu(void)
+{
+	paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
+}
+
+void paravirt_enter_lazy_cpu(void)
+{
+	enter_lazy(PARAVIRT_LAZY_CPU);
+}
+
+void paravirt_leave_lazy_cpu(void)
+{
+	paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
+}
+
+enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+{
+	return x86_read_percpu(paravirt_lazy_mode);
+}
+
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -347,6 +389,11 @@ struct pv_cpu_ops pv_cpu_ops = {
 
 	.set_iopl_mask = native_set_iopl_mask,
 	.io_delay = native_io_delay,
+
+	.lazy_mode = {
+		.enter = paravirt_nop,
+		.leave = paravirt_nop,
+	},
 };
 
 struct pv_apic_ops pv_apic_ops = {
@@ -360,10 +407,6 @@ struct pv_apic_ops pv_apic_ops = {
 #endif
 };
 
-struct pv_misc_ops pv_misc_ops = {
-	.set_lazy_mode = paravirt_nop,
-};
-
 struct pv_mmu_ops pv_mmu_ops = {
 	.pagetable_setup_start = native_pagetable_setup_start,
 	.pagetable_setup_done = native_pagetable_setup_done,
@@ -414,6 +457,11 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.dup_mmap = paravirt_nop,
 	.exit_mmap = paravirt_nop,
 	.activate_mm = paravirt_nop,
+
+	.lazy_mode = {
+		.enter = paravirt_nop,
+		.leave = paravirt_nop,
+	},
 };
 
 EXPORT_SYMBOL_GPL(pv_time_ops);
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 67cea5c2e3e0..f02bad68abaa 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -552,24 +552,22 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 }
 #endif
 
-static void vmi_set_lazy_mode(enum paravirt_lazy_mode mode)
+static void vmi_enter_lazy_cpu(void)
 {
-	static DEFINE_PER_CPU(enum paravirt_lazy_mode, lazy_mode);
-
-	if (!vmi_ops.set_lazy_mode)
-		return;
+	paravirt_enter_lazy_cpu();
+	vmi_ops.set_lazy_mode(2);
+}
 
-	/* Modes should never nest or overlap */
-	BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE ||
-					     mode == PARAVIRT_LAZY_FLUSH));
+static void vmi_enter_lazy_mmu(void)
+{
+	paravirt_enter_lazy_mmu();
+	vmi_ops.set_lazy_mode(1);
+}
 
-	if (mode == PARAVIRT_LAZY_FLUSH) {
-		vmi_ops.set_lazy_mode(0);
-		vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode));
-	} else {
-		vmi_ops.set_lazy_mode(mode);
-		__get_cpu_var(lazy_mode) = mode;
-	}
+static void vmi_leave_lazy(void)
+{
+	paravirt_leave_lazy(paravirt_get_lazy_mode());
+	vmi_ops.set_lazy_mode(0);
 }
 
 static inline int __init check_vmi_rom(struct vrom_header *rom)
@@ -798,7 +796,16 @@ static inline int __init activate_vmi(void)
 	para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
 	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
 	para_fill(pv_cpu_ops.io_delay, IODelay);
-	para_wrap(pv_misc_ops.set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
+
+	para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
+		  set_lazy_mode, SetLazyMode);
+	para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
+		  set_lazy_mode, SetLazyMode);
+
+	para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
+		  set_lazy_mode, SetLazyMode);
+	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
+		  set_lazy_mode, SetLazyMode);
 
 	/* user and kernel flush are just handled with different flags to FlushTLB */
 	para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 3d3bf05dec7f..7171a0736071 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -52,8 +52,6 @@
 
 EXPORT_SYMBOL_GPL(hypercall_page);
 
-DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
-
 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 DEFINE_PER_CPU(unsigned long, xen_cr3);
@@ -249,29 +247,10 @@ static void xen_halt(void)
 	xen_safe_halt();
 }
 
-static void xen_set_lazy_mode(enum paravirt_lazy_mode mode)
+static void xen_leave_lazy(void)
 {
-	BUG_ON(preemptible());
-
-	switch (mode) {
-	case PARAVIRT_LAZY_NONE:
-		BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
-		break;
-
-	case PARAVIRT_LAZY_MMU:
-	case PARAVIRT_LAZY_CPU:
-		BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE);
-		break;
-
-	case PARAVIRT_LAZY_FLUSH:
-		/* flush if necessary, but don't change state */
-		if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE)
-			xen_mc_flush();
-		return;
-	}
-
+	paravirt_leave_lazy(paravirt_get_lazy_mode());
 	xen_mc_flush();
-	x86_write_percpu(xen_lazy_mode, mode);
 }
 
 static unsigned long xen_store_tr(void)
@@ -358,7 +337,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 	 * loaded properly.  This will go away as soon as Xen has been
 	 * modified to not save/restore %gs for normal hypercalls.
 	 */
-	if (xen_get_lazy_mode() == PARAVIRT_LAZY_CPU)
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)
 		loadsegment(gs, 0);
 }
 
@@ -962,6 +941,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 
 	.set_iopl_mask = xen_set_iopl_mask,
 	.io_delay = xen_io_delay,
+
+	.lazy_mode = {
+		.enter = paravirt_enter_lazy_cpu,
+		.leave = xen_leave_lazy,
+	},
 };
 
 static const struct pv_irq_ops xen_irq_ops __initdata = {
@@ -1037,10 +1021,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.activate_mm = xen_activate_mm,
 	.dup_mmap = xen_dup_mmap,
 	.exit_mmap = xen_exit_mmap,
-};
 
-static const struct pv_misc_ops xen_misc_ops __initdata = {
-	.set_lazy_mode = xen_set_lazy_mode,
+	.lazy_mode = {
+		.enter = paravirt_enter_lazy_mmu,
+		.leave = xen_leave_lazy,
+	},
 };
 
 #ifdef CONFIG_SMP
@@ -1114,7 +1099,6 @@ asmlinkage void __init xen_start_kernel(void)
 	pv_irq_ops = xen_irq_ops;
 	pv_apic_ops = xen_apic_ops;
 	pv_mmu_ops = xen_mmu_ops;
-	pv_misc_ops = xen_misc_ops;
 
 	machine_ops = xen_machine_ops;
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 874db0cd1d2a..2061bdd3e7d8 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -155,7 +155,7 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, pte_t pteval)
 {
 	if (mm == current->mm || mm == &init_mm) {
-		if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
 			struct multicall_space mcs;
 			mcs = xen_mc_entry(0);
 
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index e6f7530b156c..5d96a5fa210c 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -35,7 +35,7 @@ void xen_mc_flush(void);
 /* Issue a multicall if we're not in a lazy mode */
 static inline void xen_mc_issue(unsigned mode)
 {
-	if ((xen_get_lazy_mode() & mode) == 0)
+	if ((paravirt_get_lazy_mode() & mode) == 0)
 		xen_mc_flush();
 
 	/* restore flags saved in xen_mc_batch */
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index b9aaea45f07f..b5697bae52d0 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -29,13 +29,6 @@ unsigned long long xen_sched_clock(void);
 
 void xen_mark_init_mm_pinned(void);
 
-DECLARE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
-
-static inline unsigned xen_get_lazy_mode(void)
-{
-	return x86_read_percpu(xen_lazy_mode);
-}
-
 void __init xen_fill_possible_map(void);
 
 void __init xen_setup_vcpu_info_placement(void);
diff --git a/drivers/lguest/lguest.c b/drivers/lguest/lguest.c
index ca9b844f37c2..c302629e0895 100644
--- a/drivers/lguest/lguest.c
+++ b/drivers/lguest/lguest.c
@@ -97,29 +97,17 @@ static cycle_t clock_base;
  * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
  * are reasonably expensive, batching them up makes sense.  For example, a
  * large mmap might update dozens of page table entries: that code calls
- * lguest_lazy_mode(PARAVIRT_LAZY_MMU), does the dozen updates, then calls
- * lguest_lazy_mode(PARAVIRT_LAZY_NONE).
+ * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
+ * lguest_leave_lazy_mode().
  *
  * So, when we're in lazy mode, we call async_hypercall() to store the call for
  * future processing.  When lazy mode is turned off we issue a hypercall to
  * flush the stored calls.
- *
- * There's also a hack where "mode" is set to "PARAVIRT_LAZY_FLUSH" which
- * indicates we're to flush any outstanding calls immediately.  This is used
- * when an interrupt handler does a kmap_atomic(): the page table changes must
- * happen immediately even if we're in the middle of a batch.  Usually we're
- * not, though, so there's nothing to do. */
-static enum paravirt_lazy_mode lazy_mode; /* Note: not SMP-safe! */
-static void lguest_lazy_mode(enum paravirt_lazy_mode mode)
+ */
+static void lguest_leave_lazy_mode(void)
 {
-	if (mode == PARAVIRT_LAZY_FLUSH) {
-		if (unlikely(lazy_mode != PARAVIRT_LAZY_NONE))
-			hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-	} else {
-		lazy_mode = mode;
-		if (mode == PARAVIRT_LAZY_NONE)
-			hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-	}
+	paravirt_leave_lazy(paravirt_get_lazy_mode());
+	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
 }
 
 static void lazy_hcall(unsigned long call,
@@ -127,7 +115,7 @@ static void lazy_hcall(unsigned long call,
 		       unsigned long arg2,
 		       unsigned long arg3)
 {
-	if (lazy_mode == PARAVIRT_LAZY_NONE)
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
 		hcall(call, arg1, arg2, arg3);
 	else
 		async_hcall(call, arg1, arg2, arg3);
@@ -1011,6 +999,8 @@ __init void lguest_init(void *boot)
 	pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
 	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
 	pv_cpu_ops.wbinvd = lguest_wbinvd;
+	pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
+	pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
 
 	/* pagetable management */
 	pv_mmu_ops.write_cr3 = lguest_write_cr3;
@@ -1022,6 +1012,8 @@ __init void lguest_init(void *boot)
 	pv_mmu_ops.set_pmd = lguest_set_pmd;
 	pv_mmu_ops.read_cr2 = lguest_read_cr2;
 	pv_mmu_ops.read_cr3 = lguest_read_cr3;
+	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
+	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	/* apic read/write intercepts */
@@ -1034,8 +1026,6 @@ __init void lguest_init(void *boot)
 	pv_time_ops.get_wallclock = lguest_get_wallclock;
 	pv_time_ops.time_init = lguest_time_init;
 
-	pv_misc_ops.set_lazy_mode = lguest_lazy_mode;
-
 	/* Now is a good time to look at the implementations of these functions
 	 * before returning to the rest of lguest_init(). */
 
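For orientation, the batching contract that the lguest hunks above rely on can be modelled in ordinary user-space C. The sketch below is illustrative only, not kernel code: the single `mode` variable stands in for the per-CPU lazy-mode state, and do_call(), lazy_call() and flush_queue() are hypothetical stand-ins for hcall(), lazy_hcall() and the LHCALL_FLUSH_ASYNC hypercall.

#include <stdio.h>

enum lazy_mode { LAZY_NONE, LAZY_MMU, LAZY_CPU };

static enum lazy_mode mode = LAZY_NONE; /* stand-in for the per-CPU variable */
static unsigned long queue[16];         /* stand-in for the async-call buffer */
static int queued;

/* Immediate "hypercall": plays the role of hcall(). */
static void do_call(unsigned long call)
{
	printf("call %lu issued\n", call);
}

/* Flush the deferred calls: plays the role of LHCALL_FLUSH_ASYNC. */
static void flush_queue(void)
{
	for (int i = 0; i < queued; i++)
		do_call(queue[i]);
	queued = 0;
}

/* Mirrors the lazy_hcall() dispatch in the hunk above. */
static void lazy_call(unsigned long call)
{
	if (mode == LAZY_NONE)
		do_call(call);          /* not batching: issue immediately */
	else
		queue[queued++] = call; /* batching: defer, like async_hcall() */
}

int main(void)
{
	mode = LAZY_MMU;                /* paravirt_enter_lazy_mmu() */
	lazy_call(1);                   /* deferred */
	lazy_call(2);                   /* deferred */
	mode = LAZY_NONE;               /* the generic leave ... */
	flush_queue();                  /* ... then the backend's flush */
	return 0;
}

The point of the split is visible even in this toy: entering and leaving lazy mode are generic state changes, and the only backend-specific work is the flush performed on leave.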
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 19726e12051e..f59d370c5df4 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -25,15 +25,6 @@ struct tss_struct;
 struct mm_struct;
 struct desc_struct;
 
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
-	PARAVIRT_LAZY_NONE = 0,
-	PARAVIRT_LAZY_MMU = 1,
-	PARAVIRT_LAZY_CPU = 2,
-	PARAVIRT_LAZY_FLUSH = 3,
-};
-
-
 /* general info */
 struct pv_info {
 	unsigned int kernel_rpl;
@@ -64,9 +55,10 @@ struct pv_init_ops {
 };
 
 
-struct pv_misc_ops {
+struct pv_lazy_ops {
 	/* Set deferred update mode, used for batching operations. */
-	void (*set_lazy_mode)(enum paravirt_lazy_mode mode);
+	void (*enter)(void);
+	void (*leave)(void);
 };
 
 struct pv_time_ops {
@@ -131,6 +123,8 @@ struct pv_cpu_ops {
 	/* These two are jmp to, not actually called. */
 	void (*irq_enable_sysexit)(void);
 	void (*iret)(void);
+
+	struct pv_lazy_ops lazy_mode;
 };
 
 struct pv_irq_ops {
@@ -244,6 +238,8 @@ struct pv_mmu_ops {
 #ifdef CONFIG_HIGHPTE
 	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
 #endif
+
+	struct pv_lazy_ops lazy_mode;
 };
 
 /* This contains all the paravirt structures: we get a convenient
@@ -252,7 +248,6 @@ struct pv_mmu_ops {
 struct paravirt_patch_template
 {
 	struct pv_init_ops pv_init_ops;
-	struct pv_misc_ops pv_misc_ops;
 	struct pv_time_ops pv_time_ops;
 	struct pv_cpu_ops pv_cpu_ops;
 	struct pv_irq_ops pv_irq_ops;
@@ -262,7 +257,6 @@ struct paravirt_patch_template
 
 extern struct pv_info pv_info;
 extern struct pv_init_ops pv_init_ops;
-extern struct pv_misc_ops pv_misc_ops;
 extern struct pv_time_ops pv_time_ops;
 extern struct pv_cpu_ops pv_cpu_ops;
 extern struct pv_irq_ops pv_irq_ops;
@@ -953,37 +947,57 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
 }
 #endif	/* CONFIG_X86_PAE */
 
+/* Lazy mode for batching updates / context switch */
+enum paravirt_lazy_mode {
+	PARAVIRT_LAZY_NONE,
+	PARAVIRT_LAZY_MMU,
+	PARAVIRT_LAZY_CPU,
+};
+
+enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
+void paravirt_enter_lazy_cpu(void);
+void paravirt_leave_lazy_cpu(void);
+void paravirt_enter_lazy_mmu(void);
+void paravirt_leave_lazy_mmu(void);
+void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
+
 #define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
 static inline void arch_enter_lazy_cpu_mode(void)
 {
-	PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_CPU);
+	PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
 }
 
 static inline void arch_leave_lazy_cpu_mode(void)
 {
-	PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_NONE);
+	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
 }
 
 static inline void arch_flush_lazy_cpu_mode(void)
 {
-	PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_FLUSH);
+	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
+		arch_leave_lazy_cpu_mode();
+		arch_enter_lazy_cpu_mode();
+	}
 }
 
 
 #define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-	PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_MMU);
+	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-	PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_NONE);
+	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
 }
 
 static inline void arch_flush_lazy_mmu_mode(void)
 {
-	PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_FLUSH);
+	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
+		arch_leave_lazy_mmu_mode();
+		arch_enter_lazy_mmu_mode();
+	}
 }
 
 void _paravirt_nop(void);
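The include/asm-x86/paravirt.h hunk above carries the semantic change: PARAVIRT_LAZY_FLUSH is gone, and flushing is now expressed as leaving and immediately re-entering the current lazy mode, since every backend's .leave hook already flushes pending updates. Below is a minimal single-threaded sketch of that state machine, assuming nothing beyond what the hunks show (the kernel keeps the mode per CPU and uses BUG_ON() rather than assert()).

#include <assert.h>
#include <stdio.h>

enum paravirt_lazy_mode { PARAVIRT_LAZY_NONE, PARAVIRT_LAZY_MMU, PARAVIRT_LAZY_CPU };

static enum paravirt_lazy_mode lazy_mode = PARAVIRT_LAZY_NONE;

static void enter_lazy(enum paravirt_lazy_mode mode)
{
	assert(lazy_mode == PARAVIRT_LAZY_NONE);   /* modes never nest */
	lazy_mode = mode;
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
	assert(lazy_mode == mode);
	printf("flush pending updates\n");         /* a backend's .leave flushes */
	lazy_mode = PARAVIRT_LAZY_NONE;
}

/* Flush is no longer a mode of its own: leave (which flushes), then re-enter. */
static void flush_lazy_mmu_mode(void)
{
	if (lazy_mode == PARAVIRT_LAZY_MMU) {
		leave_lazy(PARAVIRT_LAZY_MMU);
		enter_lazy(PARAVIRT_LAZY_MMU);
	}
}

int main(void)
{
	enter_lazy(PARAVIRT_LAZY_MMU);
	flush_lazy_mmu_mode();  /* flushes, then stays in lazy MMU mode */
	leave_lazy(PARAVIRT_LAZY_MMU);
	flush_lazy_mmu_mode();  /* not in lazy mode: nothing to do */
	return 0;
}

Run as written, the first flush_lazy_mmu_mode() prints one flush and stays lazy; the second is a no-op, matching the unlikely() fast path in arch_flush_lazy_mmu_mode().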