diff options
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kernel/paravirt_32.c | 58
-rw-r--r-- | arch/x86/kernel/vmi_32.c | 39
-rw-r--r-- | arch/x86/xen/enlighten.c | 40
-rw-r--r-- | arch/x86/xen/mmu.c | 2
-rw-r--r-- | arch/x86/xen/multicalls.h | 2
-rw-r--r-- | arch/x86/xen/xen-ops.h | 7
6 files changed, 90 insertions(+), 58 deletions(-)
diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c index fa412515af79..6a80d67c2121 100644 --- a/arch/x86/kernel/paravirt_32.c +++ b/arch/x86/kernel/paravirt_32.c | |||
@@ -164,7 +164,6 @@ static void *get_call_destination(u8 type) | |||
164 | { | 164 | { |
165 | struct paravirt_patch_template tmpl = { | 165 | struct paravirt_patch_template tmpl = { |
166 | .pv_init_ops = pv_init_ops, | 166 | .pv_init_ops = pv_init_ops, |
167 | .pv_misc_ops = pv_misc_ops, | ||
168 | .pv_time_ops = pv_time_ops, | 167 | .pv_time_ops = pv_time_ops, |
169 | .pv_cpu_ops = pv_cpu_ops, | 168 | .pv_cpu_ops = pv_cpu_ops, |
170 | .pv_irq_ops = pv_irq_ops, | 169 | .pv_irq_ops = pv_irq_ops, |
@@ -282,6 +281,49 @@ int paravirt_disable_iospace(void) | |||
282 | return ret; | 281 | return ret; |
283 | } | 282 | } |
284 | 283 | ||
284 | static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE; | ||
285 | |||
286 | static inline void enter_lazy(enum paravirt_lazy_mode mode) | ||
287 | { | ||
288 | BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE); | ||
289 | BUG_ON(preemptible()); | ||
290 | |||
291 | x86_write_percpu(paravirt_lazy_mode, mode); | ||
292 | } | ||
293 | |||
294 | void paravirt_leave_lazy(enum paravirt_lazy_mode mode) | ||
295 | { | ||
296 | BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode); | ||
297 | BUG_ON(preemptible()); | ||
298 | |||
299 | x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE); | ||
300 | } | ||
301 | |||
302 | void paravirt_enter_lazy_mmu(void) | ||
303 | { | ||
304 | enter_lazy(PARAVIRT_LAZY_MMU); | ||
305 | } | ||
306 | |||
307 | void paravirt_leave_lazy_mmu(void) | ||
308 | { | ||
309 | paravirt_leave_lazy(PARAVIRT_LAZY_MMU); | ||
310 | } | ||
311 | |||
312 | void paravirt_enter_lazy_cpu(void) | ||
313 | { | ||
314 | enter_lazy(PARAVIRT_LAZY_CPU); | ||
315 | } | ||
316 | |||
317 | void paravirt_leave_lazy_cpu(void) | ||
318 | { | ||
319 | paravirt_leave_lazy(PARAVIRT_LAZY_CPU); | ||
320 | } | ||
321 | |||
322 | enum paravirt_lazy_mode paravirt_get_lazy_mode(void) | ||
323 | { | ||
324 | return x86_read_percpu(paravirt_lazy_mode); | ||
325 | } | ||
326 | |||
285 | struct pv_info pv_info = { | 327 | struct pv_info pv_info = { |
286 | .name = "bare hardware", | 328 | .name = "bare hardware", |
287 | .paravirt_enabled = 0, | 329 | .paravirt_enabled = 0, |
@@ -347,6 +389,11 @@ struct pv_cpu_ops pv_cpu_ops = { | |||
347 | 389 | ||
348 | .set_iopl_mask = native_set_iopl_mask, | 390 | .set_iopl_mask = native_set_iopl_mask, |
349 | .io_delay = native_io_delay, | 391 | .io_delay = native_io_delay, |
392 | |||
393 | .lazy_mode = { | ||
394 | .enter = paravirt_nop, | ||
395 | .leave = paravirt_nop, | ||
396 | }, | ||
350 | }; | 397 | }; |
351 | 398 | ||
352 | struct pv_apic_ops pv_apic_ops = { | 399 | struct pv_apic_ops pv_apic_ops = { |
@@ -360,10 +407,6 @@ struct pv_apic_ops pv_apic_ops = { | |||
360 | #endif | 407 | #endif |
361 | }; | 408 | }; |
362 | 409 | ||
363 | struct pv_misc_ops pv_misc_ops = { | ||
364 | .set_lazy_mode = paravirt_nop, | ||
365 | }; | ||
366 | |||
367 | struct pv_mmu_ops pv_mmu_ops = { | 410 | struct pv_mmu_ops pv_mmu_ops = { |
368 | .pagetable_setup_start = native_pagetable_setup_start, | 411 | .pagetable_setup_start = native_pagetable_setup_start, |
369 | .pagetable_setup_done = native_pagetable_setup_done, | 412 | .pagetable_setup_done = native_pagetable_setup_done, |
@@ -414,6 +457,11 @@ struct pv_mmu_ops pv_mmu_ops = { | |||
414 | .dup_mmap = paravirt_nop, | 457 | .dup_mmap = paravirt_nop, |
415 | .exit_mmap = paravirt_nop, | 458 | .exit_mmap = paravirt_nop, |
416 | .activate_mm = paravirt_nop, | 459 | .activate_mm = paravirt_nop, |
460 | |||
461 | .lazy_mode = { | ||
462 | .enter = paravirt_nop, | ||
463 | .leave = paravirt_nop, | ||
464 | }, | ||
417 | }; | 465 | }; |
418 | 466 | ||
419 | EXPORT_SYMBOL_GPL(pv_time_ops); | 467 | EXPORT_SYMBOL_GPL(pv_time_ops); |
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 67cea5c2e3e0..f02bad68abaa 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c | |||
@@ -552,24 +552,22 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip, | |||
552 | } | 552 | } |
553 | #endif | 553 | #endif |
554 | 554 | ||
555 | static void vmi_set_lazy_mode(enum paravirt_lazy_mode mode) | 555 | static void vmi_enter_lazy_cpu(void) |
556 | { | 556 | { |
557 | static DEFINE_PER_CPU(enum paravirt_lazy_mode, lazy_mode); | 557 | paravirt_enter_lazy_cpu(); |
558 | 558 | vmi_ops.set_lazy_mode(2); | |
559 | if (!vmi_ops.set_lazy_mode) | 559 | } |
560 | return; | ||
561 | 560 | ||
562 | /* Modes should never nest or overlap */ | 561 | static void vmi_enter_lazy_mmu(void) |
563 | BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE || | 562 | { |
564 | mode == PARAVIRT_LAZY_FLUSH)); | 563 | paravirt_enter_lazy_mmu(); |
564 | vmi_ops.set_lazy_mode(1); | ||
565 | } | ||
565 | 566 | ||
566 | if (mode == PARAVIRT_LAZY_FLUSH) { | 567 | static void vmi_leave_lazy(void) |
567 | vmi_ops.set_lazy_mode(0); | 568 | { |
568 | vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode)); | 569 | paravirt_leave_lazy(paravirt_get_lazy_mode()); |
569 | } else { | 570 | vmi_ops.set_lazy_mode(0); |
570 | vmi_ops.set_lazy_mode(mode); | ||
571 | __get_cpu_var(lazy_mode) = mode; | ||
572 | } | ||
573 | } | 571 | } |
574 | 572 | ||
575 | static inline int __init check_vmi_rom(struct vrom_header *rom) | 573 | static inline int __init check_vmi_rom(struct vrom_header *rom) |
@@ -798,7 +796,16 @@ static inline int __init activate_vmi(void) | |||
798 | para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack); | 796 | para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack); |
799 | para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask); | 797 | para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask); |
800 | para_fill(pv_cpu_ops.io_delay, IODelay); | 798 | para_fill(pv_cpu_ops.io_delay, IODelay); |
801 | para_wrap(pv_misc_ops.set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode); | 799 | |
800 | para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu, | ||
801 | set_lazy_mode, SetLazyMode); | ||
802 | para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy, | ||
803 | set_lazy_mode, SetLazyMode); | ||
804 | |||
805 | para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu, | ||
806 | set_lazy_mode, SetLazyMode); | ||
807 | para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy, | ||
808 | set_lazy_mode, SetLazyMode); | ||
802 | 809 | ||
803 | /* user and kernel flush are just handled with different flags to FlushTLB */ | 810 | /* user and kernel flush are just handled with different flags to FlushTLB */ |
804 | para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB); | 811 | para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB); |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 3d3bf05dec7f..7171a0736071 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -52,8 +52,6 @@ | |||
52 | 52 | ||
53 | EXPORT_SYMBOL_GPL(hypercall_page); | 53 | EXPORT_SYMBOL_GPL(hypercall_page); |
54 | 54 | ||
55 | DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode); | ||
56 | |||
57 | DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); | 55 | DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); |
58 | DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); | 56 | DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); |
59 | DEFINE_PER_CPU(unsigned long, xen_cr3); | 57 | DEFINE_PER_CPU(unsigned long, xen_cr3); |
@@ -249,29 +247,10 @@ static void xen_halt(void) | |||
249 | xen_safe_halt(); | 247 | xen_safe_halt(); |
250 | } | 248 | } |
251 | 249 | ||
252 | static void xen_set_lazy_mode(enum paravirt_lazy_mode mode) | 250 | static void xen_leave_lazy(void) |
253 | { | 251 | { |
254 | BUG_ON(preemptible()); | 252 | paravirt_leave_lazy(paravirt_get_lazy_mode()); |
255 | |||
256 | switch (mode) { | ||
257 | case PARAVIRT_LAZY_NONE: | ||
258 | BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE); | ||
259 | break; | ||
260 | |||
261 | case PARAVIRT_LAZY_MMU: | ||
262 | case PARAVIRT_LAZY_CPU: | ||
263 | BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE); | ||
264 | break; | ||
265 | |||
266 | case PARAVIRT_LAZY_FLUSH: | ||
267 | /* flush if necessary, but don't change state */ | ||
268 | if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE) | ||
269 | xen_mc_flush(); | ||
270 | return; | ||
271 | } | ||
272 | |||
273 | xen_mc_flush(); | 253 | xen_mc_flush(); |
274 | x86_write_percpu(xen_lazy_mode, mode); | ||
275 | } | 254 | } |
276 | 255 | ||
277 | static unsigned long xen_store_tr(void) | 256 | static unsigned long xen_store_tr(void) |
@@ -358,7 +337,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu) | |||
358 | * loaded properly. This will go away as soon as Xen has been | 337 | * loaded properly. This will go away as soon as Xen has been |
359 | * modified to not save/restore %gs for normal hypercalls. | 338 | * modified to not save/restore %gs for normal hypercalls. |
360 | */ | 339 | */ |
361 | if (xen_get_lazy_mode() == PARAVIRT_LAZY_CPU) | 340 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) |
362 | loadsegment(gs, 0); | 341 | loadsegment(gs, 0); |
363 | } | 342 | } |
364 | 343 | ||
@@ -962,6 +941,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = { | |||
962 | 941 | ||
963 | .set_iopl_mask = xen_set_iopl_mask, | 942 | .set_iopl_mask = xen_set_iopl_mask, |
964 | .io_delay = xen_io_delay, | 943 | .io_delay = xen_io_delay, |
944 | |||
945 | .lazy_mode = { | ||
946 | .enter = paravirt_enter_lazy_cpu, | ||
947 | .leave = xen_leave_lazy, | ||
948 | }, | ||
965 | }; | 949 | }; |
966 | 950 | ||
967 | static const struct pv_irq_ops xen_irq_ops __initdata = { | 951 | static const struct pv_irq_ops xen_irq_ops __initdata = { |
@@ -1037,10 +1021,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { | |||
1037 | .activate_mm = xen_activate_mm, | 1021 | .activate_mm = xen_activate_mm, |
1038 | .dup_mmap = xen_dup_mmap, | 1022 | .dup_mmap = xen_dup_mmap, |
1039 | .exit_mmap = xen_exit_mmap, | 1023 | .exit_mmap = xen_exit_mmap, |
1040 | }; | ||
1041 | 1024 | ||
1042 | static const struct pv_misc_ops xen_misc_ops __initdata = { | 1025 | .lazy_mode = { |
1043 | .set_lazy_mode = xen_set_lazy_mode, | 1026 | .enter = paravirt_enter_lazy_mmu, |
1027 | .leave = xen_leave_lazy, | ||
1028 | }, | ||
1044 | }; | 1029 | }; |
1045 | 1030 | ||
1046 | #ifdef CONFIG_SMP | 1031 | #ifdef CONFIG_SMP |
@@ -1114,7 +1099,6 @@ asmlinkage void __init xen_start_kernel(void) | |||
1114 | pv_irq_ops = xen_irq_ops; | 1099 | pv_irq_ops = xen_irq_ops; |
1115 | pv_apic_ops = xen_apic_ops; | 1100 | pv_apic_ops = xen_apic_ops; |
1116 | pv_mmu_ops = xen_mmu_ops; | 1101 | pv_mmu_ops = xen_mmu_ops; |
1117 | pv_misc_ops = xen_misc_ops; | ||
1118 | 1102 | ||
1119 | machine_ops = xen_machine_ops; | 1103 | machine_ops = xen_machine_ops; |
1120 | 1104 | ||
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 874db0cd1d2a..2061bdd3e7d8 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -155,7 +155,7 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
155 | pte_t *ptep, pte_t pteval) | 155 | pte_t *ptep, pte_t pteval) |
156 | { | 156 | { |
157 | if (mm == current->mm || mm == &init_mm) { | 157 | if (mm == current->mm || mm == &init_mm) { |
158 | if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) { | 158 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { |
159 | struct multicall_space mcs; | 159 | struct multicall_space mcs; |
160 | mcs = xen_mc_entry(0); | 160 | mcs = xen_mc_entry(0); |
161 | 161 | ||
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h index e6f7530b156c..5d96a5fa210c 100644 --- a/arch/x86/xen/multicalls.h +++ b/arch/x86/xen/multicalls.h | |||
@@ -35,7 +35,7 @@ void xen_mc_flush(void); | |||
35 | /* Issue a multicall if we're not in a lazy mode */ | 35 | /* Issue a multicall if we're not in a lazy mode */ |
36 | static inline void xen_mc_issue(unsigned mode) | 36 | static inline void xen_mc_issue(unsigned mode) |
37 | { | 37 | { |
38 | if ((xen_get_lazy_mode() & mode) == 0) | 38 | if ((paravirt_get_lazy_mode() & mode) == 0) |
39 | xen_mc_flush(); | 39 | xen_mc_flush(); |
40 | 40 | ||
41 | /* restore flags saved in xen_mc_batch */ | 41 | /* restore flags saved in xen_mc_batch */ |
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index b9aaea45f07f..b5697bae52d0 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -29,13 +29,6 @@ unsigned long long xen_sched_clock(void); | |||
29 | 29 | ||
30 | void xen_mark_init_mm_pinned(void); | 30 | void xen_mark_init_mm_pinned(void); |
31 | 31 | ||
32 | DECLARE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode); | ||
33 | |||
34 | static inline unsigned xen_get_lazy_mode(void) | ||
35 | { | ||
36 | return x86_read_percpu(xen_lazy_mode); | ||
37 | } | ||
38 | |||
39 | void __init xen_fill_possible_map(void); | 32 | void __init xen_fill_possible_map(void); |
40 | 33 | ||
41 | void __init xen_setup_vcpu_info_placement(void); | 34 | void __init xen_setup_vcpu_info_placement(void); |