diff options
author | Jeremy Fitzhardinge <jeremy@xensource.com> | 2007-10-16 14:51:29 -0400 |
---|---|---|
committer | Jeremy Fitzhardinge <jeremy@goop.org> | 2007-10-16 14:51:29 -0400 |
commit | 8965c1c0950d459d99b8b81dfc1ab02e3d2cfb08 (patch) | |
tree | 50ad53453562c7dc50afa866d52345bd6e820bef /include/asm-x86/paravirt.h | |
parent | 93b1eab3d29e7ea32ee583de3362da84db06ded8 (diff) |
paravirt: clean up lazy mode handling
Currently, the set_lazy_mode pv_op is overloaded with 5 functions:
1. enter lazy cpu mode
2. leave lazy cpu mode
3. enter lazy mmu mode
4. leave lazy mmu mode
5. flush pending batched operations
This complicates each paravirt backend, since it needs to deal with
all the possible state transitions, handling flushing, etc. In
particular, flushing is quite distinct from the other 4 functions, and
seems to just cause complication.
This patch removes the set_lazy_mode operation, and adds "enter" and
"leave" lazy mode operations on mmu_ops and cpu_ops. All the logic
associated with entering and leaving lazy states is now in common code
(basically BUG_ONs to make sure that no mode is current when entering
a lazy mode, and make sure that the mode is current when leaving).
Also, flush is handled in a common way, by simply leaving and
re-entering the lazy mode.
The result is that the Xen, lguest and VMI lazy mode implementations
are much simpler.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Zach Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: "Glauber de Oliveira Costa" <glommer@gmail.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Diffstat (limited to 'include/asm-x86/paravirt.h')
-rw-r--r-- | include/asm-x86/paravirt.h | 52 |
1 file changed, 33 insertions, 19 deletions
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index 19726e12051e..f59d370c5df4 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h | |||
@@ -25,15 +25,6 @@ struct tss_struct; | |||
25 | struct mm_struct; | 25 | struct mm_struct; |
26 | struct desc_struct; | 26 | struct desc_struct; |
27 | 27 | ||
28 | /* Lazy mode for batching updates / context switch */ | ||
29 | enum paravirt_lazy_mode { | ||
30 | PARAVIRT_LAZY_NONE = 0, | ||
31 | PARAVIRT_LAZY_MMU = 1, | ||
32 | PARAVIRT_LAZY_CPU = 2, | ||
33 | PARAVIRT_LAZY_FLUSH = 3, | ||
34 | }; | ||
35 | |||
36 | |||
37 | /* general info */ | 28 | /* general info */ |
38 | struct pv_info { | 29 | struct pv_info { |
39 | unsigned int kernel_rpl; | 30 | unsigned int kernel_rpl; |
@@ -64,9 +55,10 @@ struct pv_init_ops { | |||
64 | }; | 55 | }; |
65 | 56 | ||
66 | 57 | ||
67 | struct pv_misc_ops { | 58 | struct pv_lazy_ops { |
68 | /* Set deferred update mode, used for batching operations. */ | 59 | /* Set deferred update mode, used for batching operations. */ |
69 | void (*set_lazy_mode)(enum paravirt_lazy_mode mode); | 60 | void (*enter)(void); |
61 | void (*leave)(void); | ||
70 | }; | 62 | }; |
71 | 63 | ||
72 | struct pv_time_ops { | 64 | struct pv_time_ops { |
@@ -131,6 +123,8 @@ struct pv_cpu_ops { | |||
131 | /* These two are jmp to, not actually called. */ | 123 | /* These two are jmp to, not actually called. */ |
132 | void (*irq_enable_sysexit)(void); | 124 | void (*irq_enable_sysexit)(void); |
133 | void (*iret)(void); | 125 | void (*iret)(void); |
126 | |||
127 | struct pv_lazy_ops lazy_mode; | ||
134 | }; | 128 | }; |
135 | 129 | ||
136 | struct pv_irq_ops { | 130 | struct pv_irq_ops { |
@@ -244,6 +238,8 @@ struct pv_mmu_ops { | |||
244 | #ifdef CONFIG_HIGHPTE | 238 | #ifdef CONFIG_HIGHPTE |
245 | void *(*kmap_atomic_pte)(struct page *page, enum km_type type); | 239 | void *(*kmap_atomic_pte)(struct page *page, enum km_type type); |
246 | #endif | 240 | #endif |
241 | |||
242 | struct pv_lazy_ops lazy_mode; | ||
247 | }; | 243 | }; |
248 | 244 | ||
249 | /* This contains all the paravirt structures: we get a convenient | 245 | /* This contains all the paravirt structures: we get a convenient |
@@ -252,7 +248,6 @@ struct pv_mmu_ops { | |||
252 | struct paravirt_patch_template | 248 | struct paravirt_patch_template |
253 | { | 249 | { |
254 | struct pv_init_ops pv_init_ops; | 250 | struct pv_init_ops pv_init_ops; |
255 | struct pv_misc_ops pv_misc_ops; | ||
256 | struct pv_time_ops pv_time_ops; | 251 | struct pv_time_ops pv_time_ops; |
257 | struct pv_cpu_ops pv_cpu_ops; | 252 | struct pv_cpu_ops pv_cpu_ops; |
258 | struct pv_irq_ops pv_irq_ops; | 253 | struct pv_irq_ops pv_irq_ops; |
@@ -262,7 +257,6 @@ struct paravirt_patch_template | |||
262 | 257 | ||
263 | extern struct pv_info pv_info; | 258 | extern struct pv_info pv_info; |
264 | extern struct pv_init_ops pv_init_ops; | 259 | extern struct pv_init_ops pv_init_ops; |
265 | extern struct pv_misc_ops pv_misc_ops; | ||
266 | extern struct pv_time_ops pv_time_ops; | 260 | extern struct pv_time_ops pv_time_ops; |
267 | extern struct pv_cpu_ops pv_cpu_ops; | 261 | extern struct pv_cpu_ops pv_cpu_ops; |
268 | extern struct pv_irq_ops pv_irq_ops; | 262 | extern struct pv_irq_ops pv_irq_ops; |
@@ -953,37 +947,57 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) | |||
953 | } | 947 | } |
954 | #endif /* CONFIG_X86_PAE */ | 948 | #endif /* CONFIG_X86_PAE */ |
955 | 949 | ||
950 | /* Lazy mode for batching updates / context switch */ | ||
951 | enum paravirt_lazy_mode { | ||
952 | PARAVIRT_LAZY_NONE, | ||
953 | PARAVIRT_LAZY_MMU, | ||
954 | PARAVIRT_LAZY_CPU, | ||
955 | }; | ||
956 | |||
957 | enum paravirt_lazy_mode paravirt_get_lazy_mode(void); | ||
958 | void paravirt_enter_lazy_cpu(void); | ||
959 | void paravirt_leave_lazy_cpu(void); | ||
960 | void paravirt_enter_lazy_mmu(void); | ||
961 | void paravirt_leave_lazy_mmu(void); | ||
962 | void paravirt_leave_lazy(enum paravirt_lazy_mode mode); | ||
963 | |||
956 | #define __HAVE_ARCH_ENTER_LAZY_CPU_MODE | 964 | #define __HAVE_ARCH_ENTER_LAZY_CPU_MODE |
957 | static inline void arch_enter_lazy_cpu_mode(void) | 965 | static inline void arch_enter_lazy_cpu_mode(void) |
958 | { | 966 | { |
959 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_CPU); | 967 | PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter); |
960 | } | 968 | } |
961 | 969 | ||
962 | static inline void arch_leave_lazy_cpu_mode(void) | 970 | static inline void arch_leave_lazy_cpu_mode(void) |
963 | { | 971 | { |
964 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_NONE); | 972 | PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave); |
965 | } | 973 | } |
966 | 974 | ||
967 | static inline void arch_flush_lazy_cpu_mode(void) | 975 | static inline void arch_flush_lazy_cpu_mode(void) |
968 | { | 976 | { |
969 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_FLUSH); | 977 | if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) { |
978 | arch_leave_lazy_cpu_mode(); | ||
979 | arch_enter_lazy_cpu_mode(); | ||
980 | } | ||
970 | } | 981 | } |
971 | 982 | ||
972 | 983 | ||
973 | #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE | 984 | #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE |
974 | static inline void arch_enter_lazy_mmu_mode(void) | 985 | static inline void arch_enter_lazy_mmu_mode(void) |
975 | { | 986 | { |
976 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_MMU); | 987 | PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter); |
977 | } | 988 | } |
978 | 989 | ||
979 | static inline void arch_leave_lazy_mmu_mode(void) | 990 | static inline void arch_leave_lazy_mmu_mode(void) |
980 | { | 991 | { |
981 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_NONE); | 992 | PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); |
982 | } | 993 | } |
983 | 994 | ||
984 | static inline void arch_flush_lazy_mmu_mode(void) | 995 | static inline void arch_flush_lazy_mmu_mode(void) |
985 | { | 996 | { |
986 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_FLUSH); | 997 | if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) { |
998 | arch_leave_lazy_mmu_mode(); | ||
999 | arch_enter_lazy_mmu_mode(); | ||
1000 | } | ||
987 | } | 1001 | } |
988 | 1002 | ||
989 | void _paravirt_nop(void); | 1003 | void _paravirt_nop(void); |