Diffstat (limited to 'include')
 -rw-r--r--  include/asm-generic/pgtable.h | 13 +++++++++++++
 -rw-r--r--  include/asm-i386/paravirt.h   | 15 +++++++++++++++
 2 files changed, 28 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 9d774d07d95b..00c23433b39f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -183,6 +183,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #endif
 
 /*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests. By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entry and exits should always
+ * be paired. This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode()	do {} while (0)
+#define arch_leave_lazy_cpu_mode()	do {} while (0)
+#endif
+
+/*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier. Although no
  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
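[Illustration, not part of the patch: the pairing convention described in the new comment, shown as a stand-alone user-space C sketch. It reuses the no-op fallback definitions from the hunk above and brackets a stand-in for the context-switch page table reload with exactly one enter/leave pair; context_switch_stub() and reload_page_tables_stub() are made-up names.]

#include <stdio.h>

/* Architectures without paravirt support get the no-op fallbacks, exactly
 * as in the asm-generic header above. */
#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
#define arch_enter_lazy_cpu_mode()	do {} while (0)
#define arch_leave_lazy_cpu_mode()	do {} while (0)
#endif

/* Stand-in for the per-task page table reload done at context switch. */
static void reload_page_tables_stub(void)
{
	printf("reloading page tables\n");
}

static void context_switch_stub(void)
{
	/* One enter, the batched work, one leave: entry and exit are always
	 * paired, never nested, and only one lazy mode is active at a time. */
	arch_enter_lazy_cpu_mode();
	reload_page_tables_stub();
	arch_leave_lazy_cpu_mode();
}

int main(void)
{
	context_switch_stub();
	return 0;
}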
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 53da276a2ec2..38e5164bd0e7 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -146,6 +146,8 @@ struct paravirt_ops
 	void (fastcall *pmd_clear)(pmd_t *pmdp);
 #endif
 
+	void (fastcall *set_lazy_mode)(int mode);
+
 	/* These two are jmp to, not actually called. */
 	void (fastcall *irq_enable_sysexit)(void);
 	void (fastcall *iret)(void);
@@ -386,6 +388,19 @@ static inline void pmd_clear(pmd_t *pmdp)
 }
 #endif
 
+/* Lazy mode for batching updates / context switch */
+#define PARAVIRT_LAZY_NONE 0
+#define PARAVIRT_LAZY_MMU 1
+#define PARAVIRT_LAZY_CPU 2
+
+#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_CPU)
+#define arch_leave_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_MMU)
+#define arch_leave_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
 /* These all sit in the .parainstructions section to tell us what to patch. */
 struct paravirt_patch {
 	u8 *instr;	/* original instructions */
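[Illustration, not part of the patch: a stand-alone sketch of what a backend behind paravirt_ops.set_lazy_mode might do with these wrappers, namely queue updates while a lazy mode is active and flush them when the mode drops back to PARAVIRT_LAZY_NONE. demo_set_lazy_mode(), flush_queue(), queue_len and set_pte_stub() are hypothetical names, and fastcall is dropped so the example builds in user space.]

#include <stdio.h>

#define PARAVIRT_LAZY_NONE 0
#define PARAVIRT_LAZY_MMU 1
#define PARAVIRT_LAZY_CPU 2

struct paravirt_ops {
	/* ...other hooks elided... */
	void (*set_lazy_mode)(int mode);
};
extern struct paravirt_ops paravirt_ops;

/* Same shape as the wrappers added to asm-i386/paravirt.h above. */
#define arch_enter_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_MMU)
#define arch_leave_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)

static int lazy_mode = PARAVIRT_LAZY_NONE;
static int queue_len;

/* Hypothetical backend: batch while lazy, flush when leaving lazy mode. */
static void flush_queue(void)
{
	printf("flushing %d queued updates to the hypervisor\n", queue_len);
	queue_len = 0;
}

static void demo_set_lazy_mode(int mode)
{
	if (mode == PARAVIRT_LAZY_NONE && lazy_mode != PARAVIRT_LAZY_NONE)
		flush_queue();
	lazy_mode = mode;
}

struct paravirt_ops paravirt_ops = {
	.set_lazy_mode = demo_set_lazy_mode,
};

/* Stand-in for a page table update that honours MMU lazy mode. */
static void set_pte_stub(void)
{
	if (lazy_mode == PARAVIRT_LAZY_MMU)
		queue_len++;
	else
		printf("immediate update\n");
}

int main(void)
{
	arch_enter_lazy_mmu_mode();
	set_pte_stub();
	set_pte_stub();
	arch_leave_lazy_mmu_mode();	/* flushes the two queued updates */
	return 0;
}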