Diffstat (limited to 'include/asm-generic'):
 include/asm-generic/pgtable.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 78740716c9e7..56627fa453a6 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -171,6 +171,26 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address
 #endif
 
 /*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()	do {} while (0)
+#define arch_leave_lazy_mmu_mode()	do {} while (0)
+#endif
+
+/*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier. Although no
  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
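
For context, a minimal sketch of how a page-table walker might use these hooks, assuming the usual PTE helpers: all updates between arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() are issued under the PTE lock, and the loop never reads back through the raw PTE pointer it has just modified. The function clear_range_example() is a made-up name for illustration; only the lazy MMU hooks added by this patch and the standard pte_offset_map_lock()/ptep_get_and_clear()/pte_unmap_unlock() helpers are taken from the kernel.

/*
 * Illustrative only -- not part of this patch.  The pattern mirrors
 * existing walkers such as zap_pte_range() in mm/memory.c.
 */
static void clear_range_example(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	arch_enter_lazy_mmu_mode();
	do {
		/*
		 * The clear may be queued by a hypervisor back-end and only
		 * flushed when lazy MMU mode is left; do not read *pte back
		 * here, as the write may not have reached the page table yet.
		 */
		ptep_get_and_clear(mm, addr, pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();

	pte_unmap_unlock(pte - 1, ptl);
}

Because the default definitions are empty statements, architectures that do not select __HAVE_ARCH_ENTER_LAZY_MMU_MODE compile this caller to the plain, unbatched loop with no extra cost.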