Diffstat (limited to 'include/asm-generic/pgtable.h')
-rw-r--r--	include/asm-generic/pgtable.h	37
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 349260cd86ed..9d774d07d95b 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -15,19 +15,11 @@
  * Note: the old pte is known to not be writable, so we don't need to
  * worry about dirty bits etc getting lost.
  */
-#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
 #define ptep_establish(__vma, __address, __ptep, __entry)		\
 do {									\
 	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
 	flush_tlb_page(__vma, __address);				\
 } while (0)
-#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
-#define ptep_establish(__vma, __address, __ptep, __entry)		\
-do {									\
-	set_pte_atomic(__ptep, __entry);				\
-	flush_tlb_page(__vma, __address);				\
-} while (0)
-#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
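With the __HAVE_ARCH_SET_PTE_ATOMIC branch gone, the generic ptep_establish() always installs the new PTE with set_pte_at() and then flushes the stale TLB entry. A minimal sketch of what the surviving macro amounts to, written as a function for readability (the helper name is illustrative, not part of the header):

/* Sketch of the generic ptep_establish() fallback: write the new PTE,
 * then invalidate the old translation for that address. */
static inline void sketch_ptep_establish(struct vm_area_struct *vma,
					 unsigned long address,
					 pte_t *ptep, pte_t entry)
{
	set_pte_at(vma->vm_mm, address, ptep, entry);
	flush_tlb_page(vma, address);
}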
@@ -112,8 +104,13 @@ do {									\
 })
 #endif
 
-#ifndef __HAVE_ARCH_PTE_CLEAR_FULL
-#define pte_clear_full(__mm, __address, __ptep, __full)			\
+/*
+ * Some architectures may be able to avoid expensive synchronization
+ * primitives when modifications are made to PTE's which are already
+ * not present, or in the process of an address space destruction.
+ */
+#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
+#define pte_clear_not_present_full(__mm, __address, __ptep, __full)	\
 do {									\
 	pte_clear((__mm), (__address), (__ptep));			\
 } while (0)
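The new comment explains when pte_clear_not_present_full() applies: the PTE is already known not to be present (for example a swap entry being torn down), or the whole address space is being destroyed, so an architecture may skip expensive synchronization. A caller-side sketch under that assumption (function and variable names here are illustrative):

/* Illustrative teardown path: entries known not to be present can use
 * the cheaper clear; 'full' is non-zero when the whole mm is going away. */
static void sketch_zap_not_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int full)
{
	pte_t entry = *ptep;

	if (!pte_present(entry))
		pte_clear_not_present_full(mm, addr, ptep, full);
}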
@@ -166,6 +163,26 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #endif
 
 /*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()	do {} while (0)
+#define arch_leave_lazy_mmu_mode()	do {} while (0)
+#endif
+
+/*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier. Although no
  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
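The lazy MMU comment added in this hunk spells out the contract for the new hooks: enter and leave only while holding the page table lock, batch the PTE writes in between, and do not read a PTE back through a raw pointer once it has been modified inside the window. A usage sketch along the lines of a protection-change loop, assuming the caller already holds the page table lock for the range (names are illustrative):

/* Batch a run of PTE updates between the lazy MMU hooks. The page
 * table lock is held across the window, and no PTE is re-read through
 * a raw pointer after it has been modified inside it. */
static void sketch_change_range(struct mm_struct *mm, pte_t *ptep,
				unsigned long addr, unsigned long end,
				pgprot_t newprot)
{
	arch_enter_lazy_mmu_mode();	/* start batching (a no-op generically) */
	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		pte_t pte = *ptep;	/* read before any modification */

		if (pte_present(pte))
			set_pte_at(mm, addr, ptep, pte_modify(pte, newprot));
	}
	arch_leave_lazy_mmu_mode();	/* apply the batched updates */
}

The generic definitions are empty, so this costs nothing on architectures that do not override the hooks; a hypervisor-aware architecture can queue the set_pte_at() calls and apply them as one batch when arch_leave_lazy_mmu_mode() is called.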