author		Greg Ungerer <gerg@snapgear.com>	2007-08-10 16:01:20 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-08-11 18:47:42 -0400
commit		9535239f6bc99f68e0cfae44505ad402b53ed24c (patch)
tree		3805894887354ee777250a7593d4a61ec4b40d56 /include/asm-generic/pgtable.h
parent		73c59afc65cfa50c3362b9ce1ec151a79c41dd8e (diff)
changing include/asm-generic/pgtable.h for non-mmu
There are some parts of include/asm-generic/pgtable.h that are relevant to
the non-MMU architectures. To make it easier to include this header from
them, I would like to ifdef the MMU-only parts.

Without this, there are a handful of functions referenced in here that are
not defined on many non-MMU architectures. They could, of course, be defined
out on those architectures instead, as an alternative approach.
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
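
For context, a condensed sketch (illustrative only, not the actual file contents) of the shape this patch gives the header: the MMU-only helpers sit behind CONFIG_MMU, while the generic lazy-mode fallback macros remain visible to non-MMU builds that include asm-generic/pgtable.h.

#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

/* MMU-only fallbacks, e.g. the ptep_*() helpers and
 * pmd_none_or_clear_bad(), elided here. */

#endif /* CONFIG_MMU */

/* Fallbacks that make sense on both MMU and non-MMU builds. */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_GENERIC_PGTABLE_H */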
Diffstat (limited to 'include/asm-generic/pgtable.h')
-rw-r--r--	include/asm-generic/pgtable.h	73
1 file changed, 38 insertions, 35 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f605e8d0eed3..5f0d797d33fd 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -2,6 +2,7 @@
 #define _ASM_GENERIC_PGTABLE_H
 
 #ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
@@ -133,41 +134,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #endif
 
 /*
- * A facility to provide lazy MMU batching. This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued. Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window. Note that using this
- * interface requires that read hazards be removed from the code. A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads though
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode()	do {} while (0)
-#define arch_leave_lazy_mmu_mode()	do {} while (0)
-#define arch_flush_lazy_mmu_mode()	do {} while (0)
-#endif
-
-/*
- * A facility to provide batching of the reload of page tables with the
- * actual context switch code for paravirtualized guests. By convention,
- * only one of the lazy modes (CPU, MMU) should be active at any given
- * time, entry should never be nested, and entry and exits should always
- * be paired. This is for sanity of maintaining and reasoning about the
- * kernel code.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-#define arch_enter_lazy_cpu_mode()	do {} while (0)
-#define arch_leave_lazy_cpu_mode()	do {} while (0)
-#define arch_flush_lazy_cpu_mode()	do {} while (0)
-#endif
-
-/*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier. Although no
  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
@@ -233,6 +199,43 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 	}
 	return 0;
 }
+#endif /* CONFIG_MMU */
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads though
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()	do {} while (0)
+#define arch_leave_lazy_mmu_mode()	do {} while (0)
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+#endif
+
+/*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests. By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entry and exits should always
+ * be paired. This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode()	do {} while (0)
+#define arch_leave_lazy_cpu_mode()	do {} while (0)
+#define arch_flush_lazy_cpu_mode()	do {} while (0)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
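
The lazy-MMU comment carried over by this patch describes a calling convention rather than new behaviour. As a rough illustration (hypothetical function, not part of this patch, modeled on how generic mm code such as the mprotect path brackets batched PTE updates; pte_modify() and set_pte_at() are existing kernel helpers), the hooks are used like this, with the page table lock already held by the caller:

/* Hypothetical example: batch a run of PTE updates between entering and
 * leaving lazy MMU mode.  The caller holds the page table lock, and the
 * loop never re-reads a PTE it has already written, avoiding the "read
 * hazard" described in the comment above. */
static void example_change_range(struct mm_struct *mm, pte_t *ptep,
				 unsigned long addr, unsigned long end,
				 pgprot_t newprot)
{
	arch_enter_lazy_mmu_mode();
	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		pte_t pte = *ptep;
		pte = pte_modify(pte, newprot);
		set_pte_at(mm, addr, ptep, pte);
	}
	arch_leave_lazy_mmu_mode();	/* any batched updates are flushed here */
}

The lazy CPU mode hooks follow the same enter/leave pairing, but are driven from the context-switch path rather than from page-table updates.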