author | Jeremy Fitzhardinge <jeremy@goop.org> | 2008-03-17 19:36:55 -0400
committer | Ingo Molnar <mingo@elte.hu> | 2008-04-24 17:57:30 -0400
commit | 4f76cd382213b29dd3658e3e1ea47c0c2be06f3c (patch)
tree | a4822f341a6896ace039760d1df963b5f159c665 /arch/x86/mm/pgtable_32.c
parent | 79bf6d66abb5a20813a19dd365dfc49104f0bb88 (diff)
x86: add common mm/pgtable.c
Add a common arch/x86/mm/pgtable.c file for common pagetable functions.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/pgtable_32.c')
-rw-r--r-- | arch/x86/mm/pgtable_32.c | 187
1 file changed, 0 insertions, 187 deletions
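The functions removed below (pte_alloc_one_kernel(), pte_alloc_one(), pgd_alloc(), pgd_free() and their helpers) are the arch-side page-table allocation hooks; per the commit message they move into the new common arch/x86/mm/pgtable.c. As a reminder of how the generic mm code consumes these hooks, here is a minimal sketch paraphrased from kernel/fork.c of the same era; it is illustrative only, with error handling simplified, and is not part of this commit.

/*
 * Illustrative sketch only -- not part of this commit.  Approximates how
 * kernel/fork.c (circa 2.6.25) calls the arch hooks being consolidated
 * into arch/x86/mm/pgtable.c.
 */
static int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);	/* arch hook: allocate and construct the pgd */
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);		/* arch hook: mop up pmds and free the page */
}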
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 6fb9e7c6893f..b46893e45d02 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -173,193 +173,6 @@ void reserve_top_address(unsigned long reserve)
 	__VMALLOC_RESERVE += reserve;
 }
 
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-}
-
-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	struct page *pte;
-
-#ifdef CONFIG_HIGHPTE
-	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
-#else
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-#endif
-	if (pte)
-		pgtable_page_ctor(pte);
-	return pte;
-}
-
-/*
- * List of all pgd's needed for non-PAE so it can invalidate entries
- * in both cached and uncached pgd's; not needed for PAE since the
- * kernel pmd is shared. If PAE were not to share the pmd a similar
- * tactic would be needed. This is essentially codepath-based locking
- * against pageattr.c; it is the unique case in which a valid change
- * of kernel pagetables can't be lazily synchronized by vmalloc faults.
- * vmalloc faults work because attached pagetables are never freed.
- * -- wli
- */
-static inline void pgd_list_add(pgd_t *pgd)
-{
-	struct page *page = virt_to_page(pgd);
-
-	list_add(&page->lru, &pgd_list);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
-	struct page *page = virt_to_page(pgd);
-
-	list_del(&page->lru);
-}
-
-#define UNSHARED_PTRS_PER_PGD				\
-	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
-
-static void pgd_ctor(void *p)
-{
-	pgd_t *pgd = p;
-	unsigned long flags;
-
-	/* Clear usermode parts of PGD */
-	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-
-	spin_lock_irqsave(&pgd_lock, flags);
-
-	/* If the pgd points to a shared pagetable level (either the
-	   ptes in non-PAE, or shared PMD in PAE), then just copy the
-	   references from swapper_pg_dir. */
-	if (PAGETABLE_LEVELS == 2 ||
-	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
-		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
-				swapper_pg_dir + USER_PTRS_PER_PGD,
-				KERNEL_PGD_PTRS);
-		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-					__pa(swapper_pg_dir) >> PAGE_SHIFT,
-					USER_PTRS_PER_PGD,
-					KERNEL_PGD_PTRS);
-	}
-
-	/* list required to sync kernel mapping updates */
-	if (!SHARED_KERNEL_PMD)
-		pgd_list_add(pgd);
-
-	spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-static void pgd_dtor(void *pgd)
-{
-	unsigned long flags; /* can be called from interrupt context */
-
-	if (SHARED_KERNEL_PMD)
-		return;
-
-	spin_lock_irqsave(&pgd_lock, flags);
-	pgd_list_del(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-#ifdef CONFIG_X86_PAE
-/*
- * Mop up any pmd pages which may still be attached to the pgd.
- * Normally they will be freed by munmap/exit_mmap, but any pmd we
- * preallocate which never got a corresponding vma will need to be
- * freed manually.
- */
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
-{
-	int i;
-
-	for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
-		pgd_t pgd = pgdp[i];
-
-		if (pgd_val(pgd) != 0) {
-			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
-
-			pgdp[i] = native_make_pgd(0);
-
-			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
-			pmd_free(mm, pmd);
-		}
-	}
-}
-
-/*
- * In PAE mode, we need to do a cr3 reload (=tlb flush) when
- * updating the top-level pagetable entries to guarantee the
- * processor notices the update.  Since this is expensive, and
- * all 4 top-level entries are used almost immediately in a
- * new process's life, we just pre-populate them here.
- *
- * Also, if we're in a paravirt environment where the kernel pmd is
- * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
- * and initialize the kernel pmds here.
- */
-static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
-{
-	pud_t *pud;
-	unsigned long addr;
-	int i;
-
-	pud = pud_offset(pgd, 0);
-	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
-	     i++, pud++, addr += PUD_SIZE) {
-		pmd_t *pmd = pmd_alloc_one(mm, addr);
-
-		if (!pmd) {
-			pgd_mop_up_pmds(mm, pgd);
-			return 0;
-		}
-
-		if (i >= USER_PTRS_PER_PGD)
-			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
-			       sizeof(pmd_t) * PTRS_PER_PMD);
-
-		pud_populate(mm, pud, pmd);
-	}
-
-	return 1;
-}
-#else  /* !CONFIG_X86_PAE */
-/* No need to prepopulate any pagetable entries in non-PAE modes. */
-static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
-{
-	return 1;
-}
-
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
-{
-}
-#endif	/* CONFIG_X86_PAE */
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-
-	/* so that alloc_pd can use it */
-	mm->pgd = pgd;
-	if (pgd)
-		pgd_ctor(pgd);
-
-	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
-		pgd_dtor(pgd);
-		free_page((unsigned long)pgd);
-		pgd = NULL;
-	}
-
-	return pgd;
-}
-
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-	pgd_mop_up_pmds(mm, pgd);
-	pgd_dtor(pgd);
-	free_page((unsigned long)pgd);
-}
-
 void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
 	pgtable_page_dtor(pte);