author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-03-17 19:36:55 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-24 17:57:30 -0400
commit		4f76cd382213b29dd3658e3e1ea47c0c2be06f3c (patch)
tree		a4822f341a6896ace039760d1df963b5f159c665 /arch/x86
parent		79bf6d66abb5a20813a19dd365dfc49104f0bb88 (diff)
x86: add common mm/pgtable.c
Add a common arch/x86/mm/pgtable.c file for common pagetable functions.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/mm/Makefile	|   2
-rw-r--r--	arch/x86/mm/pgtable.c	| 239
-rw-r--r--	arch/x86/mm/pgtable_32.c	| 187
3 files changed, 240 insertions, 188 deletions
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 20941d2954e2..b7b3e4c7cfc9 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,5 +1,5 @@
 obj-y	:=  init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-	    pat.o
+	    pat.o pgtable.o
 
 obj-$(CONFIG_X86_32)	+= pgtable_32.o
 
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
new file mode 100644
index 000000000000..d526b46ae188
--- /dev/null
+++ b/arch/x86/mm/pgtable.c
@@ -0,0 +1,239 @@
+#include <linux/mm.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+}
+
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	struct page *pte;
+
+#ifdef CONFIG_HIGHPTE
+	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
+#else
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+#endif
+	if (pte)
+		pgtable_page_ctor(pte);
+	return pte;
+}
+
+#ifdef CONFIG_X86_64
+static inline void pgd_list_add(pgd_t *pgd)
+{
+	struct page *page = virt_to_page(pgd);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pgd_lock, flags);
+	list_add(&page->lru, &pgd_list);
+	spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+	struct page *page = virt_to_page(pgd);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pgd_lock, flags);
+	list_del(&page->lru);
+	spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	unsigned boundary;
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	if (!pgd)
+		return NULL;
+	pgd_list_add(pgd);
+	/*
+	 * Copy kernel pointers in from init.
+	 * Could keep a freelist or slab cache of those because the kernel
+	 * part never changes.
+	 */
+	boundary = pgd_index(__PAGE_OFFSET);
+	memset(pgd, 0, boundary * sizeof(pgd_t));
+	memcpy(pgd + boundary,
+	       init_level4_pgt + boundary,
+	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
+	return pgd;
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+	pgd_list_del(pgd);
+	free_page((unsigned long)pgd);
+}
+#else
+/*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+ * in both cached and uncached pgd's; not needed for PAE since the
+ * kernel pmd is shared. If PAE were not to share the pmd a similar
+ * tactic would be needed. This is essentially codepath-based locking
+ * against pageattr.c; it is the unique case in which a valid change
+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed.
+ * -- wli
+ */
+static inline void pgd_list_add(pgd_t *pgd)
+{
+	struct page *page = virt_to_page(pgd);
+
+	list_add(&page->lru, &pgd_list);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+	struct page *page = virt_to_page(pgd);
+
+	list_del(&page->lru);
+}
+
+#define UNSHARED_PTRS_PER_PGD				\
+	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
+
+static void pgd_ctor(void *p)
+{
+	pgd_t *pgd = p;
+	unsigned long flags;
+
+	/* Clear usermode parts of PGD */
+	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
+
+	spin_lock_irqsave(&pgd_lock, flags);
+
+	/* If the pgd points to a shared pagetable level (either the
+	   ptes in non-PAE, or shared PMD in PAE), then just copy the
+	   references from swapper_pg_dir. */
+	if (PAGETABLE_LEVELS == 2 ||
+	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
+		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
+				swapper_pg_dir + USER_PTRS_PER_PGD,
+				KERNEL_PGD_PTRS);
+		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
+					__pa(swapper_pg_dir) >> PAGE_SHIFT,
+					USER_PTRS_PER_PGD,
+					KERNEL_PGD_PTRS);
+	}
+
+	/* list required to sync kernel mapping updates */
+	if (!SHARED_KERNEL_PMD)
+		pgd_list_add(pgd);
+
+	spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+static void pgd_dtor(void *pgd)
+{
+	unsigned long flags; /* can be called from interrupt context */
+
+	if (SHARED_KERNEL_PMD)
+		return;
+
+	spin_lock_irqsave(&pgd_lock, flags);
+	pgd_list_del(pgd);
+	spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+#ifdef CONFIG_X86_PAE
+/*
+ * Mop up any pmd pages which may still be attached to the pgd.
+ * Normally they will be freed by munmap/exit_mmap, but any pmd we
+ * preallocate which never got a corresponding vma will need to be
+ * freed manually.
+ */
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
+{
+	int i;
+
+	for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
+		pgd_t pgd = pgdp[i];
+
+		if (pgd_val(pgd) != 0) {
+			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+
+			pgdp[i] = native_make_pgd(0);
+
+			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
+			pmd_free(mm, pmd);
+		}
+	}
+}
+
+/*
+ * In PAE mode, we need to do a cr3 reload (=tlb flush) when
+ * updating the top-level pagetable entries to guarantee the
+ * processor notices the update.  Since this is expensive, and
+ * all 4 top-level entries are used almost immediately in a
+ * new process's life, we just pre-populate them here.
+ *
+ * Also, if we're in a paravirt environment where the kernel pmd is
+ * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
+ * and initialize the kernel pmds here.
+ */
+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
+{
+	pud_t *pud;
+	unsigned long addr;
+	int i;
+
+	pud = pud_offset(pgd, 0);
+	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
+	     i++, pud++, addr += PUD_SIZE) {
+		pmd_t *pmd = pmd_alloc_one(mm, addr);
+
+		if (!pmd) {
+			pgd_mop_up_pmds(mm, pgd);
+			return 0;
+		}
+
+		if (i >= USER_PTRS_PER_PGD)
+			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+			       sizeof(pmd_t) * PTRS_PER_PMD);
+
+		pud_populate(mm, pud, pmd);
+	}
+
+	return 1;
+}
+#else  /* !CONFIG_X86_PAE */
+/* No need to prepopulate any pagetable entries in non-PAE modes. */
+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
+{
+	return 1;
+}
+
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
+{
+}
+#endif	/* CONFIG_X86_PAE */
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
+	/* so that alloc_pd can use it */
+	mm->pgd = pgd;
+	if (pgd)
+		pgd_ctor(pgd);
+
+	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
+		pgd_dtor(pgd);
+		free_page((unsigned long)pgd);
+		pgd = NULL;
+	}
+
+	return pgd;
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	pgd_mop_up_pmds(mm, pgd);
+	pgd_dtor(pgd);
+	free_page((unsigned long)pgd);
+}
+#endif
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 6fb9e7c6893f..b46893e45d02 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -173,193 +173,6 @@ void reserve_top_address(unsigned long reserve)
 	__VMALLOC_RESERVE += reserve;
 }
 
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-}
-
-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	struct page *pte;
-
-#ifdef CONFIG_HIGHPTE
-	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
-#else
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-#endif
-	if (pte)
-		pgtable_page_ctor(pte);
-	return pte;
-}
-
-/*
- * List of all pgd's needed for non-PAE so it can invalidate entries
- * in both cached and uncached pgd's; not needed for PAE since the
- * kernel pmd is shared. If PAE were not to share the pmd a similar
- * tactic would be needed. This is essentially codepath-based locking
- * against pageattr.c; it is the unique case in which a valid change
- * of kernel pagetables can't be lazily synchronized by vmalloc faults.
- * vmalloc faults work because attached pagetables are never freed.
- * -- wli
- */
-static inline void pgd_list_add(pgd_t *pgd)
-{
-	struct page *page = virt_to_page(pgd);
-
-	list_add(&page->lru, &pgd_list);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
-	struct page *page = virt_to_page(pgd);
-
-	list_del(&page->lru);
-}
-
-#define UNSHARED_PTRS_PER_PGD				\
-	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
-
-static void pgd_ctor(void *p)
-{
-	pgd_t *pgd = p;
-	unsigned long flags;
-
-	/* Clear usermode parts of PGD */
-	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-
-	spin_lock_irqsave(&pgd_lock, flags);
-
-	/* If the pgd points to a shared pagetable level (either the
-	   ptes in non-PAE, or shared PMD in PAE), then just copy the
-	   references from swapper_pg_dir. */
-	if (PAGETABLE_LEVELS == 2 ||
-	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
-		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
-				swapper_pg_dir + USER_PTRS_PER_PGD,
-				KERNEL_PGD_PTRS);
-		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-					__pa(swapper_pg_dir) >> PAGE_SHIFT,
-					USER_PTRS_PER_PGD,
-					KERNEL_PGD_PTRS);
-	}
-
-	/* list required to sync kernel mapping updates */
-	if (!SHARED_KERNEL_PMD)
-		pgd_list_add(pgd);
-
-	spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-static void pgd_dtor(void *pgd)
-{
-	unsigned long flags; /* can be called from interrupt context */
-
-	if (SHARED_KERNEL_PMD)
-		return;
-
-	spin_lock_irqsave(&pgd_lock, flags);
-	pgd_list_del(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-#ifdef CONFIG_X86_PAE
-/*
- * Mop up any pmd pages which may still be attached to the pgd.
- * Normally they will be freed by munmap/exit_mmap, but any pmd we
- * preallocate which never got a corresponding vma will need to be
- * freed manually.
- */
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
-{
-	int i;
-
-	for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
-		pgd_t pgd = pgdp[i];
-
-		if (pgd_val(pgd) != 0) {
-			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
-
-			pgdp[i] = native_make_pgd(0);
-
-			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
-			pmd_free(mm, pmd);
-		}
-	}
-}
-
-/*
- * In PAE mode, we need to do a cr3 reload (=tlb flush) when
- * updating the top-level pagetable entries to guarantee the
- * processor notices the update.  Since this is expensive, and
- * all 4 top-level entries are used almost immediately in a
- * new process's life, we just pre-populate them here.
- *
- * Also, if we're in a paravirt environment where the kernel pmd is
- * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
- * and initialize the kernel pmds here.
- */
-static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
-{
-	pud_t *pud;
-	unsigned long addr;
-	int i;
-
-	pud = pud_offset(pgd, 0);
-	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
-	     i++, pud++, addr += PUD_SIZE) {
-		pmd_t *pmd = pmd_alloc_one(mm, addr);
-
-		if (!pmd) {
-			pgd_mop_up_pmds(mm, pgd);
-			return 0;
-		}
-
-		if (i >= USER_PTRS_PER_PGD)
-			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
-			       sizeof(pmd_t) * PTRS_PER_PMD);
-
-		pud_populate(mm, pud, pmd);
-	}
-
-	return 1;
-}
-#else  /* !CONFIG_X86_PAE */
-/* No need to prepopulate any pagetable entries in non-PAE modes. */
-static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
-{
-	return 1;
-}
-
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
-{
-}
-#endif	/* CONFIG_X86_PAE */
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-
-	/* so that alloc_pd can use it */
-	mm->pgd = pgd;
-	if (pgd)
-		pgd_ctor(pgd);
-
-	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
-		pgd_dtor(pgd);
-		free_page((unsigned long)pgd);
-		pgd = NULL;
-	}
-
-	return pgd;
-}
-
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-	pgd_mop_up_pmds(mm, pgd);
-	pgd_dtor(pgd);
-	free_page((unsigned long)pgd);
-}
-
 void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
 	pgtable_page_dtor(pte);