-rw-r--r--  arch/x86/mm/Makefile          |   2
-rw-r--r--  arch/x86/mm/pgtable.c         | 239
-rw-r--r--  arch/x86/mm/pgtable_32.c      | 187
-rw-r--r--  include/asm-x86/pgalloc.h     |  18
-rw-r--r--  include/asm-x86/pgalloc_32.h  |  11
-rw-r--r--  include/asm-x86/pgalloc_64.h  |  67
6 files changed, 258 insertions(+), 266 deletions(-)
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 20941d2954e2..b7b3e4c7cfc9 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,5 +1,5 @@
 obj-y   := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-            pat.o
+            pat.o pgtable.o
 
 obj-$(CONFIG_X86_32)    += pgtable_32.o
 
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
new file mode 100644
index 000000000000..d526b46ae188
--- /dev/null
+++ b/arch/x86/mm/pgtable.c
@@ -0,0 +1,239 @@
+#include <linux/mm.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+}
+
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+        struct page *pte;
+
+#ifdef CONFIG_HIGHPTE
+        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
+#else
+        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+#endif
+        if (pte)
+                pgtable_page_ctor(pte);
+        return pte;
+}
+
+#ifdef CONFIG_X86_64
+static inline void pgd_list_add(pgd_t *pgd)
+{
+        struct page *page = virt_to_page(pgd);
+        unsigned long flags;
+
+        spin_lock_irqsave(&pgd_lock, flags);
+        list_add(&page->lru, &pgd_list);
+        spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+        struct page *page = virt_to_page(pgd);
+        unsigned long flags;
+
+        spin_lock_irqsave(&pgd_lock, flags);
+        list_del(&page->lru);
+        spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+        unsigned boundary;
+        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+        if (!pgd)
+                return NULL;
+        pgd_list_add(pgd);
+        /*
+         * Copy kernel pointers in from init.
+         * Could keep a freelist or slab cache of those because the kernel
+         * part never changes.
+         */
+        boundary = pgd_index(__PAGE_OFFSET);
+        memset(pgd, 0, boundary * sizeof(pgd_t));
+        memcpy(pgd + boundary,
+               init_level4_pgt + boundary,
+               (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
+        return pgd;
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+        BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+        pgd_list_del(pgd);
+        free_page((unsigned long)pgd);
+}
+#else
+/*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+ * in both cached and uncached pgd's; not needed for PAE since the
+ * kernel pmd is shared. If PAE were not to share the pmd a similar
+ * tactic would be needed. This is essentially codepath-based locking
+ * against pageattr.c; it is the unique case in which a valid change
+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed.
+ * -- wli
+ */
+static inline void pgd_list_add(pgd_t *pgd)
+{
+        struct page *page = virt_to_page(pgd);
+
+        list_add(&page->lru, &pgd_list);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+        struct page *page = virt_to_page(pgd);
+
+        list_del(&page->lru);
+}
+
+#define UNSHARED_PTRS_PER_PGD \
+        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
+
+static void pgd_ctor(void *p)
+{
+        pgd_t *pgd = p;
+        unsigned long flags;
+
+        /* Clear usermode parts of PGD */
+        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
+
+        spin_lock_irqsave(&pgd_lock, flags);
+
+        /* If the pgd points to a shared pagetable level (either the
+           ptes in non-PAE, or shared PMD in PAE), then just copy the
+           references from swapper_pg_dir. */
+        if (PAGETABLE_LEVELS == 2 ||
+            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
+                clone_pgd_range(pgd + USER_PTRS_PER_PGD,
+                                swapper_pg_dir + USER_PTRS_PER_PGD,
+                                KERNEL_PGD_PTRS);
+                paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
+                                        __pa(swapper_pg_dir) >> PAGE_SHIFT,
+                                        USER_PTRS_PER_PGD,
+                                        KERNEL_PGD_PTRS);
+        }
+
+        /* list required to sync kernel mapping updates */
+        if (!SHARED_KERNEL_PMD)
+                pgd_list_add(pgd);
+
+        spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+static void pgd_dtor(void *pgd)
+{
+        unsigned long flags; /* can be called from interrupt context */
+
+        if (SHARED_KERNEL_PMD)
+                return;
+
+        spin_lock_irqsave(&pgd_lock, flags);
+        pgd_list_del(pgd);
+        spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+#ifdef CONFIG_X86_PAE
+/*
+ * Mop up any pmd pages which may still be attached to the pgd.
+ * Normally they will be freed by munmap/exit_mmap, but any pmd we
+ * preallocate which never got a corresponding vma will need to be
+ * freed manually.
+ */
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
+{
+        int i;
+
+        for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
+                pgd_t pgd = pgdp[i];
+
+                if (pgd_val(pgd) != 0) {
+                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+
+                        pgdp[i] = native_make_pgd(0);
+
+                        paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
+                        pmd_free(mm, pmd);
+                }
+        }
+}
+
+/*
+ * In PAE mode, we need to do a cr3 reload (=tlb flush) when
+ * updating the top-level pagetable entries to guarantee the
+ * processor notices the update. Since this is expensive, and
+ * all 4 top-level entries are used almost immediately in a
+ * new process's life, we just pre-populate them here.
+ *
+ * Also, if we're in a paravirt environment where the kernel pmd is
+ * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
+ * and initialize the kernel pmds here.
+ */
+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
+{
+        pud_t *pud;
+        unsigned long addr;
+        int i;
+
+        pud = pud_offset(pgd, 0);
+        for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
+             i++, pud++, addr += PUD_SIZE) {
+                pmd_t *pmd = pmd_alloc_one(mm, addr);
+
+                if (!pmd) {
+                        pgd_mop_up_pmds(mm, pgd);
+                        return 0;
+                }
+
+                if (i >= USER_PTRS_PER_PGD)
+                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+                               sizeof(pmd_t) * PTRS_PER_PMD);
+
+                pud_populate(mm, pud, pmd);
+        }
+
+        return 1;
+}
+#else  /* !CONFIG_X86_PAE */
+/* No need to prepopulate any pagetable entries in non-PAE modes. */
+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
+{
+        return 1;
+}
+
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
+{
+}
+#endif  /* CONFIG_X86_PAE */
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
+        /* so that alloc_pd can use it */
+        mm->pgd = pgd;
+        if (pgd)
+                pgd_ctor(pgd);
+
+        if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
+                pgd_dtor(pgd);
+                free_page((unsigned long)pgd);
+                pgd = NULL;
+        }
+
+        return pgd;
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+        pgd_mop_up_pmds(mm, pgd);
+        pgd_dtor(pgd);
+        free_page((unsigned long)pgd);
+}
+#endif
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 6fb9e7c6893f..b46893e45d02 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -173,193 +173,6 @@ void reserve_top_address(unsigned long reserve)
         __VMALLOC_RESERVE += reserve;
 }
 
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-}
-
-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-        struct page *pte;
-
-#ifdef CONFIG_HIGHPTE
-        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
-#else
-        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-#endif
-        if (pte)
-                pgtable_page_ctor(pte);
-        return pte;
-}
-
-/*
- * List of all pgd's needed for non-PAE so it can invalidate entries
- * in both cached and uncached pgd's; not needed for PAE since the
- * kernel pmd is shared. If PAE were not to share the pmd a similar
- * tactic would be needed. This is essentially codepath-based locking
- * against pageattr.c; it is the unique case in which a valid change
- * of kernel pagetables can't be lazily synchronized by vmalloc faults.
- * vmalloc faults work because attached pagetables are never freed.
- * -- wli
- */
-static inline void pgd_list_add(pgd_t *pgd)
-{
-        struct page *page = virt_to_page(pgd);
-
-        list_add(&page->lru, &pgd_list);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
-        struct page *page = virt_to_page(pgd);
-
-        list_del(&page->lru);
-}
-
-#define UNSHARED_PTRS_PER_PGD \
-        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
-
-static void pgd_ctor(void *p)
-{
-        pgd_t *pgd = p;
-        unsigned long flags;
-
-        /* Clear usermode parts of PGD */
-        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-
-        spin_lock_irqsave(&pgd_lock, flags);
-
-        /* If the pgd points to a shared pagetable level (either the
-           ptes in non-PAE, or shared PMD in PAE), then just copy the
-           references from swapper_pg_dir. */
-        if (PAGETABLE_LEVELS == 2 ||
-            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
-                clone_pgd_range(pgd + USER_PTRS_PER_PGD,
-                                swapper_pg_dir + USER_PTRS_PER_PGD,
-                                KERNEL_PGD_PTRS);
-                paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-                                        __pa(swapper_pg_dir) >> PAGE_SHIFT,
-                                        USER_PTRS_PER_PGD,
-                                        KERNEL_PGD_PTRS);
-        }
-
-        /* list required to sync kernel mapping updates */
-        if (!SHARED_KERNEL_PMD)
-                pgd_list_add(pgd);
-
-        spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-static void pgd_dtor(void *pgd)
-{
-        unsigned long flags; /* can be called from interrupt context */
-
-        if (SHARED_KERNEL_PMD)
-                return;
-
-        spin_lock_irqsave(&pgd_lock, flags);
-        pgd_list_del(pgd);
-        spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-#ifdef CONFIG_X86_PAE
-/*
- * Mop up any pmd pages which may still be attached to the pgd.
- * Normally they will be freed by munmap/exit_mmap, but any pmd we
- * preallocate which never got a corresponding vma will need to be
- * freed manually.
- */
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
-{
-        int i;
-
-        for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
-                pgd_t pgd = pgdp[i];
-
-                if (pgd_val(pgd) != 0) {
-                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
-
-                        pgdp[i] = native_make_pgd(0);
-
-                        paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
-                        pmd_free(mm, pmd);
-                }
-        }
-}
-
-/*
- * In PAE mode, we need to do a cr3 reload (=tlb flush) when
- * updating the top-level pagetable entries to guarantee the
- * processor notices the update. Since this is expensive, and
- * all 4 top-level entries are used almost immediately in a
- * new process's life, we just pre-populate them here.
- *
- * Also, if we're in a paravirt environment where the kernel pmd is
- * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
- * and initialize the kernel pmds here.
- */
-static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
-{
-        pud_t *pud;
-        unsigned long addr;
-        int i;
-
-        pud = pud_offset(pgd, 0);
-        for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
-             i++, pud++, addr += PUD_SIZE) {
-                pmd_t *pmd = pmd_alloc_one(mm, addr);
-
-                if (!pmd) {
-                        pgd_mop_up_pmds(mm, pgd);
-                        return 0;
-                }
-
-                if (i >= USER_PTRS_PER_PGD)
-                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
-                               sizeof(pmd_t) * PTRS_PER_PMD);
-
-                pud_populate(mm, pud, pmd);
-        }
-
-        return 1;
-}
-#else  /* !CONFIG_X86_PAE */
-/* No need to prepopulate any pagetable entries in non-PAE modes. */
-static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
-{
-        return 1;
-}
-
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
-{
-}
-#endif  /* CONFIG_X86_PAE */
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-
-        /* so that alloc_pd can use it */
-        mm->pgd = pgd;
-        if (pgd)
-                pgd_ctor(pgd);
-
-        if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
-                pgd_dtor(pgd);
-                free_page((unsigned long)pgd);
-                pgd = NULL;
-        }
-
-        return pgd;
-}
-
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-        pgd_mop_up_pmds(mm, pgd);
-        pgd_dtor(pgd);
-        free_page((unsigned long)pgd);
-}
-
 void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
         pgtable_page_dtor(pte);
diff --git a/include/asm-x86/pgalloc.h b/include/asm-x86/pgalloc.h
index 5886eed05886..ea9d27ad7f4e 100644
--- a/include/asm-x86/pgalloc.h
+++ b/include/asm-x86/pgalloc.h
@@ -1,5 +1,23 @@
+#ifndef _ASM_X86_PGALLOC_H
+#define _ASM_X86_PGALLOC_H
+
+#include <linux/threads.h>
+#include <linux/mm.h>           /* for struct page */
+#include <linux/pagemap.h>
+
+/*
+ * Allocate and free page tables.
+ */
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
+
 #ifdef CONFIG_X86_32
 # include "pgalloc_32.h"
 #else
 # include "pgalloc_64.h"
 #endif
+
+#endif /* _ASM_X86_PGALLOC_H */
diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h
index 6bea6e5b5ee5..d60edb14f85e 100644
--- a/include/asm-x86/pgalloc_32.h
+++ b/include/asm-x86/pgalloc_32.h
@@ -1,12 +1,6 @@
 #ifndef _I386_PGALLOC_H
 #define _I386_PGALLOC_H
 
-#include <linux/threads.h>
-#include <linux/mm.h>           /* for struct page */
-#include <linux/pagemap.h>
-#include <asm/tlb.h>
-#include <asm-generic/tlb.h>
-
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -36,11 +30,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
 /*
  * Allocate and free page tables.
  */
-extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
diff --git a/include/asm-x86/pgalloc_64.h b/include/asm-x86/pgalloc_64.h
index bcf525f3fbd0..23f87501ac26 100644
--- a/include/asm-x86/pgalloc_64.h
+++ b/include/asm-x86/pgalloc_64.h
@@ -1,8 +1,6 @@
 #ifndef _X86_64_PGALLOC_H
 #define _X86_64_PGALLOC_H
 
-#include <linux/threads.h>
-#include <linux/mm.h>
 #include <asm/pda.h>
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
@@ -49,71 +47,6 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
         free_page((unsigned long)pud);
 }
 
-static inline void pgd_list_add(pgd_t *pgd)
-{
-        struct page *page = virt_to_page(pgd);
-        unsigned long flags;
-
-        spin_lock_irqsave(&pgd_lock, flags);
-        list_add(&page->lru, &pgd_list);
-        spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
-        struct page *page = virt_to_page(pgd);
-        unsigned long flags;
-
-        spin_lock_irqsave(&pgd_lock, flags);
-        list_del(&page->lru);
-        spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-        unsigned boundary;
-        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-        if (!pgd)
-                return NULL;
-        pgd_list_add(pgd);
-        /*
-         * Copy kernel pointers in from init.
-         * Could keep a freelist or slab cache of those because the kernel
-         * part never changes.
-         */
-        boundary = pgd_index(__PAGE_OFFSET);
-        memset(pgd, 0, boundary * sizeof(pgd_t));
-        memcpy(pgd + boundary,
-               init_level4_pgt + boundary,
-               (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
-        return pgd;
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-        BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
-        pgd_list_del(pgd);
-        free_page((unsigned long)pgd);
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-        return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-        struct page *page;
-        void *p;
-
-        p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-        if (!p)
-                return NULL;
-        page = virt_to_page(p);
-        pgtable_page_ctor(page);
-        return page;
-}
-
 /* Should really implement gc for free page table pages. This could be
    done with a reference count in struct page. */
 