author     Jeremy Fitzhardinge <jeremy@goop.org>    2008-03-17 19:36:55 -0400
committer  Ingo Molnar <mingo@elte.hu>              2008-04-24 17:57:30 -0400
commit     4f76cd382213b29dd3658e3e1ea47c0c2be06f3c
tree       a4822f341a6896ace039760d1df963b5f159c665  /arch/x86/mm/pgtable.c
parent     79bf6d66abb5a20813a19dd365dfc49104f0bb88
x86: add common mm/pgtable.c
Add a common arch/x86/mm/pgtable.c file for common pagetable functions.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/pgtable.c')
-rw-r--r--  arch/x86/mm/pgtable.c | 239
1 file changed, 239 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
new file mode 100644
index 000000000000..d526b46ae188
--- /dev/null
+++ b/arch/x86/mm/pgtable.c
@@ -0,0 +1,239 @@
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        if (pte)
                pgtable_page_ctor(pte);
        return pte;
}

#ifdef CONFIG_X86_64
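/*
 * pgd_list links every pgd in the system so that changes to the kernel
 * part of the pagetables (e.g. new vmalloc mappings) can be propagated
 * to all of them; the list is protected by pgd_lock.
 */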
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        unsigned long flags;

        spin_lock_irqsave(&pgd_lock, flags);
        list_add(&page->lru, &pgd_list);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        unsigned long flags;

        spin_lock_irqsave(&pgd_lock, flags);
        list_del(&page->lru);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        unsigned boundary;
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
        if (!pgd)
                return NULL;
        pgd_list_add(pgd);
        /*
         * Copy kernel pointers in from init.
         * Could keep a freelist or slab cache of those because the kernel
         * part never changes.
         */
        boundary = pgd_index(__PAGE_OFFSET);
        memset(pgd, 0, boundary * sizeof(pgd_t));
        memcpy(pgd + boundary,
               init_level4_pgt + boundary,
               (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
        return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
        pgd_list_del(pgd);
        free_page((unsigned long)pgd);
}
#else
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD \
        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

static void pgd_ctor(void *p)
{
        pgd_t *pgd = p;
        unsigned long flags;

        /* Clear usermode parts of PGD */
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

        spin_lock_irqsave(&pgd_lock, flags);

        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (PAGETABLE_LEVELS == 2 ||
            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
                clone_pgd_range(pgd + USER_PTRS_PER_PGD,
                                swapper_pg_dir + USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
                paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
                                        __pa(swapper_pg_dir) >> PAGE_SHIFT,
                                        USER_PTRS_PER_PGD,
                                        KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD)
                pgd_list_add(pgd);

        spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
                pgd_t pgd = pgdp[i];

                if (pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = native_make_pgd(0);

                        paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                }
        }
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
        pud_t *pud;
        unsigned long addr;
        int i;

        pud = pud_offset(pgd, 0);
        for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
             i++, pud++, addr += PUD_SIZE) {
                pmd_t *pmd = pmd_alloc_one(mm, addr);

                if (!pmd) {
                        pgd_mop_up_pmds(mm, pgd);
                        return 0;
                }

                if (i >= USER_PTRS_PER_PGD)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }

        return 1;
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
        return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif  /* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

        /* so that alloc_pd can use it */
        mm->pgd = pgd;
        if (pgd)
                pgd_ctor(pgd);

        if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
                pgd_dtor(pgd);
                free_page((unsigned long)pgd);
                pgd = NULL;
        }

        return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        free_page((unsigned long)pgd);
}
#endif