aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy@goop.org>2008-02-04 10:48:02 -0500
committerIngo Molnar <mingo@elte.hu>2008-02-04 10:48:02 -0500
commite618c9579c745742c422b7c3de1f802aa67e6110 (patch)
treedc84bafacd209118f460af7a1ddd6cdb0af372cf /arch/x86/mm
parentc66315e0a785e95884b23887c1aa479dc0b32beb (diff)
x86: unify PAE/non-PAE pgd_ctor
The constructors for PAE and non-PAE pgd_ctors are more or less identical, and can be made into the same function.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: William Irwin <wli@holomorphy.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/pgtable_32.c | 54
1 file changed, 20 insertions(+), 34 deletions(-)
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index cb3aa470249b..f34e33d18443 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -219,50 +219,39 @@ static inline void pgd_list_del(pgd_t *pgd)
219 list_del(&page->lru); 219 list_del(&page->lru);
220} 220}
221 221
222#define UNSHARED_PTRS_PER_PGD \
223 (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
222 224
223 225static void pgd_ctor(void *p)
224#if (PTRS_PER_PMD == 1)
225/* Non-PAE pgd constructor */
226static void pgd_ctor(void *pgd)
227{ 226{
227 pgd_t *pgd = p;
228 unsigned long flags; 228 unsigned long flags;
229 229
230 /* !PAE, no pagetable sharing */ 230 /* Clear usermode parts of PGD */
231 memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); 231 memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
232 232
233 spin_lock_irqsave(&pgd_lock, flags); 233 spin_lock_irqsave(&pgd_lock, flags);
234 234
235 /* must happen under lock */ 235 /* If the pgd points to a shared pagetable level (either the
236 clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD, 236 ptes in non-PAE, or shared PMD in PAE), then just copy the
237 swapper_pg_dir + USER_PTRS_PER_PGD, 237 references from swapper_pg_dir. */
238 KERNEL_PGD_PTRS); 238 if (PAGETABLE_LEVELS == 2 ||
239 paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT, 239 (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
240 __pa(swapper_pg_dir) >> PAGE_SHIFT, 240 clone_pgd_range(pgd + USER_PTRS_PER_PGD,
241 USER_PTRS_PER_PGD,
242 KERNEL_PGD_PTRS);
243 pgd_list_add(pgd);
244 spin_unlock_irqrestore(&pgd_lock, flags);
245}
246#else /* PTRS_PER_PMD > 1 */
247/* PAE pgd constructor */
248static void pgd_ctor(void *pgd)
249{
250 /* PAE, kernel PMD may be shared */
251
252 if (SHARED_KERNEL_PMD) {
253 clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
254 swapper_pg_dir + USER_PTRS_PER_PGD, 241 swapper_pg_dir + USER_PTRS_PER_PGD,
255 KERNEL_PGD_PTRS); 242 KERNEL_PGD_PTRS);
256 } else { 243 paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
257 unsigned long flags; 244 __pa(swapper_pg_dir) >> PAGE_SHIFT,
245 USER_PTRS_PER_PGD,
246 KERNEL_PGD_PTRS);
247 }
258 248
259 memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); 249 /* list required to sync kernel mapping updates */
260 spin_lock_irqsave(&pgd_lock, flags); 250 if (!SHARED_KERNEL_PMD)
261 pgd_list_add(pgd); 251 pgd_list_add(pgd);
262 spin_unlock_irqrestore(&pgd_lock, flags); 252
263 } 253 spin_unlock_irqrestore(&pgd_lock, flags);
264} 254}
265#endif /* PTRS_PER_PMD */
266 255
267static void pgd_dtor(void *pgd) 256static void pgd_dtor(void *pgd)
268{ 257{
@@ -276,9 +265,6 @@ static void pgd_dtor(void *pgd)
276 spin_unlock_irqrestore(&pgd_lock, flags); 265 spin_unlock_irqrestore(&pgd_lock, flags);
277} 266}
278 267
279#define UNSHARED_PTRS_PER_PGD \
280 (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
281
282#ifdef CONFIG_X86_PAE 268#ifdef CONFIG_X86_PAE
283/* 269/*
284 * Mop up any pmd pages which may still be attached to the pgd. 270 * Mop up any pmd pages which may still be attached to the pgd.