Diffstat (limited to 'arch/x86/mm/pgtable_32.c')
-rw-r--r--  arch/x86/mm/pgtable_32.c  |  61
1 files changed, 20 insertions, 41 deletions
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index cb3aa470249b..c7db504be1ea 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -219,50 +219,39 @@ static inline void pgd_list_del(pgd_t *pgd)
 	list_del(&page->lru);
 }
 
+#define UNSHARED_PTRS_PER_PGD \
+	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
 
-
-#if (PTRS_PER_PMD == 1)
-/* Non-PAE pgd constructor */
-static void pgd_ctor(void *pgd)
+static void pgd_ctor(void *p)
 {
+	pgd_t *pgd = p;
 	unsigned long flags;
 
-	/* !PAE, no pagetable sharing */
+	/* Clear usermode parts of PGD */
 	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 
 	spin_lock_irqsave(&pgd_lock, flags);
 
-	/* must happen under lock */
-	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
-			swapper_pg_dir + USER_PTRS_PER_PGD,
-			KERNEL_PGD_PTRS);
-	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-				__pa(swapper_pg_dir) >> PAGE_SHIFT,
-				USER_PTRS_PER_PGD,
-				KERNEL_PGD_PTRS);
-	pgd_list_add(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-}
-#else /* PTRS_PER_PMD > 1 */
-/* PAE pgd constructor */
-static void pgd_ctor(void *pgd)
-{
-	/* PAE, kernel PMD may be shared */
-
-	if (SHARED_KERNEL_PMD) {
-		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
+	/* If the pgd points to a shared pagetable level (either the
+	   ptes in non-PAE, or shared PMD in PAE), then just copy the
+	   references from swapper_pg_dir. */
+	if (PAGETABLE_LEVELS == 2 ||
+	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
+		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 				swapper_pg_dir + USER_PTRS_PER_PGD,
 				KERNEL_PGD_PTRS);
-	} else {
-		unsigned long flags;
+		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
+					__pa(swapper_pg_dir) >> PAGE_SHIFT,
+					USER_PTRS_PER_PGD,
+					KERNEL_PGD_PTRS);
+	}
 
-		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-		spin_lock_irqsave(&pgd_lock, flags);
+	/* list required to sync kernel mapping updates */
+	if (!SHARED_KERNEL_PMD)
 		pgd_list_add(pgd);
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
+
+	spin_unlock_irqrestore(&pgd_lock, flags);
 }
-#endif /* PTRS_PER_PMD */
 
 static void pgd_dtor(void *pgd)
 {
@@ -276,9 +265,6 @@ static void pgd_dtor(void *pgd)
 	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
-#define UNSHARED_PTRS_PER_PGD \
-	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
-
 #ifdef CONFIG_X86_PAE
 /*
  * Mop up any pmd pages which may still be attached to the pgd.
@@ -387,13 +373,6 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 
 void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
-	/* This is called just after the pmd has been detached from
-	   the pgd, which requires a full tlb flush to be recognized
-	   by the CPU. Rather than incurring multiple tlb flushes
-	   while the address space is being pulled down, make the tlb
-	   gathering machinery do a full flush when we're done. */
-	tlb->fullmm = 1;
-
 	paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
 	tlb_remove_page(tlb, virt_to_page(pmd));
 }
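
For orientation, the unified pgd_ctor() above makes two independent decisions while holding pgd_lock: whether the kernel half of the new pgd can simply be cloned from swapper_pg_dir, and whether the pgd must be kept on pgd_list so later kernel mapping updates can be synced into it. Below is a minimal standalone C sketch of those two checks; the parameter names mirror the kernel macros PAGETABLE_LEVELS and SHARED_KERNEL_PMD, but it is ordinary userspace code and the values it iterates over are illustrative only.

/* Standalone sketch: models the two checks made by the unified pgd_ctor()
 * in the hunk above.  Not kernel code; macro names are reused as plain
 * function parameters for illustration. */
#include <stdio.h>

static int clones_from_swapper(int pagetable_levels, int shared_kernel_pmd)
{
	/* Kernel references are copied from swapper_pg_dir when the pgd
	 * points at a shared level: always with 2-level (non-PAE) paging,
	 * and with 3-level (PAE) paging only if the kernel PMD is shared. */
	return pagetable_levels == 2 ||
	       (pagetable_levels == 3 && shared_kernel_pmd);
}

static int needs_pgd_list(int shared_kernel_pmd)
{
	/* Without a shared kernel PMD, later kernel mapping updates have to
	 * be propagated into every pgd, so the pgd is kept on pgd_list. */
	return !shared_kernel_pmd;
}

int main(void)
{
	int levels, shared;

	for (levels = 2; levels <= 3; levels++)
		for (shared = 0; shared <= 1; shared++)
			printf("PAGETABLE_LEVELS=%d SHARED_KERNEL_PMD=%d -> clone=%d pgd_list=%d\n",
			       levels, shared,
			       clones_from_swapper(levels, shared),
			       needs_pgd_list(shared));
	return 0;
}

The same distinction is what the relocated UNSHARED_PTRS_PER_PGD macro expresses: with a shared kernel PMD only the USER_PTRS_PER_PGD entries are private to each pgd, otherwise all PTRS_PER_PGD entries are.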