path: root/arch/x86/mm/pgtable_32.c
author      Thomas Gleixner <tglx@linutronix.de>      2008-03-09 08:14:37 -0400
committer   Ingo Molnar <mingo@elte.hu>               2008-03-11 12:11:55 -0400
commit      985a34bd75cc8c96e43f00dcdda7c3fdb51a3026 (patch)
tree        86418dbb69daebb1d96818318e45fa3e427834f6 /arch/x86/mm/pgtable_32.c
parent      40f0933d51f4cba26a5c009a26bb230f4514c1b6 (diff)
x86: remove quicklists
quicklists cause a serious memory leak on 32-bit x86, as documented at:

  http://bugzilla.kernel.org/show_bug.cgi?id=9991

the reason is that the quicklist pool is a special-purpose cache that
grows out of proportion. It is not accounted for anywhere and users
have no way to even realize that it's the quicklists that are causing
RAM usage spikes. It was supposed to be a relatively small pool, but
as demonstrated by KOSAKI Motohiro, they can grow as large as:

  Quicklists:    1194304 kB

given how much trouble this code has caused historically, and given
that Andrew objected to its introduction on x86 (years ago), the best
option at this point is to remove them.

[ any performance benefits of caching constructed pgds should be
  implemented in a more generic way (possibly within the page
  allocator), while still allowing constructed pages to be allocated
  by other workloads. ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
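The "Quicklists:" figure quoted above is in /proc/meminfo format; the
following user-space sketch (illustrative only, not part of this patch,
and assuming a pre-removal kernel with CONFIG_QUICKLIST that exposes a
"Quicklists:" line in /proc/meminfo) shows how such a spike could be
spotted:

#include <stdio.h>
#include <string.h>

/* Illustrative sketch: scan /proc/meminfo for the "Quicklists:" line
 * that the figure quoted in the changelog appears to come from. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "Quicklists:", 11) == 0)
			fputs(line, stdout);	/* e.g. "Quicklists:    1194304 kB" */
	}
	fclose(f);
	return 0;
}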
Diffstat (limited to 'arch/x86/mm/pgtable_32.c')
-rw-r--r--  arch/x86/mm/pgtable_32.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 73aba7125203..2f9e9afcb9f4 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -342,12 +342,16 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 
-	mm->pgd = pgd;			/* so that alloc_pd can use it */
+	/* so that alloc_pd can use it */
+	mm->pgd = pgd;
+	if (pgd)
+		pgd_ctor(pgd);
 
 	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
-		quicklist_free(0, pgd_dtor, pgd);
+		pgd_dtor(pgd);
+		free_page((unsigned long)pgd);
 		pgd = NULL;
 	}
 
@@ -357,12 +361,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 	pgd_mop_up_pmds(mm, pgd);
-	quicklist_free(0, pgd_dtor, pgd);
-}
-
-void check_pgt_cache(void)
-{
-	quicklist_trim(0, pgd_dtor, 25, 16);
+	pgd_dtor(pgd);
+	free_page((unsigned long)pgd);
 }
 
 void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
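
As a readability aid (not part of the commit), here is a minimal
user-space sketch of the allocation pattern the patch switches to: a
plain zeroed page with explicit constructor/destructor calls instead
of a quicklist pool. PAGE_SIZE, page_ctor(), page_dtor(), table_alloc()
and prepopulate() are illustrative stand-ins for the kernel's page
size, pgd_ctor(), pgd_dtor(), pgd_alloc() and pgd_prepopulate_pmd();
they are assumptions of this sketch, not kernel API.

#include <stdlib.h>

#define PAGE_SIZE 4096		/* illustrative; the real value is arch-dependent */

/* Hypothetical stand-ins for pgd_ctor()/pgd_dtor() in the patch. */
static void page_ctor(void *page) { (void)page; }
static void page_dtor(void *page) { (void)page; }

/* Mirrors the post-patch pgd_alloc(): allocate a zeroed page directly
 * (no quicklist cache), run the constructor, and if later setup fails,
 * undo with the destructor and an ordinary free. */
static void *table_alloc(int (*prepopulate)(void *page))
{
	void *page = calloc(1, PAGE_SIZE);	/* ~ __get_free_page(GFP_KERNEL | __GFP_ZERO) */

	if (page)
		page_ctor(page);

	if (page && !prepopulate(page)) {
		page_dtor(page);
		free(page);
		page = NULL;
	}
	return page;
}

/* Mirrors the post-patch pgd_free(): destructor, then an ordinary free;
 * nothing is parked on a per-CPU quicklist anymore, so there is also no
 * check_pgt_cache()-style trimming left to do. */
static void table_free(void *page)
{
	page_dtor(page);
	free(page);
}

static int prepopulate_ok(void *page) { (void)page; return 1; }

int main(void)
{
	void *pgd = table_alloc(prepopulate_ok);

	if (pgd)
		table_free(pgd);
	return 0;
}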