path: root/arch/x86/mm/pageattr.c
author    Thomas Gleixner <tglx@linutronix.de>    2008-02-09 17:24:09 -0500
committer Thomas Gleixner <tglx@linutronix.de>    2008-02-09 17:24:09 -0500
commit  eb5b5f024c40f02e9b0f3801173769a726f170fb (patch)
tree    bf02cac86eac4090b6d052d16824b1033f80b024 /arch/x86/mm/pageattr.c
parent  76ebd0548df6ee48586e9b80d8fc2f58aa5fb51c (diff)
x86: cpa, use page pool
Switch the split page code to use the page pool. We do this
unconditionally to avoid different behaviour with and without
DEBUG_PAGEALLOC enabled.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
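The idea is easy to model outside the kernel: the split path stops calling
the page allocator directly and instead pops a preallocated page off a list
guarded by the lock it must hold anyway, handing the page back if another
CPU won the race. The sketch below is a minimal standalone userspace model
of that pattern, not kernel code: a pthread mutex stands in for pgd_lock, a
singly linked list for the lru-threaded page_pool, and pool_fill() for
whatever refills the pool from a sleepable context; all names here are
illustrative.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pool_page {
	struct pool_page *next;		/* stands in for page->lru */
	char data[4096];
};

static struct pool_page *page_pool;	/* list of ready pages */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER; /* "pgd_lock" */
static int pool_pages, pool_low, pool_used;

/* Refill from a context where a blocking allocation is fine. */
static void pool_fill(int count)
{
	while (count-- > 0) {
		struct pool_page *p = malloc(sizeof(*p));

		if (!p)
			return;
		pthread_mutex_lock(&pool_lock);
		p->next = page_pool;
		page_pool = p;
		pool_pages++;
		pthread_mutex_unlock(&pool_lock);
	}
}

/*
 * Model of the split path: pop a ready page under the lock we need
 * anyway; on the already-split race, hand it back instead of freeing.
 */
static int split_one(int lost_race)
{
	struct pool_page *base;

	pthread_mutex_lock(&pool_lock);
	if (!page_pool) {
		pthread_mutex_unlock(&pool_lock);
		return -ENOMEM;
	}
	base = page_pool;
	page_pool = base->next;
	pool_pages--;
	if (pool_pages < pool_low)
		pool_low = pool_pages;

	if (!lost_race) {
		/* ... the real code fills base and installs the PTEs ... */
		base = NULL;	/* consumed */
	}

	if (base) {		/* raced: return the unused page */
		base->next = page_pool;
		page_pool = base;
		pool_pages++;
	} else {
		pool_used++;
	}
	pthread_mutex_unlock(&pool_lock);
	return 0;
}

int main(void)
{
	pool_fill(1);
	printf("split:  %d\n", split_one(0));	/* 0, consumes the page */
	printf("again:  %d\n", split_one(0));	/* -ENOMEM, pool empty */
	printf("used=%d low=%d\n", pool_used, pool_low);
	return 0;
}

Built with cc -pthread, the model shows the two outcomes the patch cares
about: a successful split consumes a pool page, and an empty pool fails
fast with -ENOMEM instead of allocating under the lock.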
Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r--  arch/x86/mm/pageattr.c | 35
1 file changed, 25 insertions(+), 10 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 831462c3bc35..e5d29a112d00 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -411,20 +411,29 @@ void __init cpa_init(void)
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
 	unsigned long flags, pfn, pfninc = 1;
-	gfp_t gfp_flags = GFP_KERNEL;
 	unsigned int i, level;
 	pte_t *pbase, *tmp;
 	pgprot_t ref_prot;
 	struct page *base;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
-#endif
-	base = alloc_pages(gfp_flags, 0);
-	if (!base)
+	/*
+	 * Get a page from the pool. The pool list is protected by the
+	 * pgd_lock, which we have to take anyway for the split
+	 * operation:
+	 */
+	spin_lock_irqsave(&pgd_lock, flags);
+	if (list_empty(&page_pool)) {
+		spin_unlock_irqrestore(&pgd_lock, flags);
 		return -ENOMEM;
+	}
+
+	base = list_first_entry(&page_pool, struct page, lru);
+	list_del(&base->lru);
+	pool_pages--;
+
+	if (pool_pages < pool_low)
+		pool_low = pool_pages;
 
-	spin_lock_irqsave(&pgd_lock, flags);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
@@ -469,11 +478,17 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	base = NULL;
 
 out_unlock:
+	/*
+	 * If we dropped out via the lookup_address check under
+	 * pgd_lock then stick the page back into the pool:
+	 */
+	if (base) {
+		list_add(&base->lru, &page_pool);
+		pool_pages++;
+	} else
+		pool_used++;
 	spin_unlock_irqrestore(&pgd_lock, flags);
 
-	if (base)
-		__free_pages(base, 0);
-
 	return 0;
 }
 
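Two properties of the new scheme are worth noting. First, the split path no
longer chooses between GFP_KERNEL and GFP_ATOMIC at all: the only
allocation-like step under pgd_lock is a list_del() from a pre-filled pool,
so behaviour is identical with and without DEBUG_PAGEALLOC, which is exactly
what the changelog asks for. The pool itself is presumably replenished by
ordinary blocking allocations from contexts that may sleep; the hunk context
shows cpa_init(), where the parent commit appears to set the pool up.
Second, the race handling gets cheaper for the pool: when the
lookup_address() check shows another CPU already split the page, the page
goes straight back onto page_pool under the same lock, rather than being
handed to __free_pages() after the unlock as before, and the pool_low and
pool_used counters give the refill logic a low watermark and a consumption
count to size the pool against.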