author     Adam Litke <agl@us.ibm.com>  2008-04-28 05:12:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 11:58:19 -0400
commit     19fc3f0acde32636529969570055c7e2a744787c (patch)
tree       abcd29adbebe027eb2f3f13770e63662c22c7975
parent     797df5749032c2286bc7ff3a52de41fde0cdf0a5 (diff)
hugetlb: decrease hugetlb_lock cycling in gather_surplus_huge_pages
To reduce hugetlb_lock acquisitions and releases when freeing excess surplus
pages, scan the page list in two parts.  First, transfer the needed pages to
the hugetlb pool.  Then drop the lock and free the remaining pages back to
the buddy allocator.  In the common case there are zero excess pages and no
lock operations are required.

Thanks Mel Gorman for this improvement.

Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
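The two-pass pattern described above generalizes beyond hugetlb.  What
follows is a minimal userspace sketch of the same idea, not the kernel's
code: the names (absorb_surplus, enqueue_pool, pool_lock, struct node) are
illustrative, and a pthread mutex stands in for hugetlb_lock.  Under a
single hold of the lock, the needed items are moved into the pool; the lock
is then dropped once and any excess is freed outside it.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pool;	/* protected by pool_lock */

/* Caller must hold pool_lock. */
static void enqueue_pool(struct node *n)
{
	n->next = pool;
	pool = n;
}

/*
 * Move up to `needed` nodes from the surplus list into the pool, then
 * free the rest.  The lock is cycled at most once, instead of once per
 * excess node as a single-pass loop would do.
 */
static void absorb_surplus(struct node *surplus, int needed)
{
	struct node *n, *tmp;

	pthread_mutex_lock(&pool_lock);
	/* Pass 1: transfer the needed nodes under the lock. */
	while (surplus != NULL && needed-- > 0) {
		n = surplus;
		surplus = n->next;
		enqueue_pool(n);
	}
	pthread_mutex_unlock(&pool_lock);

	/* Pass 2: free the excess with the lock dropped; in the common
	 * case there is no excess and this loop never runs. */
	for (n = surplus; n != NULL; n = tmp) {
		tmp = n->next;
		free(n);
	}
}

int main(void)
{
	struct node *list = NULL;

	/* Build five surplus nodes; keep three, free two. */
	for (int i = 0; i < 5; i++) {
		struct node *n = malloc(sizeof(*n));
		if (n == NULL)
			return 1;
		n->id = i;
		n->next = list;
		list = n;
	}
	absorb_surplus(list, 3);

	for (struct node *n = pool; n != NULL; n = n->next)
		printf("pooled node %d\n", n->id);
	return 0;
}

As in the patch itself, the common case of zero excess items performs no
extra lock operations at all.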
 mm/hugetlb.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3737d82f5225..93ea46a0fba4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -372,11 +372,19 @@ retry:
 	resv_huge_pages += delta;
 	ret = 0;
 free:
+	/* Free the needed pages to the hugetlb pool */
 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+		if ((--needed) < 0)
+			break;
 		list_del(&page->lru);
-		if ((--needed) >= 0)
-			enqueue_huge_page(page);
-		else {
+		enqueue_huge_page(page);
+	}
+
+	/* Free unnecessary surplus pages to the buddy allocator */
+	if (!list_empty(&surplus_list)) {
+		spin_unlock(&hugetlb_lock);
+		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+			list_del(&page->lru);
 			/*
 			 * The page has a reference count of zero already, so
 			 * call free_huge_page directly instead of using
@@ -384,10 +392,9 @@ free:
 			 * unlocked which is safe because free_huge_page takes
 			 * hugetlb_lock before deciding how to free the page.
 			 */
-			spin_unlock(&hugetlb_lock);
 			free_huge_page(page);
-			spin_lock(&hugetlb_lock);
 		}
+		spin_lock(&hugetlb_lock);
 	}
 
 	return ret;