author		Adam Litke <agl@us.ibm.com>	2008-03-10 14:43:50 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-03-10 21:01:19 -0400
commit		2668db9111bb1a6ab5a54f41f703179f35c7d098 (patch)
tree		bde940cfd298321663cf988b607151513c801a1a /mm/hugetlb.c
parent		842078054da2d754c6b998b116d7c468abbfaaca (diff)
hugetlb: correct page count for surplus huge pages
Free pages in the hugetlb pool have a reference count of zero. Pages that are
allocated from the buddy allocator to grow the pool are "freed" into it, which
drops their page_count to zero. However, surplus pages can be handed directly
to the caller without first being freed into the pool, so a call to
put_page_testzero() is needed to ensure such a page reaches the caller with
the correct count.
This has not affected end users because the bad page count is reset before the
page is handed off. However, under CONFIG_DEBUG_VM this triggers a BUG when
the page count is validated.
Thanks go to Mel for first spotting this issue and providing an initial fix.
Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dcacc811e70..74c1b6b0b37 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -286,6 +286,12 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
 
 	spin_lock(&hugetlb_lock);
 	if (page) {
+		/*
+		 * This page is now managed by the hugetlb allocator and has
+		 * no users -- drop the buddy allocator's reference.
+		 */
+		put_page_testzero(page);
+		VM_BUG_ON(page_count(page));
 		nid = page_to_nid(page);
 		set_compound_page_dtor(page, free_huge_page);
 		/*
@@ -369,13 +375,14 @@ free:
 		enqueue_huge_page(page);
 	else {
 		/*
-		 * Decrement the refcount and free the page using its
-		 * destructor. This must be done with hugetlb_lock
+		 * The page has a reference count of zero already, so
+		 * call free_huge_page directly instead of using
+		 * put_page. This must be done with hugetlb_lock
 		 * unlocked which is safe because free_huge_page takes
 		 * hugetlb_lock before deciding how to free the page.
 		 */
 		spin_unlock(&hugetlb_lock);
-		put_page(page);
+		free_huge_page(page);
 		spin_lock(&hugetlb_lock);
 	}
 }
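
For readers who want to see the refcount convention this patch restores in
isolation, the following is a minimal stand-alone sketch of the lifecycle
described in the commit message. It is an illustrative model, not kernel code:
struct page, alloc_from_buddy() and the assert inside enqueue_huge_page() are
simplified stand-ins, and only the put_page_testzero() call and the zero-count
check mirror the patch above.

/*
 * Illustrative model of the refcount convention the patch enforces.
 * Not kernel code: the structures and helpers here are simplified
 * stand-ins for the real hugetlb machinery.
 */
#include <assert.h>
#include <stdio.h>

struct page { int count; };

/* The buddy allocator hands back a page with one reference held. */
static struct page *alloc_from_buddy(struct page *p)
{
	p->count = 1;
	return p;
}

/* Pages sitting in the hugetlb free pool must have a count of zero. */
static void enqueue_huge_page(struct page *p)
{
	assert(p->count == 0);
}

/* put_page_testzero(): drop one reference, report whether it hit zero. */
static int put_page_testzero(struct page *p)
{
	return --p->count == 0;
}

int main(void)
{
	struct page backing;
	struct page *page;

	/*
	 * Pool path: the buddy allocator's reference is dropped when the
	 * page is freed into the pool, so pool pages sit at count == 0.
	 */
	page = alloc_from_buddy(&backing);
	put_page_testzero(page);
	enqueue_huge_page(page);

	/*
	 * Surplus path (the bug): the page goes straight to the caller
	 * without passing through the pool. Before the patch nothing
	 * dropped the buddy allocator's reference, so the count was still
	 * nonzero when the caller's reference was set up, which the
	 * CONFIG_DEBUG_VM check catches.
	 */
	page = alloc_from_buddy(&backing);
	put_page_testzero(page);   /* drop the buddy allocator's reference */
	assert(page->count == 0);  /* mirrors VM_BUG_ON(page_count(page)) */
	page->count = 1;           /* caller's reference, set before hand-off */
	printf("surplus page handed to caller with count %d\n", page->count);
	return 0;
}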