Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dcacc811e70e..74c1b6b0b37b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -286,6 +286,12 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
 
 	spin_lock(&hugetlb_lock);
 	if (page) {
+		/*
+		 * This page is now managed by the hugetlb allocator and has
+		 * no users -- drop the buddy allocator's reference.
+		 */
+		put_page_testzero(page);
+		VM_BUG_ON(page_count(page));
 		nid = page_to_nid(page);
 		set_compound_page_dtor(page, free_huge_page);
 		/*
@@ -369,13 +375,14 @@ free:
 			enqueue_huge_page(page);
 		else {
 			/*
-			 * Decrement the refcount and free the page using its
-			 * destructor. This must be done with hugetlb_lock
+			 * The page has a reference count of zero already, so
+			 * call free_huge_page directly instead of using
+			 * put_page.  This must be done with hugetlb_lock
 			 * unlocked which is safe because free_huge_page takes
 			 * hugetlb_lock before deciding how to free the page.
 			 */
 			spin_unlock(&hugetlb_lock);
-			put_page(page);
+			free_huge_page(page);
 			spin_lock(&hugetlb_lock);
 		}
 	}
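
The comments in the diff describe a reference-count handoff: alloc_buddy_huge_page() now drops the buddy allocator's reference with put_page_testzero() as soon as hugetlb takes over the page, so a surplus page that turns out to be unneeded must be freed by calling free_huge_page() directly rather than put_page(), since its reference count is already zero. Below is a minimal userspace sketch of that pattern, not kernel code; fake_page, put_testzero() and free_directly() are hypothetical stand-ins for struct page, put_page_testzero() and free_huge_page().

/*
 * Minimal userspace sketch (assumption: illustrative only, not kernel code)
 * of the refcount pattern this patch adopts.
 */
#include <assert.h>
#include <stdio.h>

struct fake_page {
	int refcount;
};

/* Drop one reference; return nonzero when the count reaches zero. */
static int put_testzero(struct fake_page *p)
{
	return --p->refcount == 0;
}

/* Free path that expects the caller to have already dropped all references. */
static void free_directly(struct fake_page *p)
{
	assert(p->refcount == 0);
	printf("page freed with refcount already at zero\n");
}

int main(void)
{
	/* The allocator hands the page over holding one reference. */
	struct fake_page page = { .refcount = 1 };

	/* Drop that reference as soon as the page is under new management. */
	put_testzero(&page);
	assert(page.refcount == 0);

	/*
	 * If the page later turns out to be surplus, it must go through the
	 * direct free path: another plain "put" would drop the count below
	 * zero instead of freeing the page.
	 */
	free_directly(&page);
	return 0;
}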