diff options
Diffstat (limited to 'mm/hugetlb.c')
| -rw-r--r-- | mm/hugetlb.c | 17 |
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b49579c7f2a5..0b7656e804d1 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
| @@ -653,6 +653,7 @@ static void free_huge_page(struct page *page) | |||
| 653 | BUG_ON(page_count(page)); | 653 | BUG_ON(page_count(page)); |
| 654 | BUG_ON(page_mapcount(page)); | 654 | BUG_ON(page_mapcount(page)); |
| 655 | restore_reserve = PagePrivate(page); | 655 | restore_reserve = PagePrivate(page); |
| 656 | ClearPagePrivate(page); | ||
| 656 | 657 | ||
| 657 | spin_lock(&hugetlb_lock); | 658 | spin_lock(&hugetlb_lock); |
| 658 | hugetlb_cgroup_uncharge_page(hstate_index(h), | 659 | hugetlb_cgroup_uncharge_page(hstate_index(h), |
| @@ -695,8 +696,22 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) | |||
| 695 | /* we rely on prep_new_huge_page to set the destructor */ | 696 | /* we rely on prep_new_huge_page to set the destructor */ |
| 696 | set_compound_order(page, order); | 697 | set_compound_order(page, order); |
| 697 | __SetPageHead(page); | 698 | __SetPageHead(page); |
| 699 | __ClearPageReserved(page); | ||
| 698 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { | 700 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { |
| 699 | __SetPageTail(p); | 701 | __SetPageTail(p); |
| 702 | /* | ||
| 703 | * For gigantic hugepages allocated through bootmem at | ||
| 704 | * boot, it's safer to be consistent with the not-gigantic | ||
| 705 | * hugepages and clear the PG_reserved bit from all tail pages | ||
| 706 | * too. Otherwise drivers using get_user_pages() to access tail | ||
| 707 | * pages may get the reference counting wrong if they see | ||
| 708 | * PG_reserved set on a tail page (despite the head page not | ||
| 709 | * having PG_reserved set). Enforcing this consistency between | ||
| 710 | * head and tail pages allows drivers to optimize away a check | ||
| 711 | * on the head page when they need to know if put_page() is needed | ||
| 712 | * after get_user_pages(). | ||
| 713 | */ | ||
| 714 | __ClearPageReserved(p); | ||
| 700 | set_page_count(p, 0); | 715 | set_page_count(p, 0); |
| 701 | p->first_page = page; | 716 | p->first_page = page; |
| 702 | } | 717 | } |
| @@ -1329,9 +1344,9 @@ static void __init gather_bootmem_prealloc(void) | |||
| 1329 | #else | 1344 | #else |
| 1330 | page = virt_to_page(m); | 1345 | page = virt_to_page(m); |
| 1331 | #endif | 1346 | #endif |
| 1332 | __ClearPageReserved(page); | ||
| 1333 | WARN_ON(page_count(page) != 1); | 1347 | WARN_ON(page_count(page) != 1); |
| 1334 | prep_compound_huge_page(page, h->order); | 1348 | prep_compound_huge_page(page, h->order); |
| 1349 | WARN_ON(PageReserved(page)); | ||
| 1335 | prep_new_huge_page(h, page, page_to_nid(page)); | 1350 | prep_new_huge_page(h, page, page_to_nid(page)); |
| 1336 | /* | 1351 | /* |
| 1337 | * If we had gigantic hugepages allocated at boot time, we need | 1352 | * If we had gigantic hugepages allocated at boot time, we need |
