Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2b0abc30685d..c33c5cbb67ff 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1268,12 +1268,23 @@ void free_huge_page(struct page *page)
 	ClearPagePrivate(page);
 
 	/*
-	 * A return code of zero implies that the subpool will be under its
-	 * minimum size if the reservation is not restored after page is free.
-	 * Therefore, force restore_reserve operation.
+	 * If PagePrivate() was set on page, page allocation consumed a
+	 * reservation. If the page was associated with a subpool, there
+	 * would have been a page reserved in the subpool before allocation
+	 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
+	 * reservation, do not call hugepage_subpool_put_pages() as this will
+	 * remove the reserved page from the subpool.
 	 */
-	if (hugepage_subpool_put_pages(spool, 1) == 0)
-		restore_reserve = true;
+	if (!restore_reserve) {
+		/*
+		 * A return code of zero implies that the subpool will be
+		 * under its minimum size if the reservation is not restored
+		 * after page is free. Therefore, force restore_reserve
+		 * operation.
+		 */
+		if (hugepage_subpool_put_pages(spool, 1) == 0)
+			restore_reserve = true;
+	}
 
 	spin_lock(&hugetlb_lock);
 	clear_page_huge_active(page);
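
For readers unfamiliar with the subpool accounting the comments rely on, the sketch below is a minimal userspace model (not kernel code) of the convention: hugepage_subpool_put_pages() returns how many freed pages the global pool may take back, and a return of zero means the freed page was absorbed by the subpool's minimum-size reservation, so the caller must force restore_reserve. The struct fields echo struct hugepage_subpool in mm/hugetlb.c, but locking, used_hpages and the global pool are omitted; subpool_put_pages() and its field names here are illustrative stand-ins, not the real API.

/*
 * Minimal userspace model of the subpool put-pages convention described
 * in the comments above. Illustrative only, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct subpool {
	long min_hpages;	/* minimum pages the subpool must keep reserved */
	long rsv_hpages;	/* pages currently reserved toward that minimum */
};

/*
 * Return how many of 'delta' freed pages the global pool may reclaim.
 * A return of 0 means the freed page was kept to satisfy the subpool's
 * minimum reservation, i.e. the caller should set restore_reserve.
 */
static long subpool_put_pages(struct subpool *spool, long delta)
{
	long ret = delta;

	if (spool->rsv_hpages < spool->min_hpages) {
		long need = spool->min_hpages - spool->rsv_hpages;
		long kept = need < delta ? need : delta;

		spool->rsv_hpages += kept;
		ret -= kept;
	}
	return ret;
}

int main(void)
{
	/* Subpool one page short of its minimum reservation. */
	struct subpool spool = { .min_hpages = 2, .rsv_hpages = 1 };
	bool restore_reserve = false;

	/* Mirrors the patched free_huge_page() flow: consult the subpool
	 * only when the reservation is not already being restored. */
	if (!restore_reserve) {
		if (subpool_put_pages(&spool, 1) == 0)
			restore_reserve = true;
	}

	/* Prints: restore_reserve=1 rsv_hpages=2 */
	printf("restore_reserve=%d rsv_hpages=%ld\n",
	       (int)restore_reserve, spool.rsv_hpages);
	return 0;
}

In this model, the point of the patch is visible in main(): when restore_reserve is already set, the subpool is never consulted, so a page whose reservation is being restored cannot also be counted against the subpool's minimum.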