author	Gerald Schaefer <gerald.schaefer@de.ibm.com>	2016-10-07 20:01:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-07 21:46:29 -0400
commit	082d5b6b60e9f25e1511557fcfcb21eedd267446 (patch)
tree	96193389b0c0ec724e7cb8ae8c34270642fbd15b /mm
parent	2247bb335ab9c40058484cac36ea74ee652f3b7b (diff)
mm/hugetlb: check for reserved hugepages during memory offline
In dissolve_free_huge_pages(), free hugepages will be dissolved without
making sure that there are enough of them left to satisfy hugepage
reservations.

Fix this by adding a return value to dissolve_free_huge_pages() and
checking h->free_huge_pages vs. h->resv_huge_pages. Note that this may
lead to the situation where dissolve_free_huge_page() returns an error
and all free hugepages that were dissolved before that error are lost,
while the memory block still cannot be set offline.

Fixes: c8721bbb ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
Link: http://lkml.kernel.org/r/20160926172811.94033-3-gerald.schaefer@de.ibm.com
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Rui Teng <rui.teng@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
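As an aside for readers of the log, the accounting rule the patch enforces can be illustrated outside the kernel. Below is a minimal user-space C sketch, not the kernel code; the struct and helper names are invented for the example. It models only the two counters the patch compares: a free hugepage may be dissolved only while the free count stays above the reserved count, otherwise the operation reports busy (the kernel returns -EBUSY).

#include <stdio.h>

/* Toy model of a hugepage pool: just the two counters the patch compares. */
struct pool {
	unsigned long free_huge_pages;	/* pages sitting on the free list */
	unsigned long resv_huge_pages;	/* pages promised to reservations */
};

/*
 * Mirror of the patch's rule: refuse to dissolve a free hugepage if doing
 * so would leave fewer free pages than outstanding reservations.
 */
static int try_dissolve(struct pool *p)
{
	if (p->free_huge_pages - p->resv_huge_pages == 0)
		return -1;	/* the kernel returns -EBUSY at this point */
	p->free_huge_pages--;	/* page handed back to the buddy allocator */
	return 0;
}

int main(void)
{
	struct pool p = { .free_huge_pages = 2, .resv_huge_pages = 1 };

	/* First dissolve succeeds: 2 free pages vs. 1 reservation. */
	printf("dissolve #1: %s\n", try_dissolve(&p) ? "busy" : "ok");
	/* Second dissolve is refused: the last free page backs the reservation. */
	printf("dissolve #2: %s\n", try_dissolve(&p) ? "busy" : "ok");
	return 0;
}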
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	26
-rw-r--r--	mm/memory_hotplug.c	4
2 files changed, 24 insertions(+), 6 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 603bdd01ec2c..91ae1f567997 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1437,22 +1437,32 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 
 /*
  * Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use (including surplus) hugepages.
+ * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
+ * number of free hugepages would be reduced below the number of reserved
+ * hugepages.
  */
-static void dissolve_free_huge_page(struct page *page)
+static int dissolve_free_huge_page(struct page *page)
 {
+	int rc = 0;
+
 	spin_lock(&hugetlb_lock);
 	if (PageHuge(page) && !page_count(page)) {
 		struct page *head = compound_head(page);
 		struct hstate *h = page_hstate(head);
 		int nid = page_to_nid(head);
+		if (h->free_huge_pages - h->resv_huge_pages == 0) {
+			rc = -EBUSY;
+			goto out;
+		}
 		list_del(&head->lru);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
 		h->max_huge_pages--;
 		update_and_free_page(h, head);
 	}
+out:
 	spin_unlock(&hugetlb_lock);
+	return rc;
 }
 
 /*
@@ -1460,16 +1470,22 @@ static void dissolve_free_huge_page(struct page *page)
  * make specified memory blocks removable from the system.
  * Note that this will dissolve a free gigantic hugepage completely, if any
  * part of it lies within the given range.
+ * Also note that if dissolve_free_huge_page() returns with an error, all
+ * free hugepages that were dissolved before that error are lost.
  */
-void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
+	int rc = 0;
 
 	if (!hugepages_supported())
-		return;
+		return rc;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
-		dissolve_free_huge_page(pfn_to_page(pfn));
+		if (rc = dissolve_free_huge_page(pfn_to_page(pfn)))
+			break;
+
+	return rc;
 }
 
 /*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9d29ba0f7192..962927309b6e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1945,7 +1945,9 @@ repeat:
 	 * dissolve free hugepages in the memory block before doing offlining
 	 * actually in order to make hugetlbfs's object counting consistent.
 	 */
-	dissolve_free_huge_pages(start_pfn, end_pfn);
+	ret = dissolve_free_huge_pages(start_pfn, end_pfn);
+	if (ret)
+		goto failed_removal;
 	/* check again */
 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
 	if (offlined_pages < 0) {