author    Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>    2018-08-23 20:00:38 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-08-23 21:48:43 -0400
commit    6bc9b56433b76e40d11099338d27fbc5cd2935ca (patch)
tree      3bfb85e2dc5f4a9367c720313c1ea9f4f4331c93
parent    30aba6656f61ed44cba445a3c0d38b296fa9e8f5 (diff)
mm: fix race on soft-offlining free huge pages
Patch series "mm: soft-offline: fix race against page allocation". Xishi recently reported the issue about race on reusing the target pages of soft offlining. Discussion and analysis showed that we need make sure that setting PG_hwpoison should be done in the right place under zone->lock for soft offline. 1/2 handles free hugepage's case, and 2/2 hanldes free buddy page's case. This patch (of 2): There's a race condition between soft offline and hugetlb_fault which causes unexpected process killing and/or hugetlb allocation failure. The process killing is caused by the following flow: CPU 0 CPU 1 CPU 2 soft offline get_any_page // find the hugetlb is free mmap a hugetlb file page fault ... hugetlb_fault hugetlb_no_page alloc_huge_page // succeed soft_offline_free_page // set hwpoison flag mmap the hugetlb file page fault ... hugetlb_fault hugetlb_no_page find_lock_page return VM_FAULT_HWPOISON mm_fault_error do_sigbus // kill the process The hugetlb allocation failure comes from the following flow: CPU 0 CPU 1 mmap a hugetlb file // reserve all free page but don't fault-in soft offline get_any_page // find the hugetlb is free soft_offline_free_page // set hwpoison flag dissolve_free_huge_page // fail because all free hugepages are reserved page fault ... hugetlb_fault hugetlb_no_page alloc_huge_page ... dequeue_huge_page_node_exact // ignore hwpoisoned hugepage // and finally fail due to no-mem The root cause of this is that current soft-offline code is written based on an assumption that PageHWPoison flag should be set at first to avoid accessing the corrupted data. This makes sense for memory_failure() or hard offline, but does not for soft offline because soft offline is about corrected (not uncorrected) error and is safe from data lost. This patch changes soft offline semantics where it sets PageHWPoison flag only after containment of the error page completes successfully. Link: http://lkml.kernel.org/r/1531452366-11661-2-git-send-email-n-horiguchi@ah.jp.nec.com Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Reported-by: Xishi Qiu <xishi.qiuxishi@alibaba-inc.com> Suggested-by: Xishi Qiu <xishi.qiuxishi@alibaba-inc.com> Tested-by: Mike Kravetz <mike.kravetz@oracle.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: <zy.zhengyi@alibaba-inc.com> Cc: Mike Kravetz <mike.kravetz@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/hugetlb.c         11
-rw-r--r--  mm/memory-failure.c  22
-rw-r--r--  mm/migrate.c          2
3 files changed, 21 insertions, 14 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 47566bb0b4b1..9f1c853f67b5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1479,22 +1479,20 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 /*
  * Dissolve a given free hugepage into free buddy pages. This function does
  * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
- * number of free hugepages would be reduced below the number of reserved
- * hugepages.
+ * dissolution fails because a give page is not a free hugepage, or because
+ * free hugepages are fully reserved.
  */
 int dissolve_free_huge_page(struct page *page)
 {
-	int rc = 0;
+	int rc = -EBUSY;
 
 	spin_lock(&hugetlb_lock);
 	if (PageHuge(page) && !page_count(page)) {
 		struct page *head = compound_head(page);
 		struct hstate *h = page_hstate(head);
 		int nid = page_to_nid(head);
-		if (h->free_huge_pages - h->resv_huge_pages == 0) {
-			rc = -EBUSY;
+		if (h->free_huge_pages - h->resv_huge_pages == 0)
 			goto out;
-		}
 		/*
 		 * Move PageHWPoison flag from head page to the raw error page,
 		 * which makes any subpages rather than the error page reusable.
@@ -1508,6 +1506,7 @@ int dissolve_free_huge_page(struct page *page)
1508 h->free_huge_pages_node[nid]--; 1506 h->free_huge_pages_node[nid]--;
1509 h->max_huge_pages--; 1507 h->max_huge_pages--;
1510 update_and_free_page(h, head); 1508 update_and_free_page(h, head);
1509 rc = 0;
1511 } 1510 }
1512out: 1511out:
1513 spin_unlock(&hugetlb_lock); 1512 spin_unlock(&hugetlb_lock);
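As a side note on the hunk above: the observable change is that dissolve_free_huge_page() now starts from -EBUSY and returns 0 only once the page has actually been dissolved, so callers can key follow-up work (such as setting PG_hwpoison) on the return value. Below is a rough, compilable model of that contract, with an invented fake_hugepage type standing in for struct page and the hugetlb counters (the real function does all of this under hugetlb_lock):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_hugepage {
	bool is_free_hugepage;  /* free and unreferenced */
	bool fully_reserved;    /* models free_huge_pages == resv_huge_pages */
};

/* Mirrors the patched contract: -EBUSY unless the page was really dissolved. */
static int dissolve_free_hugepage_model(struct fake_hugepage *p)
{
	int rc = -EBUSY;

	if (!p->is_free_hugepage || p->fully_reserved)
		goto out;
	/* ...move the poison flag and release the page from the pool... */
	rc = 0;
out:
	return rc;
}

int main(void)
{
	struct fake_hugepage reserved  = { .is_free_hugepage = true, .fully_reserved = true  };
	struct fake_hugepage available = { .is_free_hugepage = true, .fully_reserved = false };

	printf("reserved:  %d\n", dissolve_free_hugepage_model(&reserved));   /* -EBUSY */
	printf("available: %d\n", dissolve_free_hugepage_model(&available));  /* 0 */
	return 0;
}

The memory-failure.c hunk below relies on exactly this: it calls dissolve_free_huge_page() first and marks the page hwpoisoned only when that call returned 0.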
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c83a1746812f..49dc32c61137 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1598,8 +1598,18 @@ static int soft_offline_huge_page(struct page *page, int flags)
1598 if (ret > 0) 1598 if (ret > 0)
1599 ret = -EIO; 1599 ret = -EIO;
1600 } else { 1600 } else {
1601 if (PageHuge(page)) 1601 /*
1602 dissolve_free_huge_page(page); 1602 * We set PG_hwpoison only when the migration source hugepage
1603 * was successfully dissolved, because otherwise hwpoisoned
1604 * hugepage remains on free hugepage list, then userspace will
1605 * find it as SIGBUS by allocation failure. That's not expected
1606 * in soft-offlining.
1607 */
1608 ret = dissolve_free_huge_page(page);
1609 if (!ret) {
1610 if (set_hwpoison_free_buddy_page(page))
1611 num_poisoned_pages_inc();
1612 }
1603 } 1613 }
1604 return ret; 1614 return ret;
1605} 1615}
@@ -1715,13 +1725,13 @@ static int soft_offline_in_use_page(struct page *page, int flags)
 
 static void soft_offline_free_page(struct page *page)
 {
+	int rc = 0;
 	struct page *head = compound_head(page);
 
-	if (!TestSetPageHWPoison(head)) {
+	if (PageHuge(head))
+		rc = dissolve_free_huge_page(page);
+	if (!rc && !TestSetPageHWPoison(page))
 		num_poisoned_pages_inc();
-		if (PageHuge(head))
-			dissolve_free_huge_page(page);
-	}
 }
 
 /**
diff --git a/mm/migrate.c b/mm/migrate.c
index c27e97b5b69d..91a99457127c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1331,8 +1331,6 @@ put_anon:
 out:
 	if (rc != -EAGAIN)
 		putback_active_hugepage(hpage);
-	if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage))
-		num_poisoned_pages_inc();
 
 	/*
 	 * If migration was not successful and there's a freeing callback, use