author	Gerald Schaefer <gerald.schaefer@de.ibm.com>	2016-10-07 20:01:13 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-07 21:46:29 -0400
commit	eb03aa008561004257900983193d024e57abdd96 (patch)
tree	96080a7d5c80e9c5ffbb8072f5f90abc84afbb6e /mm
parent	082d5b6b60e9f25e1511557fcfcb21eedd267446 (diff)
mm/hugetlb: improve locking in dissolve_free_huge_pages()
For every pfn aligned to minimum_order, dissolve_free_huge_pages() will call
dissolve_free_huge_page() which takes the hugetlb spinlock, even if the page
is not huge at all or a hugepage that is in-use.

Improve this by doing the PageHuge() and page_count() checks already in
dissolve_free_huge_pages() before calling dissolve_free_huge_page().  In
dissolve_free_huge_page(), when holding the spinlock, those checks need to
be revalidated.

Link: http://lkml.kernel.org/r/20160926172811.94033-4-gerald.schaefer@de.ibm.com
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Rui Teng <rui.teng@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
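For illustration only, a minimal sketch of the revalidation half of the pattern the
message describes — the callee's body is not part of this patch, so everything below
except the hugetlb_lock spinlock name and the PageHuge()/page_count() checks is an
assumption about its shape, with the actual freeing logic elided:

static int dissolve_free_huge_page(struct page *page)
{
	int rc = 0;

	spin_lock(&hugetlb_lock);
	/*
	 * The caller's unlocked PageHuge()/page_count() checks can race
	 * with concurrent allocation or freeing, so they only filter;
	 * the authoritative test must be redone here, under hugetlb_lock.
	 */
	if (PageHuge(page) && !page_count(page)) {
		/* ... unqueue the free hugepage and release it ... */
	}
	spin_unlock(&hugetlb_lock);
	return rc;
}

The caller-side checks are best-effort: they may spuriously pass or fail, which is
harmless because the decision that matters is made under the lock; their value is
skipping the lock acquisition for the common case of pfns that are not free hugepages.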
Diffstat (limited to 'mm')
 mm/hugetlb.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 91ae1f567997..770d83eb3f48 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1476,14 +1476,20 @@ out:
 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
+	struct page *page;
 	int rc = 0;
 
 	if (!hugepages_supported())
 		return rc;
 
-	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
-		if (rc = dissolve_free_huge_page(pfn_to_page(pfn)))
-			break;
+	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
+		page = pfn_to_page(pfn);
+		if (PageHuge(page) && !page_count(page)) {
+			rc = dissolve_free_huge_page(page);
+			if (rc)
+				break;
+		}
+	}
 
 	return rc;
 }