author	Dave Hansen <dave.hansen@linux.intel.com>	2015-11-05 21:50:20 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-05 22:34:48 -0500
commit	e0ec90ee7e6f6cbaa6d59ffb48d2a7af5e80e61d (patch)
tree	2e48efdf409a59c0853f7a11285dcfcd186403c4 /mm
parent	099730d67417dfee273e9b10ac2560ca7fac7eb9 (diff)
mm, hugetlbfs: optimize when NUMA=n
My recent patch "mm, hugetlb: use memory policy when available" added some
bloat to hugetlb.o.  This patch aims to get some of the bloat back,
especially when NUMA is not in play.

It does this with an implicit #ifdef and marking some things static that
should have been static in my first patch.  It also makes the warnings
only VM_WARN_ON()s.  They were responsible for a pretty big chunk of the
bloat.

Doing this gets our NUMA=n text size back to a wee bit _below_ where we
started before the original patch.  It also shaves a bit of space off the
NUMA=y case, but not much.

Enforcing the mempolicy definitely takes some text and it's hard to
avoid.

size(1) output:

    text	   data	    bss	    dec	    hex	filename
   30745	   3433	   2492	  36670	   8f3e	hugetlb.o.nonuma.baseline
   31305	   3755	   2492	  37552	   92b0	hugetlb.o.nonuma.patch1
   30713	   3433	   2492	  36638	   8f1e	hugetlb.o.nonuma.patch2 (this patch)
   25235	    473	  41276	  66984	  105a8	hugetlb.o.numa.baseline
   25715	    475	  41276	  67466	  1078a	hugetlb.o.numa.patch1
   25491	    473	  41276	  67240	  106a8	hugetlb.o.numa.patch2 (this patch)

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
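The "implicit #ifdef" is the IS_ENABLED(CONFIG_NUMA) test added in the
first hunk below.  Here is a minimal standalone sketch of how that trick
works; the real macro lives in include/linux/kconfig.h and also handles
tristate (=m) options, so this cut-down copy is for illustration only:

	#include <stdio.h>

	/* Stand-in for the Kconfig-generated autoconf.h; remove this
	 * line to simulate a CONFIG_NUMA=n build. */
	#define CONFIG_NUMA 1

	/* Cut-down IS_ENABLED(): expands to the integer constant 1 when
	 * the option is #defined to 1, and to 0 otherwise, so it can sit
	 * inside an ordinary if() instead of a preprocessor #ifdef. */
	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define __is_defined(x) ___is_defined(x)
	#define IS_ENABLED(option) __is_defined(option)

	struct vm_area_struct;	/* opaque here; only the pointer matters */

	static const char *pick_path(struct vm_area_struct *vma)
	{
		/*
		 * Mirrors the patch's test: when CONFIG_NUMA is not
		 * defined, IS_ENABLED(CONFIG_NUMA) folds to 0, the
		 * condition is always true, and the compiler discards
		 * the mempolicy branch below as dead code.
		 */
		if (!IS_ENABLED(CONFIG_NUMA) || !vma)
			return "no-mempolicy path";
		return "mempolicy path (reachable only when CONFIG_NUMA=y)";
	}

	int main(void)
	{
		puts(pick_path(NULL));
		return 0;
	}

Unlike a literal #ifdef, both branches are still parsed and type-checked
in every configuration; only the generated code changes.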
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 899f6a81e77a..241de2712b36 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1455,9 +1455,14 @@ static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
 
 	/*
 	 * We need a VMA to get a memory policy.  If we do not
-	 * have one, we use the 'nid' argument
+	 * have one, we use the 'nid' argument.
+	 *
+	 * The mempolicy stuff below has some non-inlined bits
+	 * and calls ->vm_ops.  That makes it hard to optimize at
+	 * compile-time, even when NUMA is off and it does
+	 * nothing.  This helps the compiler optimize it out.
 	 */
-	if (!vma) {
+	if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
 		/*
 		 * If a specific node is requested, make sure to
 		 * get memory from there, but only when a node
@@ -1474,7 +1479,8 @@ static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
 
 	/*
 	 * OK, so we have a VMA.  Fetch the mempolicy and try to
-	 * allocate a huge page with it.
+	 * allocate a huge page with it.  We will only reach this
+	 * when CONFIG_NUMA=y.
 	 */
 	do {
 		struct page *page;
@@ -1520,8 +1526,8 @@ static struct page *__alloc_buddy_huge_page(struct hstate *h,
 	 * we can call this function, not both.
 	 */
 	if (vma || (addr != -1)) {
-		WARN_ON_ONCE(addr == -1);
-		WARN_ON_ONCE(nid != NUMA_NO_NODE);
+		VM_WARN_ON_ONCE(addr == -1);
+		VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
 	}
 	/*
 	 * Assume we will successfully allocate the surplus page to
@@ -1585,6 +1591,7 @@ static struct page *__alloc_buddy_huge_page(struct hstate *h,
 	 * NUMA_NO_NODE, which means that it may be allocated
 	 * anywhere.
 	 */
+static
 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
 {
 	unsigned long addr = -1;
@@ -1595,6 +1602,7 @@ struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
 /*
  * Use the VMA's mpolicy to allocate a huge page from the buddy.
  */
+static
 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
 	struct vm_area_struct *vma, unsigned long addr)
 {
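On the WARN_ON_ONCE() -> VM_WARN_ON_ONCE() hunk above: the VM_WARN_ON*()
variants in include/linux/mmdebug.h fall back to a no-op that only
type-checks their argument when CONFIG_DEBUG_VM is unset, while plain
WARN_ON_ONCE() always emits the test and its once-latch.  A rough
userspace sketch of the difference (the real macros also dump a stack
trace and use unlikely() hints):

	#include <stdio.h>

	/* Simplified WARN_ON_ONCE(): the test and once-latch are always
	 * compiled in.  Uses the GCC statement-expression extension,
	 * like the kernel. */
	#define WARN_ON_ONCE(cond) ({					\
		static int __warned;					\
		int __ret = !!(cond);					\
		if (__ret && !__warned) {				\
			__warned = 1;					\
			fprintf(stderr, "WARNING: %s:%d\n",		\
				__FILE__, __LINE__);			\
		}							\
		__ret;							\
	})

	#ifdef CONFIG_DEBUG_VM
	#define VM_WARN_ON_ONCE(cond)	WARN_ON_ONCE(cond)
	#else
	/* Like the kernel's BUILD_BUG_ON_INVALID(): 'cond' is parsed
	 * and type-checked, but sizeof() generates no code for it. */
	#define VM_WARN_ON_ONCE(cond)	((void)sizeof((long)(cond)))
	#endif

	int main(void)
	{
		long addr = -1;

		VM_WARN_ON_ONCE(addr == -1);	/* gone if CONFIG_DEBUG_VM=n */
		WARN_ON_ONCE(addr == -1);	/* always present */
		return 0;
	}

The two static annotations in the last hunks serve the same goal at the
function level: with internal linkage the compiler sees every caller in
hugetlb.c and is free to inline the functions and drop the out-of-line
copies rather than keep them for external linkage.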