diff options
-rw-r--r--  mm/hugetlb.c  18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 899f6a81e77a..241de2712b36 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1455,9 +1455,14 @@ static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
 
 	/*
 	 * We need a VMA to get a memory policy.  If we do not
-	 * have one, we use the 'nid' argument
+	 * have one, we use the 'nid' argument.
+	 *
+	 * The mempolicy stuff below has some non-inlined bits
+	 * and calls ->vm_ops.  That makes it hard to optimize at
+	 * compile-time, even when NUMA is off and it does
+	 * nothing.  This helps the compiler optimize it out.
 	 */
-	if (!vma) {
+	if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
 		/*
 		 * If a specific node is requested, make sure to
 		 * get memory from there, but only when a node
@@ -1474,7 +1479,8 @@ static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
 
 	/*
 	 * OK, so we have a VMA.  Fetch the mempolicy and try to
-	 * allocate a huge page with it.
+	 * allocate a huge page with it.  We will only reach this
+	 * when CONFIG_NUMA=y.
 	 */
 	do {
 		struct page *page;
@@ -1520,8 +1526,8 @@ static struct page *__alloc_buddy_huge_page(struct hstate *h,
 	 * we can call this function, not both.
 	 */
 	if (vma || (addr != -1)) {
-		WARN_ON_ONCE(addr == -1);
-		WARN_ON_ONCE(nid != NUMA_NO_NODE);
+		VM_WARN_ON_ONCE(addr == -1);
+		VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
 	}
 	/*
 	 * Assume we will successfully allocate the surplus page to
@@ -1585,6 +1591,7 @@ static struct page *__alloc_buddy_huge_page(struct hstate *h,
 	 * NUMA_NO_NODE, which means that it may be allocated
 	 * anywhere.
 	 */
+static
 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
 {
 	unsigned long addr = -1;
@@ -1595,6 +1602,7 @@ struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
 	/*
 	 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 	 */
+static
 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
 	struct vm_area_struct *vma, unsigned long addr)
 {