author	Vlastimil Babka <vbabka@suse.cz>	2017-07-06 18:40:03 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-06 19:24:34 -0400
commit	04ec6264f28793e56114d0a367bb4d3af667ab6a (patch)
tree	b20688e500f325ad1aab00398a56da78bcc8b911 /mm
parent	45816682b2cd6771cf63cb7dc7dbebdd827a0132 (diff)
mm, page_alloc: pass preferred nid instead of zonelist to allocator
The main allocator function __alloc_pages_nodemask() takes a zonelist
pointer as one of its parameters.  All of its callers directly or
indirectly obtain the zonelist via node_zonelist() using a preferred node
id and gfp_mask.  We can make the code a bit simpler by doing the zonelist
lookup in __alloc_pages_nodemask(), passing it a preferred node id instead
(gfp_mask is already another parameter).

There are some code size benefits thanks to removal of inlined
node_zonelist():

bloat-o-meter add/remove: 2/2 grow/shrink: 4/36 up/down: 399/-1351 (-952)

This will also make things simpler if we proceed with converting cpusets
to zonelists.

Link: http://lkml.kernel.org/r/20170517081140.30654-4-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Christoph Lameter <cl@linux.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
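For orientation, here is the interface change in before/after form, a
minimal sketch reconstructed from the mm/page_alloc.c and
mm/memory_hotplug.c hunks below (the caller-side variable names are
illustrative):

	/* before: every caller looked up the zonelist itself */
	struct page *__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
					    struct zonelist *zonelist,
					    nodemask_t *nodemask);
	page = __alloc_pages_nodemask(gfp, order,
				      node_zonelist(nid, gfp), nmask);

	/* after: callers pass the preferred node id and the allocator
	 * performs the node_zonelist() lookup internally */
	struct page *__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
					    int preferred_nid,
					    nodemask_t *nodemask);
	page = __alloc_pages_nodemask(gfp, order, nid, nmask);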
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	15
-rw-r--r--	mm/memory_hotplug.c	6
-rw-r--r--	mm/mempolicy.c	41
-rw-r--r--	mm/page_alloc.c	10
4 files changed, 35 insertions(+), 37 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 51d31352a5bf..1a88006ec634 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -920,6 +920,8 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 	struct page *page = NULL;
 	struct mempolicy *mpol;
 	nodemask_t *nodemask;
+	gfp_t gfp_mask;
+	int nid;
 	struct zonelist *zonelist;
 	struct zone *zone;
 	struct zoneref *z;
@@ -940,12 +942,13 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
-	zonelist = huge_zonelist(vma, address,
-					htlb_alloc_mask(h), &mpol, &nodemask);
+	gfp_mask = htlb_alloc_mask(h);
+	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+	zonelist = node_zonelist(nid, gfp_mask);
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						MAX_NR_ZONES - 1, nodemask) {
-		if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
+		if (cpuset_zone_allowed(zone, gfp_mask)) {
 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
 			if (page) {
 				if (avoid_reserve)
@@ -1558,13 +1561,13 @@ static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
 	do {
 		struct page *page;
 		struct mempolicy *mpol;
-		struct zonelist *zl;
+		int nid;
 		nodemask_t *nodemask;
 
 		cpuset_mems_cookie = read_mems_allowed_begin();
-		zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
+		nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
 		mpol_cond_put(mpol);
-		page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
+		page = __alloc_pages_nodemask(gfp, order, nid, nodemask);
 		if (page)
 			return page;
 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
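The hugetlb conversion splits the old huge_zonelist() into a huge_node()
lookup plus, where a zonelist is still needed for zone iteration, an
explicit node_zonelist() call.  A condensed sketch of the resulting
pattern, using the identifiers from the hunks above:

	gfp_t gfp_mask = htlb_alloc_mask(h);	/* hugetlb GFP flags */
	/* huge_node() returns the preferred nid and sets *mpol (refcounted)
	 * and, for MPOL_BIND policies, *nodemask */
	int nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	/* dequeue_huge_page_vma() still iterates zones, so it rebuilds the
	 * zonelist; __hugetlb_alloc_buddy_huge_page() passes nid directly
	 * to __alloc_pages_nodemask() instead */
	struct zonelist *zonelist = node_zonelist(nid, gfp_mask);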
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e4fdb97b6ef2..9ac997b8f2a6 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1459,11 +1459,9 @@ static struct page *new_node_page(struct page *page, unsigned long private,
 		gfp_mask |= __GFP_HIGHMEM;
 
 	if (!nodes_empty(nmask))
-		new_page = __alloc_pages_nodemask(gfp_mask, 0,
-					node_zonelist(nid, gfp_mask), &nmask);
+		new_page = __alloc_pages_nodemask(gfp_mask, 0, nid, &nmask);
 	if (!new_page)
-		new_page = __alloc_pages(gfp_mask, 0,
-					node_zonelist(nid, gfp_mask));
+		new_page = __alloc_pages(gfp_mask, 0, nid);
 
 	return new_page;
 }
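Note that __alloc_pages() now also takes a nid; its definition lives in
include/linux/gfp.h, which is outside this mm-limited diffstat.
Presumably it becomes a thin nodemask-less wrapper along these lines (a
sketch of the companion header change, not part of this diff):

	static inline struct page *
	__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
	{
		return __alloc_pages_nodemask(gfp_mask, order, preferred_nid,
					      NULL);
	}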
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d77177c7283b..c60807625fd5 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1669,9 +1669,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 	return NULL;
 }
 
-/* Return a zonelist indicated by gfp for node representing a mempolicy */
-static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
-	int nd)
+/* Return the node id preferred by the given mempolicy, or the given id */
+static int policy_node(gfp_t gfp, struct mempolicy *policy,
+								int nd)
 {
 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
 		nd = policy->v.preferred_node;
@@ -1684,7 +1684,7 @@ static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
 	}
 
-	return node_zonelist(nd, gfp);
+	return nd;
 }
 
 /* Do dynamic interleaving for a process */
@@ -1791,38 +1791,37 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
 
 #ifdef CONFIG_HUGETLBFS
 /*
- * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
+ * huge_node(@vma, @addr, @gfp_flags, @mpol)
  * @vma: virtual memory area whose policy is sought
  * @addr: address in @vma for shared policy lookup and interleave policy
  * @gfp_flags: for requested zone
  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
  *
- * Returns a zonelist suitable for a huge page allocation and a pointer
+ * Returns a nid suitable for a huge page allocation and a pointer
  * to the struct mempolicy for conditional unref after allocation.
  * If the effective policy is 'BIND, returns a pointer to the mempolicy's
  * @nodemask for filtering the zonelist.
  *
  * Must be protected by read_mems_allowed_begin()
  */
-struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
-				gfp_t gfp_flags, struct mempolicy **mpol,
-				nodemask_t **nodemask)
+int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
+				struct mempolicy **mpol, nodemask_t **nodemask)
 {
-	struct zonelist *zl;
+	int nid;
 
 	*mpol = get_vma_policy(vma, addr);
 	*nodemask = NULL;	/* assume !MPOL_BIND */
 
 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
-		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
-				huge_page_shift(hstate_vma(vma))), gfp_flags);
+		nid = interleave_nid(*mpol, vma, addr,
+					huge_page_shift(hstate_vma(vma)));
 	} else {
-		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
+		nid = policy_node(gfp_flags, *mpol, numa_node_id());
 		if ((*mpol)->mode == MPOL_BIND)
 			*nodemask = &(*mpol)->v.nodes;
 	}
-	return zl;
+	return nid;
 }
 
 /*
@@ -1924,12 +1923,10 @@ out:
 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 					unsigned nid)
 {
-	struct zonelist *zl;
 	struct page *page;
 
-	zl = node_zonelist(nid, gfp);
-	page = __alloc_pages(gfp, order, zl);
-	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
+	page = __alloc_pages(gfp, order, nid);
+	if (page && page_to_nid(page) == nid)
 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
 	return page;
 }
@@ -1963,8 +1960,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 {
 	struct mempolicy *pol;
 	struct page *page;
+	int preferred_nid;
 	unsigned int cpuset_mems_cookie;
-	struct zonelist *zl;
 	nodemask_t *nmask;
 
 retry_cpuset:
@@ -2007,8 +2004,8 @@ retry_cpuset:
 	}
 
 	nmask = policy_nodemask(gfp, pol);
-	zl = policy_zonelist(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+	preferred_nid = policy_node(gfp, pol, node);
+	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
@@ -2055,7 +2052,7 @@ retry_cpuset:
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
 		page = __alloc_pages_nodemask(gfp, order,
-				policy_zonelist(gfp, pol, numa_node_id()),
+				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));
 
 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
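After the conversion, every mempolicy-driven allocation follows the same
two-call pattern: policy_node() resolves the preferred nid (honouring
MPOL_PREFERRED) and policy_nodemask() supplies the MPOL_BIND filter, as in
alloc_pages_vma() and alloc_pages_current() above:

	page = __alloc_pages_nodemask(gfp, order,
			policy_node(gfp, pol, numa_node_id()),
			policy_nodemask(gfp, pol));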
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 019af778ec55..8aa860017d66 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3980,12 +3980,12 @@ got_pg:
 }
 
 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
-		struct zonelist *zonelist, nodemask_t *nodemask,
+		int preferred_nid, nodemask_t *nodemask,
 		struct alloc_context *ac, gfp_t *alloc_mask,
 		unsigned int *alloc_flags)
 {
 	ac->high_zoneidx = gfp_zone(gfp_mask);
-	ac->zonelist = zonelist;
+	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
 	ac->nodemask = nodemask;
 	ac->migratetype = gfpflags_to_migratetype(gfp_mask);
 
@@ -4030,8 +4030,8 @@ static inline void finalise_ac(gfp_t gfp_mask,
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-			struct zonelist *zonelist, nodemask_t *nodemask)
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
+			nodemask_t *nodemask)
 {
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -4039,7 +4039,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct alloc_context ac = { };
 
 	gfp_mask &= gfp_allowed_mask;
-	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
+	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
 		return NULL;
 
 	finalise_ac(gfp_mask, order, &ac);
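With this hunk the node_zonelist() lookup happens exactly once, inside
prepare_alloc_pages().  For reference, node_zonelist() itself (defined in
include/linux/gfp.h and untouched by this patch) should be just an index
into the node's pre-built zonelist array, which is why centralising the
lookup costs nothing; a sketch from memory, exact form may differ:

	static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
	{
		/* picks ZONELIST_FALLBACK or ZONELIST_NOFALLBACK
		 * depending on __GFP_THISNODE */
		return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
	}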