author     David Rientjes <rientjes@google.com>          2014-04-07 18:37:29 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-07 19:35:54 -0400
commit     2a389610a7331d22344698f23ef2e8c55b2cde7b (patch)
tree       bfb35d93c238bb25c5ffed0f66aabc750925c556
parent     514ddb446c5c5a238eca32b7052b7a8accae4e93 (diff)
mm, mempolicy: rename slab_node for clarity
slab_node() is actually a mempolicy function, so rename it to mempolicy_slab_node() to make it clearer that it is used for processes with mempolicies.

At the same time, clean up its code by saving numa_mem_id() in a local variable (since we require a node with memory, not just any node) and remove an obsolete comment that assumes the mempolicy is actually passed into the function.

Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Tim Hockin <thockin@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
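For context, a minimal, hypothetical caller sketch (not part of this patch; pick_slab_node() is an invented name, and the usual kernel headers such as <linux/mempolicy.h> and <linux/topology.h> are assumed): it shows roughly how the slab allocators choose an allocation node after this rename, asking mempolicy_slab_node() only when the current task actually has a mempolicy and otherwise falling back to the nearest node with memory.

/*
 * Hypothetical sketch, not part of the patch: pick a node for a slab
 * allocation roughly the way mm/slab.c does after this rename.
 */
static int pick_slab_node(void)
{
	/* Tasks with a mempolicy get a policy-driven node. */
	if (current->mempolicy)
		return mempolicy_slab_node();

	/* Otherwise use the nearest node that actually has memory. */
	return numa_mem_id();
}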
-rw-r--r--  include/linux/mempolicy.h   2
-rw-r--r--  mm/mempolicy.c             15
-rw-r--r--  mm/slab.c                   4
-rw-r--r--  mm/slub.c                   2
4 files changed, 10 insertions, 13 deletions
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5f1ea756aace..cfe55dfca015 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -151,7 +151,7 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
 				const nodemask_t *mask);
-extern unsigned slab_node(void);
+extern unsigned int mempolicy_slab_node(void);
 
 extern enum zone_type policy_zone;
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e3ab02822799..0ad0ba31979f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1782,21 +1782,18 @@ static unsigned interleave_nodes(struct mempolicy *policy)
 /*
  * Depending on the memory policy provide a node from which to allocate the
  * next slab entry.
- * @policy must be protected by freeing by the caller.  If @policy is
- * the current task's mempolicy, this protection is implicit, as only the
- * task can change it's policy.  The system default policy requires no
- * such protection.
  */
-unsigned slab_node(void)
+unsigned int mempolicy_slab_node(void)
 {
 	struct mempolicy *policy;
+	int node = numa_mem_id();
 
 	if (in_interrupt())
-		return numa_node_id();
+		return node;
 
 	policy = current->mempolicy;
 	if (!policy || policy->flags & MPOL_F_LOCAL)
-		return numa_node_id();
+		return node;
 
 	switch (policy->mode) {
 	case MPOL_PREFERRED:
@@ -1816,11 +1813,11 @@ unsigned slab_node(void)
 		struct zonelist *zonelist;
 		struct zone *zone;
 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
-		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
+		zonelist = &NODE_DATA(node)->node_zonelists[0];
 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
 							&policy->v.nodes,
 							&zone);
-		return zone ? zone->node : numa_node_id();
+		return zone ? zone->node : node;
 	}
 
 	default:
diff --git a/mm/slab.c b/mm/slab.c
index 9153c802e2fe..4b17f4c2e92d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3042,7 +3042,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
 		nid_alloc = cpuset_slab_spread_node();
 	else if (current->mempolicy)
-		nid_alloc = slab_node();
+		nid_alloc = mempolicy_slab_node();
 	if (nid_alloc != nid_here)
 		return ____cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
@@ -3074,7 +3074,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
-	zonelist = node_zonelist(slab_node(), flags);
+	zonelist = node_zonelist(mempolicy_slab_node(), flags);
 
 retry:
 	/*
diff --git a/mm/slub.c b/mm/slub.c
index fe6d7be22ef0..5b05e4fe9a1a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1685,7 +1685,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 
 	do {
 		cpuset_mems_cookie = read_mems_allowed_begin();
-		zonelist = node_zonelist(slab_node(), flags);
+		zonelist = node_zonelist(mempolicy_slab_node(), flags);
 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 			struct kmem_cache_node *n;
 