author	Vladimir Davydov <vdavydov@parallels.com>	2014-10-20 07:50:30 -0400
committer	Tejun Heo <tj@kernel.org>	2014-10-27 11:15:27 -0400
commit	344736f29b359790facd0b7a521e367f1715c11c (patch)
tree	c32487de22e7640a828f28819b5707790ede5105 /mm/page_alloc.c
parent	8447a0fee974433f7e0035fd30e1edecf00e014f (diff)
cpuset: simplify cpuset_node_allowed API
The current cpuset API for checking whether a zone/node is allowed to allocate from looks rather awkward. We have hardwall and softwall versions of cpuset_node_allowed, with the softwall version doing literally the same as the hardwall version if __GFP_HARDWALL is passed to it in the gfp flags. If it isn't, the softwall version may check the given node against the enclosing hardwall cpuset, which requires taking the callback lock.

This distinction was introduced by commit 02a0e53d8227 ("cpuset: rework cpuset_zone_allowed api"). Before that, we had only a single version, with the __GFP_HARDWALL flag determining its behavior. The purpose of that commit was to avoid sleep-in-atomic bugs when someone mistakenly called the function without __GFP_HARDWALL for an atomic allocation; the suffixes were intended to make callers think before using the function.

However, since the callback lock was converted from a mutex to a spinlock by the previous patch, the softwall check function can no longer sleep, and these precautions are no longer necessary. So let's simplify the API back to a single check.

Suggested-by: David Rientjes <rientjes@google.com>
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
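As an illustrative sketch of the consolidation (simplified from the inline wrappers in include/linux/cpuset.h; not the verbatim upstream code):

	/* Before: two entry points whose behavior differed only when
	 * __GFP_HARDWALL was absent from the gfp flags. */
	static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
	{
		return nr_cpusets() <= 1 ||
			__cpuset_node_allowed_hardwall(node, gfp_mask);
	}

	static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
	{
		return nr_cpusets() <= 1 ||
			__cpuset_node_allowed_softwall(node, gfp_mask);
	}

	/* After: a single check; callers pass __GFP_HARDWALL in gfp_mask
	 * to request the stricter hardwall semantics. */
	static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
	{
		return nr_cpusets() <= 1 ||
			__cpuset_node_allowed(node, gfp_mask);
	}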
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9cd36b822444..ab07b496672f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1962,7 +1962,7 @@ zonelist_scan:
 
 	/*
 	 * Scan zonelist, looking for a zone with enough free.
-	 * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
+	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
 	 */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						high_zoneidx, nodemask) {
@@ -1973,7 +1973,7 @@ zonelist_scan:
 			continue;
 		if (cpusets_enabled() &&
 			(alloc_flags & ALLOC_CPUSET) &&
-			!cpuset_zone_allowed_softwall(zone, gfp_mask))
+			!cpuset_zone_allowed(zone, gfp_mask))
 			continue;
 		/*
 		 * Distribute pages in proportion to the individual
@@ -2514,7 +2514,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 		alloc_flags |= ALLOC_HARDER;
 		/*
 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
-		 * comment for __cpuset_node_allowed_softwall().
+		 * comment for __cpuset_node_allowed().
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
 	} else if (unlikely(rt_task(current)) && !in_interrupt())