path: root/mm/vmscan.c
author    Vladimir Davydov <vdavydov@parallels.com>  2014-10-20 07:50:30 -0400
committer Tejun Heo <tj@kernel.org>                  2014-10-27 11:15:27 -0400
commit    344736f29b359790facd0b7a521e367f1715c11c (patch)
tree      c32487de22e7640a828f28819b5707790ede5105 /mm/vmscan.c
parent    8447a0fee974433f7e0035fd30e1edecf00e014f (diff)
cpuset: simplify cpuset_node_allowed API
The current cpuset API for checking whether a zone/node is allowed to
allocate from looks rather awkward. We have hardwall and softwall
versions of cpuset_node_allowed, with the softwall version doing
literally the same as the hardwall version if __GFP_HARDWALL is passed
to it in the gfp flags. If it isn't, the softwall version may check the
given node against the enclosing hardwall cpuset, for which it needs to
take the callback lock.

This distinction was introduced by commit 02a0e53d8227 ("cpuset: rework
cpuset_zone_allowed api"). Before that, we had only one version, with
the __GFP_HARDWALL flag determining its behavior. The purpose of that
commit was to avoid sleep-in-atomic bugs when someone mistakenly called
the function without __GFP_HARDWALL for an atomic allocation. The
suffixes were intended to make callers think before using the function.

However, since the callback lock was converted from a mutex to a
spinlock by the previous patch, the softwall check function can no
longer sleep, and these precautions are no longer necessary.

So let's simplify the API back to a single check.

Suggested-by: David Rientjes <rientjes@google.com>
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
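For illustration, the call sites touched by this patch change as follows
(a condensed sketch of the diff below; surrounding code elided):

	/* Before: a dedicated hardwall entry point. */
	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
		return;

	/* After: a single entry point; passing __GFP_HARDWALL in the gfp
	 * mask requests the hardwall check, preserving the old behavior. */
	if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
		return;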
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dcb47074ae03..38878b2ab1d0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2405,7 +2405,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			 * to global LRU.
 			 */
 			if (global_reclaim(sc)) {
-				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+				if (!cpuset_zone_allowed(zone,
+							 GFP_KERNEL | __GFP_HARDWALL))
 					continue;
 
 				lru_pages += zone_reclaimable_pages(zone);
@@ -3388,7 +3389,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	if (!populated_zone(zone))
 		return;
 
-	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+	if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
 		return;
 	pgdat = zone->zone_pgdat;
 	if (pgdat->kswapd_max_order < order) {