author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 21:57:19 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 21:57:19 -0500
commit    2756d373a3f45a3a9ebf4ac389f9e0e02bd35a93
tree      e248c5adccb3045f96b3cfe0a1ffeb37bb81e4cb /mm
parent    4e8790f77f051d4cc745a57b48a73052521e8dfc
parent    eeecbd1971517103e06f11750dd1a9a1dc37e4e6
Merge branch 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup update from Tejun Heo:
 "cpuset got simplified a bit. cgroup core got a fix on unified
  hierarchy and grew some effective css related interfaces which will
  be used for blkio support for writeback IO traffic which is
  currently being worked on"

* 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cgroup: implement cgroup_get_e_css()
  cgroup: add cgroup_subsys->css_e_css_changed()
  cgroup: add cgroup_subsys->css_released()
  cgroup: fix the async css offline wait logic in cgroup_subtree_control_write()
  cgroup: restructure child_subsys_mask handling in cgroup_subtree_control_write()
  cgroup: separate out cgroup_calc_child_subsys_mask() from cgroup_refresh_child_subsys_mask()
  cpuset: lock vs unlock typo
  cpuset: simplify cpuset_node_allowed API
  cpuset: convert callback_mutex to a spinlock
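The pattern repeated in every hunk below is the caller-side conversion from "cpuset: simplify cpuset_node_allowed API": the old cpuset_zone_allowed_softwall()/cpuset_zone_allowed_hardwall() pair is collapsed into a single cpuset_zone_allowed() whose behaviour is selected by the gfp mask, with __GFP_HARDWALL requesting the strict check. The following is a minimal user-space sketch of that mapping only; the gfp_t typedef, the __GFP_HARDWALL bit value and the stub body are illustrative stand-ins, not the kernel implementation.

/*
 * Illustrative sketch only; gfp_t, __GFP_HARDWALL and the stub below
 * are stand-ins, not the kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_HARDWALL 0x20000u		/* assumed bit value for this sketch */

struct zone;				/* opaque placeholder */

/* Stub modelling the single new entry point that replaces the old pair. */
static bool cpuset_zone_allowed(struct zone *zone, gfp_t gfp_mask)
{
	(void)zone;
	if (gfp_mask & __GFP_HARDWALL)
		return true;		/* placeholder for the strict (hardwall) check */
	return true;			/* placeholder for the relaxed (softwall) check */
}

int main(void)
{
	struct zone *zone = NULL;
	gfp_t gfp_mask = 0;

	/* Before: cpuset_zone_allowed_softwall(zone, gfp_mask) */
	bool soft = cpuset_zone_allowed(zone, gfp_mask);

	/* Before: cpuset_zone_allowed_hardwall(zone, gfp_mask) */
	bool hard = cpuset_zone_allowed(zone, gfp_mask | __GFP_HARDWALL);

	printf("softwall=%d hardwall=%d\n", soft, hard);
	return 0;
}

Carrying the hardwall choice in the gfp mask is why slab.c, slub.c and vmscan.c below simply OR __GFP_HARDWALL into the flags they already pass, while the softwall callers in hugetlb.c, oom_kill.c and page_alloc.c keep their gfp mask unchanged.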
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c     2
-rw-r--r--  mm/oom_kill.c    2
-rw-r--r--  mm/page_alloc.c  6
-rw-r--r--  mm/slab.c        2
-rw-r--r--  mm/slub.c        3
-rw-r--r--  mm/vmscan.c      5
6 files changed, 11 insertions, 9 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 30cd96879152..919b86a2164d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -582,7 +582,7 @@ retry_cpuset:
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						MAX_NR_ZONES - 1, nodemask) {
-		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
+		if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
 			if (page) {
 				if (avoid_reserve)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 3b014d326151..864bba992735 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -233,7 +233,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 	/* Check this allocation failure is caused by cpuset's wall function */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 			high_zoneidx, nodemask)
-		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
+		if (!cpuset_zone_allowed(zone, gfp_mask))
 			cpuset_limited = true;
 
 	if (cpuset_limited) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a7198c065999..df542feaac3b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1990,7 +1990,7 @@ zonelist_scan:
 
 	/*
 	 * Scan zonelist, looking for a zone with enough free.
-	 * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
+	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
 	 */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						high_zoneidx, nodemask) {
@@ -2001,7 +2001,7 @@ zonelist_scan:
 				continue;
 		if (cpusets_enabled() &&
 			(alloc_flags & ALLOC_CPUSET) &&
-			!cpuset_zone_allowed_softwall(zone, gfp_mask))
+			!cpuset_zone_allowed(zone, gfp_mask))
 				continue;
 		/*
 		 * Distribute pages in proportion to the individual
@@ -2529,7 +2529,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 			alloc_flags |= ALLOC_HARDER;
 		/*
 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
-		 * comment for __cpuset_node_allowed_softwall().
+		 * comment for __cpuset_node_allowed().
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
diff --git a/mm/slab.c b/mm/slab.c
index 79e15f0a2a6e..fee275b5b6b7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3015,7 +3015,7 @@ retry:
 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 		nid = zone_to_nid(zone);
 
-		if (cpuset_zone_allowed_hardwall(zone, flags) &&
+		if (cpuset_zone_allowed(zone, flags | __GFP_HARDWALL) &&
 			get_node(cache, nid) &&
 			get_node(cache, nid)->free_objects) {
 				obj = ____cache_alloc_node(cache,
diff --git a/mm/slub.c b/mm/slub.c
index 386bbed76e94..765c5884d03d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1665,7 +1665,8 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 
 		n = get_node(s, zone_to_nid(zone));
 
-		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+		if (n && cpuset_zone_allowed(zone,
+					     flags | __GFP_HARDWALL) &&
 				n->nr_partial > s->min_partial) {
 			object = get_partial_node(s, n, c, flags);
 			if (object) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4636d9e822c1..a384339bf718 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2405,7 +2405,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			 * to global LRU.
 			 */
 			if (global_reclaim(sc)) {
-				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+				if (!cpuset_zone_allowed(zone,
+							 GFP_KERNEL | __GFP_HARDWALL))
 					continue;
 
 				lru_pages += zone_reclaimable_pages(zone);
@@ -3388,7 +3389,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	if (!populated_zone(zone))
 		return;
 
-	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+	if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
 		return;
 	pgdat = zone->zone_pgdat;
 	if (pgdat->kswapd_max_order < order) {