summaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorMichal Hocko <mhocko@suse.com>2017-02-22 18:46:16 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-22 19:41:30 -0500
commit9af744d743170b5f5ef70031dea8d772d166ab28 (patch)
tree87290ffc304288352c66bc9afda0d4214403862c /mm/page_alloc.c
parent6d23f8a5d432337aa2590ea8fd5eee8b0bc28eee (diff)
lib/show_mem.c: teach show_mem to work with the given nodemask
show_mem() allows filtering out node specific data which is irrelevant to the allocation request via SHOW_MEM_FILTER_NODES. The filtering is done in skip_free_areas_node which skips all nodes that are not in the mems_allowed of the current process. This works most of the time as expected because the nodemask shouldn't be outside of the allocating task but there are some exceptions. E.g. memory hotplug might want to request allocations from outside of the allowed nodes (see new_node_page). Get rid of this hardcoded behavior and push the allocation mask down the show_mem path and use it instead of cpuset_current_mems_allowed. NULL nodemask is interpreted as cpuset_current_mems_allowed. [akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/20170117091543.25850-5-mhocko@kernel.org Signed-off-by: Michal Hocko <mhocko@suse.com> Acked-by: Mel Gorman <mgorman@suse.de> Cc: Hillf Danton <hillf.zj@alibaba-inc.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c38
1 files changed, 19 insertions, 19 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 96c8fe602dfb..644fb75f6f24 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3005,7 +3005,7 @@ static inline bool should_suppress_show_mem(void)
3005 return ret; 3005 return ret;
3006} 3006}
3007 3007
3008static void warn_alloc_show_mem(gfp_t gfp_mask) 3008static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3009{ 3009{
3010 unsigned int filter = SHOW_MEM_FILTER_NODES; 3010 unsigned int filter = SHOW_MEM_FILTER_NODES;
3011 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1); 3011 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
@@ -3025,7 +3025,7 @@ static void warn_alloc_show_mem(gfp_t gfp_mask)
3025 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3025 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3026 filter &= ~SHOW_MEM_FILTER_NODES; 3026 filter &= ~SHOW_MEM_FILTER_NODES;
3027 3027
3028 show_mem(filter); 3028 show_mem(filter, nodemask);
3029} 3029}
3030 3030
3031void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 3031void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
@@ -3052,7 +3052,7 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3052 cpuset_print_current_mems_allowed(); 3052 cpuset_print_current_mems_allowed();
3053 3053
3054 dump_stack(); 3054 dump_stack();
3055 warn_alloc_show_mem(gfp_mask); 3055 warn_alloc_show_mem(gfp_mask, nm);
3056} 3056}
3057 3057
3058static inline struct page * 3058static inline struct page *
@@ -4274,20 +4274,20 @@ void si_meminfo_node(struct sysinfo *val, int nid)
4274 * Determine whether the node should be displayed or not, depending on whether 4274 * Determine whether the node should be displayed or not, depending on whether
4275 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 4275 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
4276 */ 4276 */
4277bool skip_free_areas_node(unsigned int flags, int nid) 4277static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
4278{ 4278{
4279 bool ret = false;
4280 unsigned int cpuset_mems_cookie;
4281
4282 if (!(flags & SHOW_MEM_FILTER_NODES)) 4279 if (!(flags & SHOW_MEM_FILTER_NODES))
4283 goto out; 4280 return false;
4284 4281
4285 do { 4282 /*
4286 cpuset_mems_cookie = read_mems_allowed_begin(); 4283 * no node mask - aka implicit memory numa policy. Do not bother with
4287 ret = !node_isset(nid, cpuset_current_mems_allowed); 4284 * the synchronization - read_mems_allowed_begin - because we do not
4288 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 4285 * have to be precise here.
4289out: 4286 */
4290 return ret; 4287 if (!nodemask)
4288 nodemask = &cpuset_current_mems_allowed;
4289
4290 return !node_isset(nid, *nodemask);
4291} 4291}
4292 4292
4293#define K(x) ((x) << (PAGE_SHIFT-10)) 4293#define K(x) ((x) << (PAGE_SHIFT-10))
@@ -4328,7 +4328,7 @@ static void show_migration_types(unsigned char type)
4328 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 4328 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
4329 * cpuset. 4329 * cpuset.
4330 */ 4330 */
4331void show_free_areas(unsigned int filter) 4331void show_free_areas(unsigned int filter, nodemask_t *nodemask)
4332{ 4332{
4333 unsigned long free_pcp = 0; 4333 unsigned long free_pcp = 0;
4334 int cpu; 4334 int cpu;
@@ -4336,7 +4336,7 @@ void show_free_areas(unsigned int filter)
4336 pg_data_t *pgdat; 4336 pg_data_t *pgdat;
4337 4337
4338 for_each_populated_zone(zone) { 4338 for_each_populated_zone(zone) {
4339 if (skip_free_areas_node(filter, zone_to_nid(zone))) 4339 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4340 continue; 4340 continue;
4341 4341
4342 for_each_online_cpu(cpu) 4342 for_each_online_cpu(cpu)
@@ -4370,7 +4370,7 @@ void show_free_areas(unsigned int filter)
4370 global_page_state(NR_FREE_CMA_PAGES)); 4370 global_page_state(NR_FREE_CMA_PAGES));
4371 4371
4372 for_each_online_pgdat(pgdat) { 4372 for_each_online_pgdat(pgdat) {
4373 if (skip_free_areas_node(filter, pgdat->node_id)) 4373 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
4374 continue; 4374 continue;
4375 4375
4376 printk("Node %d" 4376 printk("Node %d"
@@ -4422,7 +4422,7 @@ void show_free_areas(unsigned int filter)
4422 for_each_populated_zone(zone) { 4422 for_each_populated_zone(zone) {
4423 int i; 4423 int i;
4424 4424
4425 if (skip_free_areas_node(filter, zone_to_nid(zone))) 4425 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4426 continue; 4426 continue;
4427 4427
4428 free_pcp = 0; 4428 free_pcp = 0;
@@ -4487,7 +4487,7 @@ void show_free_areas(unsigned int filter)
4487 unsigned long nr[MAX_ORDER], flags, total = 0; 4487 unsigned long nr[MAX_ORDER], flags, total = 0;
4488 unsigned char types[MAX_ORDER]; 4488 unsigned char types[MAX_ORDER];
4489 4489
4490 if (skip_free_areas_node(filter, zone_to_nid(zone))) 4490 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4491 continue; 4491 continue;
4492 show_node(zone); 4492 show_node(zone);
4493 printk(KERN_CONT "%s: ", zone->name); 4493 printk(KERN_CONT "%s: ", zone->name);