author     Konstantin Khlebnikov <khlebnikov@yandex-team.ru>   2015-04-14 18:45:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2015-04-14 19:49:01 -0400
commit     d1bfcdb8ce0ea6eb6034daa7ff02548e0bc9c21b (patch)
tree       af4e212c85863c7d58d3366e0ba18ac07fbdd510 /mm/page_alloc.c
parent     b9ea25152e56365ce149b9a39637cd7a16eec556 (diff)
mm: hide per-cpu lists in output of show_mem()
This makes show_mem() much less verbose on huge machines. Instead of a huge and almost useless dump of counters for each per-zone per-cpu list, this patch prints the sum of these counters for each zone (free_pcp) and the size of the per-cpu list for the current CPU (local_pcp).

The filter flag SHOW_MEM_PERCPU_LISTS reverts to the old verbose mode.
[akpm@linux-foundation.org: update show_free_areas comment]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
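As a quick illustration of the new filter bit, here is a hedged sketch of a debugging path that opts back into the old verbose per-cpu dump. dump_mem_state() is invented for illustration; show_free_areas(), SHOW_MEM_FILTER_NODES and SHOW_MEM_PERCPU_LISTS are the interfaces actually touched by this patch.

/*
 * Hypothetical caller, for illustration only: show_free_areas() and the
 * two filter bits exist in the kernel, dump_mem_state() does not.
 */
static void dump_mem_state(bool verbose)
{
        unsigned int filter = SHOW_MEM_FILTER_NODES;    /* honour current's cpuset */

        if (verbose)
                filter |= SHOW_MEM_PERCPU_LISTS;        /* old per-cpu list dump */

        show_free_areas(filter);        /* default mode prints free_pcp/local_pcp */
}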
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c | 39
1 file changed, 30 insertions, 9 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6dfa5b24cc79..eab8e2018a46 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3251,25 +3251,37 @@ static void show_migration_types(unsigned char type)
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
  * memory on each free list with the exception of the first item on the list.
- * Suppresses nodes that are not allowed by current's cpuset if
- * SHOW_MEM_FILTER_NODES is passed.
+ *
+ * Bits in @filter:
+ * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
+ *   cpuset.
+ * SHOW_MEM_PERCPU_LISTS: display full per-node per-cpu pcp lists
  */
 void show_free_areas(unsigned int filter)
 {
+        unsigned long free_pcp = 0;
         int cpu;
         struct zone *zone;
 
         for_each_populated_zone(zone) {
                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
                         continue;
-                show_node(zone);
-                printk("%s per-cpu:\n", zone->name);
+
+                if (filter & SHOW_MEM_PERCPU_LISTS) {
+                        show_node(zone);
+                        printk("%s per-cpu:\n", zone->name);
+                }
 
                 for_each_online_cpu(cpu) {
                         struct per_cpu_pageset *pageset;
 
                         pageset = per_cpu_ptr(zone->pageset, cpu);
 
+                        free_pcp += pageset->pcp.count;
+
+                        if (!(filter & SHOW_MEM_PERCPU_LISTS))
+                                continue;
+
                         printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
                                cpu, pageset->pcp.high,
                                pageset->pcp.batch, pageset->pcp.count);
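Reassembled from the new side of the hunk above, the per-zone loop after this patch reads roughly as follows (comments added here for explanation only):

for_each_populated_zone(zone) {
        if (skip_free_areas_node(filter, zone_to_nid(zone)))
                continue;

        /* Node/zone headers for the pcp dump appear only in verbose mode. */
        if (filter & SHOW_MEM_PERCPU_LISTS) {
                show_node(zone);
                printk("%s per-cpu:\n", zone->name);
        }

        for_each_online_cpu(cpu) {
                struct per_cpu_pageset *pageset;

                pageset = per_cpu_ptr(zone->pageset, cpu);

                /* Always accumulate into the free_pcp summary counter. */
                free_pcp += pageset->pcp.count;

                /* Per-cpu detail lines are now opt-in. */
                if (!(filter & SHOW_MEM_PERCPU_LISTS))
                        continue;

                printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
                       cpu, pageset->pcp.high,
                       pageset->pcp.batch, pageset->pcp.count);
        }
}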
@@ -3278,11 +3290,10 @@ void show_free_areas(unsigned int filter)
 
         printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
                 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
-                " unevictable:%lu"
-                " dirty:%lu writeback:%lu unstable:%lu\n"
-                " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
+                " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
+                " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
                 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
-                " free_cma:%lu\n",
+                " free:%lu free_pcp:%lu free_cma:%lu\n",
                 global_page_state(NR_ACTIVE_ANON),
                 global_page_state(NR_INACTIVE_ANON),
                 global_page_state(NR_ISOLATED_ANON),
@@ -3293,13 +3304,14 @@ void show_free_areas(unsigned int filter)
                 global_page_state(NR_FILE_DIRTY),
                 global_page_state(NR_WRITEBACK),
                 global_page_state(NR_UNSTABLE_NFS),
-                global_page_state(NR_FREE_PAGES),
                 global_page_state(NR_SLAB_RECLAIMABLE),
                 global_page_state(NR_SLAB_UNRECLAIMABLE),
                 global_page_state(NR_FILE_MAPPED),
                 global_page_state(NR_SHMEM),
                 global_page_state(NR_PAGETABLE),
                 global_page_state(NR_BOUNCE),
+                global_page_state(NR_FREE_PAGES),
+                free_pcp,
                 global_page_state(NR_FREE_CMA_PAGES));
 
         for_each_populated_zone(zone) {
@@ -3307,6 +3319,11 @@ void show_free_areas(unsigned int filter)
 
                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
                         continue;
+
+                free_pcp = 0;
+                for_each_online_cpu(cpu)
+                        free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
+
                 show_node(zone);
                 printk("%s"
                         " free:%lukB"
@@ -3333,6 +3350,8 @@ void show_free_areas(unsigned int filter)
                         " pagetables:%lukB"
                         " unstable:%lukB"
                         " bounce:%lukB"
+                        " free_pcp:%lukB"
+                        " local_pcp:%ukB"
                         " free_cma:%lukB"
                         " writeback_tmp:%lukB"
                         " pages_scanned:%lu"
@@ -3364,6 +3383,8 @@ void show_free_areas(unsigned int filter)
                         K(zone_page_state(zone, NR_PAGETABLE)),
                         K(zone_page_state(zone, NR_UNSTABLE_NFS)),
                         K(zone_page_state(zone, NR_BOUNCE)),
+                        K(free_pcp),
+                        K(this_cpu_read(zone->pageset->pcp.count)),
                         K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
                         K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
                         K(zone_page_state(zone, NR_PAGES_SCANNED)),
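The last two hunks fill in the new per-zone fields: free_pcp is the per-zone sum recomputed in the loop above, while local_pcp is this_cpu_read(zone->pageset->pcp.count), i.e. only the current CPU's list (printed with %u, since pcp.count is an int). K() is the pages-to-kilobytes helper already used for the surrounding counters. Below is a small self-contained sketch of that conversion; the sample counts are made up and PAGE_SHIFT is assumed to be 12 (4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 12                           /* assumption: 4 KiB pages */
#define K(x) ((x) << (PAGE_SHIFT - 10))         /* pages -> kilobytes */

int main(void)
{
        unsigned long free_pcp = 345;   /* example: sum of pcp.count over all CPUs */
        unsigned int local_pcp = 56;    /* example: pcp.count on the current CPU */

        printf(" free_pcp:%lukB local_pcp:%ukB\n", K(free_pcp), K(local_pcp));
        return 0;
}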