path: root/mm/page_alloc.c
author     Rabin Vincent <rabin.vincent@stericsson.com>      2012-12-11 19:00:24 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-12-11 20:22:22 -0500
commit     377e4f167664d8bc390c04c911d846366000159c (patch)
tree       8c5cd576a9a3a2a70b9bd1aa85da1172db59c604 /mm/page_alloc.c
parent     d0e1d66b5aa1ec9f556f951aa9a114cc192cd01c (diff)
mm: show migration types in show_mem
This is useful to diagnose the reason for a page allocation failure in
cases where there appear to be plenty of free pages.  Example, with this
alloc_pages(GFP_ATOMIC) failure:

  swapper/0: page allocation failure: order:0, mode:0x0
  ...
  Mem-info:
  Normal per-cpu:
  CPU    0: hi:   90, btch:  15 usd:  48
  CPU    1: hi:   90, btch:  15 usd:  21
  active_anon:0 inactive_anon:0 isolated_anon:0
   active_file:0 inactive_file:84 isolated_file:0
   unevictable:0 dirty:0 writeback:0 unstable:0
   free:4026 slab_reclaimable:75 slab_unreclaimable:484
   mapped:0 shmem:0 pagetables:0 bounce:0
  Normal free:16104kB min:2296kB low:2868kB high:3444kB active_anon:0kB
   inactive_anon:0kB active_file:0kB inactive_file:336kB unevictable:0kB
   isolated(anon):0kB isolated(file):0kB present:331776kB mlocked:0kB
   dirty:0kB writeback:0kB mapped:0kB shmem:0kB slab_reclaimable:300kB
   slab_unreclaimable:1936kB kernel_stack:328kB pagetables:0kB
   unstable:0kB bounce:0kB writeback_tmp:0kB pages_scanned:0
   all_unreclaimable? no
  lowmem_reserve[]: 0 0

Before the patch, it's hard (for me, at least) to say why all these free
chunks weren't considered for allocation:

  Normal: 0*4kB 0*8kB 0*16kB 0*32kB 0*64kB 0*128kB 1*256kB 1*512kB
  1*1024kB 1*2048kB 3*4096kB = 16128kB

After the patch, it's obvious that the reason is that all of these are
in the MIGRATE_CMA (C) freelist:

  Normal: 0*4kB 0*8kB 0*16kB 0*32kB 0*64kB 0*128kB 1*256kB (C)
  1*512kB (C) 1*1024kB (C) 1*2048kB (C) 3*4096kB (C) = 16128kB

Signed-off-by: Rabin Vincent <rabin.vincent@stericsson.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
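To see the legend decoding in isolation, here is a minimal standalone
sketch of the scheme the patch uses: each migrate type occupies one bit
of an unsigned char, and set bits decode to the single letters shown in
the buddy lists.  This is plain userspace C, not kernel code; the enum
mirrors the kernel's ordering (assuming CONFIG_CMA is enabled), and
printf() stands in for printk().

#include <stdio.h>

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RESERVE,
	MIGRATE_CMA,		/* present only with CONFIG_CMA in the kernel */
	MIGRATE_ISOLATE,
	MIGRATE_TYPES
};

static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]   = 'U',
		[MIGRATE_RECLAIMABLE] = 'E',
		[MIGRATE_MOVABLE]     = 'M',
		[MIGRATE_RESERVE]     = 'R',
		[MIGRATE_CMA]         = 'C',
		[MIGRATE_ISOLATE]     = 'I',
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	/* Append one legend letter for every bit set in the mask. */
	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}
	*p = '\0';
	printf("(%s) ", tmp);
}

int main(void)
{
	/* Pretend an order's free pages sit only on the CMA free list. */
	show_migration_types(1 << MIGRATE_CMA);		/* prints "(C) " */
	/* Unmovable and movable lists both populated. */
	show_migration_types((1 << MIGRATE_UNMOVABLE) |
			     (1 << MIGRATE_MOVABLE));	/* prints "(UM) " */
	printf("\n");
	return 0;
}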
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  42
1 file changed, 40 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7e208f0ad68c..dc018b486b74 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2877,6 +2877,31 @@ out:
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
 
+static void show_migration_types(unsigned char type)
+{
+	static const char types[MIGRATE_TYPES] = {
+		[MIGRATE_UNMOVABLE]	= 'U',
+		[MIGRATE_RECLAIMABLE]	= 'E',
+		[MIGRATE_MOVABLE]	= 'M',
+		[MIGRATE_RESERVE]	= 'R',
+#ifdef CONFIG_CMA
+		[MIGRATE_CMA]		= 'C',
+#endif
+		[MIGRATE_ISOLATE]	= 'I',
+	};
+	char tmp[MIGRATE_TYPES + 1];
+	char *p = tmp;
+	int i;
+
+	for (i = 0; i < MIGRATE_TYPES; i++) {
+		if (type & (1 << i))
+			*p++ = types[i];
+	}
+
+	*p = '\0';
+	printk("(%s) ", tmp);
+}
+
 /*
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
@@ -3005,6 +3030,7 @@ void show_free_areas(unsigned int filter)
 
 	for_each_populated_zone(zone) {
 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
+		unsigned char types[MAX_ORDER];
 
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
 			continue;
@@ -3013,12 +3039,24 @@ void show_free_areas(unsigned int filter)
 
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < MAX_ORDER; order++) {
-			nr[order] = zone->free_area[order].nr_free;
+			struct free_area *area = &zone->free_area[order];
+			int type;
+
+			nr[order] = area->nr_free;
 			total += nr[order] << order;
+
+			types[order] = 0;
+			for (type = 0; type < MIGRATE_TYPES; type++) {
+				if (!list_empty(&area->free_list[type]))
+					types[order] |= 1 << type;
+			}
 		}
 		spin_unlock_irqrestore(&zone->lock, flags);
-		for (order = 0; order < MAX_ORDER; order++)
+		for (order = 0; order < MAX_ORDER; order++) {
 			printk("%lu*%lukB ", nr[order], K(1UL) << order);
+			if (nr[order])
+				show_migration_types(types[order]);
+		}
 		printk("= %lukB\n", K(total));
 	}
 
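The last hunk builds types[order] by probing each migrate type's free
list while the zone lock is held.  Here is a standalone sketch of that
accumulation pass, again plain userspace C rather than kernel code: the
free_list_nonempty() stub is hypothetical and stands in for
!list_empty(&area->free_list[type]), and the MAX_ORDER and
MIGRATE_TYPES values are assumed defaults for this kernel era.

#include <stdio.h>

#define MAX_ORDER	11	/* assumed default for this kernel era */
#define MIGRATE_TYPES	6	/* assumes CONFIG_CMA is enabled */

/* Hypothetical stub for !list_empty(&area->free_list[type]): here only
 * orders >= 6 have free pages, and only on the CMA list (bit 4), which
 * reproduces the failure pattern from the commit message. */
static int free_list_nonempty(int order, int type)
{
	return order >= 6 && type == 4;
}

int main(void)
{
	static const char legend[MIGRATE_TYPES + 1] = "UEMRCI";
	unsigned char types[MAX_ORDER];
	int order, type;

	/* First pass: record one bit per non-empty free list, as the
	 * patch does under the zone lock. */
	for (order = 0; order < MAX_ORDER; order++) {
		types[order] = 0;
		for (type = 0; type < MIGRATE_TYPES; type++) {
			if (free_list_nonempty(order, type))
				types[order] |= 1 << type;
		}
	}

	/* Second pass: decode each mask after "unlocking", mirroring
	 * show_migration_types(). */
	for (order = 0; order < MAX_ORDER; order++) {
		printf("order %2d: ", order);
		for (type = 0; type < MIGRATE_TYPES; type++) {
			if (types[order] & (1 << type))
				putchar(legend[type]);
		}
		putchar('\n');
	}
	return 0;
}

Splitting the work into two passes keeps the printk() calls out of the
locked section, which is the same reason the patch stages its results
in the nr[] and types[] arrays.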