path: root/mm/vmstat.c
author	Mel Gorman <mel@csn.ul.ie>	2010-05-24 17:32:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-25 11:06:59 -0400
commit	d7a5752c0c19750312efab3a2a80d350e11fa4a2
tree	f604d9ae1d7f689e3314120e2d4edac36c1b5f22	/mm/vmstat.c
parent	a8bef8ff6ea15fa4c67433cab0f5f3484574ef7c
mm: export unusable free space index via debugfs
The unusable free space index measures how much of the available free
memory cannot be used to satisfy an allocation of a given size and is a
value between 0 and 1. The higher the value, the more of free memory is
unusable and by implication, the worse the external fragmentation is. For
the most part, the huge page size will be the size of interest but not
necessarily so it is exported on a per-order and per-zone basis via
/sys/kernel/debug/extfrag/unusable_index.

> cat /sys/kernel/debug/extfrag/unusable_index
Node 0, zone      DMA 0.000 0.000 0.000 0.001 0.005 0.013 0.021 0.037 0.037 0.101 0.230
Node 0, zone   Normal 0.000 0.000 0.000 0.001 0.002 0.002 0.005 0.015 0.028 0.028 0.054

[akpm@linux-foundation.org: Fix allnoconfig]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
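[Editor's note] To make the metric concrete, below is a minimal userspace sketch of the same arithmetic the patch performs in fill_contig_page_info() and unusable_free_index(). The per-order free block counts and the MAX_ORDER value are made-up assumptions for illustration only; they do not come from the patch.

#include <stdio.h>

#define MAX_ORDER 11	/* assumption: typical x86 configuration */

/* Hypothetical nr_free counts per order, standing in for zone->free_area[] */
static const unsigned long nr_free[MAX_ORDER] = {
	100, 80, 60, 40, 20, 10, 5, 2, 1, 0, 0
};

int main(void)
{
	unsigned int target;

	for (target = 0; target < MAX_ORDER; target++) {
		unsigned long free_pages = 0;
		unsigned long suitable = 0;
		unsigned long index;
		unsigned int order;

		for (order = 0; order < MAX_ORDER; order++) {
			/* A free block of 'order' contains 2^order base pages */
			free_pages += nr_free[order] << order;
			/* Blocks at or above the target order can satisfy it */
			if (order >= target)
				suitable += nr_free[order] << (order - target);
		}

		/* Same scaling as the patch: 0..1000 == three decimal places */
		if (free_pages == 0)
			index = 1000;
		else
			index = (free_pages - (suitable << target)) * 1000 / free_pages;

		printf("order %2u: %lu.%03lu\n", target, index / 1000, index % 1000);
	}
	return 0;
}

Compiled and run, this prints an index per order that rises with the order, mirroring the sample rows above: when only small free blocks are available, allocations of higher orders find an increasing share of free memory unusable.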
Diffstat (limited to 'mm/vmstat.c')
-rw-r--r--	mm/vmstat.c	150
1 file changed, 149 insertions, 1 deletion
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fa12ea3051fb..d3e0fa169f05 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -379,7 +379,50 @@ void zone_statistics(struct zone *preferred_zone, struct zone *z)
 }
 #endif
 
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_COMPACTION
+struct contig_page_info {
+	unsigned long free_pages;
+	unsigned long free_blocks_total;
+	unsigned long free_blocks_suitable;
+};
+
+/*
+ * Calculate the number of free pages in a zone, how many contiguous
+ * pages are free and how many are large enough to satisfy an allocation of
+ * the target size. Note that this function makes no attempt to estimate
+ * how many suitable free blocks there *might* be if MOVABLE pages were
+ * migrated. Calculating that is possible, but expensive and can be
+ * figured out from userspace
+ */
+static void fill_contig_page_info(struct zone *zone,
+				unsigned int suitable_order,
+				struct contig_page_info *info)
+{
+	unsigned int order;
+
+	info->free_pages = 0;
+	info->free_blocks_total = 0;
+	info->free_blocks_suitable = 0;
+
+	for (order = 0; order < MAX_ORDER; order++) {
+		unsigned long blocks;
+
+		/* Count number of free blocks */
+		blocks = zone->free_area[order].nr_free;
+		info->free_blocks_total += blocks;
+
+		/* Count free base pages */
+		info->free_pages += blocks << order;
+
+		/* Count the suitable free blocks */
+		if (order >= suitable_order)
+			info->free_blocks_suitable += blocks <<
+						(order - suitable_order);
+	}
+}
+#endif
+
+#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
@@ -432,7 +475,9 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
 }
+#endif
 
+#ifdef CONFIG_PROC_FS
 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
 						struct zone *zone)
 {
@@ -954,3 +999,106 @@ static int __init setup_vmstat(void)
 	return 0;
 }
 module_init(setup_vmstat)
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
+#include <linux/debugfs.h>
+
+static struct dentry *extfrag_debug_root;
+
+/*
+ * Return an index indicating how much of the available free memory is
+ * unusable for an allocation of the requested size.
+ */
+static int unusable_free_index(unsigned int order,
+				struct contig_page_info *info)
+{
+	/* No free memory is interpreted as all free memory is unusable */
+	if (info->free_pages == 0)
+		return 1000;
+
+	/*
+	 * Index should be a value between 0 and 1. Return a value to 3
+	 * decimal places.
+	 *
+	 * 0 => no fragmentation
+	 * 1 => high fragmentation
+	 */
+	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
+
+}
+
+static void unusable_show_print(struct seq_file *m,
+					pg_data_t *pgdat, struct zone *zone)
+{
+	unsigned int order;
+	int index;
+	struct contig_page_info info;
+
+	seq_printf(m, "Node %d, zone %8s ",
+				pgdat->node_id,
+				zone->name);
+	for (order = 0; order < MAX_ORDER; ++order) {
+		fill_contig_page_info(zone, order, &info);
+		index = unusable_free_index(order, &info);
+		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
+	}
+
+	seq_putc(m, '\n');
+}
+
+/*
+ * Display unusable free space index
+ *
+ * The unusable free space index measures how much of the available free
+ * memory cannot be used to satisfy an allocation of a given size and is a
+ * value between 0 and 1. The higher the value, the more of free memory is
+ * unusable and by implication, the worse the external fragmentation is. This
+ * can be expressed as a percentage by multiplying by 100.
+ */
+static int unusable_show(struct seq_file *m, void *arg)
+{
+	pg_data_t *pgdat = (pg_data_t *)arg;
+
+	/* check memoryless node */
+	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
+		return 0;
+
+	walk_zones_in_node(m, pgdat, unusable_show_print);
+
+	return 0;
+}
+
+static const struct seq_operations unusable_op = {
+	.start	= frag_start,
+	.next	= frag_next,
+	.stop	= frag_stop,
+	.show	= unusable_show,
+};
+
+static int unusable_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &unusable_op);
+}
+
+static const struct file_operations unusable_file_ops = {
+	.open		= unusable_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int __init extfrag_debug_init(void)
+{
+	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
+	if (!extfrag_debug_root)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("unusable_index", 0444,
+			extfrag_debug_root, NULL, &unusable_file_ops))
+		return -ENOMEM;
+
+	return 0;
+}
+
+module_init(extfrag_debug_init);
+#endif
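
[Editor's note] As a usage note, each row of unusable_index corresponds to one node/zone pair and each column to an allocation order from 0 up to MAX_ORDER - 1; for huge pages the column of interest is the huge page order (for example order 9 for 2MB huge pages with a 4KB base page). The helper below is a hypothetical illustration of pulling one column out of the file this patch creates; only the file path and the row format come from the patch, everything else is an assumption.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Illustrative helper (not part of the patch): print the unusable free
 * space index for a single allocation order on every node/zone row of
 * /sys/kernel/debug/extfrag/unusable_index.
 */
int main(int argc, char **argv)
{
	unsigned int order = (argc > 1) ? (unsigned int)atoi(argv[1]) : 9;
	char line[512];
	FILE *fp = fopen("/sys/kernel/debug/extfrag/unusable_index", "r");

	if (!fp) {
		perror("unusable_index");
		return 1;
	}

	while (fgets(line, sizeof(line), fp)) {
		/* Row format: "Node N, zone NAME  v0 v1 ... v(MAX_ORDER-1)" */
		char *tok = strtok(line, " \t\n");
		char zone[64];
		unsigned int i;

		/* Skip "Node", "N,", "zone"; the next token is the zone name */
		for (i = 0; tok && i < 3; i++)
			tok = strtok(NULL, " \t\n");
		if (!tok)
			continue;
		snprintf(zone, sizeof(zone), "%s", tok);

		/* The value for 'order' is the (order + 1)-th token after the name */
		for (i = 0; tok && i <= order; i++)
			tok = strtok(NULL, " \t\n");
		if (tok)
			printf("zone %-8s order %u: %s\n", zone, order, tok);
	}

	fclose(fp);
	return 0;
}

Run as "./unusable 9", this would print one line per zone with that order's index, which is handy when only the huge page order matters rather than the full per-order table.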