aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorMinchan Kim <minchan@kernel.org>2015-04-15 19:15:42 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-04-15 19:35:21 -0400
commit248ca1b053c82fa22427d22b33ac51a24c88a86d (patch)
tree34fe1947d4b2ae8e2c57554152bfd836729d5631 /mm
parent7b60a68529b0d827d26ea3426c2addd071bff789 (diff)
zsmalloc: add fullness into stat
During compaction investigation, the fullness information for each class is helpful for understanding how well compaction works. With it, we can see more clearly how compaction performs on each size class. Signed-off-by: Minchan Kim <minchan@kernel.org> Cc: Juneho Choi <juno.choi@lge.com> Cc: Gunho Lee <gunho.lee@lge.com> Cc: Luigi Semenzato <semenzato@google.com> Cc: Dan Streetman <ddstreet@ieee.org> Cc: Seth Jennings <sjennings@variantweb.net> Cc: Nitin Gupta <ngupta@vflare.org> Cc: Jerome Marchand <jmarchan@redhat.com> Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Mel Gorman <mel@csn.ul.ie> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/zsmalloc.c349
1 file changed, 184 insertions, 165 deletions
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 95771c75f2e9..461243e14d3e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -197,6 +197,8 @@ enum fullness_group {
197enum zs_stat_type { 197enum zs_stat_type {
198 OBJ_ALLOCATED, 198 OBJ_ALLOCATED,
199 OBJ_USED, 199 OBJ_USED,
200 CLASS_ALMOST_FULL,
201 CLASS_ALMOST_EMPTY,
200 NR_ZS_STAT_TYPE, 202 NR_ZS_STAT_TYPE,
201}; 203};
202 204
@@ -412,6 +414,11 @@ static struct zpool_driver zs_zpool_driver = {
412MODULE_ALIAS("zpool-zsmalloc"); 414MODULE_ALIAS("zpool-zsmalloc");
413#endif /* CONFIG_ZPOOL */ 415#endif /* CONFIG_ZPOOL */
414 416
417static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
418{
419 return pages_per_zspage * PAGE_SIZE / size;
420}
421
415/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ 422/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
416static DEFINE_PER_CPU(struct mapping_area, zs_map_area); 423static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
417 424
@@ -465,6 +472,179 @@ static int get_size_class_index(int size)
465 return min(zs_size_classes - 1, idx); 472 return min(zs_size_classes - 1, idx);
466} 473}
467 474
475#ifdef CONFIG_ZSMALLOC_STAT
476
477static inline void zs_stat_inc(struct size_class *class,
478 enum zs_stat_type type, unsigned long cnt)
479{
480 class->stats.objs[type] += cnt;
481}
482
483static inline void zs_stat_dec(struct size_class *class,
484 enum zs_stat_type type, unsigned long cnt)
485{
486 class->stats.objs[type] -= cnt;
487}
488
489static inline unsigned long zs_stat_get(struct size_class *class,
490 enum zs_stat_type type)
491{
492 return class->stats.objs[type];
493}
494
495static int __init zs_stat_init(void)
496{
497 if (!debugfs_initialized())
498 return -ENODEV;
499
500 zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
501 if (!zs_stat_root)
502 return -ENOMEM;
503
504 return 0;
505}
506
507static void __exit zs_stat_exit(void)
508{
509 debugfs_remove_recursive(zs_stat_root);
510}
511
512static int zs_stats_size_show(struct seq_file *s, void *v)
513{
514 int i;
515 struct zs_pool *pool = s->private;
516 struct size_class *class;
517 int objs_per_zspage;
518 unsigned long class_almost_full, class_almost_empty;
519 unsigned long obj_allocated, obj_used, pages_used;
520 unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
521 unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
522
523 seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n",
524 "class", "size", "almost_full", "almost_empty",
525 "obj_allocated", "obj_used", "pages_used",
526 "pages_per_zspage");
527
528 for (i = 0; i < zs_size_classes; i++) {
529 class = pool->size_class[i];
530
531 if (class->index != i)
532 continue;
533
534 spin_lock(&class->lock);
535 class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
536 class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
537 obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
538 obj_used = zs_stat_get(class, OBJ_USED);
539 spin_unlock(&class->lock);
540
541 objs_per_zspage = get_maxobj_per_zspage(class->size,
542 class->pages_per_zspage);
543 pages_used = obj_allocated / objs_per_zspage *
544 class->pages_per_zspage;
545
546 seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n",
547 i, class->size, class_almost_full, class_almost_empty,
548 obj_allocated, obj_used, pages_used,
549 class->pages_per_zspage);
550
551 total_class_almost_full += class_almost_full;
552 total_class_almost_empty += class_almost_empty;
553 total_objs += obj_allocated;
554 total_used_objs += obj_used;
555 total_pages += pages_used;
556 }
557
558 seq_puts(s, "\n");
559 seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n",
560 "Total", "", total_class_almost_full,
561 total_class_almost_empty, total_objs,
562 total_used_objs, total_pages);
563
564 return 0;
565}
566
567static int zs_stats_size_open(struct inode *inode, struct file *file)
568{
569 return single_open(file, zs_stats_size_show, inode->i_private);
570}
571
572static const struct file_operations zs_stat_size_ops = {
573 .open = zs_stats_size_open,
574 .read = seq_read,
575 .llseek = seq_lseek,
576 .release = single_release,
577};
578
579static int zs_pool_stat_create(char *name, struct zs_pool *pool)
580{
581 struct dentry *entry;
582
583 if (!zs_stat_root)
584 return -ENODEV;
585
586 entry = debugfs_create_dir(name, zs_stat_root);
587 if (!entry) {
588 pr_warn("debugfs dir <%s> creation failed\n", name);
589 return -ENOMEM;
590 }
591 pool->stat_dentry = entry;
592
593 entry = debugfs_create_file("classes", S_IFREG | S_IRUGO,
594 pool->stat_dentry, pool, &zs_stat_size_ops);
595 if (!entry) {
596 pr_warn("%s: debugfs file entry <%s> creation failed\n",
597 name, "classes");
598 return -ENOMEM;
599 }
600
601 return 0;
602}
603
604static void zs_pool_stat_destroy(struct zs_pool *pool)
605{
606 debugfs_remove_recursive(pool->stat_dentry);
607}
608
609#else /* CONFIG_ZSMALLOC_STAT */
610
611static inline void zs_stat_inc(struct size_class *class,
612 enum zs_stat_type type, unsigned long cnt)
613{
614}
615
616static inline void zs_stat_dec(struct size_class *class,
617 enum zs_stat_type type, unsigned long cnt)
618{
619}
620
621static inline unsigned long zs_stat_get(struct size_class *class,
622 enum zs_stat_type type)
623{
624 return 0;
625}
626
627static int __init zs_stat_init(void)
628{
629 return 0;
630}
631
632static void __exit zs_stat_exit(void)
633{
634}
635
636static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
637{
638 return 0;
639}
640
641static inline void zs_pool_stat_destroy(struct zs_pool *pool)
642{
643}
644
645#endif
646
647
468/* 648/*
469 * For each size class, zspages are divided into different groups 649 * For each size class, zspages are divided into different groups
470 * depending on how "full" they are. This was done so that we could 650 * depending on how "full" they are. This was done so that we could
@@ -514,6 +694,8 @@ static void insert_zspage(struct page *page, struct size_class *class,
514 list_add_tail(&page->lru, &(*head)->lru); 694 list_add_tail(&page->lru, &(*head)->lru);
515 695
516 *head = page; 696 *head = page;
697 zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
698 CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
517} 699}
518 700
519/* 701/*
@@ -539,6 +721,8 @@ static void remove_zspage(struct page *page, struct size_class *class,
539 struct page, lru); 721 struct page, lru);
540 722
541 list_del_init(&page->lru); 723 list_del_init(&page->lru);
724 zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
725 CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
542} 726}
543 727
544/* 728/*
@@ -1057,11 +1241,6 @@ static void init_zs_size_classes(void)
1057 zs_size_classes = nr; 1241 zs_size_classes = nr;
1058} 1242}
1059 1243
1060static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
1061{
1062 return pages_per_zspage * PAGE_SIZE / size;
1063}
1064
1065static bool can_merge(struct size_class *prev, int size, int pages_per_zspage) 1244static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
1066{ 1245{
1067 if (prev->pages_per_zspage != pages_per_zspage) 1246 if (prev->pages_per_zspage != pages_per_zspage)
@@ -1081,166 +1260,6 @@ static bool zspage_full(struct page *page)
1081 return page->inuse == page->objects; 1260 return page->inuse == page->objects;
1082} 1261}
1083 1262
1084#ifdef CONFIG_ZSMALLOC_STAT
1085
1086static inline void zs_stat_inc(struct size_class *class,
1087 enum zs_stat_type type, unsigned long cnt)
1088{
1089 class->stats.objs[type] += cnt;
1090}
1091
1092static inline void zs_stat_dec(struct size_class *class,
1093 enum zs_stat_type type, unsigned long cnt)
1094{
1095 class->stats.objs[type] -= cnt;
1096}
1097
1098static inline unsigned long zs_stat_get(struct size_class *class,
1099 enum zs_stat_type type)
1100{
1101 return class->stats.objs[type];
1102}
1103
1104static int __init zs_stat_init(void)
1105{
1106 if (!debugfs_initialized())
1107 return -ENODEV;
1108
1109 zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
1110 if (!zs_stat_root)
1111 return -ENOMEM;
1112
1113 return 0;
1114}
1115
1116static void __exit zs_stat_exit(void)
1117{
1118 debugfs_remove_recursive(zs_stat_root);
1119}
1120
1121static int zs_stats_size_show(struct seq_file *s, void *v)
1122{
1123 int i;
1124 struct zs_pool *pool = s->private;
1125 struct size_class *class;
1126 int objs_per_zspage;
1127 unsigned long obj_allocated, obj_used, pages_used;
1128 unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
1129
1130 seq_printf(s, " %5s %5s %13s %10s %10s\n", "class", "size",
1131 "obj_allocated", "obj_used", "pages_used");
1132
1133 for (i = 0; i < zs_size_classes; i++) {
1134 class = pool->size_class[i];
1135
1136 if (class->index != i)
1137 continue;
1138
1139 spin_lock(&class->lock);
1140 obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
1141 obj_used = zs_stat_get(class, OBJ_USED);
1142 spin_unlock(&class->lock);
1143
1144 objs_per_zspage = get_maxobj_per_zspage(class->size,
1145 class->pages_per_zspage);
1146 pages_used = obj_allocated / objs_per_zspage *
1147 class->pages_per_zspage;
1148
1149 seq_printf(s, " %5u %5u %10lu %10lu %10lu\n", i,
1150 class->size, obj_allocated, obj_used, pages_used);
1151
1152 total_objs += obj_allocated;
1153 total_used_objs += obj_used;
1154 total_pages += pages_used;
1155 }
1156
1157 seq_puts(s, "\n");
1158 seq_printf(s, " %5s %5s %10lu %10lu %10lu\n", "Total", "",
1159 total_objs, total_used_objs, total_pages);
1160
1161 return 0;
1162}
1163
1164static int zs_stats_size_open(struct inode *inode, struct file *file)
1165{
1166 return single_open(file, zs_stats_size_show, inode->i_private);
1167}
1168
1169static const struct file_operations zs_stat_size_ops = {
1170 .open = zs_stats_size_open,
1171 .read = seq_read,
1172 .llseek = seq_lseek,
1173 .release = single_release,
1174};
1175
1176static int zs_pool_stat_create(char *name, struct zs_pool *pool)
1177{
1178 struct dentry *entry;
1179
1180 if (!zs_stat_root)
1181 return -ENODEV;
1182
1183 entry = debugfs_create_dir(name, zs_stat_root);
1184 if (!entry) {
1185 pr_warn("debugfs dir <%s> creation failed\n", name);
1186 return -ENOMEM;
1187 }
1188 pool->stat_dentry = entry;
1189
1190 entry = debugfs_create_file("obj_in_classes", S_IFREG | S_IRUGO,
1191 pool->stat_dentry, pool, &zs_stat_size_ops);
1192 if (!entry) {
1193 pr_warn("%s: debugfs file entry <%s> creation failed\n",
1194 name, "obj_in_classes");
1195 return -ENOMEM;
1196 }
1197
1198 return 0;
1199}
1200
1201static void zs_pool_stat_destroy(struct zs_pool *pool)
1202{
1203 debugfs_remove_recursive(pool->stat_dentry);
1204}
1205
1206#else /* CONFIG_ZSMALLOC_STAT */
1207
1208static inline void zs_stat_inc(struct size_class *class,
1209 enum zs_stat_type type, unsigned long cnt)
1210{
1211}
1212
1213static inline void zs_stat_dec(struct size_class *class,
1214 enum zs_stat_type type, unsigned long cnt)
1215{
1216}
1217
1218static inline unsigned long zs_stat_get(struct size_class *class,
1219 enum zs_stat_type type)
1220{
1221 return 0;
1222}
1223
1224static int __init zs_stat_init(void)
1225{
1226 return 0;
1227}
1228
1229static void __exit zs_stat_exit(void)
1230{
1231}
1232
1233static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
1234{
1235 return 0;
1236}
1237
1238static inline void zs_pool_stat_destroy(struct zs_pool *pool)
1239{
1240}
1241
1242#endif
1243
1244unsigned long zs_get_total_pages(struct zs_pool *pool) 1263unsigned long zs_get_total_pages(struct zs_pool *pool)
1245{ 1264{
1246 return atomic_long_read(&pool->pages_allocated); 1265 return atomic_long_read(&pool->pages_allocated);