author     Ganesh Mahendran <opensource.ganesh@gmail.com>    2015-02-12 18:00:54 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2015-02-12 21:54:12 -0500
commit     0f050d997e275cf0e47ddc7006284eaa3c6fe049 (patch)
tree       380ee8b8b699a07a6b52c545310a8b4f65f8f6d6 /mm
parent     3eba0c6a56c04f2b017b43641a821f1ebfb7fb4c (diff)
mm/zsmalloc: add statistics support
Keeping zsmalloc fragmentation at a low level is our target, but we
still need debug code in zsmalloc to obtain quantitative data.

This patch adds a new configuration option, CONFIG_ZSMALLOC_STAT, to
enable statistics collection for developers.  Currently only the
object statistics in each class are collected.  Users can get the
information via debugfs:

	cat /sys/kernel/debug/zsmalloc/zram0/...

For example, after copying "jdk-8u25-linux-x64.tar.gz" to zram with an
ext4 filesystem:

class  size  obj_allocated  obj_used  pages_used
    0    32              0         0           0
    1    48            256        12           3
    2    64             64        14           1
    3    80             51         7           1
    4    96            128         5           3
    5   112             73         5           2
    6   128             32         4           1
    7   144              0         0           0
    8   160              0         0           0
    9   176              0         0           0
   10   192              0         0           0
   11   208              0         0           0
   12   224              0         0           0
   13   240              0         0           0
   14   256             16         1           1
   15   272             15         9           1
   16   288              0         0           0
   17   304              0         0           0
   18   320              0         0           0
   19   336              0         0           0
   20   352              0         0           0
   21   368              0         0           0
   22   384              0         0           0
   23   400              0         0           0
   24   416              0         0           0
   25   432              0         0           0
   26   448              0         0           0
   27   464              0         0           0
   28   480              0         0           0
   29   496             33         1           4
   30   512              0         0           0
   31   528              0         0           0
   32   544              0         0           0
   33   560              0         0           0
   34   576              0         0           0
   35   592              0         0           0
   36   608              0         0           0
   37   624              0         0           0
   38   640              0         0           0
   40   672              0         0           0
   42   704              0         0           0
   43   720             17         1           3
   44   736              0         0           0
   46   768              0         0           0
   49   816              0         0           0
   51   848              0         0           0
   52   864             14         1           3
   54   896              0         0           0
   57   944             13         1           3
   58   960              0         0           0
   62  1024              4         1           1
   66  1088             15         2           4
   67  1104              0         0           0
   71  1168              0         0           0
   74  1216              0         0           0
   76  1248              0         0           0
   83  1360              3         1           1
   91  1488             11         1           4
   94  1536              0         0           0
  100  1632              5         1           2
  107  1744              0         0           0
  111  1808              9         1           4
  126  2048              4         4           2
  144  2336              7         3           4
  151  2448              0         0           0
  168  2720             15        15          10
  190  3072             28        27          21
  202  3264              0         0           0
  254  4096          36209     36209       36209
Total                37022     36326       36288

We can calculate the overall fragmentation from the last line:

	Total 37022 36326 36288
	(37022 - 36326) / 37022 = 1.87%

By analysing the objects allocated in every class we can also see why
the fragmentation is so low: most of the allocated objects are in
class 254, and a class 254 zspage consists of only one page, so
allocating objects in class 254 introduces no fragmentation.

In the future, we can collect other zsmalloc statistics as needed and
analyse them.

Signed-off-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
Suggested-by: Minchan Kim <minchan@kernel.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Seth Jennings <sjennings@variantweb.net>
Cc: Dan Streetman <ddstreet@ieee.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
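Side note: the table above (and its Total row) can also be parsed
programmatically.  The following is a hypothetical userspace sketch,
not part of this patch, that reads a pool's obj_in_classes debugfs
file (the file created by this patch) and recomputes the overall
fragmentation; the default path and the row layout are assumptions
taken from the example output above.

/*
 * Hypothetical userspace helper, not part of this patch: read a pool's
 * obj_in_classes file and recompute the overall fragmentation from its
 * "Total" row, i.e. (obj_allocated - obj_used) / obj_allocated.
 * The default path and the row layout are assumptions based on the
 * example output in the changelog.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] :
		"/sys/kernel/debug/zsmalloc/zram0/obj_in_classes";
	unsigned long allocated, used, pages;
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		/* Summary row: " Total  <obj_allocated> <obj_used> <pages_used>" */
		if (sscanf(line, " Total %lu %lu %lu",
			   &allocated, &used, &pages) == 3) {
			fclose(f);
			printf("obj_allocated=%lu obj_used=%lu pages_used=%lu\n",
			       allocated, used, pages);
			printf("fragmentation=%.2f%%\n", allocated ?
			       100.0 * (allocated - used) / allocated : 0.0);
			return 0;
		}
	}

	fclose(f);
	fprintf(stderr, "no Total row found in %s\n", path);
	return 1;
}

Run against the example data above, it reproduces the
(37022 - 36326) / 37022 calculation from the changelog.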
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig      10
-rw-r--r--  mm/zsmalloc.c  233
2 files changed, 239 insertions(+), 4 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 4395b12869c8..de5239c152f9 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -602,6 +602,16 @@ config PGTABLE_MAPPING
 	  You can check speed with zsmalloc benchmark:
 	  https://github.com/spartacus06/zsmapbench
 
+config ZSMALLOC_STAT
+	bool "Export zsmalloc statistics"
+	depends on ZSMALLOC
+	select DEBUG_FS
+	help
+	  This option enables code in the zsmalloc to collect various
+	  statistics about whats happening in zsmalloc and exports that
+	  information to userspace via debugfs.
+	  If unsure, say N.
+
 config GENERIC_EARLY_IOREMAP
 	bool
 
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 2359e61b02bf..0dec1fa5f656 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -91,6 +91,7 @@
 #include <linux/hardirq.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <linux/debugfs.h>
 #include <linux/zsmalloc.h>
 #include <linux/zpool.h>
 
@@ -168,6 +169,22 @@ enum fullness_group {
 	ZS_FULL
 };
 
+enum zs_stat_type {
+	OBJ_ALLOCATED,
+	OBJ_USED,
+	NR_ZS_STAT_TYPE,
+};
+
+#ifdef CONFIG_ZSMALLOC_STAT
+
+static struct dentry *zs_stat_root;
+
+struct zs_size_stat {
+	unsigned long objs[NR_ZS_STAT_TYPE];
+};
+
+#endif
+
 /*
  * number of size_classes
  */
@@ -200,6 +217,10 @@ struct size_class {
 	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
 	int pages_per_zspage;
 
+#ifdef CONFIG_ZSMALLOC_STAT
+	struct zs_size_stat stats;
+#endif
+
 	spinlock_t lock;
 
 	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
@@ -217,10 +238,16 @@ struct link_free {
 };
 
 struct zs_pool {
+	char *name;
+
 	struct size_class **size_class;
 
 	gfp_t flags; /* allocation flags used when growing pool */
 	atomic_long_t pages_allocated;
+
+#ifdef CONFIG_ZSMALLOC_STAT
+	struct dentry *stat_dentry;
+#endif
 };
 
 /*
@@ -942,6 +969,166 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
 	return true;
 }
 
+#ifdef CONFIG_ZSMALLOC_STAT
+
+static inline void zs_stat_inc(struct size_class *class,
+				enum zs_stat_type type, unsigned long cnt)
+{
+	class->stats.objs[type] += cnt;
+}
+
+static inline void zs_stat_dec(struct size_class *class,
+				enum zs_stat_type type, unsigned long cnt)
+{
+	class->stats.objs[type] -= cnt;
+}
+
+static inline unsigned long zs_stat_get(struct size_class *class,
+				enum zs_stat_type type)
+{
+	return class->stats.objs[type];
+}
+
+static int __init zs_stat_init(void)
+{
+	if (!debugfs_initialized())
+		return -ENODEV;
+
+	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
+	if (!zs_stat_root)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void __exit zs_stat_exit(void)
+{
+	debugfs_remove_recursive(zs_stat_root);
+}
+
+static int zs_stats_size_show(struct seq_file *s, void *v)
+{
+	int i;
+	struct zs_pool *pool = s->private;
+	struct size_class *class;
+	int objs_per_zspage;
+	unsigned long obj_allocated, obj_used, pages_used;
+	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
+
+	seq_printf(s, " %5s %5s %13s %10s %10s\n", "class", "size",
+			"obj_allocated", "obj_used", "pages_used");
+
+	for (i = 0; i < zs_size_classes; i++) {
+		class = pool->size_class[i];
+
+		if (class->index != i)
+			continue;
+
+		spin_lock(&class->lock);
+		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
+		obj_used = zs_stat_get(class, OBJ_USED);
+		spin_unlock(&class->lock);
+
+		objs_per_zspage = get_maxobj_per_zspage(class->size,
+				class->pages_per_zspage);
+		pages_used = obj_allocated / objs_per_zspage *
+				class->pages_per_zspage;
+
+		seq_printf(s, " %5u %5u %10lu %10lu %10lu\n", i,
+			class->size, obj_allocated, obj_used, pages_used);
+
+		total_objs += obj_allocated;
+		total_used_objs += obj_used;
+		total_pages += pages_used;
+	}
+
+	seq_puts(s, "\n");
+	seq_printf(s, " %5s %5s %10lu %10lu %10lu\n", "Total", "",
+			total_objs, total_used_objs, total_pages);
+
+	return 0;
+}
+
+static int zs_stats_size_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, zs_stats_size_show, inode->i_private);
+}
+
+static const struct file_operations zs_stat_size_ops = {
+	.open = zs_stats_size_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int zs_pool_stat_create(char *name, struct zs_pool *pool)
+{
+	struct dentry *entry;
+
+	if (!zs_stat_root)
+		return -ENODEV;
+
+	entry = debugfs_create_dir(name, zs_stat_root);
+	if (!entry) {
+		pr_warn("debugfs dir <%s> creation failed\n", name);
+		return -ENOMEM;
+	}
+	pool->stat_dentry = entry;
+
+	entry = debugfs_create_file("obj_in_classes", S_IFREG | S_IRUGO,
+			pool->stat_dentry, pool, &zs_stat_size_ops);
+	if (!entry) {
+		pr_warn("%s: debugfs file entry <%s> creation failed\n",
+				name, "obj_in_classes");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void zs_pool_stat_destroy(struct zs_pool *pool)
+{
+	debugfs_remove_recursive(pool->stat_dentry);
+}
+
+#else /* CONFIG_ZSMALLOC_STAT */
+
+static inline void zs_stat_inc(struct size_class *class,
+				enum zs_stat_type type, unsigned long cnt)
+{
+}
+
+static inline void zs_stat_dec(struct size_class *class,
+				enum zs_stat_type type, unsigned long cnt)
+{
+}
+
+static inline unsigned long zs_stat_get(struct size_class *class,
+				enum zs_stat_type type)
+{
+	return 0;
+}
+
+static int __init zs_stat_init(void)
+{
+	return 0;
+}
+
+static void __exit zs_stat_exit(void)
+{
+}
+
+static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
+{
+	return 0;
+}
+
+static inline void zs_pool_stat_destroy(struct zs_pool *pool)
+{
+}
+
+#endif
+
 unsigned long zs_get_total_pages(struct zs_pool *pool)
 {
 	return atomic_long_read(&pool->pages_allocated);
@@ -1074,7 +1261,10 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
 		atomic_long_add(class->pages_per_zspage,
 					&pool->pages_allocated);
+
 		spin_lock(&class->lock);
+		zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+				class->size, class->pages_per_zspage));
 	}
 
 	obj = (unsigned long)first_page->freelist;
@@ -1088,6 +1278,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 	kunmap_atomic(vaddr);
 
 	first_page->inuse++;
+	zs_stat_inc(class, OBJ_USED, 1);
 	/* Now move the zspage to another fullness group, if required */
 	fix_fullness_group(pool, first_page);
 	spin_unlock(&class->lock);
@@ -1128,6 +1319,12 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
 
 	first_page->inuse--;
 	fullness = fix_fullness_group(pool, first_page);
+
+	zs_stat_dec(class, OBJ_USED, 1);
+	if (fullness == ZS_EMPTY)
+		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+				class->size, class->pages_per_zspage));
+
 	spin_unlock(&class->lock);
 
 	if (fullness == ZS_EMPTY) {
@@ -1158,9 +1355,16 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags)
 	if (!pool)
 		return NULL;
 
+	pool->name = kstrdup(name, GFP_KERNEL);
+	if (!pool->name) {
+		kfree(pool);
+		return NULL;
+	}
+
 	pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
 			GFP_KERNEL);
 	if (!pool->size_class) {
+		kfree(pool->name);
 		kfree(pool);
 		return NULL;
 	}
@@ -1210,6 +1414,9 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags)
 
 	pool->flags = flags;
 
+	if (zs_pool_stat_create(name, pool))
+		goto err;
+
 	return pool;
 
 err:
@@ -1222,6 +1429,8 @@ void zs_destroy_pool(struct zs_pool *pool)
 {
 	int i;
 
+	zs_pool_stat_destroy(pool);
+
 	for (i = 0; i < zs_size_classes; i++) {
 		int fg;
 		struct size_class *class = pool->size_class[i];
@@ -1242,6 +1451,7 @@ void zs_destroy_pool(struct zs_pool *pool)
 	}
 
 	kfree(pool->size_class);
+	kfree(pool->name);
 	kfree(pool);
 }
 EXPORT_SYMBOL_GPL(zs_destroy_pool);
@@ -1250,17 +1460,30 @@ static int __init zs_init(void)
 {
 	int ret = zs_register_cpu_notifier();
 
-	if (ret) {
-		zs_unregister_cpu_notifier();
-		return ret;
-	}
+	if (ret)
+		goto notifier_fail;
 
 	init_zs_size_classes();
 
 #ifdef CONFIG_ZPOOL
 	zpool_register_driver(&zs_zpool_driver);
 #endif
+
+	ret = zs_stat_init();
+	if (ret) {
+		pr_err("zs stat initialization failed\n");
+		goto stat_fail;
+	}
 	return 0;
+
+stat_fail:
+#ifdef CONFIG_ZPOOL
+	zpool_unregister_driver(&zs_zpool_driver);
+#endif
+notifier_fail:
+	zs_unregister_cpu_notifier();
+
+	return ret;
 }
 
 static void __exit zs_exit(void)
@@ -1269,6 +1492,8 @@ static void __exit zs_exit(void)
 	zpool_unregister_driver(&zs_zpool_driver);
 #endif
 	zs_unregister_cpu_notifier();
+
+	zs_stat_exit();
 }
 
 module_init(zs_init);