author		Minchan Kim <minchan@kernel.org>	2014-10-09 18:29:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:26:02 -0400
commit		13de8933c96b4557f667c337676f05274e017f83 (patch)
tree		eedb527ddfef66108692544e490ff00974c59296 /mm/zsmalloc.c
parent		cd2567b6850b1648236a4aab0513a04ebaea6aa8 (diff)
zsmalloc: move pages_allocated to zs_pool
Currently, zram has no feature to limit memory, so in theory zram can deplete system memory. Users have asked for a limit several times, since even without exhaustion zram makes it hard to control the memory usage of the platform. This patchset adds that feature.

Patch 1 makes zs_get_total_size_bytes faster, because it will be called frequently by later patches for the new feature.

Patch 2 changes zs_get_total_size_bytes's return unit from bytes to pages so that zsmalloc doesn't need an unnecessary operation (ie, << PAGE_SHIFT).

Patch 3 adds the new feature. I added it in the zram layer, not in zsmalloc, because the limitation is zram's requirement, not zsmalloc's, so other users of zsmalloc (ie, zpool) shouldn't be affected by an unnecessary branch in zsmalloc. In the future, if every user of zsmalloc wants the feature, we could easily move it from the client side into zsmalloc, but the reverse would be painful.

Patch 4 adds a new facility to report the maximum memory usage of zram, which avoids users having to poll /sys/block/zram0/mem_used_total frequently and ensures transient maxima are not missed.

This patch (of 4):

pages_allocated has been counted per size_class structure, so when a zsmalloc user wants to see total_size_bytes, it has to gather the count from every size_class and report the sum. That is acceptable if the value is read rarely, but if it starts being read frequently it is a bad deal from a performance point of view.

This patch moves the count from size_class to zs_pool, which also reduces the memory footprint (from [255 * 8 bytes] to [sizeof(atomic_long_t)]).

Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Dan Streetman <ddstreet@ieee.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: <juno.choi@lge.com>
Cc: <seungho1.park@lge.com>
Cc: Luigi Semenzato <semenzato@google.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Seth Jennings <sjennings@variantweb.net>
Reviewed-by: David Horner <ds2horner@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
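For context, a hedged sketch of the read-side pattern that motivates patch 1: a zram-style memory limit (patch 3 of this series) would have to consult the pool size on every store, so the read path needs to be a single cheap atomic read rather than a 255-entry summation. The helper name and limit parameter below are illustrative only, not taken from this patch.

	/*
	 * Illustrative sketch; "pool_over_limit" and "mem_limit_bytes" are
	 * hypothetical names, not part of this patch.  With the pool-wide
	 * atomic counter, zs_get_total_size_bytes() boils down to a single
	 * atomic_long_read() plus a shift, so a check like this could run on
	 * every store without touching any class->lock.
	 */
	static bool pool_over_limit(struct zs_pool *pool, u64 mem_limit_bytes)
	{
		return mem_limit_bytes &&
			zs_get_total_size_bytes(pool) > mem_limit_bytes;
	}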
Diffstat (limited to 'mm/zsmalloc.c')
-rw-r--r--	mm/zsmalloc.c	| 23
1 file changed, 8 insertions(+), 15 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 94f38fac5e81..2a4acf400846 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -199,9 +199,6 @@ struct size_class {
 
 	spinlock_t lock;
 
-	/* stats */
-	u64 pages_allocated;
-
 	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
 };
 
@@ -220,6 +217,7 @@ struct zs_pool {
 	struct size_class size_class[ZS_SIZE_CLASSES];
 
 	gfp_t flags;	/* allocation flags used when growing pool */
+	atomic_long_t pages_allocated;
 };
 
 /*
@@ -1028,8 +1026,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 			return 0;
 
 		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
+		atomic_long_add(class->pages_per_zspage,
+				&pool->pages_allocated);
 		spin_lock(&class->lock);
-		class->pages_allocated += class->pages_per_zspage;
 	}
 
 	obj = (unsigned long)first_page->freelist;
@@ -1082,14 +1081,13 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
 
 	first_page->inuse--;
 	fullness = fix_fullness_group(pool, first_page);
-
-	if (fullness == ZS_EMPTY)
-		class->pages_allocated -= class->pages_per_zspage;
-
 	spin_unlock(&class->lock);
 
-	if (fullness == ZS_EMPTY)
+	if (fullness == ZS_EMPTY) {
+		atomic_long_sub(class->pages_per_zspage,
+				&pool->pages_allocated);
 		free_zspage(first_page);
+	}
 }
 EXPORT_SYMBOL_GPL(zs_free);
 
@@ -1185,12 +1183,7 @@ EXPORT_SYMBOL_GPL(zs_unmap_object);
 
 u64 zs_get_total_size_bytes(struct zs_pool *pool)
 {
-	int i;
-	u64 npages = 0;
-
-	for (i = 0; i < ZS_SIZE_CLASSES; i++)
-		npages += pool->size_class[i].pages_allocated;
-
+	u64 npages = atomic_long_read(&pool->pages_allocated);
 	return npages << PAGE_SHIFT;
 }
 EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
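Per the changelog, patch 2 of the series goes one step further and drops even the shift above by returning pages instead of bytes. A hedged sketch of what that read path would look like; the function name is an assumption based on the changelog's description and does not appear in this diff:

	/*
	 * Sketch of the follow-up described in the changelog (return unit
	 * changed from bytes to pages).  The name "zs_get_total_pages" is
	 * assumed for illustration; it relies only on the pool-wide counter
	 * introduced above.
	 */
	unsigned long zs_get_total_pages(struct zs_pool *pool)
	{
		return atomic_long_read(&pool->pages_allocated);
	}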