aboutsummaryrefslogtreecommitdiffstats
path: root/mm/zsmalloc.c
diff options
context:
space:
mode:
authorMinchan Kim <minchan@kernel.org>2015-09-08 18:04:49 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-09-08 18:35:28 -0400
commit6cbf16b3b66a61b9c6df8f2ed4ac346cb427f28a (patch)
tree51eb943fa8a3dbaa304226ae57c3275514d0ec06 /mm/zsmalloc.c
parentad9d5e175a77a253f52a7259a7c918b8351d99f1 (diff)
zsmalloc: use class->pages_per_zspage
There is no need to recalculate pages_per_zspage at runtime. Just use class->pages_per_zspage to avoid unnecessary runtime overhead. Signed-off-by: Minchan Kim <minchan@kernel.org> Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/zsmalloc.c')
-rw-r--r--mm/zsmalloc.c5
1 file changed, 2 insertions, 3 deletions
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c10885ca87a4..ce08d043becd 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1723,7 +1723,7 @@ static unsigned long zs_can_compact(struct size_class *class)
 	obj_wasted /= get_maxobj_per_zspage(class->size,
 			class->pages_per_zspage);
 
-	return obj_wasted * get_pages_per_zspage(class->size);
+	return obj_wasted * class->pages_per_zspage;
 }
 
 static void __zs_compact(struct zs_pool *pool, struct size_class *class)
@@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 
 		putback_zspage(pool, class, dst_page);
 		if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
-			pool->stats.pages_compacted +=
-					get_pages_per_zspage(class->size);
+			pool->stats.pages_compacted += class->pages_per_zspage;
 		spin_unlock(&class->lock);
 		cond_resched();
 		spin_lock(&class->lock);