author	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>	2015-09-08 18:04:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-08 18:35:28 -0400
commit	0dc63d488a2a433a4a85d3908b3f195c4e6450d2
tree	0e8403439dedb4bda6fcfd0b17be0ee66ec25e6b /mm
parent	04f05909e0fde36ba481ad4c850b666ebef1ac55
zsmalloc: cosmetic compaction code adjustments
Change zs_object_copy() argument order to be (DST, SRC) rather than
(SRC, DST): copy/move functions usually take their arguments in
(to, from) order.

Rename alloc_target_page() to isolate_target_page(): this function
doesn't allocate anything, it isolates the target page, pretty much
like isolate_source_page().

Tweak the __zs_compact() comment.

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
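For context, a minimal user-space sketch of the (dst, src) ordering the
patch adopts, mirroring memcpy(); object_copy() and the buffers below are
hypothetical stand-ins for illustration, not the kernel's zs_object_copy():

#include <string.h>

/*
 * Destination first, source second, just like memcpy(dst, src, n).
 * A call site then reads left to right as "copy into dst from src".
 */
static void object_copy(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
}

int main(void)
{
	char src[8] = "zspage", dst[8];

	object_copy(dst, src, sizeof(src));
	return 0;
}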
Diffstat (limited to 'mm')
-rw-r--r--	mm/zsmalloc.c	12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 4b39e5eaf34f..2a1f95249f12 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1471,7 +1471,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 }
 EXPORT_SYMBOL_GPL(zs_free);
 
-static void zs_object_copy(unsigned long src, unsigned long dst,
+static void zs_object_copy(unsigned long dst, unsigned long src,
 				struct size_class *class)
 {
 	struct page *s_page, *d_page;
@@ -1612,7 +1612,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 
 		used_obj = handle_to_obj(handle);
 		free_obj = obj_malloc(d_page, class, handle);
-		zs_object_copy(used_obj, free_obj, class);
+		zs_object_copy(free_obj, used_obj, class);
 		index++;
 		record_obj(handle, free_obj);
 		unpin_tag(handle);
@@ -1628,7 +1628,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 	return ret;
 }
 
-static struct page *alloc_target_page(struct size_class *class)
+static struct page *isolate_target_page(struct size_class *class)
 {
 	int i;
 	struct page *page;
@@ -1718,11 +1718,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 		cc.index = 0;
 		cc.s_page = src_page;
 
-		while ((dst_page = alloc_target_page(class))) {
+		while ((dst_page = isolate_target_page(class))) {
 			cc.d_page = dst_page;
 			/*
-			 * If there is no more space in dst_page, try to
-			 * allocate another zspage.
+			 * If there is no more space in dst_page, resched
+			 * and see if anyone had allocated another zspage.
 			 */
 			if (!migrate_zspage(pool, class, &cc))
 				break;