author    SeongJae Park <sj38.park@gmail.com>  2014-04-07 18:38:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-04-07 19:36:03 -0400
commit    6335b19344cc263724ae49a76ed930b21a659055 (patch)
tree      5cae33c11876ca06aadcf88b69b3c7975b7c0912 /mm
parent    6b4525164e247e29f48b3a69e3d35f60fab50ae5 (diff)
mm/zswap.c: update zsmalloc in comment to zbud
zswap used zsmalloc before and is now using zbud, but some comments still say it uses zsmalloc. Fix these trivial problems.

Signed-off-by: SeongJae Park <sj38.park@gmail.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
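For context on the rename: zswap keeps each compressed page behind a zbud handle. Below is a minimal sketch of that store pattern, assuming the 3.14-era zbud API (zbud_alloc(), zbud_map(), zbud_unmap()); the function and variable names are illustrative, not zswap's actual code.

#include <linux/zbud.h>
#include <linux/string.h>

/*
 * Illustrative helper (not from mm/zswap.c): place dlen bytes of
 * already-compressed data into a zbud pool and hand back the handle
 * that zswap would record in its zswap_entry.
 */
static int store_compressed(struct zbud_pool *pool, const u8 *src,
			    unsigned int dlen, unsigned long *handle)
{
	u8 *buf;
	int ret;

	/* zbud_alloc() returns 0 on success and fills in *handle */
	ret = zbud_alloc(pool, dlen, __GFP_NORETRY | __GFP_NOWARN, handle);
	if (ret)
		return ret;

	buf = zbud_map(pool, *handle);	/* map handle to a kernel address */
	memcpy(buf, src, dlen);		/* copy the compressed payload */
	zbud_unmap(pool, *handle);	/* drop the mapping; data stays */
	return 0;
}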
Diffstat (limited to 'mm')
-rw-r--r--	mm/zswap.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 5b2245324715..25312eb373a0 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -165,7 +165,7 @@ static void zswap_comp_exit(void)
  * be held while changing the refcount. Since the lock must
  * be held, there is no reason to also make refcount atomic.
  * offset - the swap offset for the entry. Index into the red-black tree.
- * handle - zsmalloc allocation handle that stores the compressed page data
+ * handle - zbud allocation handle that stores the compressed page data
  * length - the length in bytes of the compressed page data. Needed during
  *          decompression
  */
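For reference, the fields this comment block documents belong to zswap's per-entry structure. A sketch of struct zswap_entry as it looked in mm/zswap.c of this era (field order and types are a best-effort reconstruction, not a verbatim quote):

struct zswap_entry {
	struct rb_node rbnode;	/* node in the per-type red-black tree */
	pgoff_t offset;		/* swap offset; tree index */
	int refcount;		/* protected by the tree lock, so not atomic */
	unsigned int length;	/* compressed data length in bytes */
	unsigned long handle;	/* zbud handle for the compressed data */
};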
@@ -282,7 +282,7 @@ static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
 }
 
 /*
- * Carries out the common pattern of freeing and entry's zsmalloc allocation,
+ * Carries out the common pattern of freeing and entry's zbud allocation,
  * freeing the entry itself, and decrementing the number of stored pages.
  */
 static void zswap_free_entry(struct zswap_tree *tree,
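The hunk cuts off at the signature; a sketch of the pattern the comment describes, reconstructed from the surrounding context rather than quoted from this diff (zswap_entry_cache_free() and zswap_stored_pages do exist in mm/zswap.c of this era, but their exact use here is an assumption):

static void zswap_free_entry(struct zswap_tree *tree,
			     struct zswap_entry *entry)
{
	zbud_free(tree->pool, entry->handle);	/* free the zbud allocation */
	zswap_entry_cache_free(entry);		/* free the entry itself */
	atomic_dec(&zswap_stored_pages);	/* one fewer stored page */
}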