author	Dan Streetman <ddstreet@ieee.org>	2014-08-06 19:08:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:23 -0400
commit	12d79d64bfd3913693304feb8636ccab504b9e63 (patch)
tree	15e87996b0c210fc5c33db26ca703de5c2e56b01 /mm
parent	c795779df29e180738568d2a5eb3a42f3b5e47f0 (diff)
mm/zpool: update zswap to use zpool
Change zswap to use the zpool api instead of directly using zbud.  Add a
boot-time param to allow selecting which zpool implementation to use,
with zbud as the default.

Signed-off-by: Dan Streetman <ddstreet@ieee.org>
Tested-by: Seth Jennings <sjennings@variantweb.net>
Cc: Weijie Yang <weijie.yang@samsung.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
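As background, the zpool API the patch switches to is used in the same
create/malloc/map/unmap/free/destroy pattern visible in the diff below.
The following is a minimal, self-contained sketch of that round-trip
against the API as it exists at this commit; the module name, the stub
evict callback, and the 64-byte test object are illustrative only, not
part of the patch.

	/* Minimal, illustrative zpool round-trip; not part of the patch. */
	#include <linux/module.h>
	#include <linux/gfp.h>
	#include <linux/string.h>
	#include <linux/zpool.h>

	/* Stub evict callback; zswap's real one is zswap_writeback_entry(). */
	static int sketch_evict(struct zpool *pool, unsigned long handle)
	{
		return -EINVAL;
	}

	static struct zpool_ops sketch_zpool_ops = {
		.evict = sketch_evict
	};

	static int __init zpool_sketch_init(void)
	{
		gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
		struct zpool *pool;
		unsigned long handle;
		char *buf;

		/* Pools are created by name; "zbud" is zswap's default. */
		pool = zpool_create_pool("zbud", gfp, &sketch_zpool_ops);
		if (!pool)
			return -ENOMEM;

		/* Allocate an object; an opaque handle comes back. */
		if (zpool_malloc(pool, 64, gfp, &handle)) {
			zpool_destroy_pool(pool);
			return -ENOMEM;
		}

		/* Handles must be mapped before use and unmapped promptly. */
		buf = zpool_map_handle(pool, handle, ZPOOL_MM_RW);
		memset(buf, 0, 64);
		zpool_unmap_handle(pool, handle);

		/* Accounting is in bytes, not pages, under the zpool API. */
		pr_info("zpool holds %llu bytes\n", zpool_get_total_size(pool));

		zpool_free(pool, handle);
		zpool_destroy_pool(pool);
		return 0;
	}
	module_init(zpool_sketch_init);
	MODULE_LICENSE("GPL");

With the patch applied, a backend is selected at boot with
zswap.zpool=<name> (the parameter is created with 0444 permissions, so
it is read-only at runtime), and init_zswap() falls back to the zbud
default when the requested zpool is not available.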
Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig	2
-rw-r--r--	mm/zswap.c	75
2 files changed, 46 insertions(+), 31 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 12179b8c3b89..886db2158538 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -535,7 +535,7 @@ config ZSWAP
 	bool "Compressed cache for swap pages (EXPERIMENTAL)"
 	depends on FRONTSWAP && CRYPTO=y
 	select CRYPTO_LZO
-	select ZBUD
+	select ZPOOL
 	default n
 	help
 	  A lightweight compressed cache for swap pages.  It takes
diff --git a/mm/zswap.c b/mm/zswap.c
index 008388fe7b0f..032c21eeab2b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -34,7 +34,7 @@
 #include <linux/swap.h>
 #include <linux/crypto.h>
 #include <linux/mempool.h>
-#include <linux/zbud.h>
+#include <linux/zpool.h>
 
 #include <linux/mm_types.h>
 #include <linux/page-flags.h>
@@ -45,8 +45,8 @@
 /*********************************
 * statistics
 **********************************/
-/* Number of memory pages used by the compressed pool */
-static u64 zswap_pool_pages;
+/* Total bytes used by the compressed storage */
+static u64 zswap_pool_total_size;
 /* The number of compressed pages currently stored in zswap */
 static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
 
@@ -89,8 +89,13 @@ static unsigned int zswap_max_pool_percent = 20;
 module_param_named(max_pool_percent,
 			zswap_max_pool_percent, uint, 0644);
 
-/* zbud_pool is shared by all of zswap backend */
-static struct zbud_pool *zswap_pool;
+/* Compressed storage to use */
+#define ZSWAP_ZPOOL_DEFAULT "zbud"
+static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
+module_param_named(zpool, zswap_zpool_type, charp, 0444);
+
+/* zpool is shared by all of zswap backend */
+static struct zpool *zswap_pool;
 
 /*********************************
 * compression functions
@@ -168,7 +173,7 @@ static void zswap_comp_exit(void)
  *            be held while changing the refcount.  Since the lock must
  *            be held, there is no reason to also make refcount atomic.
  * offset - the swap offset for the entry.  Index into the red-black tree.
- * handle - zbud allocation handle that stores the compressed page data
+ * handle - zpool allocation handle that stores the compressed page data
  * length - the length in bytes of the compressed page data.  Needed during
  *          decompression
  */
@@ -284,15 +289,15 @@ static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
 }
 
 /*
- * Carries out the common pattern of freeing and entry's zbud allocation,
+ * Carries out the common pattern of freeing and entry's zpool allocation,
  * freeing the entry itself, and decrementing the number of stored pages.
  */
 static void zswap_free_entry(struct zswap_entry *entry)
 {
-	zbud_free(zswap_pool, entry->handle);
+	zpool_free(zswap_pool, entry->handle);
 	zswap_entry_cache_free(entry);
 	atomic_dec(&zswap_stored_pages);
-	zswap_pool_pages = zbud_get_pool_size(zswap_pool);
+	zswap_pool_total_size = zpool_get_total_size(zswap_pool);
 }
 
 /* caller must hold the tree lock */
@@ -409,7 +414,7 @@ cleanup:
 static bool zswap_is_full(void)
 {
 	return totalram_pages * zswap_max_pool_percent / 100 <
-		zswap_pool_pages;
+		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 }
 
 /*********************************
@@ -525,7 +530,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
  * the swap cache, the compressed version stored by zswap can be
  * freed.
  */
-static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
+static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 {
 	struct zswap_header *zhdr;
 	swp_entry_t swpentry;
@@ -541,9 +546,9 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
 	};
 
 	/* extract swpentry from data */
-	zhdr = zbud_map(pool, handle);
+	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
 	swpentry = zhdr->swpentry; /* here */
-	zbud_unmap(pool, handle);
+	zpool_unmap_handle(pool, handle);
 	tree = zswap_trees[swp_type(swpentry)];
 	offset = swp_offset(swpentry);
 
@@ -573,13 +578,13 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
 	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
 		/* decompress */
 		dlen = PAGE_SIZE;
-		src = (u8 *)zbud_map(zswap_pool, entry->handle) +
-			sizeof(struct zswap_header);
+		src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
+				ZPOOL_MM_RO) + sizeof(struct zswap_header);
 		dst = kmap_atomic(page);
 		ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
 				entry->length, dst, &dlen);
 		kunmap_atomic(dst);
-		zbud_unmap(zswap_pool, entry->handle);
+		zpool_unmap_handle(zswap_pool, entry->handle);
 		BUG_ON(ret);
 		BUG_ON(dlen != PAGE_SIZE);
 
@@ -652,7 +657,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 	/* reclaim space if needed */
 	if (zswap_is_full()) {
 		zswap_pool_limit_hit++;
-		if (zbud_reclaim_page(zswap_pool, 8)) {
+		if (zpool_shrink(zswap_pool, 1, NULL)) {
 			zswap_reject_reclaim_fail++;
 			ret = -ENOMEM;
 			goto reject;
@@ -679,7 +684,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 
 	/* store */
 	len = dlen + sizeof(struct zswap_header);
-	ret = zbud_alloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
+	ret = zpool_malloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
 		&handle);
 	if (ret == -ENOSPC) {
 		zswap_reject_compress_poor++;
@@ -689,11 +694,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 		zswap_reject_alloc_fail++;
 		goto freepage;
 	}
-	zhdr = zbud_map(zswap_pool, handle);
+	zhdr = zpool_map_handle(zswap_pool, handle, ZPOOL_MM_RW);
 	zhdr->swpentry = swp_entry(type, offset);
 	buf = (u8 *)(zhdr + 1);
 	memcpy(buf, dst, dlen);
-	zbud_unmap(zswap_pool, handle);
+	zpool_unmap_handle(zswap_pool, handle);
 	put_cpu_var(zswap_dstmem);
 
 	/* populate entry */
@@ -716,7 +721,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 
 	/* update stats */
 	atomic_inc(&zswap_stored_pages);
-	zswap_pool_pages = zbud_get_pool_size(zswap_pool);
+	zswap_pool_total_size = zpool_get_total_size(zswap_pool);
 
 	return 0;
 
@@ -752,13 +757,13 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 
 	/* decompress */
 	dlen = PAGE_SIZE;
-	src = (u8 *)zbud_map(zswap_pool, entry->handle) +
-			sizeof(struct zswap_header);
+	src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
+			ZPOOL_MM_RO) + sizeof(struct zswap_header);
 	dst = kmap_atomic(page);
 	ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
 		dst, &dlen);
 	kunmap_atomic(dst);
-	zbud_unmap(zswap_pool, entry->handle);
+	zpool_unmap_handle(zswap_pool, entry->handle);
 	BUG_ON(ret);
 
 	spin_lock(&tree->lock);
@@ -811,7 +816,7 @@ static void zswap_frontswap_invalidate_area(unsigned type)
 	zswap_trees[type] = NULL;
 }
 
-static struct zbud_ops zswap_zbud_ops = {
+static struct zpool_ops zswap_zpool_ops = {
 	.evict = zswap_writeback_entry
 };
 
@@ -869,8 +874,8 @@ static int __init zswap_debugfs_init(void)
 			zswap_debugfs_root, &zswap_written_back_pages);
 	debugfs_create_u64("duplicate_entry", S_IRUGO,
 			zswap_debugfs_root, &zswap_duplicate_entry);
-	debugfs_create_u64("pool_pages", S_IRUGO,
-			zswap_debugfs_root, &zswap_pool_pages);
+	debugfs_create_u64("pool_total_size", S_IRUGO,
+			zswap_debugfs_root, &zswap_pool_total_size);
 	debugfs_create_atomic_t("stored_pages", S_IRUGO,
 			zswap_debugfs_root, &zswap_stored_pages);
 
@@ -895,16 +900,26 @@ static void __exit zswap_debugfs_exit(void) { }
 **********************************/
 static int __init init_zswap(void)
 {
+	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
+
 	if (!zswap_enabled)
 		return 0;
 
 	pr_info("loading zswap\n");
 
-	zswap_pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops);
+	zswap_pool = zpool_create_pool(zswap_zpool_type, gfp, &zswap_zpool_ops);
+	if (!zswap_pool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
+		pr_info("%s zpool not available\n", zswap_zpool_type);
+		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
+		zswap_pool = zpool_create_pool(zswap_zpool_type, gfp,
+					&zswap_zpool_ops);
+	}
 	if (!zswap_pool) {
-		pr_err("zbud pool creation failed\n");
+		pr_err("%s zpool not available\n", zswap_zpool_type);
+		pr_err("zpool creation failed\n");
 		goto error;
 	}
+	pr_info("using %s pool\n", zswap_zpool_type);
 
 	if (zswap_entry_cache_create()) {
 		pr_err("entry cache creation failed\n");
@@ -928,7 +943,7 @@ pcpufail:
 compfail:
 	zswap_entry_cache_destory();
 cachefail:
-	zbud_destroy_pool(zswap_pool);
+	zpool_destroy_pool(zswap_pool);
 error:
 	return -ENOMEM;
 }