author	Vitaly Wool <vitalywool@gmail.com>	2019-09-23 18:39:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-24 18:54:12 -0400
commit	068619e32ff6229a09407d267e36ea7710b96ea1 (patch)
tree	ea940e56669826675ee347d6b2e11c298044e5e7
parent	d2fcd82bb83aab47c6d63aa8c960cd5edb578065 (diff)
zswap: do not map same object twice
zswap_writeback_entry() maps a handle to read swpentry first, and in the
most common case it then maps the same handle again.  This is fine when
zbud is the backend, since zbud's mapping callback is plain and simple,
but it slows things down for z3fold.

Since there is hardly any point in unmapping a handle as quickly as
zswap_writeback_entry() does after reading swpentry, keep the handle
mapped until the end instead.

Link: http://lkml.kernel.org/r/20190916004640.b453167d3556c4093af4cf7d@gmail.com
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Reviewed-by: Dan Streetman <ddstreet@ieee.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitalywool@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
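For illustration, here is a condensed sketch of the control flow after the
patch. This is a hand-written simplification, not the actual mm/zswap.c
code: the tree lookup, the swapcache switch and the fail path are elided,
and the function name is invented.

	/*
	 * Sketch only: the real zswap_writeback_entry() also looks up the
	 * zswap rbtree, handles the ZSWAP_SWAPCACHE_* cases and has a fail
	 * path; those steps are omitted here.
	 */
	static int writeback_sketch(struct zpool *pool, unsigned long handle)
	{
		struct zswap_header *zhdr;
		u8 *src;
		int ret = 0;

		/* Single map: the mapping stays valid for the whole function. */
		zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
		/* ... read zhdr->swpentry, look up the tree entry ... */

		/*
		 * The compressed payload sits right after the header inside
		 * the same mapping, so no second zpool_map_handle() is needed.
		 */
		src = (u8 *)zhdr + sizeof(struct zswap_header);
		/* ... crypto_comp_decompress(tfm, src, ...), write the page ... */

		/* Each exit path unmaps exactly once before returning. */
		zpool_unmap_handle(pool, handle);
		return ret;
	}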
-rw-r--r--	mm/zswap.c	7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 08b6cefae5d8..46a322316e52 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -856,7 +856,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	/* extract swpentry from data */
 	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
 	swpentry = zhdr->swpentry; /* here */
-	zpool_unmap_handle(pool, handle);
 	tree = zswap_trees[swp_type(swpentry)];
 	offset = swp_offset(swpentry);
 
@@ -866,6 +865,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	if (!entry) {
 		/* entry was invalidated */
 		spin_unlock(&tree->lock);
+		zpool_unmap_handle(pool, handle);
 		return 0;
 	}
 	spin_unlock(&tree->lock);
@@ -886,15 +886,13 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
 		/* decompress */
 		dlen = PAGE_SIZE;
-		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
-				ZPOOL_MM_RO) + sizeof(struct zswap_header);
+		src = (u8 *)zhdr + sizeof(struct zswap_header);
 		dst = kmap_atomic(page);
 		tfm = *get_cpu_ptr(entry->pool->tfm);
 		ret = crypto_comp_decompress(tfm, src, entry->length,
 					     dst, &dlen);
 		put_cpu_ptr(entry->pool->tfm);
 		kunmap_atomic(dst);
-		zpool_unmap_handle(entry->pool->zpool, entry->handle);
 		BUG_ON(ret);
 		BUG_ON(dlen != PAGE_SIZE);
 
@@ -940,6 +938,7 @@ fail:
 	spin_unlock(&tree->lock);
 
 end:
+	zpool_unmap_handle(pool, handle);
 	return ret;
 }
 
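Background worth keeping in mind when reading the diff (not part of the
patch itself): zbud's map callback is roughly simple pointer arithmetic on
the zbud page, while z3fold's has to take the per-page z3fold lock before
it can compute the address, so each map/unmap round trip removed from the
writeback path saves real synchronization work under z3fold. Note also the
one asymmetry in the result: the invalidated-entry path unmaps and returns
0 directly, while every other path reaches the single unmap via the end:
label.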