author     Linus Torvalds <torvalds@linux-foundation.org>  2014-04-07 19:38:06 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-07 19:38:06 -0400
commit     26c12d93348f0bda0756aff83f4867d9ae58a5a6 (patch)
tree       65221f6837c66a9260c5c973e5fb908b10e0d504 /mm/zswap.c
parent     dc5ed40686a4da95881c35d913b60f867755cbe2 (diff)
parent     fdc5813fbbd484a54c88477f91a78934cda8bb32 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge second patch-bomb from Andrew Morton:
- the rest of MM
- zram updates
- zswap updates
- exit
- procfs
- exec
- wait
- crash dump
- lib/idr
- rapidio
- adfs, affs, bfs, ufs
- cris
- Kconfig things
- initramfs
- small amount of IPC material
- percpu enhancements
- early ioremap support
- various other misc things
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (156 commits)
MAINTAINERS: update Intel C600 SAS driver maintainers
fs/ufs: remove unused ufs_super_block_third pointer
fs/ufs: remove unused ufs_super_block_second pointer
fs/ufs: remove unused ufs_super_block_first pointer
fs/ufs/super.c: add __init to init_inodecache()
doc/kernel-parameters.txt: add early_ioremap_debug
arm64: add early_ioremap support
arm64: initialize pgprot info earlier in boot
x86: use generic early_ioremap
mm: create generic early_ioremap() support
x86/mm: sparse warning fix for early_memremap
lglock: map to spinlock when !CONFIG_SMP
percpu: add preemption checks to __this_cpu ops
vmstat: use raw_cpu_ops to avoid false positives on preemption checks
slub: use raw_cpu_inc for incrementing statistics
net: replace __this_cpu_inc in route.c with raw_cpu_inc
modules: use raw_cpu_write for initialization of per cpu refcount.
mm: use raw_cpu ops for determining current NUMA node
percpu: add raw_cpu_ops
slub: fix leak of 'name' in sysfs_slab_add
...
Diffstat (limited to 'mm/zswap.c')
-rw-r--r--  mm/zswap.c | 78
1 file changed, 40 insertions(+), 38 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index d7337fbf6605..aeaef0fb5624 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -89,6 +89,9 @@ static unsigned int zswap_max_pool_percent = 20;
 module_param_named(max_pool_percent,
                 zswap_max_pool_percent, uint, 0644);
 
+/* zbud_pool is shared by all of zswap backend */
+static struct zbud_pool *zswap_pool;
+
 /*********************************
 * compression functions
 **********************************/
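This first hunk is the core of the change: one zbud pool, shared by every swap type, replaces the per-tree pools that the later hunks remove. For orientation, here is a minimal sketch of the zbud lifecycle exactly as this file exercises it; only the zbud_* calls and gfp flags are taken from the diff, while the wrapper function and its locals are illustrative and error handling is abbreviated.

    /* Sketch: the zbud calls used by zswap, in lifecycle order. */
    static struct zbud_pool *pool;

    static int zbud_lifecycle_sketch(void)
    {
            unsigned long handle;
            void *buf;

            pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops); /* once, at init */
            if (!pool)
                    return -ENOMEM;
            if (zbud_alloc(pool, 64, __GFP_NORETRY | __GFP_NOWARN, &handle))
                    return -ENOMEM;         /* -ENOSPC: object too big to pair up */
            buf = zbud_map(pool, handle);   /* pin the object, get a pointer */
            memset(buf, 0, 64);             /* ...write compressed data here... */
            zbud_unmap(pool, handle);       /* mappings are kept short-lived */
            zbud_free(pool, handle);        /* per-object teardown */
            zbud_destroy_pool(pool);        /* global teardown */
            return 0;
    }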
@@ -160,14 +163,14 @@ static void zswap_comp_exit(void)
  * rbnode - links the entry into red-black tree for the appropriate swap type
  * refcount - the number of outstanding reference to the entry. This is needed
  *            to protect against premature freeing of the entry by code
- *            concurent calls to load, invalidate, and writeback.  The lock
+ *            concurrent calls to load, invalidate, and writeback.  The lock
  *            for the zswap_tree structure that contains the entry must
  *            be held while changing the refcount.  Since the lock must
  *            be held, there is no reason to also make refcount atomic.
  * offset - the swap offset for the entry.  Index into the red-black tree.
- * handle - zsmalloc allocation handle that stores the compressed page data
+ * handle - zbud allocation handle that stores the compressed page data
  * length - the length in bytes of the compressed page data.  Needed during
  *          decompression
  */
 struct zswap_entry {
         struct rb_node rbnode;
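The rewritten comment pins down the refcount protocol: every refcount change happens under the tree spinlock, so a plain int suffices and no atomics are needed. The put side of the pattern appears verbatim in a later hunk; pairing it with a get helper (whose shape is inferred here, since zswap_entry_get is not part of this diff) gives the compact picture:

    /* Sketch: both helpers assume tree->lock is held. */
    static void zswap_entry_get(struct zswap_entry *entry)
    {
            entry->refcount++;
    }

    static void zswap_entry_put(struct zswap_tree *tree,
                    struct zswap_entry *entry)
    {
            int refcount = --entry->refcount;

            BUG_ON(refcount < 0);
            if (refcount == 0) {
                    zswap_rb_erase(&tree->rbroot, entry);   /* no new lookups */
                    zswap_free_entry(entry);                /* last ref: tear down */
            }
    }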
@@ -189,7 +192,6 @@ struct zswap_header {
 struct zswap_tree {
         struct rb_root rbroot;
         spinlock_t lock;
-        struct zbud_pool *pool;
 };
 
 static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
@@ -202,7 +204,7 @@ static struct kmem_cache *zswap_entry_cache;
 static int zswap_entry_cache_create(void)
 {
         zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
-        return (zswap_entry_cache == NULL);
+        return zswap_entry_cache == NULL;
 }
 
 static void zswap_entry_cache_destory(void)
@@ -282,16 +284,15 @@ static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
 }
 
 /*
- * Carries out the common pattern of freeing and entry's zsmalloc allocation,
+ * Carries out the common pattern of freeing and entry's zbud allocation,
  * freeing the entry itself, and decrementing the number of stored pages.
  */
-static void zswap_free_entry(struct zswap_tree *tree,
-                        struct zswap_entry *entry)
+static void zswap_free_entry(struct zswap_entry *entry)
 {
-        zbud_free(tree->pool, entry->handle);
+        zbud_free(zswap_pool, entry->handle);
         zswap_entry_cache_free(entry);
         atomic_dec(&zswap_stored_pages);
-        zswap_pool_pages = zbud_get_pool_size(tree->pool);
+        zswap_pool_pages = zbud_get_pool_size(zswap_pool);
 }
 
 /* caller must hold the tree lock */
@@ -311,7 +312,7 @@ static void zswap_entry_put(struct zswap_tree *tree,
         BUG_ON(refcount < 0);
         if (refcount == 0) {
                 zswap_rb_erase(&tree->rbroot, entry);
-                zswap_free_entry(tree, entry);
+                zswap_free_entry(entry);
         }
 }
 
@@ -407,8 +408,8 @@ cleanup:
 **********************************/
 static bool zswap_is_full(void)
 {
-        return (totalram_pages * zswap_max_pool_percent / 100 <
-                zswap_pool_pages);
+        return totalram_pages * zswap_max_pool_percent / 100 <
+                zswap_pool_pages;
 }
 
 /*********************************
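zswap_is_full() caps the pool at max_pool_percent of RAM (default 20, per the module_param at the top of this diff). A worked example: with 4 GiB of RAM and 4 KiB pages, totalram_pages is 1048576, so the cap is 1048576 * 20 / 100 = 209715 pages, roughly 819 MiB of pool backing; once zswap_pool_pages crosses that, the store path below bumps zswap_pool_limit_hit and asks zbud to reclaim before accepting the new page.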
@@ -545,7 +546,6 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
         zbud_unmap(pool, handle);
         tree = zswap_trees[swp_type(swpentry)];
         offset = swp_offset(swpentry);
-        BUG_ON(pool != tree->pool);
 
         /* find and ref zswap entry */
         spin_lock(&tree->lock);
@@ -573,13 +573,13 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
         case ZSWAP_SWAPCACHE_NEW: /* page is locked */
                 /* decompress */
                 dlen = PAGE_SIZE;
-                src = (u8 *)zbud_map(tree->pool, entry->handle) +
+                src = (u8 *)zbud_map(zswap_pool, entry->handle) +
                         sizeof(struct zswap_header);
                 dst = kmap_atomic(page);
                 ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
                                 entry->length, dst, &dlen);
                 kunmap_atomic(dst);
-                zbud_unmap(tree->pool, entry->handle);
+                zbud_unmap(zswap_pool, entry->handle);
                 BUG_ON(ret);
                 BUG_ON(dlen != PAGE_SIZE);
 
@@ -652,7 +652,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
         /* reclaim space if needed */
         if (zswap_is_full()) {
                 zswap_pool_limit_hit++;
-                if (zbud_reclaim_page(tree->pool, 8)) {
+                if (zbud_reclaim_page(zswap_pool, 8)) {
                         zswap_reject_reclaim_fail++;
                         ret = -ENOMEM;
                         goto reject;
@@ -679,7 +679,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 
         /* store */
         len = dlen + sizeof(struct zswap_header);
-        ret = zbud_alloc(tree->pool, len, __GFP_NORETRY | __GFP_NOWARN,
+        ret = zbud_alloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
                         &handle);
         if (ret == -ENOSPC) {
                 zswap_reject_compress_poor++;
@@ -689,11 +689,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                 zswap_reject_alloc_fail++;
                 goto freepage;
         }
-        zhdr = zbud_map(tree->pool, handle);
+        zhdr = zbud_map(zswap_pool, handle);
         zhdr->swpentry = swp_entry(type, offset);
         buf = (u8 *)(zhdr + 1);
         memcpy(buf, dst, dlen);
-        zbud_unmap(tree->pool, handle);
+        zbud_unmap(zswap_pool, handle);
         put_cpu_var(zswap_dstmem);
 
         /* populate entry */
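The swp_entry(type, offset) written into the header here is what makes writeback possible later: zbud's reclaim callback hands zswap_writeback_entry() only (pool, handle), so the swap location must be recovered from the stored header. Condensed from the writeback hunk earlier in this diff (the first two lines precede the visible context there and are reconstructed):

    /* Sketch: writeback recovers the swap location from the stored header. */
    zhdr = zbud_map(pool, handle);
    swpentry = zhdr->swpentry;      /* written at store time, above */
    zbud_unmap(pool, handle);
    tree = zswap_trees[swp_type(swpentry)];
    offset = swp_offset(swpentry);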
@@ -716,7 +716,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 
         /* update stats */
         atomic_inc(&zswap_stored_pages);
-        zswap_pool_pages = zbud_get_pool_size(tree->pool);
+        zswap_pool_pages = zbud_get_pool_size(zswap_pool);
 
         return 0;
 
@@ -752,13 +752,13 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 
         /* decompress */
         dlen = PAGE_SIZE;
-        src = (u8 *)zbud_map(tree->pool, entry->handle) +
+        src = (u8 *)zbud_map(zswap_pool, entry->handle) +
                         sizeof(struct zswap_header);
         dst = kmap_atomic(page);
         ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
                 dst, &dlen);
         kunmap_atomic(dst);
-        zbud_unmap(tree->pool, entry->handle);
+        zbud_unmap(zswap_pool, entry->handle);
         BUG_ON(ret);
 
         spin_lock(&tree->lock);
@@ -804,11 +804,9 @@ static void zswap_frontswap_invalidate_area(unsigned type)
         /* walk the tree and free everything */
         spin_lock(&tree->lock);
         rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
-                zswap_free_entry(tree, entry);
+                zswap_free_entry(entry);
         tree->rbroot = RB_ROOT;
         spin_unlock(&tree->lock);
-
-        zbud_destroy_pool(tree->pool);
         kfree(tree);
         zswap_trees[type] = NULL;
 }
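rbtree_postorder_for_each_entry_safe() is what lets invalidate_area free entries mid-walk: postorder visits a node only after its whole subtree, and the _safe form caches the successor before the loop body runs, so zswap_free_entry() can release the node without breaking the iteration. Two omissions are deliberate compared to the put path above: there is no per-node zswap_rb_erase(), since rebalancing a tree that is being destroyed is wasted work, and tree->rbroot is reset to RB_ROOT afterwards so the root never dangles.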
@@ -822,20 +820,14 @@ static void zswap_frontswap_init(unsigned type)
         struct zswap_tree *tree;
 
         tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
-        if (!tree)
-                goto err;
-        tree->pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops);
-        if (!tree->pool)
-                goto freetree;
+        if (!tree) {
+                pr_err("alloc failed, zswap disabled for swap type %d\n", type);
+                return;
+        }
+
         tree->rbroot = RB_ROOT;
         spin_lock_init(&tree->lock);
         zswap_trees[type] = tree;
-        return;
-
-freetree:
-        kfree(tree);
-err:
-        pr_err("alloc failed, zswap disabled for swap type %d\n", type);
 }
 
 static struct frontswap_ops zswap_frontswap_ops = {
@@ -907,9 +899,16 @@ static int __init init_zswap(void)
                 return 0;
 
         pr_info("loading zswap\n");
+
+        zswap_pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops);
+        if (!zswap_pool) {
+                pr_err("zbud pool creation failed\n");
+                goto error;
+        }
+
         if (zswap_entry_cache_create()) {
                 pr_err("entry cache creation failed\n");
-                goto error;
+                goto cachefail;
         }
         if (zswap_comp_init()) {
                 pr_err("compressor initialization failed\n");
@@ -919,6 +918,7 @@ static int __init init_zswap(void)
                 pr_err("per-cpu initialization failed\n");
                 goto pcpufail;
         }
+
         frontswap_register_ops(&zswap_frontswap_ops);
         if (zswap_debugfs_init())
                 pr_warn("debugfs initialization failed\n");
@@ -927,6 +927,8 @@ pcpufail:
         zswap_comp_exit();
 compfail:
         zswap_entry_cache_destory();
+cachefail:
+        zbud_destroy_pool(zswap_pool);
 error:
         return -ENOMEM;
 }
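The new cachefail label completes the standard kernel unwind ladder in init_zswap(): each setup step that can fail jumps to the label that releases everything initialized before it, in reverse order. Condensed from the hunks above (the per-cpu setup call is not visible in this diff, so its name below is an assumption; pr_* and debugfs lines are elided):

    static int __init init_zswap(void)
    {
            zswap_pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops);
            if (!zswap_pool)
                    goto error;             /* nothing to undo yet */
            if (zswap_entry_cache_create())
                    goto cachefail;         /* undo: pool */
            if (zswap_comp_init())
                    goto compfail;          /* undo: cache, then pool */
            if (zswap_cpu_init())           /* assumed name of the per-cpu setup */
                    goto pcpufail;          /* undo: compressor, cache, pool */
            frontswap_register_ops(&zswap_frontswap_ops);
            return 0;

    pcpufail:
            zswap_comp_exit();
    compfail:
            zswap_entry_cache_destory();    /* sic: spelled this way in the source */
    cachefail:
            zbud_destroy_pool(zswap_pool);
    error:
            return -ENOMEM;
    }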