diff options
author | Cody P Schafer <cody@linux.vnet.ibm.com> | 2013-09-11 17:25:33 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-11 18:59:21 -0400 |
commit | 0bd42136f7ae4ea1375da34c32838fb35eee8c59 (patch) | |
tree | 0cbdcf537d072266e763f120fb26578920d5c97d /mm/zswap.c | |
parent | 7c993e11aa59d9d1cefbd6acc8d84f2d8d46545a (diff) |
mm/zswap: use postorder iteration when destroying rbtree
Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Reviewed-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: David Woodhouse <David.Woodhouse@intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/zswap.c')
-rw-r--r-- | mm/zswap.c | 16 |
1 file changed, 2 insertions(+), 14 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c index efed4c8b7f5b..841e35f1db22 100644 --- a/mm/zswap.c +++ b/mm/zswap.c | |||
@@ -790,26 +790,14 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset) | |||
790 | static void zswap_frontswap_invalidate_area(unsigned type) | 790 | static void zswap_frontswap_invalidate_area(unsigned type) |
791 | { | 791 | { |
792 | struct zswap_tree *tree = zswap_trees[type]; | 792 | struct zswap_tree *tree = zswap_trees[type]; |
793 | struct rb_node *node; | 793 | struct zswap_entry *entry, *n; |
794 | struct zswap_entry *entry; | ||
795 | 794 | ||
796 | if (!tree) | 795 | if (!tree) |
797 | return; | 796 | return; |
798 | 797 | ||
799 | /* walk the tree and free everything */ | 798 | /* walk the tree and free everything */ |
800 | spin_lock(&tree->lock); | 799 | spin_lock(&tree->lock); |
801 | /* | 800 | rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) { |
802 | * TODO: Even though this code should not be executed because | ||
803 | * the try_to_unuse() in swapoff should have emptied the tree, | ||
804 | * it is very wasteful to rebalance the tree after every | ||
805 | * removal when we are freeing the whole tree. | ||
806 | * | ||
807 | * If post-order traversal code is ever added to the rbtree | ||
808 | * implementation, it should be used here. | ||
809 | */ | ||
810 | while ((node = rb_first(&tree->rbroot))) { | ||
811 | entry = rb_entry(node, struct zswap_entry, rbnode); | ||
812 | rb_erase(&entry->rbnode, &tree->rbroot); | ||
813 | zbud_free(tree->pool, entry->handle); | 801 | zbud_free(tree->pool, entry->handle); |
814 | zswap_entry_cache_free(entry); | 802 | zswap_entry_cache_free(entry); |
815 | atomic_dec(&zswap_stored_pages); | 803 | atomic_dec(&zswap_stored_pages); |