author     Weijie Yang <weijie.yang@samsung.com>  2013-11-12 18:08:27 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-11-12 22:09:11 -0500
commit     0ab0abcf511545d1fddbe72a36b3ca73388ac937 (patch)
tree       66109020ad7d9888970f153cdd55b3b5e2491560 /mm
parent     67d13fe846c57a54d12578e7a4518f68c5c86ad7 (diff)
mm/zswap: refactor the get/put routines
The refcount routines did not fit the kernel's get/put semantics exactly:
there were too many judgement statements on the refcount, and it could go
negative.
This patch does the following:
- move the refcount judgement into zswap_entry_put() to hide the
  resource-freeing logic from callers
- add a new function, zswap_entry_find_get(), so that callers can easily
  use the following pattern (a fuller sketch follows the sign-offs below):
        zswap_entry_find_get
        ... /* do something */
        zswap_entry_put
- move some function definitions to eliminate a compile error
This patch is based on an idea and suggestions from Minchan Kim
<minchan@kernel.org>.
Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
Cc: Seth Jennings <sjennings@variantweb.net>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
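
To make the new API concrete, here is a minimal user-space model of the
patch's get/put semantics. It is an illustrative sketch only, not the kernel
code: a singly linked list stands in for the rbtree, free() for
zswap_free_entry(), and the tree spinlock is omitted because the demo is
single-threaded. entry_get(), entry_put() and entry_find_get() mirror
zswap_entry_get(), the new zswap_entry_put() and zswap_entry_find_get().

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        unsigned long offset;
        int refcount;           /* starts at 1: the tree's own reference */
        struct entry *next;
};

static struct entry *tree;      /* stand-in for tree->rbroot */

static struct entry *entry_search(unsigned long offset)
{
        struct entry *e;

        for (e = tree; e; e = e->next)
                if (e->offset == offset)
                        return e;
        return NULL;
}

/* stand-in for zswap_rb_erase(): no-op if the entry is not linked */
static void entry_erase(struct entry *victim)
{
        struct entry **p;

        for (p = &tree; *p; p = &(*p)->next)
                if (*p == victim) {
                        *p = victim->next;
                        return;
                }
}

/* mirrors zswap_entry_get(): caller must already hold a reference */
static void entry_get(struct entry *e)
{
        e->refcount++;
}

/* mirrors the new zswap_entry_put(): the last put unlinks and frees */
static void entry_put(struct entry *e)
{
        int refcount = --e->refcount;

        assert(refcount >= 0);
        if (refcount == 0) {
                entry_erase(e);
                free(e);        /* stand-in for zswap_free_entry() */
        }
}

/* mirrors zswap_entry_find_get(): search and take a reference */
static struct entry *entry_find_get(unsigned long offset)
{
        struct entry *e = entry_search(offset);

        if (e)
                entry_get(e);
        return e;
}

int main(void)
{
        struct entry *e = calloc(1, sizeof(*e));

        /* creation: refcount = 1, then link into the "tree" */
        e->offset = 42;
        e->refcount = 1;
        e->next = tree;
        tree = e;

        /* the caller pattern from the commit message */
        e = entry_find_get(42);         /* refcount: 1 -> 2 */
        /* ... do something ... */
        entry_put(e);                   /* refcount: 2 -> 1, still linked */

        /* invalidate: drop the initial reference, entry is freed */
        entry_put(entry_search(42));
        printf("entry gone: %s\n", entry_search(42) ? "no" : "yes");
        return 0;
}

The invariant is the one the patch establishes: the tree owns the initial
reference from entry creation, temporary users bracket their work with
find_get/put, and whichever put drops the count to zero unlinks and frees.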
Diffstat (limited to 'mm')
-rw-r--r--  mm/zswap.c  182
1 file changed, 88 insertions(+), 94 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 0ffcad03baea..5a63f78a5601 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -217,6 +217,7 @@ static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
         if (!entry)
                 return NULL;
         entry->refcount = 1;
+        RB_CLEAR_NODE(&entry->rbnode);
         return entry;
 }
 
@@ -225,19 +226,6 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
         kmem_cache_free(zswap_entry_cache, entry);
 }
 
-/* caller must hold the tree lock */
-static void zswap_entry_get(struct zswap_entry *entry)
-{
-        entry->refcount++;
-}
-
-/* caller must hold the tree lock */
-static int zswap_entry_put(struct zswap_entry *entry)
-{
-        entry->refcount--;
-        return entry->refcount;
-}
-
 /*********************************
 * rbtree functions
 **********************************/
@@ -285,6 +273,61 @@ static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
         return 0;
 }
 
+static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
+{
+        if (!RB_EMPTY_NODE(&entry->rbnode)) {
+                rb_erase(&entry->rbnode, root);
+                RB_CLEAR_NODE(&entry->rbnode);
+        }
+}
+
+/*
+ * Carries out the common pattern of freeing an entry's zbud allocation,
+ * freeing the entry itself, and decrementing the number of stored pages.
+ */
+static void zswap_free_entry(struct zswap_tree *tree,
+                        struct zswap_entry *entry)
+{
+        zbud_free(tree->pool, entry->handle);
+        zswap_entry_cache_free(entry);
+        atomic_dec(&zswap_stored_pages);
+        zswap_pool_pages = zbud_get_pool_size(tree->pool);
+}
+
+/* caller must hold the tree lock */
+static void zswap_entry_get(struct zswap_entry *entry)
+{
+        entry->refcount++;
+}
+
+/* caller must hold the tree lock;
+ * remove from the tree and free it if no one references the entry
+ */
+static void zswap_entry_put(struct zswap_tree *tree,
+                        struct zswap_entry *entry)
+{
+        int refcount = --entry->refcount;
+
+        BUG_ON(refcount < 0);
+        if (refcount == 0) {
+                zswap_rb_erase(&tree->rbroot, entry);
+                zswap_free_entry(tree, entry);
+        }
+}
+
+/* caller must hold the tree lock */
+static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
+                                pgoff_t offset)
+{
+        struct zswap_entry *entry = NULL;
+
+        entry = zswap_rb_search(root, offset);
+        if (entry)
+                zswap_entry_get(entry);
+
+        return entry;
+}
+
 /*********************************
 * per-cpu code
 **********************************/
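
One detail worth calling out: zswap_entry_put() may run after the entry has
already been taken off the tree (e.g. by the store path's duplicate
handling), which is why zswap_rb_erase() checks RB_EMPTY_NODE() first. The
sketch below shows that mechanism in user space; the rb_node field and the
two macros follow include/linux/rbtree.h, while fake_insert() and the
stubbed-out rb_erase() call are illustration-only assumptions.

#include <stdio.h>

struct rb_node {
        unsigned long __rb_parent_color;  /* parent pointer + color bit */
};

/* as in include/linux/rbtree.h: a cleared node points at itself */
#define RB_CLEAR_NODE(node) \
        ((node)->__rb_parent_color = (unsigned long)(node))
#define RB_EMPTY_NODE(node) \
        ((node)->__rb_parent_color == (unsigned long)(node))

/* stub: pretend the node got linked under some parent */
static void fake_insert(struct rb_node *node, struct rb_node *parent)
{
        node->__rb_parent_color = (unsigned long)parent;
}

static void erase(struct rb_node *node)
{
        if (!RB_EMPTY_NODE(node)) {
                /* the real code would call rb_erase(node, root) here */
                RB_CLEAR_NODE(node);
                printf("erased\n");
        } else {
                printf("already off the tree, nothing to do\n");
        }
}

int main(void)
{
        struct rb_node parent, node;

        RB_CLEAR_NODE(&node);   /* like zswap_entry_cache_alloc() */
        erase(&node);           /* never inserted: no-op */

        fake_insert(&node, &parent);
        erase(&node);           /* erased once */
        erase(&node);           /* second call is a harmless no-op */
        return 0;
}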
@@ -368,18 +411,6 @@ static bool zswap_is_full(void)
                         zswap_pool_pages);
 }
 
-/*
- * Carries out the common pattern of freeing and entry's zsmalloc allocation,
- * freeing the entry itself, and decrementing the number of stored pages.
- */
-static void zswap_free_entry(struct zswap_tree *tree, struct zswap_entry *entry)
-{
-        zbud_free(tree->pool, entry->handle);
-        zswap_entry_cache_free(entry);
-        atomic_dec(&zswap_stored_pages);
-        zswap_pool_pages = zbud_get_pool_size(tree->pool);
-}
-
 /*********************************
 * writeback code
 **********************************/
@@ -503,7 +534,7 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
         struct page *page;
         u8 *src, *dst;
         unsigned int dlen;
-        int ret, refcount;
+        int ret;
         struct writeback_control wbc = {
                 .sync_mode = WB_SYNC_NONE,
         };
@@ -518,13 +549,12 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
 
         /* find and ref zswap entry */
         spin_lock(&tree->lock);
-        entry = zswap_rb_search(&tree->rbroot, offset);
+        entry = zswap_entry_find_get(&tree->rbroot, offset);
         if (!entry) {
                 /* entry was invalidated */
                 spin_unlock(&tree->lock);
                 return 0;
         }
-        zswap_entry_get(entry);
         spin_unlock(&tree->lock);
         BUG_ON(offset != entry->offset);
 
@@ -566,42 +596,35 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
         zswap_written_back_pages++;
 
         spin_lock(&tree->lock);
-
         /* drop local reference */
-        zswap_entry_put(entry);
-        /* drop the initial reference from entry creation */
-        refcount = zswap_entry_put(entry);
+        zswap_entry_put(tree, entry);
 
         /*
-         * There are three possible values for refcount here:
-         * (1) refcount is 1, load is in progress, unlink from rbtree,
-         *     load will free
-         * (2) refcount is 0, (normal case) entry is valid,
-         *     remove from rbtree and free entry
-         * (3) refcount is -1, invalidate happened during writeback,
-         *     free entry
+         * There are two possible situations for the entry here:
+         * (1) refcount is 1 (normal case): entry is valid and on the tree
+         * (2) refcount is 0: entry was freed and removed from the tree
+         *     because an invalidate happened during writeback;
+         *     search the tree and drop the reference if the entry is found
         */
-        if (refcount >= 0) {
-                /* no invalidate yet, remove from rbtree */
-                rb_erase(&entry->rbnode, &tree->rbroot);
-        }
+        if (entry == zswap_rb_search(&tree->rbroot, offset))
+                zswap_entry_put(tree, entry);
         spin_unlock(&tree->lock);
-        if (refcount <= 0) {
-                /* free the entry */
-                zswap_free_entry(tree, entry);
-                return 0;
-        }
-        return -EAGAIN;
 
+        goto end;
+
+        /*
+         * If we get here due to ZSWAP_SWAPCACHE_EXIST,
+         * a load may be happening concurrently;
+         * it is safe and okay not to free the entry here,
+         * and even if the entry is freed by the following put,
+         * it is still okay to return !0.
+         */
 fail:
         spin_lock(&tree->lock);
-        refcount = zswap_entry_put(entry);
-        if (refcount <= 0) {
-                /* invalidate happened, consider writeback as success */
-                zswap_free_entry(tree, entry);
-                ret = 0;
-        }
+        zswap_entry_put(tree, entry);
         spin_unlock(&tree->lock);
+
+end:
         return ret;
 }
 
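
The tree lock is dropped between taking the local reference and the final
puts above, so an invalidate can run in between. That is also why the code
compares entry against a fresh zswap_rb_search() result instead of just
checking for non-NULL: by then the offset could be occupied by a different,
newly stored entry. The following refcount-only sketch (hypothetical
helpers, no real tree or locking) walks both interleavings:

#include <stdbool.h>
#include <stdio.h>

static int refcount;
static bool on_tree;

static void put(void)
{
        if (--refcount == 0) {
                on_tree = false;                /* zswap_rb_erase() */
                printf("  entry freed\n");      /* zswap_free_entry() */
        }
}

static void writeback_tail(bool invalidated_meanwhile)
{
        refcount = 2;   /* initial ref + writeback's local ref */
        on_tree = true;

        if (invalidated_meanwhile) {
                printf("invalidate ran while the lock was dropped:\n");
                on_tree = false;        /* zswap_rb_erase() */
                put();                  /* initial ref: 2 -> 1 */
        } else {
                printf("normal case:\n");
        }

        /* back under the tree lock in zswap_writeback_entry() */
        put();                          /* drop local reference */
        if (on_tree)                    /* entry == zswap_rb_search(...) */
                put();                  /* drop the initial tree reference */
        printf("  done, refcount=%d\n\n", refcount);
}

int main(void)
{
        writeback_tail(false);  /* entry still on tree: freed here */
        writeback_tail(true);   /* invalidate already dropped the tree ref */
        return 0;
}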
@@ -685,11 +708,8 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                 if (ret == -EEXIST) {
                         zswap_duplicate_entry++;
                         /* remove from rbtree */
-                        rb_erase(&dupentry->rbnode, &tree->rbroot);
-                        if (!zswap_entry_put(dupentry)) {
-                                /* free */
-                                zswap_free_entry(tree, dupentry);
-                        }
+                        zswap_rb_erase(&tree->rbroot, dupentry);
+                        zswap_entry_put(tree, dupentry);
                 }
         } while (ret == -EEXIST);
         spin_unlock(&tree->lock);
@@ -718,17 +738,16 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
         struct zswap_entry *entry;
         u8 *src, *dst;
         unsigned int dlen;
-        int refcount, ret;
+        int ret;
 
         /* find */
         spin_lock(&tree->lock);
-        entry = zswap_rb_search(&tree->rbroot, offset);
+        entry = zswap_entry_find_get(&tree->rbroot, offset);
         if (!entry) {
                 /* entry was written back */
                 spin_unlock(&tree->lock);
                 return -1;
         }
-        zswap_entry_get(entry);
         spin_unlock(&tree->lock);
 
         /* decompress */
@@ -743,22 +762,9 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
         BUG_ON(ret);
 
         spin_lock(&tree->lock);
-        refcount = zswap_entry_put(entry);
-        if (likely(refcount)) {
-                spin_unlock(&tree->lock);
-                return 0;
-        }
+        zswap_entry_put(tree, entry);
         spin_unlock(&tree->lock);
 
-        /*
-         * We don't have to unlink from the rbtree because
-         * zswap_writeback_entry() or zswap_frontswap_invalidate page()
-         * has already done this for us if we are the last reference.
-         */
-        /* free */
-
-        zswap_free_entry(tree, entry);
-
         return 0;
 }
 
@@ -767,7 +773,6 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
 {
         struct zswap_tree *tree = zswap_trees[type];
         struct zswap_entry *entry;
-        int refcount;
 
         /* find */
         spin_lock(&tree->lock);
@@ -779,20 +784,12 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
         }
 
         /* remove from rbtree */
-        rb_erase(&entry->rbnode, &tree->rbroot);
+        zswap_rb_erase(&tree->rbroot, entry);
 
         /* drop the initial reference from entry creation */
-        refcount = zswap_entry_put(entry);
+        zswap_entry_put(tree, entry);
 
         spin_unlock(&tree->lock);
-
-        if (refcount) {
-                /* writeback in progress, writeback will free */
-                return;
-        }
-
-        /* free */
-        zswap_free_entry(tree, entry);
 }
 
 /* frees all zswap entries for the given swap type */
@@ -806,11 +803,8 @@ static void zswap_frontswap_invalidate_area(unsigned type)
 
         /* walk the tree and free everything */
         spin_lock(&tree->lock);
-        rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
-                zbud_free(tree->pool, entry->handle);
-                zswap_entry_cache_free(entry);
-                atomic_dec(&zswap_stored_pages);
-        }
+        rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
+                zswap_free_entry(tree, entry);
         tree->rbroot = RB_ROOT;
         spin_unlock(&tree->lock);
 