author		Dmitry Safonov <0x7f454c46@gmail.com>	2015-09-08 18:05:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-08 18:35:28 -0400
commit		5b999aadbae65696a148f55250d94b6f3d74071e
tree		3148d29e63658131dd762df51403bdba5bc77502 /mm/zswap.c
parent		708649694a8699ff91d395c4aef5ecea3ade14bc
mm: swap: zswap: maybe_preload & refactoring
zswap_get_swap_cache_page() and read_swap_cache_async() have pretty much the
same code; the only significant differences are the return value and the use
of swap_readpage().

Add a helper, __read_swap_cache_async(), that holds the common code.  Behavior
change: zswap_get_swap_cache_page() now uses radix_tree_maybe_preload()
instead of radix_tree_preload().  It looks like this had not been changed
before only because of the code duplication.
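For reference, the mm/swap_state.c half of the change is not shown on this
page (the diffstat below is limited to mm/zswap.c).  Based on the description
above, read_swap_cache_async() becomes a thin wrapper around the new helper
that only adds the swap_readpage() call when a page was freshly allocated;
roughly:

struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	/* Only a page newly added to the swap cache needs to be read from swap. */
	if (page_was_allocated)
		swap_readpage(retpage);

	return retpage;
}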
Signed-off-by: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: Seth Jennings <sjennings@variantweb.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/zswap.c')
-rw-r--r--	mm/zswap.c	73
1 file changed, 6 insertions(+), 67 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 2d5727baed59..09208c7c86f3 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -446,75 +446,14 @@ enum zswap_get_swap_ret {
 static int zswap_get_swap_cache_page(swp_entry_t entry,
 				struct page **retpage)
 {
-	struct page *found_page, *new_page = NULL;
-	struct address_space *swapper_space = swap_address_space(entry);
-	int err;
+	bool page_was_allocated;
 
-	*retpage = NULL;
-	do {
-		/*
-		 * First check the swap cache.  Since this is normally
-		 * called after lookup_swap_cache() failed, re-calling
-		 * that would confuse statistics.
-		 */
-		found_page = find_get_page(swapper_space, entry.val);
-		if (found_page)
-			break;
-
-		/*
-		 * Get a new page to read into from swap.
-		 */
-		if (!new_page) {
-			new_page = alloc_page(GFP_KERNEL);
-			if (!new_page)
-				break; /* Out of memory */
-		}
-
-		/*
-		 * call radix_tree_preload() while we can wait.
-		 */
-		err = radix_tree_preload(GFP_KERNEL);
-		if (err)
-			break;
-
-		/*
-		 * Swap entry may have been freed since our caller observed it.
-		 */
-		err = swapcache_prepare(entry);
-		if (err == -EEXIST) { /* seems racy */
-			radix_tree_preload_end();
-			continue;
-		}
-		if (err) { /* swp entry is obsolete ? */
-			radix_tree_preload_end();
-			break;
-		}
-
-		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
-		__set_page_locked(new_page);
-		SetPageSwapBacked(new_page);
-		err = __add_to_swap_cache(new_page, entry);
-		if (likely(!err)) {
-			radix_tree_preload_end();
-			lru_cache_add_anon(new_page);
-			*retpage = new_page;
-			return ZSWAP_SWAPCACHE_NEW;
-		}
-		radix_tree_preload_end();
-		ClearPageSwapBacked(new_page);
-		__clear_page_locked(new_page);
-		/*
-		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
-		 * clear SWAP_HAS_CACHE flag.
-		 */
-		swapcache_free(entry);
-	} while (err != -ENOMEM);
-
-	if (new_page)
-		page_cache_release(new_page);
-	if (!found_page)
+	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
+			NULL, 0, &page_was_allocated);
+	if (page_was_allocated)
+		return ZSWAP_SWAPCACHE_NEW;
+	if (!*retpage)
 		return ZSWAP_SWAPCACHE_FAIL;
-	*retpage = found_page;
 	return ZSWAP_SWAPCACHE_EXIST;
 }
 
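For context, the three ZSWAP_SWAPCACHE_* return values are consumed by the
writeback path in the same file.  The sketch below is illustrative only and
is not part of this diff; it condenses how a caller such as
zswap_writeback_entry() typically branches on the result, with the error
handling simplified:

	struct page *page;

	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL:
		/* No cached page and none could be allocated: give up. */
		return -ENOMEM;
	case ZSWAP_SWAPCACHE_EXIST:
		/* Page already in the swap cache; drop our reference. */
		page_cache_release(page);
		return -EEXIST;
	case ZSWAP_SWAPCACHE_NEW:
		/* Freshly allocated, locked page: decompress into it. */
		break;
	}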