diff options
author | Minchan Kim <minchan@kernel.org> | 2017-07-06 18:37:21 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-06 19:24:31 -0400 |
commit | 75f6d6d29a40b5541f0f107201cf7dec134ad210 (patch) | |
tree | 5c9e1f349f9634f555d6c2946df2a1927f9680e1 /mm/swap_state.c | |
parent | 38d8b4e6bdc872f07a3149309ab01719c96f3894 (diff) |
mm, THP, swap: unify swap slot free functions to put_swap_page
Now, get_swap_page takes struct page and allocates swap space according
to page size (i.e., normal or THP), so it would be cleaner to introduce
put_swap_page, which is the counterpart of get_swap_page. It then
calls the right swap slot free function depending on the page's size.
[ying.huang@intel.com: minor cleanup and fix]
Link: http://lkml.kernel.org/r/20170515112522.32457-3-ying.huang@intel.com
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r-- | mm/swap_state.c | 13 |
1 files changed, 3 insertions, 10 deletions
diff --git a/mm/swap_state.c b/mm/swap_state.c index 16ff89d058f4..0ad214d7a7ad 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
@@ -231,10 +231,7 @@ retry: | |||
231 | return 1; | 231 | return 1; |
232 | 232 | ||
233 | fail_free: | 233 | fail_free: |
234 | if (PageTransHuge(page)) | 234 | put_swap_page(page, entry); |
235 | swapcache_free_cluster(entry); | ||
236 | else | ||
237 | swapcache_free(entry); | ||
238 | fail: | 235 | fail: |
239 | if (PageTransHuge(page) && !split_huge_page_to_list(page, list)) | 236 | if (PageTransHuge(page) && !split_huge_page_to_list(page, list)) |
240 | goto retry; | 237 | goto retry; |
@@ -259,11 +256,7 @@ void delete_from_swap_cache(struct page *page) | |||
259 | __delete_from_swap_cache(page); | 256 | __delete_from_swap_cache(page); |
260 | spin_unlock_irq(&address_space->tree_lock); | 257 | spin_unlock_irq(&address_space->tree_lock); |
261 | 258 | ||
262 | if (PageTransHuge(page)) | 259 | put_swap_page(page, entry); |
263 | swapcache_free_cluster(entry); | ||
264 | else | ||
265 | swapcache_free(entry); | ||
266 | |||
267 | page_ref_sub(page, hpage_nr_pages(page)); | 260 | page_ref_sub(page, hpage_nr_pages(page)); |
268 | } | 261 | } |
269 | 262 | ||
@@ -415,7 +408,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, | |||
415 | * add_to_swap_cache() doesn't return -EEXIST, so we can safely | 408 | * add_to_swap_cache() doesn't return -EEXIST, so we can safely |
416 | * clear SWAP_HAS_CACHE flag. | 409 | * clear SWAP_HAS_CACHE flag. |
417 | */ | 410 | */ |
418 | swapcache_free(entry); | 411 | put_swap_page(new_page, entry); |
419 | } while (err != -ENOMEM); | 412 | } while (err != -ENOMEM); |
420 | 413 | ||
421 | if (new_page) | 414 | if (new_page) |