diff options
author | Minchan Kim <minchan@kernel.org> | 2017-07-06 18:37:21 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-06 19:24:31 -0400 |
commit | 75f6d6d29a40b5541f0f107201cf7dec134ad210 (patch) | |
tree | 5c9e1f349f9634f555d6c2946df2a1927f9680e1 | |
parent | 38d8b4e6bdc872f07a3149309ab01719c96f3894 (diff) |
mm, THP, swap: unify swap slot free functions to put_swap_page
Now, get_swap_page takes struct page and allocates swap space according
to the page size (i.e., normal or THP), so it is cleaner to introduce
put_swap_page as the counterpart of get_swap_page. It then calls the
right swap slot free function depending on the page's size.
[ying.huang@intel.com: minor cleanup and fix]
Link: http://lkml.kernel.org/r/20170515112522.32457-3-ying.huang@intel.com
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/swap.h | 12 | ||||
-rw-r--r-- | mm/shmem.c | 2 | ||||
-rw-r--r-- | mm/swap_state.c | 13 | ||||
-rw-r--r-- | mm/swapfile.c | 16 | ||||
-rw-r--r-- | mm/vmscan.c | 2 |
5 files changed, 21 insertions, 24 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h index d18876384de0..ead6fd7966b4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -387,6 +387,7 @@ static inline long get_nr_swap_pages(void) | |||
387 | 387 | ||
388 | extern void si_swapinfo(struct sysinfo *); | 388 | extern void si_swapinfo(struct sysinfo *); |
389 | extern swp_entry_t get_swap_page(struct page *page); | 389 | extern swp_entry_t get_swap_page(struct page *page); |
390 | extern void put_swap_page(struct page *page, swp_entry_t entry); | ||
390 | extern swp_entry_t get_swap_page_of_type(int); | 391 | extern swp_entry_t get_swap_page_of_type(int); |
391 | extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]); | 392 | extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]); |
392 | extern int add_swap_count_continuation(swp_entry_t, gfp_t); | 393 | extern int add_swap_count_continuation(swp_entry_t, gfp_t); |
@@ -394,7 +395,6 @@ extern void swap_shmem_alloc(swp_entry_t); | |||
394 | extern int swap_duplicate(swp_entry_t); | 395 | extern int swap_duplicate(swp_entry_t); |
395 | extern int swapcache_prepare(swp_entry_t); | 396 | extern int swapcache_prepare(swp_entry_t); |
396 | extern void swap_free(swp_entry_t); | 397 | extern void swap_free(swp_entry_t); |
397 | extern void swapcache_free(swp_entry_t); | ||
398 | extern void swapcache_free_entries(swp_entry_t *entries, int n); | 398 | extern void swapcache_free_entries(swp_entry_t *entries, int n); |
399 | extern int free_swap_and_cache(swp_entry_t); | 399 | extern int free_swap_and_cache(swp_entry_t); |
400 | extern int swap_type_of(dev_t, sector_t, struct block_device **); | 400 | extern int swap_type_of(dev_t, sector_t, struct block_device **); |
@@ -453,7 +453,7 @@ static inline void swap_free(swp_entry_t swp) | |||
453 | { | 453 | { |
454 | } | 454 | } |
455 | 455 | ||
456 | static inline void swapcache_free(swp_entry_t swp) | 456 | static inline void put_swap_page(struct page *page, swp_entry_t swp) |
457 | { | 457 | { |
458 | } | 458 | } |
459 | 459 | ||
@@ -578,13 +578,5 @@ static inline bool mem_cgroup_swap_full(struct page *page) | |||
578 | } | 578 | } |
579 | #endif | 579 | #endif |
580 | 580 | ||
581 | #ifdef CONFIG_THP_SWAP | ||
582 | extern void swapcache_free_cluster(swp_entry_t entry); | ||
583 | #else | ||
584 | static inline void swapcache_free_cluster(swp_entry_t entry) | ||
585 | { | ||
586 | } | ||
587 | #endif | ||
588 | |||
589 | #endif /* __KERNEL__*/ | 581 | #endif /* __KERNEL__*/ |
590 | #endif /* _LINUX_SWAP_H */ | 582 | #endif /* _LINUX_SWAP_H */ |
diff --git a/mm/shmem.c b/mm/shmem.c index bbb987c58dad..a06f23731d3f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -1327,7 +1327,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) | |||
1327 | 1327 | ||
1328 | mutex_unlock(&shmem_swaplist_mutex); | 1328 | mutex_unlock(&shmem_swaplist_mutex); |
1329 | free_swap: | 1329 | free_swap: |
1330 | swapcache_free(swap); | 1330 | put_swap_page(page, swap); |
1331 | redirty: | 1331 | redirty: |
1332 | set_page_dirty(page); | 1332 | set_page_dirty(page); |
1333 | if (wbc->for_reclaim) | 1333 | if (wbc->for_reclaim) |
diff --git a/mm/swap_state.c b/mm/swap_state.c index 16ff89d058f4..0ad214d7a7ad 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
@@ -231,10 +231,7 @@ retry: | |||
231 | return 1; | 231 | return 1; |
232 | 232 | ||
233 | fail_free: | 233 | fail_free: |
234 | if (PageTransHuge(page)) | 234 | put_swap_page(page, entry); |
235 | swapcache_free_cluster(entry); | ||
236 | else | ||
237 | swapcache_free(entry); | ||
238 | fail: | 235 | fail: |
239 | if (PageTransHuge(page) && !split_huge_page_to_list(page, list)) | 236 | if (PageTransHuge(page) && !split_huge_page_to_list(page, list)) |
240 | goto retry; | 237 | goto retry; |
@@ -259,11 +256,7 @@ void delete_from_swap_cache(struct page *page) | |||
259 | __delete_from_swap_cache(page); | 256 | __delete_from_swap_cache(page); |
260 | spin_unlock_irq(&address_space->tree_lock); | 257 | spin_unlock_irq(&address_space->tree_lock); |
261 | 258 | ||
262 | if (PageTransHuge(page)) | 259 | put_swap_page(page, entry); |
263 | swapcache_free_cluster(entry); | ||
264 | else | ||
265 | swapcache_free(entry); | ||
266 | |||
267 | page_ref_sub(page, hpage_nr_pages(page)); | 260 | page_ref_sub(page, hpage_nr_pages(page)); |
268 | } | 261 | } |
269 | 262 | ||
@@ -415,7 +408,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, | |||
415 | * add_to_swap_cache() doesn't return -EEXIST, so we can safely | 408 | * add_to_swap_cache() doesn't return -EEXIST, so we can safely |
416 | * clear SWAP_HAS_CACHE flag. | 409 | * clear SWAP_HAS_CACHE flag. |
417 | */ | 410 | */ |
418 | swapcache_free(entry); | 411 | put_swap_page(new_page, entry); |
419 | } while (err != -ENOMEM); | 412 | } while (err != -ENOMEM); |
420 | 413 | ||
421 | if (new_page) | 414 | if (new_page) |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 984f0dd94948..8a6cdf9e55f9 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -1148,7 +1148,7 @@ void swap_free(swp_entry_t entry) | |||
1148 | /* | 1148 | /* |
1149 | * Called after dropping swapcache to decrease refcnt to swap entries. | 1149 | * Called after dropping swapcache to decrease refcnt to swap entries. |
1150 | */ | 1150 | */ |
1151 | void swapcache_free(swp_entry_t entry) | 1151 | static void swapcache_free(swp_entry_t entry) |
1152 | { | 1152 | { |
1153 | struct swap_info_struct *p; | 1153 | struct swap_info_struct *p; |
1154 | 1154 | ||
@@ -1160,7 +1160,7 @@ void swapcache_free(swp_entry_t entry) | |||
1160 | } | 1160 | } |
1161 | 1161 | ||
1162 | #ifdef CONFIG_THP_SWAP | 1162 | #ifdef CONFIG_THP_SWAP |
1163 | void swapcache_free_cluster(swp_entry_t entry) | 1163 | static void swapcache_free_cluster(swp_entry_t entry) |
1164 | { | 1164 | { |
1165 | unsigned long offset = swp_offset(entry); | 1165 | unsigned long offset = swp_offset(entry); |
1166 | unsigned long idx = offset / SWAPFILE_CLUSTER; | 1166 | unsigned long idx = offset / SWAPFILE_CLUSTER; |
@@ -1184,8 +1184,20 @@ void swapcache_free_cluster(swp_entry_t entry) | |||
1184 | swap_free_cluster(si, idx); | 1184 | swap_free_cluster(si, idx); |
1185 | spin_unlock(&si->lock); | 1185 | spin_unlock(&si->lock); |
1186 | } | 1186 | } |
1187 | #else | ||
1188 | static inline void swapcache_free_cluster(swp_entry_t entry) | ||
1189 | { | ||
1190 | } | ||
1187 | #endif /* CONFIG_THP_SWAP */ | 1191 | #endif /* CONFIG_THP_SWAP */ |
1188 | 1192 | ||
1193 | void put_swap_page(struct page *page, swp_entry_t entry) | ||
1194 | { | ||
1195 | if (!PageTransHuge(page)) | ||
1196 | swapcache_free(entry); | ||
1197 | else | ||
1198 | swapcache_free_cluster(entry); | ||
1199 | } | ||
1200 | |||
1189 | void swapcache_free_entries(swp_entry_t *entries, int n) | 1201 | void swapcache_free_entries(swp_entry_t *entries, int n) |
1190 | { | 1202 | { |
1191 | struct swap_info_struct *p, *prev; | 1203 | struct swap_info_struct *p, *prev; |
diff --git a/mm/vmscan.c b/mm/vmscan.c index a10e05870835..cb7c154a4a9d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -708,7 +708,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, | |||
708 | mem_cgroup_swapout(page, swap); | 708 | mem_cgroup_swapout(page, swap); |
709 | __delete_from_swap_cache(page); | 709 | __delete_from_swap_cache(page); |
710 | spin_unlock_irqrestore(&mapping->tree_lock, flags); | 710 | spin_unlock_irqrestore(&mapping->tree_lock, flags); |
711 | swapcache_free(swap); | 711 | put_swap_page(page, swap); |
712 | } else { | 712 | } else { |
713 | void (*freepage)(struct page *); | 713 | void (*freepage)(struct page *); |
714 | void *shadow = NULL; | 714 | void *shadow = NULL; |