diff options
author | Huang Ying <ying.huang@intel.com> | 2018-10-26 18:03:46 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-10-26 19:25:19 -0400 |
commit | bcd49e86710b42f15c7512de594d23b3ae0b21d7 (patch) | |
tree | 4122785ea9c35fe02f9509c4b48bd849c8826ac9 /mm/swapfile.c | |
parent | 154221c3e52083d9f54fa58b4e1090264969f6bc (diff) |
mm/swapfile.c: use __try_to_reclaim_swap() in free_swap_and_cache()
The code path to reclaim the swap entry in free_swap_and_cache() is
almost the same as that of __try_to_reclaim_swap(). The largest
difference is just coding style. So support for the additional
requirements of free_swap_and_cache() is added to
__try_to_reclaim_swap(). free_swap_and_cache() is changed to call
__try_to_reclaim_swap(), and the duplicated code is deleted. This will
improve code readability and reduce the potential for bugs.
There are two functional differences between __try_to_reclaim_swap()
and the swap entry reclaim code of free_swap_and_cache().
- free_swap_and_cache() only reclaims the swap entry if the page is
unmapped or swap is getting full. This support has been added to
__try_to_reclaim_swap().
- try_to_free_swap() (called by __try_to_reclaim_swap()) checks
pm_suspended_storage(), while free_swap_and_cache() does not. I think
this is OK, because the page and the swap entry can be reclaimed
later eventually.
Link: http://lkml.kernel.org/r/20180827075535.17406-2-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r-- | mm/swapfile.c | 57 |
1 files changed, 25 insertions, 32 deletions
diff --git a/mm/swapfile.c b/mm/swapfile.c index d954b71c4f9c..0d44179213ed 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -103,26 +103,39 @@ static inline unsigned char swap_count(unsigned char ent) | |||
103 | return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */ | 103 | return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */ |
104 | } | 104 | } |
105 | 105 | ||
106 | /* Reclaim the swap entry anyway if possible */ | ||
107 | #define TTRS_ANYWAY 0x1 | ||
108 | /* | ||
109 | * Reclaim the swap entry if there are no more mappings of the | ||
110 | * corresponding page | ||
111 | */ | ||
112 | #define TTRS_UNMAPPED 0x2 | ||
113 | /* Reclaim the swap entry if swap is getting full*/ | ||
114 | #define TTRS_FULL 0x4 | ||
115 | |||
106 | /* returns 1 if swap entry is freed */ | 116 | /* returns 1 if swap entry is freed */ |
107 | static int | 117 | static int __try_to_reclaim_swap(struct swap_info_struct *si, |
108 | __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset) | 118 | unsigned long offset, unsigned long flags) |
109 | { | 119 | { |
110 | swp_entry_t entry = swp_entry(si->type, offset); | 120 | swp_entry_t entry = swp_entry(si->type, offset); |
111 | struct page *page; | 121 | struct page *page; |
112 | int ret = 0; | 122 | int ret = 0; |
113 | 123 | ||
114 | page = find_get_page(swap_address_space(entry), swp_offset(entry)); | 124 | page = find_get_page(swap_address_space(entry), offset); |
115 | if (!page) | 125 | if (!page) |
116 | return 0; | 126 | return 0; |
117 | /* | 127 | /* |
118 | * This function is called from scan_swap_map() and it's called | 128 | * When this function is called from scan_swap_map_slots() and it's |
119 | * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here. | 129 | * called by vmscan.c at reclaiming pages. So, we hold a lock on a page, |
120 | * We have to use trylock for avoiding deadlock. This is a special | 130 | * here. We have to use trylock for avoiding deadlock. This is a special |
121 | * case and you should use try_to_free_swap() with explicit lock_page() | 131 | * case and you should use try_to_free_swap() with explicit lock_page() |
122 | * in usual operations. | 132 | * in usual operations. |
123 | */ | 133 | */ |
124 | if (trylock_page(page)) { | 134 | if (trylock_page(page)) { |
125 | ret = try_to_free_swap(page); | 135 | if ((flags & TTRS_ANYWAY) || |
136 | ((flags & TTRS_UNMAPPED) && !page_mapped(page)) || | ||
137 | ((flags & TTRS_FULL) && mem_cgroup_swap_full(page))) | ||
138 | ret = try_to_free_swap(page); | ||
126 | unlock_page(page); | 139 | unlock_page(page); |
127 | } | 140 | } |
128 | put_page(page); | 141 | put_page(page); |
@@ -780,7 +793,7 @@ checks: | |||
780 | int swap_was_freed; | 793 | int swap_was_freed; |
781 | unlock_cluster(ci); | 794 | unlock_cluster(ci); |
782 | spin_unlock(&si->lock); | 795 | spin_unlock(&si->lock); |
783 | swap_was_freed = __try_to_reclaim_swap(si, offset); | 796 | swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); |
784 | spin_lock(&si->lock); | 797 | spin_lock(&si->lock); |
785 | /* entry was freed successfully, try to use this again */ | 798 | /* entry was freed successfully, try to use this again */ |
786 | if (swap_was_freed) | 799 | if (swap_was_freed) |
@@ -1612,7 +1625,6 @@ int try_to_free_swap(struct page *page) | |||
1612 | int free_swap_and_cache(swp_entry_t entry) | 1625 | int free_swap_and_cache(swp_entry_t entry) |
1613 | { | 1626 | { |
1614 | struct swap_info_struct *p; | 1627 | struct swap_info_struct *p; |
1615 | struct page *page = NULL; | ||
1616 | unsigned char count; | 1628 | unsigned char count; |
1617 | 1629 | ||
1618 | if (non_swap_entry(entry)) | 1630 | if (non_swap_entry(entry)) |
@@ -1622,31 +1634,12 @@ int free_swap_and_cache(swp_entry_t entry) | |||
1622 | if (p) { | 1634 | if (p) { |
1623 | count = __swap_entry_free(p, entry, 1); | 1635 | count = __swap_entry_free(p, entry, 1); |
1624 | if (count == SWAP_HAS_CACHE && | 1636 | if (count == SWAP_HAS_CACHE && |
1625 | !swap_page_trans_huge_swapped(p, entry)) { | 1637 | !swap_page_trans_huge_swapped(p, entry)) |
1626 | page = find_get_page(swap_address_space(entry), | 1638 | __try_to_reclaim_swap(p, swp_offset(entry), |
1627 | swp_offset(entry)); | 1639 | TTRS_UNMAPPED | TTRS_FULL); |
1628 | if (page && !trylock_page(page)) { | 1640 | else if (!count) |
1629 | put_page(page); | ||
1630 | page = NULL; | ||
1631 | } | ||
1632 | } else if (!count) | ||
1633 | free_swap_slot(entry); | 1641 | free_swap_slot(entry); |
1634 | } | 1642 | } |
1635 | if (page) { | ||
1636 | /* | ||
1637 | * Not mapped elsewhere, or swap space full? Free it! | ||
1638 | * Also recheck PageSwapCache now page is locked (above). | ||
1639 | */ | ||
1640 | if (PageSwapCache(page) && !PageWriteback(page) && | ||
1641 | (!page_mapped(page) || mem_cgroup_swap_full(page)) && | ||
1642 | !swap_page_trans_huge_swapped(p, entry)) { | ||
1643 | page = compound_head(page); | ||
1644 | delete_from_swap_cache(page); | ||
1645 | SetPageDirty(page); | ||
1646 | } | ||
1647 | unlock_page(page); | ||
1648 | put_page(page); | ||
1649 | } | ||
1650 | return p != NULL; | 1643 | return p != NULL; |
1651 | } | 1644 | } |
1652 | 1645 | ||