path: root/mm/swap_state.c
author     Minchan Kim <minchan@kernel.org>               2017-07-06 18:37:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2017-07-06 19:24:31 -0400
commit     0f0746589e4be071a8f890b2035c97c30c7a4e16 (patch)
tree       fa4613c9460f97cb64176ad8fb93d3fb2b30ad36 /mm/swap_state.c
parent     75f6d6d29a40b5541f0f107201cf7dec134ad210 (diff)
mm, THP, swap: move anonymous THP split logic to vmscan
add_to_swap() aims to allocate swap space (ie, a swap slot plus a swap cache entry). If it fails for lack of space, for example for a THP, or when an HDD swap device is asked to swap out a THP, the *caller* rather than add_to_swap() itself should split the THP page and retry with the base pages, which is more natural.

Link: http://lkml.kernel.org/r/20170515112522.32457-4-ying.huang@intel.com
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--  mm/swap_state.c | 23
1 file changed, 6 insertions(+), 17 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 0ad214d7a7ad..9c71b6b2562f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -184,7 +184,7 @@ void __delete_from_swap_cache(struct page *page)
  * Allocate swap space for the page and add the page to the
  * swap cache. Caller needs to hold the page lock.
  */
-int add_to_swap(struct page *page, struct list_head *list)
+int add_to_swap(struct page *page)
 {
 	swp_entry_t entry;
 	int err;
@@ -192,12 +192,12 @@ int add_to_swap(struct page *page, struct list_head *list)
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(!PageUptodate(page), page);
 
-retry:
 	entry = get_swap_page(page);
 	if (!entry.val)
-		goto fail;
+		return 0;
+
 	if (mem_cgroup_try_charge_swap(page, entry))
-		goto fail_free;
+		goto fail;
 
 	/*
 	 * Radix-tree node allocations from PF_MEMALLOC contexts could
@@ -218,23 +218,12 @@ retry:
 	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
 	 * clear SWAP_HAS_CACHE flag.
 	 */
-		goto fail_free;
-
-	if (PageTransHuge(page)) {
-		err = split_huge_page_to_list(page, list);
-		if (err) {
-			delete_from_swap_cache(page);
-			return 0;
-		}
-	}
+		goto fail;
 
 	return 1;
 
-fail_free:
-	put_swap_page(page, entry);
 fail:
-	if (PageTransHuge(page) && !split_huge_page_to_list(page, list))
-		goto retry;
+	put_swap_page(page, entry);
 	return 0;
 }
 
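The caller-side half of this change lives in mm/vmscan.c, which is outside this diffstat. As a rough sketch only, this is the split-and-retry shape the commit message describes for the swap-out path in shrink_page_list(); the activate_locked label and the page_list variable are assumptions reconstructed from the description, not the literal vmscan hunk:

	if (!add_to_swap(page)) {
		/* No swap space for the whole page; a THP may still
		 * fit as base pages after a split. */
		if (!PageTransHuge(page))
			goto activate_locked;	/* assumed label */
		/* Split the THP and retry with the base pages. */
		if (split_huge_page_to_list(page, page_list))
			goto activate_locked;
		if (!add_to_swap(page))
			goto activate_locked;
	}

With the retry moved here, add_to_swap() itself stays a simple allocate-or-fail helper, and only the one caller that can actually supply a list for the split pages carries the THP fallback logic.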