about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Hugh Dickins <hughd@google.com>	2018-11-30 17:10:50 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-11-30 17:56:15 -0500
commit	95feeabb77149f7d48f05bde61d75621c57db67e (patch)
tree	feb93071531a60ec390ac4b18d37cf2c1358b68b
parent	06a5e1268a5fb9c2b346a3da6b97e85f2eba0f07 (diff)
mm/khugepaged: fix the xas_create_range() error path
collapse_shmem()'s xas_nomem() is very unlikely to fail, but it is rightly given a failure path, so move the whole xas_create_range() block up before __SetPageLocked(new_page): so that it does not need to remember to unlock_page(new_page). Add the missing mem_cgroup_cancel_charge(), and set (currently unused) result to SCAN_FAIL rather than SCAN_SUCCEED. Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261531200.2275@eggly.anvils Fixes: 77da9389b9d5 ("mm: Convert collapse_shmem to XArray") Signed-off-by: Hugh Dickins <hughd@kernel.org> Cc: Matthew Wilcox <willy@infradead.org> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Jerome Glisse <jglisse@redhat.com> Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/khugepaged.c	25
1 file changed, 14 insertions, 11 deletions
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 2c5fe4f7a0c6..8e2ff195ecb3 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1329,6 +1329,20 @@ static void collapse_shmem(struct mm_struct *mm,
 		goto out;
 	}
 
+	/* This will be less messy when we use multi-index entries */
+	do {
+		xas_lock_irq(&xas);
+		xas_create_range(&xas);
+		if (!xas_error(&xas))
+			break;
+		xas_unlock_irq(&xas);
+		if (!xas_nomem(&xas, GFP_KERNEL)) {
+			mem_cgroup_cancel_charge(new_page, memcg, true);
+			result = SCAN_FAIL;
+			goto out;
+		}
+	} while (1);
+
 	__SetPageLocked(new_page);
 	__SetPageSwapBacked(new_page);
 	new_page->index = start;
@@ -1340,17 +1354,6 @@ static void collapse_shmem(struct mm_struct *mm,
 	 * be able to map it or use it in another way until we unlock it.
 	 */
 
-	/* This will be less messy when we use multi-index entries */
-	do {
-		xas_lock_irq(&xas);
-		xas_create_range(&xas);
-		if (!xas_error(&xas))
-			break;
-		xas_unlock_irq(&xas);
-		if (!xas_nomem(&xas, GFP_KERNEL))
-			goto out;
-	} while (1);
-
 	xas_set(&xas, start);
 	for (index = start; index < end; index++) {
 		struct page *page = xas_next(&xas);