about · summary · refs · log · tree · commit · diff · stats
path: root/mm/khugepaged.c
diff options
context:
space:
mode:
authorMatthew Wilcox (Oracle) <willy@infradead.org>2019-09-23 18:34:52 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-09-24 18:54:08 -0400
commit4101196b19d7f905dca5dcf46cd35eb758cf06c0 (patch)
treef19a6fe24db9f749ef3e8c808eba6a067a336aa8 /mm/khugepaged.c
parent875d91b11a201276ac3a9ab79f8b0fa3dc4ee8fd (diff)
mm: page cache: store only head pages in i_pages
Transparent Huge Pages are currently stored in i_pages as pointers to consecutive subpages. This patch changes that to storing consecutive pointers to the head page in preparation for storing huge pages more efficiently in i_pages. Large parts of this are "inspired" by Kirill's patch https://lore.kernel.org/lkml/20170126115819.58875-2-kirill.shutemov@linux.intel.com/ Kirill and Huang Ying contributed several fixes. [willy@infradead.org: use compound_nr, squish uninit-var warning] Link: http://lkml.kernel.org/r/20190731210400.7419-1-willy@infradead.org Signed-off-by: Matthew Wilcox <willy@infradead.org> Acked-by: Jan Kara <jack@suse.cz> Reviewed-by: Kirill Shutemov <kirill@shutemov.name> Reviewed-by: Song Liu <songliubraving@fb.com> Tested-by: Song Liu <songliubraving@fb.com> Tested-by: William Kucharski <william.kucharski@oracle.com> Reviewed-by: William Kucharski <william.kucharski@oracle.com> Tested-by: Qian Cai <cai@lca.pw> Tested-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com> Cc: Hugh Dickins <hughd@google.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Cc: Song Liu <songliubraving@fb.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/khugepaged.c')
-rw-r--r--mm/khugepaged.c4
1 files changed, 2 insertions, 2 deletions
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ccede2425c3f..04a54ff5a8ac 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1378,7 +1378,7 @@ static void collapse_shmem(struct mm_struct *mm,
 			result = SCAN_FAIL;
 			goto xa_locked;
 		}
-		xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
+		xas_store(&xas, new_page);
 		nr_none++;
 		continue;
 	}
@@ -1454,7 +1454,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		list_add_tail(&page->lru, &pagelist);

 		/* Finally, replace with the new page. */
-		xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
+		xas_store(&xas, new_page);
 		continue;
 out_unlock:
 		unlock_page(page);