path: root/mm
author     Kirill Tkhai <ktkhai@virtuozzo.com>  2019-03-05 18:43:06 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-06 00:07:15 -0500
commit     52d1e606ee733921e984770d47539a6bb91e8506 (patch)
tree       766f0493a901bb74bea0bf3d0bc01f7754e0bd4d /mm
parent     7c9eefe82ca1efec5890678c33e66d5d520c06f4 (diff)
mm: reuse only-pte-mapped KSM page in do_wp_page()
Add an optimization for KSM pages almost in the same way that we have for ordinary anonymous pages. If there is a write fault in a page which is mapped by only one pte and is not in the swap cache, the page may be reused without copying its content.

[ Note that we do not consider PageSwapCache() pages, at least for now, since we don't want to complicate __get_ksm_page(), which has a nice optimization based on this (for the migration case). Currently it spins on PageSwapCache() pages, waiting for their counters to be unfrozen (i.e., for the migration to finish). But we don't want it to also spin on swap cache pages that we try to reuse, since the probability of reusing them is not very high. So, for now, we do not consider PageSwapCache() pages at all. ]

So in reuse_ksm_page() we check for 1) PageSwapCache() and 2) page_stable_node(), to skip a page which KSM is currently trying to link into the stable tree. Then we do page_ref_freeze() to prohibit KSM from merging one more page into the page we are reusing. After that, nobody else can take a new reference to the page being reused: KSM skips !PageSwapCache() pages with a zero refcount, and the protection against all other participants is the same as for reused ordinary anon pages: pte lock, page lock and mmap_sem.

[akpm@linux-foundation.org: replace BUG_ON()s with WARN_ON()s]
Link: http://lkml.kernel.org/r/154471491016.31352.1168978849911555609.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Reviewed-by: Yang Shi <yang.shi@linux.alibaba.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
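For intuition, the page_ref_freeze()/page_ref_unfreeze() handshake described above can be modelled outside the kernel. Below is a small userspace C sketch, an illustration only: struct obj, ref_freeze(), ref_unfreeze() and get_unless_zero() are simplified stand-ins for struct page, page_ref_freeze(), page_ref_unfreeze() and get_page_unless_zero(), and none of the real locking, rmap or pte handling is modelled. The reuser may retarget the object only if it can atomically drop the refcount from the expected value (1, the single pte mapping) to 0; while the count is frozen at 0, a lookup that only takes references "unless zero", the way KSM's get_ksm_page() does, backs off.

/*
 * Toy model of the refcount-freeze idea used by reuse_ksm_page():
 * freeze the count from 1 to 0 to exclude concurrent "unless zero"
 * lookups, retarget the object, then unfreeze back to 1.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
};

/* Rough analogue of page_ref_freeze(page, count). */
static bool ref_freeze(struct obj *o, int count)
{
	int expected = count;

	return atomic_compare_exchange_strong(&o->refcount, &expected, 0);
}

/* Rough analogue of page_ref_unfreeze(page, count). */
static void ref_unfreeze(struct obj *o, int count)
{
	atomic_store(&o->refcount, count);
}

/* Rough analogue of get_page_unless_zero(): fails while frozen. */
static bool get_unless_zero(struct obj *o)
{
	int val = atomic_load(&o->refcount);

	while (val != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &val, val + 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct obj page = { .refcount = 1 };	/* only one pte maps it */

	if (ref_freeze(&page, 1)) {
		/* Frozen: an "unless zero" lookup cannot take a reference. */
		printf("while frozen: get_unless_zero() = %d\n",
		       get_unless_zero(&page));
		/* ... the page would be retargeted here ... */
		ref_unfreeze(&page, 1);
	}
	printf("after unfreeze: get_unless_zero() = %d\n",
	       get_unless_zero(&page));
	return 0;
}

Compiled with, say, cc -std=c11 sketch.c, the first printf reports 0 (the lookup is refused while the count is frozen) and the second reports 1.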
Diffstat (limited to 'mm')
-rw-r--r--  mm/ksm.c     30
-rw-r--r--  mm/memory.c  16
2 files changed, 42 insertions, 4 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
index fd2db6a74d3c..983fbac24bda 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -706,8 +706,9 @@ again:
 	 * case this node is no longer referenced, and should be freed;
 	 * however, it might mean that the page is under page_ref_freeze().
 	 * The __remove_mapping() case is easy, again the node is now stale;
-	 * but if page is swapcache in migrate_page_move_mapping(), it might
-	 * still be our page, in which case it's essential to keep the node.
+	 * the same is in reuse_ksm_page() case; but if page is swapcache
+	 * in migrate_page_move_mapping(), it might still be our page,
+	 * in which case it's essential to keep the node.
 	 */
 	while (!get_page_unless_zero(page)) {
 		/*
@@ -2642,6 +2643,31 @@ again:
 	goto again;
 }
 
+bool reuse_ksm_page(struct page *page,
+			struct vm_area_struct *vma,
+			unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
+			WARN_ON(!page_mapped(page)) ||
+			WARN_ON(!PageLocked(page))) {
+		dump_page(page, "reuse_ksm_page");
+		return false;
+	}
+#endif
+
+	if (PageSwapCache(page) || !page_stable_node(page))
+		return false;
+	/* Prohibit parallel get_ksm_page() */
+	if (!page_ref_freeze(page, 1))
+		return false;
+
+	page_move_anon_rmap(page, vma);
+	page->index = linear_page_index(vma, address);
+	page_ref_unfreeze(page, 1);
+
+	return true;
+}
 #ifdef CONFIG_MIGRATION
 void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
diff --git a/mm/memory.c b/mm/memory.c
index eb40f32295d2..222da66f16b4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2505,8 +2505,11 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 	 * Take out anonymous pages first, anonymous shared vmas are
 	 * not dirty accountable.
 	 */
-	if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
+	if (PageAnon(vmf->page)) {
 		int total_map_swapcount;
+		if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
+					page_count(vmf->page) != 1))
+			goto copy;
 		if (!trylock_page(vmf->page)) {
 			get_page(vmf->page);
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2521,6 +2524,15 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 			}
 			put_page(vmf->page);
 		}
+		if (PageKsm(vmf->page)) {
+			bool reused = reuse_ksm_page(vmf->page, vmf->vma,
+					vmf->address);
+			unlock_page(vmf->page);
+			if (!reused)
+				goto copy;
+			wp_page_reuse(vmf);
+			return VM_FAULT_WRITE;
+		}
 		if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
 			if (total_map_swapcount == 1) {
 				/*
@@ -2541,7 +2553,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 					(VM_WRITE|VM_SHARED))) {
 		return wp_page_shared(vmf);
 	}
-
+copy:
 	/*
 	 * Ok, we need to copy. Oh, well..
 	 */