 include/linux/ksm.h |  7 +++++++
 mm/ksm.c            | 30 ++++++++++++++++++++++++++++--
 mm/memory.c         | 16 ++++++++++++++--
 3 files changed, 49 insertions(+), 4 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 161e8164abcf..e48b1e453ff5 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -53,6 +53,8 @@ struct page *ksm_might_need_to_copy(struct page *page,
 
 void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+bool reuse_ksm_page(struct page *page,
+			struct vm_area_struct *vma, unsigned long address);
 
 #else  /* !CONFIG_KSM */
 
@@ -86,6 +88,11 @@ static inline void rmap_walk_ksm(struct page *page,
 static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
 }
+static inline bool reuse_ksm_page(struct page *page,
+			struct vm_area_struct *vma, unsigned long address)
+{
+	return false;
+}
 #endif /* CONFIG_MMU */
 #endif /* !CONFIG_KSM */
 
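Note: the header change follows the usual pattern for optional kernel subsystems: a real declaration under CONFIG_KSM and a static inline stub that fails gracefully when KSM is compiled out, so do_wp_page() can call reuse_ksm_page() with no #ifdefs at the call site. A minimal standalone sketch of that pattern; CONFIG_FEATURE and feature_try_reuse() are hypothetical names invented for the illustration:

/* config_stub.c - sketch of the CONFIG-gated declaration/stub pattern.
 * Build with -DCONFIG_FEATURE for the real path; without it, callers
 * transparently get the always-false stub. */
#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_FEATURE
bool feature_try_reuse(int id)
{
	/* The real implementation would live in its own .c file. */
	return id == 1;
}
#else
static inline bool feature_try_reuse(int id)
{
	(void)id;
	return false;	/* feature compiled out: always take the fallback */
}
#endif

int main(void)
{
	/* The caller needs no #ifdef either way. */
	printf("reuse: %s\n", feature_try_reuse(1) ? "yes" : "no");
	return 0;
}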
diff --git a/mm/ksm.c b/mm/ksm.c
index fd2db6a74d3c..983fbac24bda 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -706,8 +706,9 @@ again:
 	 * case this node is no longer referenced, and should be freed;
 	 * however, it might mean that the page is under page_ref_freeze().
 	 * The __remove_mapping() case is easy, again the node is now stale;
-	 * but if page is swapcache in migrate_page_move_mapping(), it might
-	 * still be our page, in which case it's essential to keep the node.
+	 * the same applies in the reuse_ksm_page() case; but if the page is
+	 * swapcache in migrate_page_move_mapping(), it might still be our
+	 * page, in which case it's essential to keep the node.
 	 */
 	while (!get_page_unless_zero(page)) {
 		/*
@@ -2642,6 +2643,31 @@ again:
 		goto again;
 }
 
+bool reuse_ksm_page(struct page *page,
+		    struct vm_area_struct *vma,
+		    unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
+			WARN_ON(!page_mapped(page)) ||
+			WARN_ON(!PageLocked(page))) {
+		dump_page(page, "reuse_ksm_page");
+		return false;
+	}
+#endif
+
+	if (PageSwapCache(page) || !page_stable_node(page))
+		return false;
+	/* Prohibit parallel get_ksm_page() */
+	if (!page_ref_freeze(page, 1))
+		return false;
+
+	page_move_anon_rmap(page, vma);
+	page->index = linear_page_index(vma, address);
+	page_ref_unfreeze(page, 1);
+
+	return true;
+}
 #ifdef CONFIG_MIGRATION
 void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
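Note: the heart of reuse_ksm_page() is page_ref_freeze(page, 1): the freeze succeeds only if this mapping holds the sole reference, and while the count is parked at zero any concurrent get_ksm_page() (which relies on get_page_unless_zero()) must back off, which is exactly what the updated comment in the first hunk records. A userspace sketch of the freeze/unfreeze idea using C11 atomics; ref_freeze(), ref_unfreeze(), and ref_get_unless_zero() are illustrative names, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>

/* Freeze: atomically trade an expected refcount for 0. Succeeds only
 * while we hold the sole reference, and parks the count at zero so
 * concurrent lookups fail until ref_unfreeze(). */
static bool ref_freeze(atomic_int *ref, int expected)
{
	int old = expected;

	return atomic_compare_exchange_strong(ref, &old, 0);
}

static void ref_unfreeze(atomic_int *ref, int count)
{
	atomic_store(ref, count);
}

/* Lookup side, analogous to get_page_unless_zero(): refuses to take
 * a new reference on a frozen (zero-count) object. */
static bool ref_get_unless_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	return false;
}

int main(void)
{
	atomic_int ref = 1;

	if (!ref_freeze(&ref, 1))
		return 1;
	/* Frozen: a racing lookup backs off here. */
	bool raced = ref_get_unless_zero(&ref);

	ref_unfreeze(&ref, 1);
	return raced ? 1 : 0;
}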
diff --git a/mm/memory.c b/mm/memory.c
index eb40f32295d2..222da66f16b4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2505,8 +2505,11 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 	 * Take out anonymous pages first, anonymous shared vmas are
 	 * not dirty accountable.
 	 */
-	if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
+	if (PageAnon(vmf->page)) {
 		int total_map_swapcount;
+		if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
+					page_count(vmf->page) != 1))
+			goto copy;
 		if (!trylock_page(vmf->page)) {
 			get_page(vmf->page);
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2521,6 +2524,15 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 			}
 			put_page(vmf->page);
 		}
+		if (PageKsm(vmf->page)) {
+			bool reused = reuse_ksm_page(vmf->page, vmf->vma,
+						vmf->address);
+			unlock_page(vmf->page);
+			if (!reused)
+				goto copy;
+			wp_page_reuse(vmf);
+			return VM_FAULT_WRITE;
+		}
 		if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
 			if (total_map_swapcount == 1) {
 				/*
@@ -2541,7 +2553,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 					(VM_WRITE|VM_SHARED))) {
 		return wp_page_shared(vmf);
 	}
-
+copy:
 	/*
 	 * Ok, we need to copy. Oh, well..
 	 */