Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 35 ++++++++++++++++++++++++++++++-----
 1 file changed, 30 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6ab19dd4a199..09e4b1be7b67 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -572,7 +572,7 @@ out:
  * covered by this vma.
  */
 
-static inline void
+static inline unsigned long
 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
 		unsigned long addr, int *rss)
@@ -586,7 +586,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		if (!pte_file(pte)) {
 			swp_entry_t entry = pte_to_swp_entry(pte);
 
-			swap_duplicate(entry);
+			if (swap_duplicate(entry) < 0)
+				return entry.val;
+
 			/* make sure dst_mm is on swapoff's mmlist. */
 			if (unlikely(list_empty(&dst_mm->mmlist))) {
 				spin_lock(&mmlist_lock);
@@ -635,6 +637,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
 out_set_pte:
 	set_pte_at(dst_mm, addr, dst_pte, pte);
+	return 0;
 }
 
 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -646,6 +649,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	spinlock_t *src_ptl, *dst_ptl;
 	int progress = 0;
 	int rss[2];
+	swp_entry_t entry = (swp_entry_t){0};
 
 again:
 	rss[1] = rss[0] = 0;
@@ -674,7 +678,10 @@ again:
 			progress++;
 			continue;
 		}
-		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
+		entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
+							vma, addr, rss);
+		if (entry.val)
+			break;
 		progress += 8;
 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
 
@@ -684,6 +691,12 @@ again:
 	add_mm_rss(dst_mm, rss[0], rss[1]);
 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
 	cond_resched();
+
+	if (entry.val) {
+		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
+			return -ENOMEM;
+		progress = 0;
+	}
 	if (addr != end)
 		goto again;
 	return 0;
@@ -943,6 +956,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
 		details = NULL;
 
 	BUG_ON(addr >= end);
+	mem_cgroup_uncharge_start();
 	tlb_start_vma(tlb, vma);
 	pgd = pgd_offset(vma->vm_mm, addr);
 	do {
@@ -955,6 +969,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
 						zap_work, details);
 	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
 	tlb_end_vma(tlb, vma);
+	mem_cgroup_uncharge_end();
 
 	return addr;
 }
@@ -2514,7 +2529,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = VM_FAULT_HWPOISON;
 		} else {
 			print_bad_pte(vma, address, orig_pte, NULL);
-			ret = VM_FAULT_OOM;
+			ret = VM_FAULT_SIGBUS;
 		}
 		goto out;
 	}
@@ -2540,6 +2555,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		ret = VM_FAULT_MAJOR;
 		count_vm_event(PGMAJFAULT);
 	} else if (PageHWPoison(page)) {
+		/*
+		 * hwpoisoned dirty swapcache pages are kept for killing
+		 * owner processes (which may be unknown at hwpoison time)
+		 */
 		ret = VM_FAULT_HWPOISON;
 		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 		goto out_release;
@@ -2548,6 +2567,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
+	page = ksm_might_need_to_copy(page, vma, address);
+	if (!page) {
+		ret = VM_FAULT_OOM;
+		goto out;
+	}
+
 	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
 		ret = VM_FAULT_OOM;
 		goto out_page;
@@ -2910,7 +2935,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * Page table corrupted: show pte and kill process.
 		 */
 		print_bad_pte(vma, address, orig_pte, NULL);
-		return VM_FAULT_OOM;
+		return VM_FAULT_SIGBUS;
 	}
 
 	pgoff = pte_to_pgoff(orig_pte);
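
Note: the copy_pte_range() hunks above follow a common kernel pattern. swap_duplicate() must not allocate while the page-table spinlocks are held, so it fails fast; copy_pte_range() then backs out, drops the locks, allocates via add_swap_count_continuation(entry, GFP_KERNEL), and retries through "goto again". Below is a minimal userspace sketch of that fail/unlock/allocate/retry pattern only; every name in it (try_copy_batch, need_continuation, and so on) is a hypothetical stand-in, not a kernel API. Build with "cc -pthread sketch.c".

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
static void *continuation;          /* stands in for the continuation page */
static int need_continuation = 1;   /* stands in for a maxed-out swap count */

/*
 * Runs with the lock held, so it must not sleep in an allocator:
 * it reports failure instead and lets the caller allocate unlocked.
 */
static int try_copy_batch(void)
{
	if (need_continuation && !continuation)
		return -1;              /* caller must allocate and retry */
	/* ... the actual copying work would go here ... */
	return 0;
}

int main(void)
{
again:
	pthread_mutex_lock(&ptl);
	if (try_copy_batch() < 0) {
		pthread_mutex_unlock(&ptl);     /* drop lock before sleeping */
		continuation = malloc(64);      /* may block: done unlocked */
		if (!continuation)
			return 1;               /* the kernel returns -ENOMEM here */
		goto again;                     /* mirrors copy_pte_range()'s retry */
	}
	pthread_mutex_unlock(&ptl);
	free(continuation);
	puts("batch copied");
	return 0;
}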