Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++---------
1 files changed, 45 insertions, 9 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 2ed2267439df..71b161b73bb5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned int flags, pte_t orig_pte)
 {
 	spinlock_t *ptl;
-	struct page *page;
+	struct page *page, *swapcache = NULL;
 	swp_entry_t entry;
 	pte_t pte;
 	struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-	page = ksm_might_need_to_copy(page, vma, address);
-	if (!page) {
-		ret = VM_FAULT_OOM;
-		goto out;
+	/*
+	 * Make sure try_to_free_swap didn't release the swapcache
+	 * from under us. The page pin isn't enough to prevent that.
+	 */
+	if (unlikely(!PageSwapCache(page)))
+		goto out_page;
+
+	if (ksm_might_need_to_copy(page, vma, address)) {
+		swapcache = page;
+		page = ksm_does_need_to_copy(page, vma, address);
+
+		if (unlikely(!page)) {
+			ret = VM_FAULT_OOM;
+			page = swapcache;
+			swapcache = NULL;
+			goto out_page;
+		}
 	}
 
 	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
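Taken together, the two hunks above make do_swap_page() re-validate PageSwapCache() once the page lock is held, since try_to_free_swap() can drop a pinned page from the swapcache before lock_page() completes, and they park the original page in the new swapcache local whenever ksm_does_need_to_copy() substitutes a private copy. Below is a minimal userspace model of that lock-then-revalidate pattern; fake_page and lock_and_revalidate are made-up stand-ins, not kernel APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	pthread_mutex_t lock;
	bool in_swapcache;	/* models PageSwapCache() */
};

/* Returns true if the caller may keep using the page (lock held). */
static bool lock_and_revalidate(struct fake_page *page)
{
	pthread_mutex_lock(&page->lock);	/* models lock_page() */
	if (!page->in_swapcache) {		/* models !PageSwapCache(page) */
		pthread_mutex_unlock(&page->lock);	/* models the out_page exit */
		return false;			/* caller must bail out and retry */
	}
	return true;
}

int main(void)
{
	struct fake_page page = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.in_swapcache = true,
	};

	if (lock_and_revalidate(&page)) {
		printf("page still in swapcache, safe to proceed\n");
		pthread_mutex_unlock(&page.lock);
	}
	return 0;
}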
@@ -2735,6 +2748,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 		try_to_free_swap(page);
 	unlock_page(page);
+	if (swapcache) {
+		/*
+		 * Hold the lock to avoid the swap entry to be reused
+		 * until we take the PT lock for the pte_same() check
+		 * (to avoid false positives from pte_same). For
+		 * further safety release the lock after the swap_free
+		 * so that the swap count won't change under a
+		 * parallel locked swapcache.
+		 */
+		unlock_page(swapcache);
+		page_cache_release(swapcache);
+	}
 
 	if (flags & FAULT_FLAG_WRITE) {
 		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
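On the success path, the hunk above keeps the original swapcache page locked until after swap_free() has run, and only then unlocks and releases it, so the swap entry cannot be recycled while the pte_same() check could still produce a false positive. A rough userspace model of that release ordering, with pthread mutexes and a plain refcount standing in for the kernel's page lock and page_cache_release():

#include <pthread.h>

struct ref_page {
	pthread_mutex_t lock;
	int refcount;	/* models the page reference count */
};

/*
 * Model of the tail of do_swap_page(): 'page' may be a KSM-forced copy,
 * in which case 'swapcache' is the original, still-locked page.
 */
void finish_swap_fault(struct ref_page *page, struct ref_page *swapcache)
{
	/* ...pte installed, swap_free() and try_to_free_swap() already done... */
	pthread_mutex_unlock(&page->lock);	/* models unlock_page(page) */
	if (swapcache) {
		/* Held until now so the swap entry could not be recycled. */
		pthread_mutex_unlock(&swapcache->lock);	/* unlock_page(swapcache) */
		swapcache->refcount--;			/* page_cache_release(swapcache) */
	}
}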
@@ -2756,15 +2781,17 @@ out_page:
 	unlock_page(page);
 out_release:
 	page_cache_release(page);
+	if (swapcache) {
+		unlock_page(swapcache);
+		page_cache_release(swapcache);
+	}
 	return ret;
 }
 
 /*
- * This is like a special single-page "expand_downwards()",
- * except we must first make sure that 'address-PAGE_SIZE'
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
  * doesn't hit another vma.
-
- * The "find_vma()" will do the right thing even if we wrap
  */
 static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
 {
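The error path gets the matching cleanup: out_page/out_release now also unlock and drop the original page when a KSM copy was in flight, so no failure exit leaks a locked, pinned page. A compact sketch of the same goto-based unwind in standalone C; the failure condition and all names here are invented for illustration:

#include <pthread.h>

struct page_model {
	pthread_mutex_t lock;
	int refcount;
};

/* Models the unwind structure: every failure exit releases both pages. */
int fault_model(struct page_model *page, struct page_model *swapcache,
		int charge_failed)
{
	int ret = 0;

	if (charge_failed) {	/* stands in for e.g. a failed memcg charge */
		ret = -1;
		goto out_page;
	}
	/* ...success path would install the pte and return earlier... */
	return 0;

out_page:
	pthread_mutex_unlock(&page->lock);	/* models unlock_page(page) */
	page->refcount--;			/* models page_cache_release(page) */
	if (swapcache) {			/* the cleanup this hunk adds */
		pthread_mutex_unlock(&swapcache->lock);
		swapcache->refcount--;
	}
	return ret;
}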
@@ -2783,6 +2810,15 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
 
 		expand_stack(vma, address - PAGE_SIZE);
 	}
+	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+		struct vm_area_struct *next = vma->vm_next;
+
+		/* As VM_GROWSDOWN but s/below/above/ */
+		if (next && next->vm_start == address + PAGE_SIZE)
+			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+		expand_upwards(vma, address + PAGE_SIZE);
+	}
 	return 0;
 }
 
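The final hunk extends check_stack_guard_page() to upward-growing stacks: a first fault in the last page of a VM_GROWSUP vma is allowed only if the abutting next vma also grows up; otherwise the guard page has been hit and the fault fails with -ENOMEM. The sketch below models both directions in self-contained C; the structures and flag values are simplified stand-ins, and the downward branch is reconstructed from the surrounding context plus the patch's own "As VM_GROWSDOWN but s/below/above/" comment.

#define PAGE_SIZE	4096UL
#define VM_GROWSDOWN	0x1	/* illustrative flag values, not the kernel's */
#define VM_GROWSUP	0x2
#define ENOMEM		12

struct vma_model {
	unsigned long vm_start, vm_end, vm_flags;
	struct vma_model *vm_prev, *vm_next;
};

/* Model of check_stack_guard_page() after this patch. */
static int guard_page_check(struct vma_model *vma, unsigned long address)
{
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		struct vma_model *prev = vma->vm_prev;

		/* An abutting vma below is fine only if it grows down too. */
		if (prev && prev->vm_end == address)
			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
		/* else the kernel calls expand_stack(vma, address - PAGE_SIZE) */
	}
	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
		struct vma_model *next = vma->vm_next;

		/* As VM_GROWSDOWN but s/below/above/ */
		if (next && next->vm_start == address + PAGE_SIZE)
			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
		/* else the kernel calls expand_upwards(vma, address + PAGE_SIZE) */
	}
	return 0;
}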