Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c   42
1 file changed, 33 insertions(+), 9 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 99275325f303..8068893697bb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1965,6 +1965,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
 	vmf.pgoff = page->index;
 	vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
 	vmf.page = page;
+	vmf.cow_page = NULL;
 
 	ret = vma->vm_ops->page_mkwrite(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
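The cow_page field itself is added to struct vm_fault in include/linux/mm.h, which is outside this file-limited view; do_page_mkwrite() builds its vm_fault on the stack, so the new field must be cleared explicitly to tell the handler that no COW is wanted here. A sketch of the relevant part of the structure after the companion change (field comments paraphrased):

struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *cow_page;		/* Handler may choose to COW */
	struct page *page;		/* ->fault handlers should return the
					 * faulted page here, unless
					 * VM_FAULT_NOPAGE is set */
	/* ... */
};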
@@ -2329,6 +2330,7 @@ void unmap_mapping_range(struct address_space *mapping,
 		details.last_index = ULONG_MAX;
 
 
+	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
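The new comment documents a locking contract: truncate takes the i_mmap rwsem exclusively, while a fault handler that has no page to lock is expected to take it shared, so the two paths cannot overlap. Schematically (the fault side is the assumed usage this patch enables, not code from the diff):

/* Truncate side, as in unmap_mapping_range() above: exclusive. */
i_mmap_lock_write(mapping);
unmap_mapping_range_tree(&mapping->i_mmap, &details);
i_mmap_unlock_write(mapping);

/*
 * Pageless fault side (assumed usage): shared, held while the block is
 * looked up and mapped, and released later by the core fault code.
 */
i_mmap_lock_read(mapping);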
@@ -2638,7 +2640,8 @@ oom:
  * See filemap_fault() and __lock_page_retry().
  */
 static int __do_fault(struct vm_area_struct *vma, unsigned long address,
-		pgoff_t pgoff, unsigned int flags, struct page **page)
+		pgoff_t pgoff, unsigned int flags,
+		struct page *cow_page, struct page **page)
 {
 	struct vm_fault vmf;
 	int ret;
@@ -2647,10 +2650,13 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	vmf.pgoff = pgoff;
 	vmf.flags = flags;
 	vmf.page = NULL;
+	vmf.cow_page = cow_page;
 
 	ret = vma->vm_ops->fault(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
+	if (!vmf.page)
+		goto out;
 
 	if (unlikely(PageHWPoison(vmf.page))) {
 		if (ret & VM_FAULT_LOCKED)
@@ -2664,6 +2670,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
 
+out:
 	*page = vmf.page;
 	return ret;
 }
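With these two changes, __do_fault() tolerates a handler that consumes vmf.cow_page, installs the mapping itself, and returns with vmf.page still NULL. Such a handler might look roughly like the sketch below; example_fault(), blk_is_hole() and read_block_into_page() are hypothetical placeholders for a filesystem's own block lookup and I/O, not functions from this patch:

/* Illustrative only: a ->fault handler that performs the COW itself. */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;

	if (vmf->cow_page) {
		/*
		 * Write fault on a private mapping: fill the COW page
		 * pre-allocated by do_cow_fault() instead of returning
		 * a page-cache page for the core to copy.
		 */
		if (blk_is_hole(vma->vm_file, vmf->pgoff))
			clear_user_highpage(vmf->cow_page, vaddr);
		else
			read_block_into_page(vma->vm_file, vmf->pgoff,
					     vmf->cow_page);
		/*
		 * Leave vmf->page NULL and hold i_mmap_lock for read so
		 * truncate cannot race; do_cow_fault() drops the lock.
		 */
		i_mmap_lock_read(vma->vm_file->f_mapping);
		return 0;
	}
	/* ... read fault: return a locked page in vmf->page ... */
	return VM_FAULT_SIGBUS;
}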
@@ -2834,7 +2841,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		pte_unmap_unlock(pte, ptl);
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -2874,26 +2881,43 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
-	copy_user_highpage(new_page, fault_page, address, vma);
+	if (fault_page)
+		copy_user_highpage(new_page, fault_page, address, vma);
 	__SetPageUptodate(new_page);
 
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
-		unlock_page(fault_page);
-		page_cache_release(fault_page);
+		if (fault_page) {
+			unlock_page(fault_page);
+			page_cache_release(fault_page);
+		} else {
+			/*
+			 * The fault handler has no page to lock, so it holds
+			 * i_mmap_lock for read to protect against truncate.
+			 */
+			i_mmap_unlock_read(vma->vm_file->f_mapping);
+		}
 		goto uncharge_out;
 	}
 	do_set_pte(vma, address, new_page, pte, true, true);
 	mem_cgroup_commit_charge(new_page, memcg, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
-	unlock_page(fault_page);
-	page_cache_release(fault_page);
+	if (fault_page) {
+		unlock_page(fault_page);
+		page_cache_release(fault_page);
+	} else {
+		/*
+		 * The fault handler has no page to lock, so it holds
+		 * i_mmap_lock for read to protect against truncate.
+		 */
+		i_mmap_unlock_read(vma->vm_file->f_mapping);
+	}
 	return ret;
 uncharge_out:
 	mem_cgroup_cancel_charge(new_page, memcg);
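The page/pageless cleanup now appears twice in do_cow_fault(); the contract it implements could be captured in a small helper along these lines (an illustrative refactoring, not part of the patch):

/*
 * Illustrative helper, not in the patch: release whatever the ->fault
 * handler left held, either the locked page-cache page or the shared
 * i_mmap_lock a pageless handler took to keep truncate at bay.
 */
static void fault_page_done(struct vm_area_struct *vma,
			    struct page *fault_page)
{
	if (fault_page) {
		unlock_page(fault_page);
		page_cache_release(fault_page);
	} else {
		i_mmap_unlock_read(vma->vm_file->f_mapping);
	}
}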
@@ -2912,7 +2936,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 