Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	48
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index d5d1653d60a6..98bcb90d5957 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1310,8 +1310,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		cond_resched();
 		while (!(page = follow_page(vma, start, foll_flags))) {
 			int ret;
-			ret = handle_mm_fault(mm, vma, start,
-					foll_flags & FOLL_WRITE);
+
+			/* FOLL_WRITE matches FAULT_FLAG_WRITE! */
+			ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
 			if (ret & VM_FAULT_ERROR) {
 				if (ret & VM_FAULT_OOM)
 					return i ? i : -ENOMEM;
@@ -2496,7 +2497,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
  */
 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
 	spinlock_t *ptl;
 	struct page *page;
@@ -2572,9 +2573,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	inc_mm_counter(mm, anon_rss);
 	pte = mk_pte(page, vma->vm_page_prot);
-	if (write_access && reuse_swap_page(page)) {
+	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-		write_access = 0;
+		flags &= ~FAULT_FLAG_WRITE;
 	}
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
@@ -2587,7 +2588,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		try_to_free_swap(page);
 	unlock_page(page);
 
-	if (write_access) {
+	if (flags & FAULT_FLAG_WRITE) {
 		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
 		if (ret & VM_FAULT_ERROR)
 			ret &= VM_FAULT_ERROR;
@@ -2616,7 +2617,7 @@ out_page:
  */
 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access)
+		unsigned int flags)
 {
 	struct page *page;
 	spinlock_t *ptl;
@@ -2776,7 +2777,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * due to the bad i386 page protection. But it's valid
 	 * for other architectures too.
 	 *
-	 * Note that if write_access is true, we either now have
+	 * Note that if FAULT_FLAG_WRITE is set, we either now have
 	 * an exclusive copy of the page, or this is a shared mapping,
 	 * so we can make it writable and dirty to avoid having to
 	 * handle that later.
@@ -2847,11 +2848,10 @@ unwritable_page:
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
 	pgoff_t pgoff = (((address & PAGE_MASK)
 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-	unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
 	pte_unmap(page_table);
 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
@@ -2868,12 +2868,12 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
-	unsigned int flags = FAULT_FLAG_NONLINEAR |
-				(write_access ? FAULT_FLAG_WRITE : 0);
 	pgoff_t pgoff;
 
+	flags |= FAULT_FLAG_NONLINEAR;
+
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 		return 0;
 
@@ -2904,7 +2904,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long address,
-		pte_t *pte, pmd_t *pmd, int write_access)
+		pte_t *pte, pmd_t *pmd, unsigned int flags)
 {
 	pte_t entry;
 	spinlock_t *ptl;
@@ -2915,30 +2915,30 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 			if (vma->vm_ops) {
 				if (likely(vma->vm_ops->fault))
 					return do_linear_fault(mm, vma, address,
-						pte, pmd, write_access, entry);
+						pte, pmd, flags, entry);
 			}
 			return do_anonymous_page(mm, vma, address,
-						 pte, pmd, write_access);
+						 pte, pmd, flags);
 		}
 		if (pte_file(entry))
 			return do_nonlinear_fault(mm, vma, address,
-					pte, pmd, write_access, entry);
+					pte, pmd, flags, entry);
 		return do_swap_page(mm, vma, address,
-					pte, pmd, write_access, entry);
+					pte, pmd, flags, entry);
 	}
 
 	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
 	if (unlikely(!pte_same(*pte, entry)))
 		goto unlock;
-	if (write_access) {
+	if (flags & FAULT_FLAG_WRITE) {
 		if (!pte_write(entry))
 			return do_wp_page(mm, vma, address,
 					pte, pmd, ptl, entry);
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
+	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
 		update_mmu_cache(vma, address, entry);
 	} else {
 		/*
@@ -2947,7 +2947,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 		 * This still avoids useless tlb flushes for .text page faults
 		 * with threads.
 		 */
-		if (write_access)
+		if (flags & FAULT_FLAG_WRITE)
 			flush_tlb_page(vma, address);
 	}
 unlock:
@@ -2959,7 +2959,7 @@ unlock:
  * By the time we get here, we already hold the mm semaphore
  */
 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, int write_access)
+		unsigned long address, unsigned int flags)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -2971,7 +2971,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	count_vm_event(PGFAULT);
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
-		return hugetlb_fault(mm, vma, address, write_access);
+		return hugetlb_fault(mm, vma, address, flags);
 
 	pgd = pgd_offset(mm, address);
 	pud = pud_alloc(mm, pgd, address);
@@ -2984,7 +2984,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte)
 		return VM_FAULT_OOM;
 
-	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
 #ifndef __PAGETABLE_PUD_FOLDED
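
The net effect of this patch is that handle_mm_fault() and the fault handlers below it take a FAULT_FLAG_* bitmask instead of a boolean write_access. As a rough caller-side sketch (the helper below is hypothetical and not part of this diff; it only assumes FAULT_FLAG_WRITE as defined in include/linux/mm.h):

/*
 * Hypothetical illustration, not from mm/memory.c: how a caller of
 * handle_mm_fault() converts from the old boolean convention to the
 * new flags convention introduced by this patch.
 */
static int example_fault_caller(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long address, int write)
{
	/* Old convention: pass a plain boolean write_access. */
	/* return handle_mm_fault(mm, vma, address, write); */

	/* New convention: pass a bitmask of FAULT_FLAG_* bits. */
	return handle_mm_fault(mm, vma, address,
			       write ? FAULT_FLAG_WRITE : 0);
}

The __get_user_pages() hunk can pass foll_flags & FOLL_WRITE straight through only because FOLL_WRITE and FAULT_FLAG_WRITE are defined to the same bit value, which is exactly what the new "FOLL_WRITE matches FAULT_FLAG_WRITE!" comment records.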