author		Linus Torvalds <torvalds@linux-foundation.org>	2009-04-10 11:43:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-21 16:06:05 -0400
commit		30c9f3a9fae79517bca595826a19c6855fbb6d32
tree		f7eb9588fe38dc1b045e97409e25c57c516aaf44 /mm
parent		232086b19964d0e13359d30d74b11ca31b0751cb
Remove internal use of 'write_access' in mm/memory.c
The fault handling routines really want more fine-grained flags than a
single "was it a write fault" boolean - the callers will want to set
flags like "you can return a retry error" etc.

And that's actually how the VM works internally, but right now the
top-level fault handling functions in mm/memory.c all pass just the
'write_access' boolean around.

This switches them over to pass around the FAULT_FLAG_xyzzy 'flags'
variable instead. The 'write_access' calling convention still exists
for the exported 'handle_mm_fault()' function, but that is next.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
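The conversion in the diff below is the standard boolean-to-bitmask move:
one unsigned int carries several independent bits, each set and tested on
its own, where the old code could only say write-or-not. As a minimal
standalone sketch of the convention (the FAULT_FLAG_* values here are
assumptions for illustration; the real definitions live in
include/linux/mm.h):

	#include <stdio.h>

	/* Assumed flag values, for illustration only. */
	#define FAULT_FLAG_WRITE	0x01	/* fault was a write access */
	#define FAULT_FLAG_NONLINEAR	0x02	/* fault via a nonlinear mapping */

	int main(void)
	{
		unsigned int flags = FAULT_FLAG_WRITE;	/* was: int write_access = 1; */

		if (flags & FAULT_FLAG_WRITE)		/* was: if (write_access) */
			printf("write fault\n");

		flags |= FAULT_FLAG_NONLINEAR;		/* a second, independent bit */
		printf("flags = %#x\n", flags);		/* prints: flags = 0x3 */
		return 0;
	}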
Diffstat (limited to 'mm')
 mm/memory.c | 42 +++++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index d5d1653d60a6..e6a9700359df 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2496,7 +2496,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
  */
 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
 	spinlock_t *ptl;
 	struct page *page;
@@ -2572,9 +2572,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	inc_mm_counter(mm, anon_rss);
 	pte = mk_pte(page, vma->vm_page_prot);
-	if (write_access && reuse_swap_page(page)) {
+	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-		write_access = 0;
+		flags &= ~FAULT_FLAG_WRITE;
 	}
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
@@ -2587,7 +2587,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	try_to_free_swap(page);
 	unlock_page(page);
 
-	if (write_access) {
+	if (flags & FAULT_FLAG_WRITE) {
 		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
 		if (ret & VM_FAULT_ERROR)
 			ret &= VM_FAULT_ERROR;
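A subtlety in the two do_swap_page() hunks above: when reuse_swap_page()
says the swapped-in page can simply be made writable, the old code zeroed
write_access so that the trailing write check would not call do_wp_page()
for a page that is already writable. The new code expresses the same thing
as 'flags &= ~FAULT_FLAG_WRITE', which clears only the write bit and leaves
any other bit a caller may have set intact. A sketch of that distinction
(flag values assumed, as above):

	#include <stdio.h>

	#define FAULT_FLAG_WRITE	0x01	/* assumed values, for illustration */
	#define FAULT_FLAG_NONLINEAR	0x02

	int main(void)
	{
		unsigned int flags = FAULT_FLAG_WRITE | FAULT_FLAG_NONLINEAR;

		/* The moral equivalent of the old 'write_access = 0': */
		flags &= ~FAULT_FLAG_WRITE;

		/* The write bit is gone, so a later 'if (flags & FAULT_FLAG_WRITE)'
		 * (the do_wp_page() call above) is skipped - but unrelated bits
		 * survive, which a single boolean could never guarantee. */
		printf("write=%d nonlinear=%d\n",
		       !!(flags & FAULT_FLAG_WRITE),
		       !!(flags & FAULT_FLAG_NONLINEAR));	/* write=0 nonlinear=1 */
		return 0;
	}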
@@ -2616,7 +2616,7 @@ out_page:
  */
 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access)
+		unsigned int flags)
 {
 	struct page *page;
 	spinlock_t *ptl;
@@ -2776,7 +2776,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * due to the bad i386 page protection. But it's valid
 	 * for other architectures too.
 	 *
-	 * Note that if write_access is true, we either now have
+	 * Note that if FAULT_FLAG_WRITE is set, we either now have
 	 * an exclusive copy of the page, or this is a shared mapping,
 	 * so we can make it writable and dirty to avoid having to
 	 * handle that later.
@@ -2847,11 +2847,10 @@ unwritable_page:
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
 	pgoff_t pgoff = (((address & PAGE_MASK)
 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-	unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
 	pte_unmap(page_table);
 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
@@ -2868,12 +2867,12 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
-	unsigned int flags = FAULT_FLAG_NONLINEAR |
-				(write_access ? FAULT_FLAG_WRITE : 0);
 	pgoff_t pgoff;
 
+	flags |= FAULT_FLAG_NONLINEAR;
+
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 		return 0;
 
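This hunk is where the new convention pays off: because do_nonlinear_fault()
now ORs its own bit into the caller's flags instead of rebuilding the word
from a boolean, any future caller-set bit (the commit message's "you can
return a retry error" flag, say) survives the trip down to __do_fault().
A sketch of the pass-through shape - the retry flag is purely hypothetical
here, and the values are assumptions as before:

	#include <stdio.h>

	#define FAULT_FLAG_WRITE	0x01	/* assumed values, for illustration */
	#define FAULT_FLAG_NONLINEAR	0x02
	#define FAULT_FLAG_ALLOW_RETRY	0x04	/* hypothetical future flag */

	/* Stand-in for __do_fault(): just reports which bits arrived. */
	static void fake_do_fault(unsigned int flags)
	{
		printf("write=%d nonlinear=%d retry=%d\n",
		       !!(flags & FAULT_FLAG_WRITE),
		       !!(flags & FAULT_FLAG_NONLINEAR),
		       !!(flags & FAULT_FLAG_ALLOW_RETRY));
	}

	/* Shape of the new do_nonlinear_fault(): add one bit, pass the rest on. */
	static void fake_nonlinear_fault(unsigned int flags)
	{
		flags |= FAULT_FLAG_NONLINEAR;
		fake_do_fault(flags);
	}

	int main(void)
	{
		/* A caller-set retry bit flows through untouched - the old
		 * 'write_access ? FAULT_FLAG_WRITE : 0' rebuild would have
		 * dropped it on the floor. */
		fake_nonlinear_fault(FAULT_FLAG_WRITE | FAULT_FLAG_ALLOW_RETRY);
		return 0;
	}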
@@ -2904,7 +2903,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long address,
-		pte_t *pte, pmd_t *pmd, int write_access)
+		pte_t *pte, pmd_t *pmd, unsigned int flags)
 {
 	pte_t entry;
 	spinlock_t *ptl;
@@ -2915,30 +2914,30 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 			if (vma->vm_ops) {
 				if (likely(vma->vm_ops->fault))
 					return do_linear_fault(mm, vma, address,
-						pte, pmd, write_access, entry);
+						pte, pmd, flags, entry);
 			}
 			return do_anonymous_page(mm, vma, address,
-						 pte, pmd, write_access);
+						 pte, pmd, flags);
 		}
 		if (pte_file(entry))
 			return do_nonlinear_fault(mm, vma, address,
-					pte, pmd, write_access, entry);
+					pte, pmd, flags, entry);
 		return do_swap_page(mm, vma, address,
-				pte, pmd, write_access, entry);
+				pte, pmd, flags, entry);
 	}
 
 	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
 	if (unlikely(!pte_same(*pte, entry)))
 		goto unlock;
-	if (write_access) {
+	if (flags & FAULT_FLAG_WRITE) {
 		if (!pte_write(entry))
 			return do_wp_page(mm, vma, address,
 					pte, pmd, ptl, entry);
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
+	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
 		update_mmu_cache(vma, address, entry);
 	} else {
 		/*
@@ -2947,7 +2946,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 		 * This still avoids useless tlb flushes for .text page faults
 		 * with threads.
 		 */
-		if (write_access)
+		if (flags & FAULT_FLAG_WRITE)
 			flush_tlb_page(vma, address);
 	}
 unlock:
@@ -2965,13 +2964,14 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	unsigned int flags = write_access ? FAULT_FLAG_WRITE : 0;
 
 	__set_current_state(TASK_RUNNING);
 
 	count_vm_event(PGFAULT);
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
-		return hugetlb_fault(mm, vma, address, write_access);
+		return hugetlb_fault(mm, vma, address, flags);
 
 	pgd = pgd_offset(mm, address);
 	pud = pud_alloc(mm, pgd, address);
@@ -2984,7 +2984,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte)
 		return VM_FAULT_OOM;
 
-	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
 #ifndef __PAGETABLE_PUD_FOLDED
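The commit message's "but that is next" refers to the exported entry point
seen in the last two hunks: handle_mm_fault() still takes the write_access
boolean and converts it to flags in exactly one place, so none of the
architecture fault handlers have to change in this patch. A toy model of
that boundary (not kernel code; flag value assumed as before):

	#include <stdio.h>

	#define FAULT_FLAG_WRITE	0x01	/* assumed value, for illustration */

	/* Toy stand-in for the internal, flags-based layer. */
	static int toy_handle_pte_fault(unsigned int flags)
	{
		return (flags & FAULT_FLAG_WRITE) ? 1 : 0;
	}

	/* Toy stand-in for the exported entry point: the old boolean calling
	 * convention survives here, converted to flags at this one boundary. */
	static int toy_handle_mm_fault(int write_access)
	{
		unsigned int flags = write_access ? FAULT_FLAG_WRITE : 0;

		return toy_handle_pte_fault(flags);
	}

	int main(void)
	{
		printf("%d %d\n", toy_handle_mm_fault(1), toy_handle_mm_fault(0));
		return 0;
	}

Once the follow-up converts the export as well, callers can pass flag bits
directly and the boolean disappears entirely.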