Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c   55
1 file changed, 27 insertions, 28 deletions
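Note on the conversion: every hunk below swaps the legacy page-cache reference helpers for the generic page refcounting calls. For context only (not part of this diff, and quoted from memory), the old helpers in include/linux/pagemap.h were one-to-one wrappers around get_page()/put_page(), so the rename does not change reference-counting behaviour. The sketch below uses a hypothetical helper, hold_page_briefly(), purely to illustrate the pattern being converted; it is not code from mm/memory.c.

/*
 * Illustration only: the legacy wrappers were plain aliases, roughly
 *
 *   #define page_cache_get(page)      get_page(page)
 *   #define page_cache_release(page)  put_page(page)
 *
 * so each take/drop sequence converts mechanically.
 */
#include <linux/mm.h>

/* Hypothetical helper, not part of this patch. */
static void hold_page_briefly(struct page *page)
{
	get_page(page);		/* was: page_cache_get(page) */
	/* ... touch the page while holding the extra reference ... */
	put_page(page);		/* was: page_cache_release(page) */
}

The only hunk that is not a pure rename drops the stale "Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT" comment in unmap_mapping_range_tree().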
diff --git a/mm/memory.c b/mm/memory.c
index 098f00d05461..93897f23cc11 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2054,7 +2054,7 @@ static inline int wp_page_reuse(struct mm_struct *mm,
 VM_BUG_ON_PAGE(PageAnon(page), page);
 mapping = page->mapping;
 unlock_page(page);
-page_cache_release(page);
+put_page(page);
 
 if ((dirtied || page_mkwrite) && mapping) {
 /*
@@ -2188,7 +2188,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 if (new_page)
-page_cache_release(new_page);
+put_page(new_page);
 
 pte_unmap_unlock(page_table, ptl);
 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
@@ -2203,14 +2203,14 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
 munlock_vma_page(old_page);
 unlock_page(old_page);
 }
-page_cache_release(old_page);
+put_page(old_page);
 }
 return page_copied ? VM_FAULT_WRITE : 0;
 oom_free_new:
-page_cache_release(new_page);
+put_page(new_page);
 oom:
 if (old_page)
-page_cache_release(old_page);
+put_page(old_page);
 return VM_FAULT_OOM;
 }
 
@@ -2258,7 +2258,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 int page_mkwrite = 0;
 
-page_cache_get(old_page);
+get_page(old_page);
 
 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 int tmp;
@@ -2267,7 +2267,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 tmp = do_page_mkwrite(vma, old_page, address);
 if (unlikely(!tmp || (tmp &
 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-page_cache_release(old_page);
+put_page(old_page);
 return tmp;
 }
 /*
@@ -2281,7 +2281,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 if (!pte_same(*page_table, orig_pte)) {
 unlock_page(old_page);
 pte_unmap_unlock(page_table, ptl);
-page_cache_release(old_page);
+put_page(old_page);
 return 0;
 }
 page_mkwrite = 1;
@@ -2341,7 +2341,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 */
 if (PageAnon(old_page) && !PageKsm(old_page)) {
 if (!trylock_page(old_page)) {
-page_cache_get(old_page);
+get_page(old_page);
 pte_unmap_unlock(page_table, ptl);
 lock_page(old_page);
 page_table = pte_offset_map_lock(mm, pmd, address,
@@ -2349,10 +2349,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 if (!pte_same(*page_table, orig_pte)) {
 unlock_page(old_page);
 pte_unmap_unlock(page_table, ptl);
-page_cache_release(old_page);
+put_page(old_page);
 return 0;
 }
-page_cache_release(old_page);
+put_page(old_page);
 }
 if (reuse_swap_page(old_page)) {
 /*
@@ -2375,7 +2375,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 /*
 * Ok, we need to copy. Oh, well..
 */
-page_cache_get(old_page);
+get_page(old_page);
 
 pte_unmap_unlock(page_table, ptl);
 return wp_page_copy(mm, vma, address, page_table, pmd,
@@ -2400,7 +2400,6 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
 
 vba = vma->vm_pgoff;
 vea = vba + vma_pages(vma) - 1;
-/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
 zba = details->first_index;
 if (zba < vba)
 zba = vba;
@@ -2619,7 +2618,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 * parallel locked swapcache.
 */
 unlock_page(swapcache);
-page_cache_release(swapcache);
+put_page(swapcache);
 }
 
 if (flags & FAULT_FLAG_WRITE) {
@@ -2641,10 +2640,10 @@ out_nomap:
 out_page:
 unlock_page(page);
 out_release:
-page_cache_release(page);
+put_page(page);
 if (page != swapcache) {
 unlock_page(swapcache);
-page_cache_release(swapcache);
+put_page(swapcache);
 }
 return ret;
 }
@@ -2752,7 +2751,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 if (userfaultfd_missing(vma)) {
 pte_unmap_unlock(page_table, ptl);
 mem_cgroup_cancel_charge(page, memcg, false);
-page_cache_release(page);
+put_page(page);
 return handle_userfault(vma, address, flags,
 VM_UFFD_MISSING);
 }
@@ -2771,10 +2770,10 @@ unlock:
 return 0;
 release:
 mem_cgroup_cancel_charge(page, memcg, false);
-page_cache_release(page);
+put_page(page);
 goto unlock;
 oom_free_page:
-page_cache_release(page);
+put_page(page);
 oom:
 return VM_FAULT_OOM;
 }
@@ -2807,7 +2806,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 if (unlikely(PageHWPoison(vmf.page))) {
 if (ret & VM_FAULT_LOCKED)
 unlock_page(vmf.page);
-page_cache_release(vmf.page);
+put_page(vmf.page);
 return VM_FAULT_HWPOISON;
 }
 
@@ -2996,7 +2995,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 if (unlikely(!pte_same(*pte, orig_pte))) {
 pte_unmap_unlock(pte, ptl);
 unlock_page(fault_page);
-page_cache_release(fault_page);
+put_page(fault_page);
 return ret;
 }
 do_set_pte(vma, address, fault_page, pte, false, false);
@@ -3024,7 +3023,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 return VM_FAULT_OOM;
 
 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) {
-page_cache_release(new_page);
+put_page(new_page);
 return VM_FAULT_OOM;
 }
 
@@ -3041,7 +3040,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 pte_unmap_unlock(pte, ptl);
 if (fault_page) {
 unlock_page(fault_page);
-page_cache_release(fault_page);
+put_page(fault_page);
 } else {
 /*
 * The fault handler has no page to lock, so it holds
@@ -3057,7 +3056,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 pte_unmap_unlock(pte, ptl);
 if (fault_page) {
 unlock_page(fault_page);
-page_cache_release(fault_page);
+put_page(fault_page);
 } else {
 /*
 * The fault handler has no page to lock, so it holds
@@ -3068,7 +3067,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 return ret;
 uncharge_out:
 mem_cgroup_cancel_charge(new_page, memcg, false);
-page_cache_release(new_page);
+put_page(new_page);
 return ret;
 }
 
@@ -3096,7 +3095,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 tmp = do_page_mkwrite(vma, fault_page, address);
 if (unlikely(!tmp ||
 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-page_cache_release(fault_page);
+put_page(fault_page);
 return tmp;
 }
 }
@@ -3105,7 +3104,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 if (unlikely(!pte_same(*pte, orig_pte))) {
 pte_unmap_unlock(pte, ptl);
 unlock_page(fault_page);
-page_cache_release(fault_page);
+put_page(fault_page);
 return ret;
 }
 do_set_pte(vma, address, fault_page, pte, true, false);
@@ -3736,7 +3735,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 buf, maddr + offset, bytes);
 }
 kunmap(page);
-page_cache_release(page);
+put_page(page);
 }
 len -= bytes;
 buf += bytes;