Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 32 +++++++++++++++++++++++++-------
 1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index af82741caaa4..02e48aa0ed13 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -736,7 +736,7 @@ again:
         dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
         if (!dst_pte)
                 return -ENOMEM;
-        src_pte = pte_offset_map_nested(src_pmd, addr);
+        src_pte = pte_offset_map(src_pmd, addr);
         src_ptl = pte_lockptr(src_mm, src_pmd);
         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
         orig_src_pte = src_pte;
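
Note on this hunk and the next: the _nested map/unmap variants existed only for the old slot-based kmap_atomic() API, where mapping the source and destination PTE pages at the same time on a highmem configuration required two dedicated slots (KM_PTE0/KM_PTE1). With stacked kmap_atomic() the plain calls nest naturally, so the _nested variants can go. For context, a sketch of the sort of highmem definitions this retires (illustrative reconstruction of the pre-change macros, not part of this diff; the exact form varied by architecture):

/* Old slot-based API (sketch): two live PTE maps needed two slots. */
#define pte_offset_map(dir, addr) \
        ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr) \
        ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
#define pte_unmap(pte)          kunmap_atomic((pte), KM_PTE0)
#define pte_unmap_nested(pte)   kunmap_atomic((pte), KM_PTE1)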
@@ -767,7 +767,7 @@ again:
 
         arch_leave_lazy_mmu_mode();
         spin_unlock(src_ptl);
-        pte_unmap_nested(orig_src_pte);
+        pte_unmap(orig_src_pte);
         add_mm_rss_vec(dst_mm, rss);
         pte_unmap_unlock(orig_dst_pte, dst_ptl);
         cond_resched();
@@ -1591,7 +1591,7 @@ struct page *get_dump_page(unsigned long addr)
 }
 #endif /* CONFIG_ELF_CORE */
 
-pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
                         spinlock_t **ptl)
 {
         pgd_t * pgd = pgd_offset(mm, addr);
@@ -2080,7 +2080,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
                  * zeroes.
                  */
                 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
-                        memset(kaddr, 0, PAGE_SIZE);
+                        clear_page(kaddr);
                 kunmap_atomic(kaddr, KM_USER0);
                 flush_dcache_page(dst);
         } else
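
clear_page() is the per-architecture primitive for zeroing a full page, typically hand-tuned assembly, so this is an optimization with no behavior change: the line it replaces is exactly the generic fallback. Per asm-generic of this era, the fallback definition is simply:

#define clear_page(page)        memset((page), 0, PAGE_SIZE)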
@@ -2108,6 +2108,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 unsigned long address, pte_t *page_table, pmd_t *pmd,
                 spinlock_t *ptl, pte_t orig_pte)
+        __releases(ptl)
 {
         struct page *old_page, *new_page;
         pte_t entry;
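
do_wp_page() is entered with the page-table lock held and returns with it dropped, which a plain C prototype cannot express. __releases() is a sparse lock-context annotation that documents this transition and lets the checker verify callers; it compiles to nothing in a normal build. Roughly, per the compiler.h definitions (shown for illustration):

#ifdef __CHECKER__
# define __releases(x)  __attribute__((context(x, 1, 0)))  /* enter held, exit released */
#else
# define __releases(x)
#endif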
@@ -2627,6 +2628,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         struct page *page, *swapcache = NULL;
         swp_entry_t entry;
         pte_t pte;
+        int locked;
         struct mem_cgroup *ptr = NULL;
         int exclusive = 0;
         int ret = 0;
@@ -2677,8 +2679,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 goto out_release;
         }
 
-        lock_page(page);
+        locked = lock_page_or_retry(page, mm, flags);
         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+        if (!locked) {
+                ret |= VM_FAULT_RETRY;
+                goto out_release;
+        }
 
         /*
          * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
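
This hunk is the heart of the retry change: rather than sleeping on a contended page lock while still holding mmap_sem, do_swap_page() may now drop mmap_sem and report VM_FAULT_RETRY so the fault is restarted from the top. A sketch of the helper's shape (assumed, matching the pagemap.h helper this series introduces):

/*
 * Returns 1 with the page locked, or 0 after having released mm->mmap_sem
 * (only possible when flags contains FAULT_FLAG_ALLOW_RETRY); on 0 the
 * caller must back out and return VM_FAULT_RETRY.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

Note that delayacct_clear_flag() still runs before the locked check, so swap-in delay accounting is settled even on the retry path.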
@@ -2927,7 +2933,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         vmf.page = NULL;
 
         ret = vma->vm_ops->fault(vma, &vmf);
-        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
+                            VM_FAULT_RETRY)))
                 return ret;
 
         if (unlikely(PageHWPoison(vmf.page))) {
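
Since a ->fault handler (notably filemap_fault) can now drop mmap_sem and return VM_FAULT_RETRY, __do_fault() has to bail out immediately, just as for errors: it no longer owns the lock and must not touch the vma. The retry itself is driven from the arch fault handler; an illustrative caller-side pattern (assumed names, after the x86 handler of this era):

        fault = handle_mm_fault(mm, vma, address, flags);
        if (unlikely(fault & VM_FAULT_RETRY)) {
                /* mmap_sem was released inside the fault path; try again,
                 * this time allowing the page-lock wait to block. */
                flags &= ~FAULT_FLAG_ALLOW_RETRY;
                goto retry;
        }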
@@ -3344,7 +3351,7 @@ int in_gate_area_no_task(unsigned long addr)
 
 #endif /* __HAVE_ARCH_GATE_AREA */
 
-static int follow_pte(struct mm_struct *mm, unsigned long address,
+static int __follow_pte(struct mm_struct *mm, unsigned long address,
                 pte_t **ptepp, spinlock_t **ptlp)
 {
         pgd_t *pgd;
@@ -3381,6 +3388,17 @@ out:
         return -EINVAL;
 }
 
+static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+                             pte_t **ptepp, spinlock_t **ptlp)
+{
+        int res;
+
+        /* (void) is needed to make gcc happy */
+        (void) __cond_lock(*ptlp,
+                           !(res = __follow_pte(mm, address, ptepp, ptlp)));
+        return res;
+}
+
 /**
  * follow_pfn - look up PFN at a user virtual address
  * @vma: memory mapping
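
__follow_pte() returns 0 with *ptlp held and nonzero without it, a conditional lock acquisition that sparse cannot infer on its own. The wrapper encodes the contract: when __follow_pte() succeeds, !(res = ...) evaluates true and __cond_lock() tells sparse the lock was acquired. The get_locked_pte()/__get_locked_pte() split earlier in this diff exists for the same reason, with the annotated wrapper living in a header. How __cond_lock() reads to sparse, per compiler.h (shown for illustration):

#ifdef __CHECKER__
# define __cond_lock(x, c)      ((c) ? ({ __acquire(x); 1; }) : 0)
#else
# define __cond_lock(x, c)      (c)
#endif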