about summary refs log tree commit diff stats
path: root/mm/mremap.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/mremap.c')
-rw-r--r-- | mm/mremap.c | 31
1 files changed, 19 insertions, 12 deletions
diff --git a/mm/mremap.c b/mm/mremap.c
index cde56ee51ef7..506fa44403df 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -41,13 +41,15 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 		return NULL;
 
 	pmd = pmd_offset(pud, addr);
+	split_huge_page_pmd(mm, pmd);
 	if (pmd_none_or_clear_bad(pmd))
 		return NULL;
 
 	return pmd;
 }
 
-static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
+static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			    unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -62,7 +64,8 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
 	if (!pmd)
 		return NULL;
 
-	if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
+	VM_BUG_ON(pmd_trans_huge(*pmd));
+	if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr))
 		return NULL;
 
 	return pmd;
@@ -90,10 +93,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		 * and we propagate stale pages into the dst afterward.
 		 */
 		mapping = vma->vm_file->f_mapping;
-		spin_lock(&mapping->i_mmap_lock);
-		if (new_vma->vm_truncate_count &&
-		    new_vma->vm_truncate_count != vma->vm_truncate_count)
-			new_vma->vm_truncate_count = 0;
+		mutex_lock(&mapping->i_mmap_mutex);
 	}
 
 	/*
@@ -101,7 +101,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	 * pte locks because exclusive mmap_sem prevents deadlock.
 	 */
 	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
-	new_pte = pte_offset_map_nested(new_pmd, new_addr);
+	new_pte = pte_offset_map(new_pmd, new_addr);
 	new_ptl = pte_lockptr(mm, new_pmd);
 	if (new_ptl != old_ptl)
 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -119,10 +119,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	arch_leave_lazy_mmu_mode();
 	if (new_ptl != old_ptl)
 		spin_unlock(new_ptl);
-	pte_unmap_nested(new_pte - 1);
+	pte_unmap(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (mapping)
-		spin_unlock(&mapping->i_mmap_lock);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
 }
 
@@ -147,7 +147,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
 		if (!old_pmd)
 			continue;
-		new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
+		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
 		if (!new_pmd)
 			break;
 		next = (new_addr + PMD_SIZE) & PMD_MASK;
@@ -276,9 +276,16 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
 	if (old_len > vma->vm_end - addr)
 		goto Efault;
 
-	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
-		if (new_len > old_len)
+	/* Need to be careful about a growing mapping */
+	if (new_len > old_len) {
+		unsigned long pgoff;
+
+		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
 			goto Efault;
+		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
+		pgoff += vma->vm_pgoff;
+		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
+			goto Einval;
 	}
 
 	if (vma->vm_flags & VM_LOCKED) {