Diffstat (limited to 'mm/mremap.c')

 -rw-r--r--  mm/mremap.c | 193
 1 file changed, 86 insertions(+), 107 deletions(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index f343fc73a8bd..b535438c363c 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -22,35 +22,7 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-static pte_t *get_one_pte_map_nested(struct mm_struct *mm, unsigned long addr)
-{
-        pgd_t *pgd;
-        pud_t *pud;
-        pmd_t *pmd;
-        pte_t *pte = NULL;
-
-        pgd = pgd_offset(mm, addr);
-        if (pgd_none_or_clear_bad(pgd))
-                goto end;
-
-        pud = pud_offset(pgd, addr);
-        if (pud_none_or_clear_bad(pud))
-                goto end;
-
-        pmd = pmd_offset(pud, addr);
-        if (pmd_none_or_clear_bad(pmd))
-                goto end;
-
-        pte = pte_offset_map_nested(pmd, addr);
-        if (pte_none(*pte)) {
-                pte_unmap_nested(pte);
-                pte = NULL;
-        }
-end:
-        return pte;
-}
-
-static pte_t *get_one_pte_map(struct mm_struct *mm, unsigned long addr)
+static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 {
         pgd_t *pgd;
         pud_t *pud;
@@ -68,35 +40,39 @@ static pte_t *get_one_pte_map(struct mm_struct *mm, unsigned long addr)
         if (pmd_none_or_clear_bad(pmd))
                 return NULL;
 
-        return pte_offset_map(pmd, addr);
+        return pmd;
 }
 
-static inline pte_t *alloc_one_pte_map(struct mm_struct *mm, unsigned long addr)
+static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
 {
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
-        pte_t *pte = NULL;
 
         pgd = pgd_offset(mm, addr);
-
         pud = pud_alloc(mm, pgd, addr);
         if (!pud)
                 return NULL;
+
         pmd = pmd_alloc(mm, pud, addr);
-        if (pmd)
-                pte = pte_alloc_map(mm, pmd, addr);
-        return pte;
+        if (!pmd)
+                return NULL;
+
+        if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
+                return NULL;
+
+        return pmd;
 }
 
-static int
-move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
-                struct vm_area_struct *new_vma, unsigned long new_addr)
+static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+                unsigned long old_addr, unsigned long old_end,
+                struct vm_area_struct *new_vma, pmd_t *new_pmd,
+                unsigned long new_addr)
 {
         struct address_space *mapping = NULL;
         struct mm_struct *mm = vma->vm_mm;
-        int error = 0;
-        pte_t *src, *dst;
+        pte_t *old_pte, *new_pte, pte;
+        spinlock_t *old_ptl, *new_ptl;
 
         if (vma->vm_file) {
                 /*
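
The two helpers above are deliberately asymmetric: get_old_pmd() bails out at the first missing level, since an unmapped source range has nothing to move, while alloc_new_pmd() populates every missing level down to the pte table, so move_ptes() can assume both pmds exist. A toy userspace analogue of that lookup-versus-allocate split (a minimal sketch, not kernel code; the two-level table and the names get_old/alloc_new are made up):

#include <stdio.h>
#include <stdlib.h>

#define SLOTS 512

struct pte_table { long entry[SLOTS]; };
struct top_table { struct pte_table *slot[SLOTS]; };

/* Lookup side: a missing level just means "nothing mapped here". */
static struct pte_table *get_old(struct top_table *top, unsigned int i)
{
        return top->slot[i];            /* may be NULL */
}

/* Allocation side: fill in the missing level on the way down. */
static struct pte_table *alloc_new(struct top_table *top, unsigned int i)
{
        if (!top->slot[i])
                top->slot[i] = calloc(1, sizeof(struct pte_table));
        return top->slot[i];            /* NULL only if calloc failed */
}

int main(void)
{
        struct top_table top = { { 0 } };

        printf("old: %p\n", (void *)get_old(&top, 3));   /* nil: skip */
        printf("new: %p\n", (void *)alloc_new(&top, 3)); /* allocated */
        free(top.slot[3]);
        return 0;
}
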
@@ -111,74 +87,69 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
                     new_vma->vm_truncate_count != vma->vm_truncate_count)
                         new_vma->vm_truncate_count = 0;
         }
-        spin_lock(&mm->page_table_lock);
 
-        src = get_one_pte_map_nested(mm, old_addr);
-        if (src) {
-                /*
-                 * Look to see whether alloc_one_pte_map needs to perform a
-                 * memory allocation. If it does then we need to drop the
-                 * atomic kmap
-                 */
-                dst = get_one_pte_map(mm, new_addr);
-                if (unlikely(!dst)) {
-                        pte_unmap_nested(src);
-                        if (mapping)
-                                spin_unlock(&mapping->i_mmap_lock);
-                        dst = alloc_one_pte_map(mm, new_addr);
-                        if (mapping && !spin_trylock(&mapping->i_mmap_lock)) {
-                                spin_unlock(&mm->page_table_lock);
-                                spin_lock(&mapping->i_mmap_lock);
-                                spin_lock(&mm->page_table_lock);
-                        }
-                        src = get_one_pte_map_nested(mm, old_addr);
-                }
-                /*
-                 * Since alloc_one_pte_map can drop and re-acquire
-                 * page_table_lock, we should re-check the src entry...
-                 */
-                if (src) {
-                        if (dst) {
-                                pte_t pte;
-                                pte = ptep_clear_flush(vma, old_addr, src);
-
-                                /* ZERO_PAGE can be dependant on virtual addr */
-                                pte = move_pte(pte, new_vma->vm_page_prot,
-                                                        old_addr, new_addr);
-                                set_pte_at(mm, new_addr, dst, pte);
-                        } else
-                                error = -ENOMEM;
-                        pte_unmap_nested(src);
-                }
-                if (dst)
-                        pte_unmap(dst);
+        /*
+         * We don't have to worry about the ordering of src and dst
+         * pte locks because exclusive mmap_sem prevents deadlock.
+         */
+        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
+        new_pte = pte_offset_map_nested(new_pmd, new_addr);
+        new_ptl = pte_lockptr(mm, new_pmd);
+        if (new_ptl != old_ptl)
+                spin_lock(new_ptl);
+
+        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
+                                   new_pte++, new_addr += PAGE_SIZE) {
+                if (pte_none(*old_pte))
+                        continue;
+                pte = ptep_clear_flush(vma, old_addr, old_pte);
+                /* ZERO_PAGE can be dependant on virtual addr */
+                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
+                set_pte_at(mm, new_addr, new_pte, pte);
         }
-        spin_unlock(&mm->page_table_lock);
+
+        if (new_ptl != old_ptl)
+                spin_unlock(new_ptl);
+        pte_unmap_nested(new_pte - 1);
+        pte_unmap_unlock(old_pte - 1, old_ptl);
         if (mapping)
                 spin_unlock(&mapping->i_mmap_lock);
-        return error;
 }
 
+#define LATENCY_LIMIT (64 * PAGE_SIZE)
+
 static unsigned long move_page_tables(struct vm_area_struct *vma,
                 unsigned long old_addr, struct vm_area_struct *new_vma,
                 unsigned long new_addr, unsigned long len)
 {
-        unsigned long offset;
+        unsigned long extent, next, old_end;
+        pmd_t *old_pmd, *new_pmd;
 
-        flush_cache_range(vma, old_addr, old_addr + len);
+        old_end = old_addr + len;
+        flush_cache_range(vma, old_addr, old_end);
 
-        /*
-         * This is not the clever way to do this, but we're taking the
-         * easy way out on the assumption that most remappings will be
-         * only a few pages.. This also makes error recovery easier.
-         */
-        for (offset = 0; offset < len; offset += PAGE_SIZE) {
-                if (move_one_page(vma, old_addr + offset,
-                                new_vma, new_addr + offset) < 0)
-                        break;
+        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                 cond_resched();
+                next = (old_addr + PMD_SIZE) & PMD_MASK;
+                if (next - 1 > old_end)
+                        next = old_end;
+                extent = next - old_addr;
+                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
+                if (!old_pmd)
+                        continue;
+                new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
+                if (!new_pmd)
+                        break;
+                next = (new_addr + PMD_SIZE) & PMD_MASK;
+                if (extent > next - new_addr)
+                        extent = next - new_addr;
+                if (extent > LATENCY_LIMIT)
+                        extent = LATENCY_LIMIT;
+                move_ptes(vma, old_pmd, old_addr, old_addr + extent,
+                                new_vma, new_pmd, new_addr);
         }
-        return offset;
+
+        return len + old_addr - old_end; /* how much done */
 }
 
 static unsigned long move_vma(struct vm_area_struct *vma,
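
The new loop advances in pmd-sized steps: extent is first clamped to the end of the source pmd (or of the region; the "- 1" in the comparison guards against next wrapping to 0 at the top of the address space), then to the end of the destination pmd, and finally capped at LATENCY_LIMIT so the pte locks taken in move_ptes() are never held across too many pages. A standalone sketch of the same clamping arithmetic with concrete numbers (PAGE_SIZE and PMD_SIZE here are illustrative x86-style constants, not taken from the kernel headers):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PMD_SIZE        (512 * PAGE_SIZE)       /* 2 MiB, illustrative */
#define PMD_MASK        (~(PMD_SIZE - 1))
#define LATENCY_LIMIT   (64 * PAGE_SIZE)

int main(void)
{
        unsigned long old_addr = 0x1ff000;      /* 4 KiB below a 2 MiB boundary */
        unsigned long new_addr = 0x400000;      /* exactly on a boundary */
        unsigned long old_end = old_addr + (1024 * PAGE_SIZE);
        unsigned long next, extent;

        /* Step 1: clamp to the end of the source pmd (or of the region). */
        next = (old_addr + PMD_SIZE) & PMD_MASK;
        if (next - 1 > old_end)         /* "- 1" avoids wrap to 0 */
                next = old_end;
        extent = next - old_addr;       /* 0x200000 - 0x1ff000 = one page */

        /* Step 2: clamp to the end of the destination pmd as well. */
        next = (new_addr + PMD_SIZE) & PMD_MASK;
        if (extent > next - new_addr)
                extent = next - new_addr;

        /* Step 3: cap the batch so pte locks are never held too long. */
        if (extent > LATENCY_LIMIT)
                extent = LATENCY_LIMIT;

        printf("extent = %lu bytes (%lu pages)\n", extent, extent / PAGE_SIZE);
        return 0;
}

Note also the changed return value: len + old_addr - old_end reports how many bytes were actually moved, since old_addr has advanced to where the loop stopped.
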
@@ -191,6 +162,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
         unsigned long new_pgoff;
         unsigned long moved_len;
         unsigned long excess = 0;
+        unsigned long hiwater_vm;
         int split = 0;
 
         /*
@@ -229,17 +201,24 @@ static unsigned long move_vma(struct vm_area_struct *vma,
         }
 
         /*
-         * if we failed to move page tables we still do total_vm increment
-         * since do_munmap() will decrement it by old_len == new_len
+         * If we failed to move page tables we still do total_vm increment
+         * since do_munmap() will decrement it by old_len == new_len.
+         *
+         * Since total_vm is about to be raised artificially high for a
+         * moment, we need to restore high watermark afterwards: if stats
+         * are taken meanwhile, total_vm and hiwater_vm appear too high.
+         * If this were a serious issue, we'd add a flag to do_munmap().
         */
+        hiwater_vm = mm->hiwater_vm;
         mm->total_vm += new_len >> PAGE_SHIFT;
-        __vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
+        vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
 
         if (do_munmap(mm, old_addr, old_len) < 0) {
                 /* OOM: unable to split vma, just get accounts right */
                 vm_unacct_memory(excess >> PAGE_SHIFT);
                 excess = 0;
         }
+        mm->hiwater_vm = hiwater_vm;
 
         /* Restore VM_ACCOUNT if one or two pieces of vma left */
         if (excess) {
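
The hiwater_vm save/restore bracketing do_munmap() is a small pattern worth calling out: total_vm is deliberately over-counted for a moment, and restoring the saved watermark afterwards keeps the transient spike out of the reported statistics. A schematic userspace sketch (the field names mirror the kernel's; the harness itself is made up):

#include <stdio.h>

struct mm_counters {
        unsigned long total_vm;         /* pages currently mapped */
        unsigned long hiwater_vm;       /* peak of total_vm, for statistics */
};

static void mremap_move(struct mm_counters *mm, unsigned long pages)
{
        unsigned long hiwater_vm = mm->hiwater_vm;      /* save */

        mm->total_vm += pages;  /* new mapping counted; old one still there */
        /* ... the munmap of the old range would update the watermark ... */
        if (mm->total_vm > mm->hiwater_vm)
                mm->hiwater_vm = mm->total_vm;
        mm->total_vm -= pages;  /* old mapping gone */

        mm->hiwater_vm = hiwater_vm;    /* forget the artificial peak */
}

int main(void)
{
        struct mm_counters mm = { 100, 100 };

        mremap_move(&mm, 100);
        /* prints 100/100; without the restore, hiwater_vm would read 200 */
        printf("total_vm=%lu hiwater_vm=%lu\n", mm.total_vm, mm.hiwater_vm);
        return 0;
}
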
@@ -269,6 +248,7 @@ unsigned long do_mremap(unsigned long addr,
         unsigned long old_len, unsigned long new_len,
         unsigned long flags, unsigned long new_addr)
 {
+        struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma;
         unsigned long ret = -EINVAL;
         unsigned long charged = 0;
@@ -309,7 +289,7 @@ unsigned long do_mremap(unsigned long addr,
                 if ((addr <= new_addr) && (addr+old_len) > new_addr)
                         goto out;
 
-                ret = do_munmap(current->mm, new_addr, new_len);
+                ret = do_munmap(mm, new_addr, new_len);
                 if (ret)
                         goto out;
         }
@@ -320,7 +300,7 @@ unsigned long do_mremap(unsigned long addr,
          * do_munmap does all the needed commit accounting
          */
         if (old_len >= new_len) {
-                ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
+                ret = do_munmap(mm, addr+new_len, old_len - new_len);
                 if (ret && old_len != new_len)
                         goto out;
                 ret = addr;
@@ -333,7 +313,7 @@ unsigned long do_mremap(unsigned long addr,
          * Ok, we need to grow.. or relocate.
          */
         ret = -EFAULT;
-        vma = find_vma(current->mm, addr);
+        vma = find_vma(mm, addr);
         if (!vma || vma->vm_start > addr)
                 goto out;
         if (is_vm_hugetlb_page(vma)) {
@@ -349,14 +329,14 @@ unsigned long do_mremap(unsigned long addr,
         }
         if (vma->vm_flags & VM_LOCKED) {
                 unsigned long locked, lock_limit;
-                locked = current->mm->locked_vm << PAGE_SHIFT;
+                locked = mm->locked_vm << PAGE_SHIFT;
                 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
                 locked += new_len - old_len;
                 ret = -EAGAIN;
                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                         goto out;
         }
-        if (!may_expand_vm(current->mm, (new_len - old_len) >> PAGE_SHIFT)) {
+        if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
                 ret = -ENOMEM;
                 goto out;
         }
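
Note the units in the VM_LOCKED check: locked_vm is kept in pages, so it is shifted into bytes before the byte-sized growth (new_len - old_len) is added and the sum compared against the RLIMIT_MEMLOCK byte limit. A worked example with made-up numbers:

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, illustrative */

int main(void)
{
        unsigned long locked_vm = 100;          /* pages already mlocked */
        unsigned long lock_limit = 1 << 20;     /* 1 MiB rlimit, in bytes */
        unsigned long old_len = 256 << PAGE_SHIFT;
        unsigned long new_len = 300 << PAGE_SHIFT;

        unsigned long locked = locked_vm << PAGE_SHIFT; /* pages -> bytes */
        locked += new_len - old_len;                    /* bytes of growth */

        /* 100 pages + 44 pages = 144 pages = 589824 bytes < 1 MiB: allowed */
        printf("%s\n", locked > lock_limit ? "-EAGAIN" : "ok");
        return 0;
}
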
@@ -383,11 +363,10 @@ unsigned long do_mremap(unsigned long addr,
                         vma_adjust(vma, vma->vm_start,
                                    addr + new_len, vma->vm_pgoff, NULL);
 
-                        current->mm->total_vm += pages;
-                        __vm_stat_account(vma->vm_mm, vma->vm_flags,
-                                                        vma->vm_file, pages);
+                        mm->total_vm += pages;
+                        vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
                         if (vma->vm_flags & VM_LOCKED) {
-                                current->mm->locked_vm += pages;
+                                mm->locked_vm += pages;
                                 make_pages_present(addr + old_len,
                                                    addr + new_len);
                         }
