author | Hugh Dickins <hughd@google.com> | 2012-10-08 19:29:07 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 03:22:20 -0400
commit | 6597d783397aebb793fb529474cce5089aa4c67f (patch) |
tree | 059d7e4c18c8cec61a71a6c15994867dceb7b93a /mm/mmap.c |
parent | d741c9cdeee6a569dae0dbbaf028065402955b59 (diff) |
mm/mmap.c: replace find_vma_prepare() with clearer find_vma_links()
People get confused by find_vma_prepare(), because it does not care what it
leaves in its output args when its callers will not be interested in them.
Clarify by passing in the end-of-range address too, and returning failure if
any existing vma overlaps the new range, instead of returning an ambiguous
vma which most callers then have to check. find_vma_links() is a clearer
name.
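
To make the difference concrete, here is the caller-side shape of the change,
condensed from the insert_vm_struct() hunk in the diff below (a sketch only;
it assumes the usual kernel mm context and is not a standalone build):

```c
/* Before: find_vma_prepare() hands back a vma the caller must interpret. */
__vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
if (__vma && __vma->vm_start < vma->vm_end)
        return -ENOMEM;         /* an existing vma overlaps the new range */

/* After: find_vma_links() takes the end address and does the check itself. */
if (find_vma_links(mm, vma->vm_start, vma->vm_end,
                   &prev, &rb_link, &rb_parent))
        return -ENOMEM;         /* failure already means "overlap found" */
```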
This does revert 2.6.27's dfe195fb79e88 ("mm: fix uninitialized variables
for find_vma_prepare callers"), but it looks like gcc 4.3.0 was one of
those releases too eager to shout about uninitialized variables: only
copy_vma() warns with 4.5.1 and 4.7.1, which a BUG on error silences.
[hughd@google.com: fix warning, remove BUG()]
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 45 |
1 file changed, 21 insertions(+), 24 deletions(-)
@@ -353,17 +353,14 @@ void validate_mm(struct mm_struct *mm)
 #define validate_mm(mm) do { } while (0)
 #endif
 
-static struct vm_area_struct *
-find_vma_prepare(struct mm_struct *mm, unsigned long addr,
-                struct vm_area_struct **pprev, struct rb_node ***rb_link,
-                struct rb_node ** rb_parent)
+static int find_vma_links(struct mm_struct *mm, unsigned long addr,
+                unsigned long end, struct vm_area_struct **pprev,
+                struct rb_node ***rb_link, struct rb_node **rb_parent)
 {
-        struct vm_area_struct * vma;
-        struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
+        struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 
         __rb_link = &mm->mm_rb.rb_node;
         rb_prev = __rb_parent = NULL;
-        vma = NULL;
 
         while (*__rb_link) {
                 struct vm_area_struct *vma_tmp;
@@ -372,9 +369,9 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
                 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
 
                 if (vma_tmp->vm_end > addr) {
-                        vma = vma_tmp;
-                        if (vma_tmp->vm_start <= addr)
-                                break;
+                        /* Fail if an existing vma overlaps the area */
+                        if (vma_tmp->vm_start < end)
+                                return -ENOMEM;
                         __rb_link = &__rb_parent->rb_left;
                 } else {
                         rb_prev = __rb_parent;
@@ -387,7 +384,7 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
                 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
         *rb_link = __rb_link;
         *rb_parent = __rb_parent;
-        return vma;
+        return 0;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -456,11 +453,12 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-        struct vm_area_struct *__vma, *prev;
+        struct vm_area_struct *prev;
         struct rb_node **rb_link, *rb_parent;
 
-        __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-        BUG_ON(__vma && __vma->vm_start < vma->vm_end);
+        if (find_vma_links(mm, vma->vm_start, vma->vm_end,
+                           &prev, &rb_link, &rb_parent))
+                BUG();
         __vma_link(mm, vma, prev, rb_link, rb_parent);
         mm->map_count++;
 }
@@ -1221,8 +1219,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
         /* Clear old maps */
         error = -ENOMEM;
 munmap_back:
-        vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-        if (vma && vma->vm_start < addr + len) {
+        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
                 if (do_munmap(mm, addr, len))
                         return -ENOMEM;
                 goto munmap_back;
@@ -2183,8 +2180,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
          * Clear old maps. this also does some error checking for us
          */
 munmap_back:
-        vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-        if (vma && vma->vm_start < addr + len) {
+        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
                 if (do_munmap(mm, addr, len))
                         return -ENOMEM;
                 goto munmap_back;
@@ -2298,10 +2294,10 @@ void exit_mmap(struct mm_struct *mm)
  * and into the inode's i_mmap tree. If vm_file is non-NULL
  * then i_mmap_mutex is taken here.
  */
-int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-        struct vm_area_struct * __vma, * prev;
-        struct rb_node ** rb_link, * rb_parent;
+        struct vm_area_struct *prev;
+        struct rb_node **rb_link, *rb_parent;
 
         /*
          * The vm_pgoff of a purely anonymous vma should be irrelevant
@@ -2319,8 +2315,8 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
                 BUG_ON(vma->anon_vma);
                 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
         }
-        __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
-        if (__vma && __vma->vm_start < vma->vm_end)
+        if (find_vma_links(mm, vma->vm_start, vma->vm_end,
+                           &prev, &rb_link, &rb_parent))
                 return -ENOMEM;
         if ((vma->vm_flags & VM_ACCOUNT) &&
             security_vm_enough_memory_mm(mm, vma_pages(vma)))
@@ -2354,7 +2350,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                 faulted_in_anon_vma = false;
         }
 
-        find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
+                return NULL;    /* should never get here */
         new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
                 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
         if (new_vma) {
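
As a usage note, the mmap_region() and do_brk() hunks above drive their retry
loop off the new return value: a non-zero result from find_vma_links() now
means an old mapping overlaps the requested range, so the range is unmapped
and the lookup repeated until the slot is free. A condensed sketch of that
pattern (kernel context assumed, not a standalone build):

```c
munmap_back:
        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
                /* Something already occupies [addr, addr + len): clear it... */
                if (do_munmap(mm, addr, len))
                        return -ENOMEM;
                /* ...and look up the insertion point again. */
                goto munmap_back;
        }
```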