author		Namhyung Kim <namhyung@gmail.com>	2011-05-24 20:11:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:05 -0400
commit		6038def0d11b322019d0dbb43f2a611247dfbdb6 (patch)
tree		3b7bd8a20af5749566bfba9ca39a3d0c2cc25e0a /mm
parent		ac3bbec5ec69b973317677e038de2d1a0c90c18c (diff)
mm: nommu: sort mm->mmap list properly
When I was reading nommu code, I found that it handles the vma list/tree
in an unusual way.  IIUC, because there can be more than one
identical/overlapped vma in the list/tree, it sorts the tree more
strictly and does a linear search on the tree.  But the same is not
applied to the list (i.e. the list could be constructed in a different
order than the tree, so we can't use the list when finding the first
vma in that order).
Since inserting/sorting a vma into the tree and into the list is done at
the same time, we can easily construct both of them in the same order.
And since a linear search on the tree could be more costly than one on
the list, the tree traversals can be converted to use the list, as the
sketch below illustrates.
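
As a rough sketch of that idea (a standalone userspace model, not the
kernel's code; the struct layout and the find_vma_list() name are
hypothetical), an in-order lookup over a list that mirrors the tree's
sort order is a plain pointer chase:

  /*
   * Userspace model only -- simplified types, not the kernel's.
   * With vm_next/vm_prev kept in the same order as the rb-tree,
   * an in-order search never needs to touch the tree itself.
   */
  #include <stdio.h>

  struct vma {
  	unsigned long vm_start, vm_end;
  	struct vma *vm_next, *vm_prev;	/* doubly linked, kept sorted */
  };

  /* First vma whose range ends beyond addr: a linear list walk. */
  static struct vma *find_vma_list(struct vma *mmap, unsigned long addr)
  {
  	struct vma *vma;

  	for (vma = mmap; vma; vma = vma->vm_next)
  		if (vma->vm_end > addr)
  			return vma;
  	return NULL;
  }

  int main(void)
  {
  	struct vma b = { 0x2000, 0x3000, NULL, NULL };
  	struct vma a = { 0x1000, 0x2000, &b, NULL };
  	struct vma *hit;

  	b.vm_prev = &a;
  	hit = find_vma_list(&a, 0x2800);
  	printf("found [%#lx, %#lx)\n", hit->vm_start, hit->vm_end);
  	return 0;
  }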
Also, after commit 297c5eee3724 ("mm: make the vma list be doubly
linked") made the list doubly linked, a couple of places in the code
needed to be fixed to construct the list properly.
Patch 1/6 is a preparation.  It keeps the list sorted the same way as
the tree and constructs the doubly-linked list properly.  Patch 2/6 is a
simple optimization for vma deletion.  Patches 3/6 and 4/6 convert tree
traversal to list traversal, and the rest are simple fixes and cleanups.
This patch:
A @vma added into @mm should be sorted by start addr, end addr and VMA
struct addr, in that order, because we may get identical VMAs in @mm.
However, this was true only for the rbtree, not for the list.
This patch fixes this by remembering 'rb_prev' during the tree traversal,
like find_vma_prepare() does, and linking the @vma via __vma_link_list().
After this patch, we can iterate over all the VMAs in the correct order
simply by walking the @mm->mmap list; the model below shows the
resulting total order.
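
To make the ordering concrete, here is a hedged userspace model
(hypothetical names; the list-head handling is simplified compared to
__vma_link_list(), which derives the head's successor from the rb-tree
parent):

  /*
   * Userspace model only.  VMAs compare by vm_start, then vm_end, then
   * by the struct's own address, giving identical ranges a stable,
   * total order -- the same three-key order the rbtree insertion uses.
   */
  #include <stdio.h>

  struct vma {
  	unsigned long vm_start, vm_end;
  	struct vma *vm_next, *vm_prev;
  };

  /* Three-key comparison: start addr, end addr, struct addr.
   * The pointer comparison mirrors the kernel's 'vma < pvma' tie-break. */
  static int vma_cmp(const struct vma *a, const struct vma *b)
  {
  	if (a->vm_start != b->vm_start)
  		return a->vm_start < b->vm_start ? -1 : 1;
  	if (a->vm_end != b->vm_end)
  		return a->vm_end < b->vm_end ? -1 : 1;
  	return a == b ? 0 : (a < b ? -1 : 1);
  }

  /* Link vma after prev; a NULL prev makes vma the new list head. */
  static void link_list(struct vma **mmap, struct vma *vma, struct vma *prev)
  {
  	struct vma *next = prev ? prev->vm_next : *mmap;

  	vma->vm_prev = prev;
  	vma->vm_next = next;
  	if (prev)
  		prev->vm_next = vma;
  	else
  		*mmap = vma;
  	if (next)
  		next->vm_prev = vma;
  }

  int main(void)
  {
  	struct vma *mmap = NULL, *v;
  	struct vma a = { 0x1000, 0x2000 }, b = { 0x1000, 0x2000 };
  	struct vma *lo = vma_cmp(&a, &b) < 0 ? &a : &b;
  	struct vma *hi = (lo == &a) ? &b : &a;

  	link_list(&mmap, lo, NULL);	/* list head */
  	link_list(&mmap, hi, lo);	/* identical range; struct addr breaks tie */

  	for (v = mmap; v; v = v->vm_next)
  		printf("[%#lx, %#lx) at %p\n", v->vm_start, v->vm_end, (void *)v);
  	return 0;
  }

Inserting 'lo' and then 'hi' this way is what remembering rb_prev during
the tree walk buys: the list predecessor is already known when the vma
is linked, so no separate list scan is needed.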
[akpm@linux-foundation.org: avoid duplicating __vma_link_list()]
Signed-off-by: Namhyung Kim <namhyung@gmail.com>
Acked-by: Greg Ungerer <gerg@uclinux.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/internal.h	 4
-rw-r--r--	mm/mmap.c	23
-rw-r--r--	mm/nommu.c	38
-rw-r--r--	mm/util.c	24
4 files changed, 44 insertions, 45 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 9d0ced8e505e..d071d380fb49 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -66,6 +66,10 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
+/* mm/util.c */
+void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
+		struct vm_area_struct *prev, struct rb_node *rb_parent);
+
 #ifdef CONFIG_MMU
 extern long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -398,29 +398,6 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
 	return vma;
 }
 
-static inline void
-__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
-		struct vm_area_struct *prev, struct rb_node *rb_parent)
-{
-	struct vm_area_struct *next;
-
-	vma->vm_prev = prev;
-	if (prev) {
-		next = prev->vm_next;
-		prev->vm_next = vma;
-	} else {
-		mm->mmap = vma;
-		if (rb_parent)
-			next = rb_entry(rb_parent,
-					struct vm_area_struct, vm_rb);
-		else
-			next = NULL;
-	}
-	vma->vm_next = next;
-	if (next)
-		next->vm_prev = vma;
-}
-
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct rb_node **rb_link, struct rb_node *rb_parent)
 {
diff --git a/mm/nommu.c b/mm/nommu.c
index 5afce48ce339..0b16cb4c517b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -680,9 +680,9 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	struct vm_area_struct *pvma, **pp, *next;
+	struct vm_area_struct *pvma, *prev;
 	struct address_space *mapping;
-	struct rb_node **p, *parent;
+	struct rb_node **p, *parent, *rb_prev;
 
 	kenter(",%p", vma);
 
@@ -703,7 +703,7 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	}
 
 	/* add the VMA to the tree */
-	parent = NULL;
+	parent = rb_prev = NULL;
 	p = &mm->mm_rb.rb_node;
 	while (*p) {
 		parent = *p;
@@ -713,17 +713,20 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 		 * (the latter is necessary as we may get identical VMAs) */
 		if (vma->vm_start < pvma->vm_start)
 			p = &(*p)->rb_left;
-		else if (vma->vm_start > pvma->vm_start)
+		else if (vma->vm_start > pvma->vm_start) {
+			rb_prev = parent;
 			p = &(*p)->rb_right;
-		else if (vma->vm_end < pvma->vm_end)
+		} else if (vma->vm_end < pvma->vm_end)
 			p = &(*p)->rb_left;
-		else if (vma->vm_end > pvma->vm_end)
+		else if (vma->vm_end > pvma->vm_end) {
+			rb_prev = parent;
 			p = &(*p)->rb_right;
-		else if (vma < pvma)
+		} else if (vma < pvma)
 			p = &(*p)->rb_left;
-		else if (vma > pvma)
+		else if (vma > pvma) {
+			rb_prev = parent;
 			p = &(*p)->rb_right;
-		else
+		} else
 			BUG();
 	}
 
@@ -731,20 +734,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 
 	/* add VMA to the VMA list also */
-	for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
-		if (pvma->vm_start > vma->vm_start)
-			break;
-		if (pvma->vm_start < vma->vm_start)
-			continue;
-		if (pvma->vm_end < vma->vm_end)
-			break;
-	}
+	prev = NULL;
+	if (rb_prev)
+		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 
-	next = *pp;
-	*pp = vma;
-	vma->vm_next = next;
-	if (next)
-		next->vm_prev = vma;
+	__vma_link_list(mm, vma, prev, parent);
 }
 
 /*
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -6,6 +6,8 @@
 #include <linux/sched.h>
 #include <asm/uaccess.h>
 
+#include "internal.h"
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/kmem.h>
 
@@ -215,6 +217,28 @@ char *strndup_user(const char __user *s, long n)
 }
 EXPORT_SYMBOL(strndup_user);
 
+void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
+		struct vm_area_struct *prev, struct rb_node *rb_parent)
+{
+	struct vm_area_struct *next;
+
+	vma->vm_prev = prev;
+	if (prev) {
+		next = prev->vm_next;
+		prev->vm_next = vma;
+	} else {
+		mm->mmap = vma;
+		if (rb_parent)
+			next = rb_entry(rb_parent,
+					struct vm_area_struct, vm_rb);
+		else
+			next = NULL;
+	}
+	vma->vm_next = next;
+	if (next)
+		next->vm_prev = vma;
+}
+
 #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {