author     Linus Torvalds <torvalds@linux-foundation.org>   2010-08-20 19:24:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-08-21 11:49:21 -0400
commit     297c5eee372478fc32fec5fe8eed711eedb13f3d
tree       18415eae0833ff4767943f985900524d6b1d73f1
parent     36423a5ed5e4ea95ceedb68fad52965033e11639
mm: make the vma list be doubly linked
It's a really simple list, and several of the users want to go backwards in it to find the previous vma.  So rather than have to look up the previous entry with 'find_vma_prev()' or something similar, just make it doubly linked instead.

Tested-by: Ian Campbell <ijc@hellion.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/mm_types.h |  2
-rw-r--r--  kernel/fork.c            |  7
-rw-r--r--  mm/mmap.c                | 21
-rw-r--r--  mm/nommu.c               |  7
4 files changed, 28 insertions, 9 deletions
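The motivation is easiest to see outside the kernel tree. The sketch below is a simplified userspace model, not the kernel code itself: the struct names mirror the patch, but 'struct vma', 'struct mm' and 'prev_by_walking()' are illustrative stand-ins. With only vm_next, finding the VMA that precedes a given one means re-walking the list from the head (roughly what callers of find_vma_prev() had to pay for); with vm_prev it is a single pointer dereference.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (illustrative only). */
struct vma {
        unsigned long start;
        struct vma *vm_next;
        struct vma *vm_prev;    /* back pointer added by this patch */
};

struct mm {
        struct vma *mmap;       /* head of the address-ordered VMA list */
};

/*
 * Old way: no back pointer, so walk from the head until we reach the
 * target and remember the entry seen just before it.
 */
static struct vma *prev_by_walking(struct mm *mm, struct vma *target)
{
        struct vma *prev = NULL;

        for (struct vma *v = mm->mmap; v && v != target; v = v->vm_next)
                prev = v;
        return prev;
}

int main(void)
{
        struct vma a = { .start = 0x1000 };
        struct vma b = { .start = 0x2000 };
        struct vma c = { .start = 0x3000 };
        struct mm mm = { .mmap = &a };

        /* Link the list both ways, as the patched kernel code does. */
        a.vm_next = &b; b.vm_prev = &a;
        b.vm_next = &c; c.vm_prev = &b;

        /* New way: one dereference instead of a list walk. */
        assert(c.vm_prev == prev_by_walking(&mm, &c));
        printf("prev of %#lx is %#lx\n", c.start, c.vm_prev->start);
        return 0;
}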
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index b8bb9a6a1f37..ee7e258627f9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -134,7 +134,7 @@ struct vm_area_struct {
                                           within vm_mm. */
 
        /* linked list of VM areas per task, sorted by address */
-       struct vm_area_struct *vm_next;
+       struct vm_area_struct *vm_next, *vm_prev;
 
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */
diff --git a/kernel/fork.c b/kernel/fork.c
index 856eac3ec52e..b7e9d60a675d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -300,7 +300,7 @@ out:
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-       struct vm_area_struct *mpnt, *tmp, **pprev;
+       struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
        if (retval)
                goto out;
 
+       prev = NULL;
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;
 
@@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                        goto fail_nomem_anon_vma_fork;
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
-               tmp->vm_next = NULL;
+               tmp->vm_next = tmp->vm_prev = NULL;
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file->f_path.dentry->d_inode;
@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;
+               tmp->vm_prev = prev;
+               prev = tmp;
 
                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
diff --git a/mm/mmap.c b/mm/mmap.c
index 31003338b978..331e51af38c9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -388,17 +388,23 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+       struct vm_area_struct *next;
+
+       vma->vm_prev = prev;
        if (prev) {
-               vma->vm_next = prev->vm_next;
+               next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
-                       vma->vm_next = rb_entry(rb_parent,
+                       next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
-                       vma->vm_next = NULL;
+                       next = NULL;
        }
+       vma->vm_next = next;
+       if (next)
+               next->vm_prev = vma;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -483,7 +489,11 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
 {
-       prev->vm_next = vma->vm_next;
+       struct vm_area_struct *next = vma->vm_next;
+
+       prev->vm_next = next;
+       if (next)
+               next->vm_prev = prev;
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
@@ -1915,6 +1925,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long addr;
 
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+       vma->vm_prev = NULL;
        do {
                rb_erase(&vma->vm_rb, &mm->mm_rb);
                mm->map_count--;
@@ -1922,6 +1933,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
        *insertion_point = vma;
+       if (vma)
+               vma->vm_prev = prev;
        tail_vma->vm_next = NULL;
        if (mm->unmap_area == arch_unmap_area)
                addr = prev ? prev->vm_end : mm->mmap_base;
diff --git a/mm/nommu.c b/mm/nommu.c
index efa9a380335e..88ff091eb07a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -604,7 +604,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct *pvma, **pp;
+       struct vm_area_struct *pvma, **pp, *next;
        struct address_space *mapping;
        struct rb_node **p, *parent;
 
@@ -664,8 +664,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
                        break;
        }
 
-       vma->vm_next = *pp;
+       next = *pp;
        *pp = vma;
+       vma->vm_next = next;
+       if (next)
+               next->vm_prev = vma;
 }
 
 /*