path: root/include/linux
author	Rik van Riel <riel@redhat.com>	2010-08-09 20:18:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-09 23:44:55 -0400
commit	012f18004da33ba672e3c60838cc4898126174d3 (patch)
tree	990382f9f8c0d885463ac9195b8e9a18043f716d /include/linux
parent	5c341ee1dfc8fe69d66b1c8b19e463c6d7201ae1 (diff)
mm: always lock the root (oldest) anon_vma
Always (and only) lock the root (oldest) anon_vma whenever we do something in an anon_vma. The recently introduced anon_vma scalability is due to the rmap code scanning only the VMAs that need to be scanned. Many common operations still took the anon_vma lock on the root anon_vma, so always taking that lock is not expected to introduce any scalability issues.

However, always taking the same lock does mean we only need to take one lock, which means rmap_walk on pages from any anon_vma in the vma is excluded from occurring during an munmap, expand_stack or other operation that needs to exclude rmap_walk and similar functions.

Also add the proper locking to vma_adjust.

Signed-off-by: Rik van Riel <riel@redhat.com>
Tested-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
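As a rough illustration (not part of this patch), the sketch below shows how a caller would use the vma_lock_anon_vma()/vma_unlock_anon_vma() helpers changed here to exclude rmap walks; the function name and the specific field update are hypothetical, only the helpers and their root-lock behaviour come from the diff:

/*
 * Illustrative sketch only: a caller serializing a VMA update against
 * rmap walks. Because vma_lock_anon_vma() now takes
 * anon_vma->root->lock, this excludes rmap_walk() on pages belonging
 * to any anon_vma sharing the same root, not just vma->anon_vma.
 */
static void example_shrink_vma(struct vm_area_struct *vma,
			       unsigned long new_end)
{
	vma_lock_anon_vma(vma);		/* takes the root anon_vma lock */
	vma->vm_end = new_end;		/* update the VMA under that lock */
	vma_unlock_anon_vma(vma);	/* release the root lock */
}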
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/rmap.h	8
1 file changed, 4 insertions, 4 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 41fa6ddc6214..af43cb9a0506 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -104,24 +104,24 @@ static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		spin_lock(&anon_vma->lock);
+		spin_lock(&anon_vma->root->lock);
 }
 
 static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		spin_unlock(&anon_vma->lock);
+		spin_unlock(&anon_vma->root->lock);
 }
 
 static inline void anon_vma_lock(struct anon_vma *anon_vma)
 {
-	spin_lock(&anon_vma->lock);
+	spin_lock(&anon_vma->root->lock);
 }
 
 static inline void anon_vma_unlock(struct anon_vma *anon_vma)
 {
-	spin_unlock(&anon_vma->lock);
+	spin_unlock(&anon_vma->root->lock);
 }
 
 /*