path: root/include
author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-05-24 20:12:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:19 -0400
commit		2b575eb64f7a9c701fb4bfdb12388ac547f6c2b6 (patch)
tree		965739cbf570567a26f1512ae9a9fe35ce1afbed /include
parent		746b18d421da7f27e948e8af1ad82b6d0309324d (diff)
mm: convert anon_vma->lock to a mutex
Straightforward conversion of anon_vma->lock to a mutex.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/huge_mm.h		8
-rw-r--r--	include/linux/mmu_notifier.h	2
-rw-r--r--	include/linux/rmap.h		14
3 files changed, 10 insertions, 14 deletions
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 8847c8c29791..48c32ebf65a7 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -92,12 +92,8 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
 #define wait_split_huge_page(__anon_vma, __pmd)			\
 	do {								\
 		pmd_t *____pmd = (__pmd);				\
-		spin_unlock_wait(&(__anon_vma)->root->lock);		\
-		/*							\
-		 * spin_unlock_wait() is just a loop in C and so the	\
-		 * CPU can reorder anything around it.			\
-		 */							\
-		smp_mb();						\
+		anon_vma_lock(__anon_vma);				\
+		anon_vma_unlock(__anon_vma);				\
 		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
 		       pmd_trans_huge(*____pmd));			\
 	} while (0)
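The hunk above replaces spin_unlock_wait() plus an open-coded smp_mb() with a plain lock/unlock pair: acquiring and immediately releasing the (now sleeping) lock waits for any splitter that already holds it, and the mutex's own ordering guarantees make the explicit barrier unnecessary. A minimal userspace sketch of that wait-by-lock-then-unlock idiom, using POSIX threads; all names here are invented for illustration and this is not kernel code:

/*
 * Build with: cc -pthread wait_by_lock.c
 * One thread "splits" while holding the lock; another waits for the split
 * to finish simply by taking and dropping the same lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int splitting;		/* stands in for pmd_trans_splitting() */

static void *splitter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&tree_lock);
	atomic_store(&splitting, 1);
	usleep(100 * 1000);		/* pretend the split takes a while */
	atomic_store(&splitting, 0);
	pthread_mutex_unlock(&tree_lock);
	return NULL;
}

/* The new wait_split_huge_page() shape: lock + unlock, then check. */
static void wait_for_split(void)
{
	pthread_mutex_lock(&tree_lock);
	pthread_mutex_unlock(&tree_lock);
	/* Any splitter that held tree_lock before us has finished by now. */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, splitter, NULL);
	usleep(10 * 1000);		/* give the splitter time to take the lock */
	wait_for_split();
	/* With the sleeps above this normally prints 0. */
	printf("splitting after wait: %d\n", atomic_load(&splitting));
	pthread_join(t, NULL);
	return 0;
}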
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index a877dfc243eb..1d1b1e13f79f 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -150,7 +150,7 @@ struct mmu_notifier_ops {
 	 * Therefore notifier chains can only be traversed when either
 	 *
 	 * 1. mmap_sem is held.
-	 * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->lock).
+	 * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex).
 	 * 3. No other concurrent thread can access the list (release)
 	 */
 struct mmu_notifier {
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 590c291a8cd9..2148b122779b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -7,7 +7,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/memcontrol.h>
 
 /*
@@ -26,7 +26,7 @@
  */
 struct anon_vma {
 	struct anon_vma *root;	/* Root of this anon_vma tree */
-	spinlock_t lock;	/* Serialize access to vma list */
+	struct mutex mutex;	/* Serialize access to vma list */
 	/*
 	 * The refcount is taken on an anon_vma when there is no
 	 * guarantee that the vma of page tables will exist for
@@ -64,7 +64,7 @@ struct anon_vma_chain {
 	struct vm_area_struct *vma;
 	struct anon_vma *anon_vma;
 	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
-	struct list_head same_anon_vma;	/* locked by anon_vma->lock */
+	struct list_head same_anon_vma;	/* locked by anon_vma->mutex */
 };
 
 #ifdef CONFIG_MMU
@@ -93,24 +93,24 @@ static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		spin_lock(&anon_vma->root->lock);
+		mutex_lock(&anon_vma->root->mutex);
 }
 
 static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		spin_unlock(&anon_vma->root->lock);
+		mutex_unlock(&anon_vma->root->mutex);
 }
 
 static inline void anon_vma_lock(struct anon_vma *anon_vma)
 {
-	spin_lock(&anon_vma->root->lock);
+	mutex_lock(&anon_vma->root->mutex);
 }
 
 static inline void anon_vma_unlock(struct anon_vma *anon_vma)
 {
-	spin_unlock(&anon_vma->root->lock);
+	mutex_unlock(&anon_vma->root->mutex);
 }
 
 /*
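A detail worth noting in the helpers above: the lock is always taken through anon_vma->root, so every anon_vma in a tree serializes on the root's single mutex. The initialization side of the conversion is not shown here (the diffstat is limited to include/). A minimal userspace sketch of that shared-root-lock shape, again with POSIX threads; every name below is invented for illustration:

/* Build with: cc -pthread root_lock.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct anon_vma: children defer to the root's mutex. */
struct toy_anon_vma {
	struct toy_anon_vma *root;	/* root of this anon_vma tree */
	pthread_mutex_t mutex;		/* only the root's mutex is ever taken */
};

static struct toy_anon_vma *toy_anon_vma_alloc(struct toy_anon_vma *root)
{
	struct toy_anon_vma *av = calloc(1, sizeof(*av));

	if (!av)
		return NULL;
	av->root = root ? root : av;		/* a root points at itself */
	if (av->root == av)
		pthread_mutex_init(&av->mutex, NULL);
	return av;
}

/* Mirrors anon_vma_lock()/anon_vma_unlock(): always go through ->root. */
static void toy_anon_vma_lock(struct toy_anon_vma *av)
{
	pthread_mutex_lock(&av->root->mutex);
}

static void toy_anon_vma_unlock(struct toy_anon_vma *av)
{
	pthread_mutex_unlock(&av->root->mutex);
}

int main(void)
{
	struct toy_anon_vma *root = toy_anon_vma_alloc(NULL);
	struct toy_anon_vma *child = toy_anon_vma_alloc(root);
	int ret;

	/* Locking the child and locking the root hit the same mutex,
	 * so the whole tree is serialized by one lock. */
	toy_anon_vma_lock(child);
	ret = pthread_mutex_trylock(&root->mutex);
	printf("root trylock while child is locked: %s\n",
	       ret == 0 ? "got it (unexpected)" : "busy, as expected");
	if (ret == 0)
		pthread_mutex_unlock(&root->mutex);
	toy_anon_vma_unlock(child);

	free(child);
	free(root);
	return 0;
}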