author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /include/linux/rmap.h
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'include/linux/rmap.h')
-rw-r--r--	include/linux/rmap.h	74
1 file changed, 31 insertions(+), 43 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 31b2fd75dcba..2148b122779b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -7,7 +7,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/memcontrol.h>
 
 /*
@@ -25,20 +25,17 @@
  * pointing to this anon_vma once its vma list is empty.
  */
 struct anon_vma {
-	spinlock_t lock;	/* Serialize access to vma list */
 	struct anon_vma *root;	/* Root of this anon_vma tree */
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
-
+	struct mutex mutex;	/* Serialize access to vma list */
 	/*
-	 * The external_refcount is taken by either KSM or page migration
-	 * to take a reference to an anon_vma when there is no
+	 * The refcount is taken on an anon_vma when there is no
 	 * guarantee that the vma of page tables will exist for
 	 * the duration of the operation. A caller that takes
 	 * the reference is responsible for clearing up the
 	 * anon_vma if they are the last user on release
 	 */
-	atomic_t external_refcount;
-#endif
+	atomic_t refcount;
+
	/*
	 * NOTE: the LSB of the head.next is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
@@ -67,45 +64,22 @@ struct anon_vma_chain {
 	struct vm_area_struct *vma;
 	struct anon_vma *anon_vma;
 	struct list_head same_vma;   /* locked by mmap_sem & page_table_lock */
-	struct list_head same_anon_vma;	/* locked by anon_vma->lock */
+	struct list_head same_anon_vma;	/* locked by anon_vma->mutex */
 };
 
 #ifdef CONFIG_MMU
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
-static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
-{
-	atomic_set(&anon_vma->external_refcount, 0);
-}
-
-static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
-{
-	return atomic_read(&anon_vma->external_refcount);
-}
-
 static inline void get_anon_vma(struct anon_vma *anon_vma)
 {
-	atomic_inc(&anon_vma->external_refcount);
-}
-
-void drop_anon_vma(struct anon_vma *);
-#else
-static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
-{
-}
-
-static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
-{
-	return 0;
+	atomic_inc(&anon_vma->refcount);
 }
 
-static inline void get_anon_vma(struct anon_vma *anon_vma)
-{
-}
+void __put_anon_vma(struct anon_vma *anon_vma);
 
-static inline void drop_anon_vma(struct anon_vma *anon_vma)
+static inline void put_anon_vma(struct anon_vma *anon_vma)
 {
+	if (atomic_dec_and_test(&anon_vma->refcount))
+		__put_anon_vma(anon_vma);
 }
-#endif /* CONFIG_KSM */
 
 static inline struct anon_vma *page_anon_vma(struct page *page)
 {
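
The hunk above collapses the KSM/MIGRATION-only helpers into an unconditional get/put pair: `get_anon_vma()` is a plain atomic increment, while `put_anon_vma()` keeps only the decrement-and-test inline and pushes the freeing work out of line into `__put_anon_vma()`. A minimal userspace model of that lifetime rule, using C11 atomics (the names here are illustrative, not kernel API):

```c
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;		/* plays the role of anon_vma->refcount */
};

static struct obj *obj_alloc(void)
{
	struct obj *o = malloc(sizeof(*o));
	if (o)
		atomic_init(&o->refcount, 1);	/* creator holds one reference */
	return o;
}

static void obj_get(struct obj *o)		/* cf. get_anon_vma() */
{
	atomic_fetch_add(&o->refcount, 1);
}

static void obj_free_slowpath(struct obj *o)	/* cf. __put_anon_vma() */
{
	free(o);				/* teardown stays out of line */
}

static void obj_put(struct obj *o)		/* cf. put_anon_vma() */
{
	/* fetch_sub returning 1 means we just dropped the last reference */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		obj_free_slowpath(o);
}
```

Keeping the slow path out of line is what lets the header drop the `#ifdef` blocks: every config now pays only an atomic op inline, and the actual freeing logic lives in one place in mm/rmap.c.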
@@ -119,24 +93,24 @@ static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		spin_lock(&anon_vma->root->lock);
+		mutex_lock(&anon_vma->root->mutex);
 }
 
 static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		spin_unlock(&anon_vma->root->lock);
+		mutex_unlock(&anon_vma->root->mutex);
 }
 
 static inline void anon_vma_lock(struct anon_vma *anon_vma)
 {
-	spin_lock(&anon_vma->root->lock);
+	mutex_lock(&anon_vma->root->mutex);
 }
 
 static inline void anon_vma_unlock(struct anon_vma *anon_vma)
 {
-	spin_unlock(&anon_vma->root->lock);
+	mutex_unlock(&anon_vma->root->mutex);
 }
 
 /*
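
Note that the spinlock-to-mutex conversion changes the wrappers mechanically but not their shape: all four still funnel through the lock of the tree's *root*, so a single mutex serializes an entire anon_vma hierarchy, and holders may now sleep. A userspace pthreads sketch of the same root-lock pattern (names are illustrative, not kernel API):

```c
#include <pthread.h>

struct node {
	struct node *root;		/* points to itself for the root node */
	pthread_mutex_t mutex;		/* only the root's mutex is ever taken */
};

static void node_lock(struct node *n)		/* cf. anon_vma_lock() */
{
	pthread_mutex_lock(&n->root->mutex);
}

static void node_unlock(struct node *n)		/* cf. anon_vma_unlock() */
{
	pthread_mutex_unlock(&n->root->mutex);
}
```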
@@ -148,7 +122,6 @@ void unlink_anon_vmas(struct vm_area_struct *);
 int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
 int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
 void __anon_vma_link(struct vm_area_struct *);
-void anon_vma_free(struct anon_vma *);
 
 static inline void anon_vma_merge(struct vm_area_struct *vma,
 				  struct vm_area_struct *next)
@@ -157,6 +130,8 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
 	unlink_anon_vmas(next);
 }
 
+struct anon_vma *page_get_anon_vma(struct page *page);
+
 /*
  * rmap interfaces called when adding or removing pte of page
  */
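
The newly declared `page_get_anon_vma()` pairs with the refcount above: it looks up an anonymous page's anon_vma and returns it with the refcount already raised (or NULL), so callers can keep it alive across operations that may block. A hypothetical caller sketch, kernel context assumed (this function is not part of the patch):

```c
/* Hypothetical caller, kernel context assumed; not part of this patch. */
static void example_walk(struct page *page)
{
	struct anon_vma *anon_vma = page_get_anon_vma(page);

	if (!anon_vma)
		return;			/* not anonymous, or already gone */

	anon_vma_lock(anon_vma);	/* may sleep now that this is a mutex */
	/* ... walk the same_anon_vma list here ... */
	anon_vma_unlock(anon_vma);

	put_anon_vma(anon_vma);		/* frees the anon_vma if we were last */
}
```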
@@ -198,6 +173,8 @@ enum ttu_flags {
 };
 #define TTU_ACTION(x)	((x) & TTU_ACTION_MASK)
 
+bool is_vma_temporary_stack(struct vm_area_struct *vma);
+
 int try_to_unmap(struct page *, enum ttu_flags flags);
 int try_to_unmap_one(struct page *, struct vm_area_struct *,
 			unsigned long address, enum ttu_flags flags);
@@ -205,9 +182,20 @@ int try_to_unmap_one(struct page *, struct vm_area_struct *,
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
  */
-pte_t *page_check_address(struct page *, struct mm_struct *,
+pte_t *__page_check_address(struct page *, struct mm_struct *,
 			unsigned long, spinlock_t **, int);
 
+static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+					unsigned long address,
+					spinlock_t **ptlp, int sync)
+{
+	pte_t *ptep;
+
+	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
+						       ptlp, sync));
+	return ptep;
+}
+
 /*
  * Used by swapoff to help locate where page is expected in vma.
  */
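
The `page_check_address()` rework in the last hunk is a sparse annotation idiom: the renamed `__page_check_address()` does the real work, and the new inline wrapper routes the call through `__cond_lock(*ptlp, ...)` so sparse understands that the page table lock is held exactly when a non-NULL pte is returned. A hypothetical caller, kernel context assumed (not part of this patch):

```c
/* Hypothetical caller, kernel context assumed; not part of this patch. */
static int example_is_mapped_at(struct page *page, struct mm_struct *mm,
				unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte = page_check_address(page, mm, address, &ptl, 0);

	if (!pte)
		return 0;		/* no mapping here; no lock held */

	/* *pte may be inspected while the page table lock is held */
	pte_unmap_unlock(pte, ptl);	/* existing kernel helper */
	return 1;
}
```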