author     Jiri Kosina <jkosina@suse.cz>  2011-07-11 08:15:48 -0400
committer  Jiri Kosina <jkosina@suse.cz>  2011-07-11 08:15:55 -0400
commit     b7e9c223be8ce335e30f2cf6ba588e6a4092275c (patch)
tree       2d1e3b75606abc18df7ad65e51ac3f90cd68b38d /mm/rmap.c
parent     c172d82500a6cf3c32d1e650722a1055d72ce858 (diff)
parent     e3bbfa78bab125f58b831b5f7f45b5a305091d72 (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply pending patches that
are based on newer code already present upstream.
Diffstat (limited to 'mm/rmap.c')
 -rw-r--r--  mm/rmap.c  111
 1 file changed, 77 insertions(+), 34 deletions(-)
@@ -38,9 +38,8 @@
  * in arch-dependent flush_dcache_mmap_lock,
  * within inode_wb_list_lock in __sync_single_inode)
  *
- * (code doesn't rely on that order so it could be switched around)
+ * anon_vma->mutex,mapping->i_mutex (memory_failure, collect_procs_anon)
  * ->tasklist_lock
- * anon_vma->mutex (memory_failure, collect_procs_anon)
  * pte map lock
  */
 
@@ -112,9 +111,9 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
         kmem_cache_free(anon_vma_cachep, anon_vma);
 }
 
-static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
+static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
 {
-        return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
+        return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
 }
 
 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
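The hunk above turns anon_vma_chain_alloc() into a gfp-parameterized wrapper, so each caller picks the allocation mode its context allows instead of the helper hard-coding GFP_KERNEL. A minimal sketch of that idiom, using hypothetical names (cache_alloc, entry_alloc) rather than the kernel's slab API:

/* Hypothetical slab-style wrapper: gfp_t and the flag names mirror the
 * kernel's, and cache_alloc() stands in for kmem_cache_alloc(). */
typedef unsigned int gfp_t;
#define GFP_KERNEL  0x1u        /* allocation may sleep to reclaim memory */
#define GFP_NOWAIT  0x2u        /* allocation must fail fast, never sleep */

struct entry;
extern struct entry *cache_alloc(gfp_t gfp);

/* The wrapper no longer hard-codes a policy... */
static inline struct entry *entry_alloc(gfp_t gfp)
{
        return cache_alloc(gfp);
}

/* ...so a sleeping context and a no-sleep context can share it. */
struct entry *from_syscall(void) { return entry_alloc(GFP_KERNEL); }
struct entry *while_locked(void) { return entry_alloc(GFP_NOWAIT); }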
@@ -159,7 +158,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                 struct mm_struct *mm = vma->vm_mm;
                 struct anon_vma *allocated;
 
-                avc = anon_vma_chain_alloc();
+                avc = anon_vma_chain_alloc(GFP_KERNEL);
                 if (!avc)
                         goto out_enomem;
 
@@ -200,6 +199,32 @@ int anon_vma_prepare(struct vm_area_struct *vma)
         return -ENOMEM;
 }
 
+/*
+ * This is a useful helper function for locking the anon_vma root as
+ * we traverse the vma->anon_vma_chain, looping over anon_vma's that
+ * have the same vma.
+ *
+ * Such anon_vma's should have the same root, so you'd expect to see
+ * just a single mutex_lock for the whole traversal.
+ */
+static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
+{
+        struct anon_vma *new_root = anon_vma->root;
+        if (new_root != root) {
+                if (WARN_ON_ONCE(root))
+                        mutex_unlock(&root->mutex);
+                root = new_root;
+                mutex_lock(&root->mutex);
+        }
+        return root;
+}
+
+static inline void unlock_anon_vma_root(struct anon_vma *root)
+{
+        if (root)
+                mutex_unlock(&root->mutex);
+}
+
 static void anon_vma_chain_link(struct vm_area_struct *vma,
                                 struct anon_vma_chain *avc,
                                 struct anon_vma *anon_vma)
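lock_anon_vma_root() exists so a traversal over anon_vmas that share one root takes the root mutex once for the whole walk instead of once per element; WARN_ON_ONCE fires if a second root ever shows up mid-traversal. A standalone sketch of the same batched-locking idiom, using pthreads and hypothetical item/root types in place of the anon_vma structures:

#include <pthread.h>
#include <stddef.h>

/* Hypothetical types: every item hangs off a shared "root" lock, and
 * consecutive items on the list almost always share the same root. */
struct root { pthread_mutex_t mutex; };
struct item { struct root *root; struct item *next; };

/* Same shape as lock_anon_vma_root(): keep the current root locked and
 * only switch mutexes when an item's root actually differs. */
static struct root *lock_root(struct root *held, struct item *it)
{
        if (it->root != held) {
                if (held)                       /* rare: root changed mid-walk */
                        pthread_mutex_unlock(&held->mutex);
                held = it->root;
                pthread_mutex_lock(&held->mutex);
        }
        return held;
}

static void walk(struct item *head)
{
        struct root *held = NULL;

        for (struct item *it = head; it; it = it->next) {
                held = lock_root(held, it);
                /* ... mutate it under its root's mutex ... */
        }
        if (held)                               /* one unlock per traversal */
                pthread_mutex_unlock(&held->mutex);
}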
@@ -208,13 +233,11 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
         avc->anon_vma = anon_vma;
         list_add(&avc->same_vma, &vma->anon_vma_chain);
 
-        anon_vma_lock(anon_vma);
         /*
          * It's critical to add new vmas to the tail of the anon_vma,
          * see comment in huge_memory.c:__split_huge_page().
          */
         list_add_tail(&avc->same_anon_vma, &anon_vma->head);
-        anon_vma_unlock(anon_vma);
 }
 
 /*
@@ -224,13 +247,24 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
         struct anon_vma_chain *avc, *pavc;
+        struct anon_vma *root = NULL;
 
         list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
-                avc = anon_vma_chain_alloc();
-                if (!avc)
-                        goto enomem_failure;
-                anon_vma_chain_link(dst, avc, pavc->anon_vma);
+                struct anon_vma *anon_vma;
+
+                avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
+                if (unlikely(!avc)) {
+                        unlock_anon_vma_root(root);
+                        root = NULL;
+                        avc = anon_vma_chain_alloc(GFP_KERNEL);
+                        if (!avc)
+                                goto enomem_failure;
+                }
+                anon_vma = pavc->anon_vma;
+                root = lock_anon_vma_root(root, anon_vma);
+                anon_vma_chain_link(dst, avc, anon_vma);
         }
+        unlock_anon_vma_root(root);
         return 0;
 
 enomem_failure:
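The clone loop first tries GFP_NOWAIT | __GFP_NOWARN so the chain allocation can happen without dropping the root mutex; only if that fails does it unlock and retry with a sleeping GFP_KERNEL allocation, re-taking the lock on the next lock_anon_vma_root() call. A simplified model of that fallback, with a hypothetical try_alloc_nowait() standing in for the slab cache:

#include <pthread.h>
#include <stdlib.h>

/* try_alloc_nowait() is a hypothetical non-blocking allocator standing
 * in for anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN). */
extern void *try_alloc_nowait(size_t size);

static pthread_mutex_t *held;   /* the root lock currently held, if any */

static void drop_held(void)
{
        if (held) {
                pthread_mutex_unlock(held);
                held = NULL;    /* next iteration re-locks via lock_root() */
        }
}

/* First try the cheap, non-sleeping allocation while the lock is held;
 * only on failure drop the lock and fall back to a blocking malloc(). */
static void *alloc_entry(size_t size)
{
        void *p = try_alloc_nowait(size);

        if (!p) {
                drop_held();            /* must not block while holding it */
                p = malloc(size);       /* may sleep; the lock is now free */
        }
        return p;
}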
@@ -263,7 +297,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
         anon_vma = anon_vma_alloc();
         if (!anon_vma)
                 goto out_error;
-        avc = anon_vma_chain_alloc();
+        avc = anon_vma_chain_alloc(GFP_KERNEL);
         if (!avc)
                 goto out_error_free_anon_vma;
 
@@ -280,7 +314,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
         get_anon_vma(anon_vma->root);
         /* Mark this anon_vma as the one where our new (COWed) pages go. */
         vma->anon_vma = anon_vma;
+        anon_vma_lock(anon_vma);
         anon_vma_chain_link(vma, avc, anon_vma);
+        anon_vma_unlock(anon_vma);
 
         return 0;
 
@@ -291,36 +327,43 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
         return -ENOMEM;
 }
 
-static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
-{
-        struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
-        int empty;
-
-        /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
-        if (!anon_vma)
-                return;
-
-        anon_vma_lock(anon_vma);
-        list_del(&anon_vma_chain->same_anon_vma);
-
-        /* We must garbage collect the anon_vma if it's empty */
-        empty = list_empty(&anon_vma->head);
-        anon_vma_unlock(anon_vma);
-
-        if (empty)
-                put_anon_vma(anon_vma);
-}
-
 void unlink_anon_vmas(struct vm_area_struct *vma)
 {
         struct anon_vma_chain *avc, *next;
+        struct anon_vma *root = NULL;
 
         /*
          * Unlink each anon_vma chained to the VMA. This list is ordered
          * from newest to oldest, ensuring the root anon_vma gets freed last.
          */
         list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
-                anon_vma_unlink(avc);
+                struct anon_vma *anon_vma = avc->anon_vma;
+
+                root = lock_anon_vma_root(root, anon_vma);
+                list_del(&avc->same_anon_vma);
+
+                /*
+                 * Leave empty anon_vmas on the list - we'll need
+                 * to free them outside the lock.
+                 */
+                if (list_empty(&anon_vma->head))
+                        continue;
+
+                list_del(&avc->same_vma);
+                anon_vma_chain_free(avc);
+        }
+        unlock_anon_vma_root(root);
+
+        /*
+         * Iterate the list once more, it now only contains empty and unlinked
+         * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
+         * needing to acquire the anon_vma->root->mutex.
+         */
+        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
+                struct anon_vma *anon_vma = avc->anon_vma;
+
+                put_anon_vma(anon_vma);
+
                 list_del(&avc->same_vma);
                 anon_vma_chain_free(avc);
         }
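unlink_anon_vmas() now tears down in two passes: the first unlinks every chain under a single root mutex but deliberately leaves empty anon_vmas on the list, because __put_anon_vma() needs that same mutex; the second pass frees them only after the lock is dropped. A minimal model of that deadlock-avoiding shape, with hypothetical obj/obj_put() stand-ins:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical object whose destructor must itself take shared_lock,
 * mirroring __put_anon_vma() needing anon_vma->root->mutex. */
struct obj { struct obj *next; int refs; };

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

static void obj_put(struct obj *o)
{
        int dead;

        pthread_mutex_lock(&shared_lock);       /* destructor takes the lock */
        dead = (--o->refs == 0);
        pthread_mutex_unlock(&shared_lock);
        if (dead)
                free(o);
}

static void teardown(struct obj *list)
{
        struct obj *o, *next;

        /* Pass 1: detach everything under one lock acquisition. */
        pthread_mutex_lock(&shared_lock);
        for (o = list; o; o = o->next)
                ;       /* unlink o from shared structures here */
        pthread_mutex_unlock(&shared_lock);

        /* Pass 2: only now drop the references; obj_put() re-takes
         * shared_lock, so doing this inside pass 1 would deadlock. */
        for (o = list; o; o = next) {
                next = o->next;
                obj_put(o);
        }
}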