author		Ingo Molnar <mingo@elte.hu>	2008-07-29 05:54:24 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-29 05:54:24 -0400
commit		3825c9e8d01e4310c40a3903a354c433c32a7b6f (patch)
tree		87c94a8076bbb38bd2cf20ab9bc23d6f74f6a0a8 /mm/mmap.c
parent		5d7b605245b1aa1a9cd6549b1f57d69273eb0c37 (diff)
parent		6e86841d05f371b5b9b86ce76c02aaee83352298 (diff)
Merge commit 'v2.6.27-rc1' into x86/microcode
Conflicts:
	arch/x86/kernel/microcode.c
Manual resolutions:
	arch/x86/kernel/microcode_amd.c
	arch/x86/kernel/microcode_intel.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	160
1 file changed, 160 insertions, 0 deletions
@@ -26,6 +26,7 @@
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -2061,6 +2062,7 @@ void exit_mmap(struct mm_struct *mm)
 
 	/* mm's last user has gone, and its about to be pulled down */
 	arch_exit_mmap(mm);
+	mmu_notifier_release(mm);
 
 	lru_add_drain();
 	flush_cache_mm(mm);
@@ -2268,3 +2270,161 @@ int install_special_mapping(struct mm_struct *mm,
 
 	return 0;
 }
+
+static DEFINE_MUTEX(mm_all_locks_mutex);
+
+static void vm_lock_anon_vma(struct anon_vma *anon_vma)
+{
+	if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+		/*
+		 * The LSB of head.next can't change from under us
+		 * because we hold the mm_all_locks_mutex.
+		 */
+		spin_lock(&anon_vma->lock);
+		/*
+		 * We can safely modify head.next after taking the
+		 * anon_vma->lock. If some other vma in this mm shares
+		 * the same anon_vma we won't take it again.
+		 *
+		 * No need of atomic instructions here, head.next
+		 * can't change from under us thanks to the
+		 * anon_vma->lock.
+		 */
+		if (__test_and_set_bit(0, (unsigned long *)
+				       &anon_vma->head.next))
+			BUG();
+	}
+}
+
+static void vm_lock_mapping(struct address_space *mapping)
+{
+	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+		/*
+		 * AS_MM_ALL_LOCKS can't change from under us because
+		 * we hold the mm_all_locks_mutex.
+		 *
+		 * Operations on ->flags have to be atomic because
+		 * even if AS_MM_ALL_LOCKS is stable thanks to the
+		 * mm_all_locks_mutex, there may be other cpus
+		 * changing other bitflags in parallel to us.
+		 */
+		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
+			BUG();
+		spin_lock(&mapping->i_mmap_lock);
+	}
+}
+
+/*
+ * This operation locks against the VM for all pte/vma/mm related
+ * operations that could ever happen on a certain mm. This includes
+ * vmtruncate, try_to_unmap, and all page faults.
+ *
+ * The caller must take the mmap_sem in write mode before calling
+ * mm_take_all_locks(). The caller isn't allowed to release the
+ * mmap_sem until mm_drop_all_locks() returns.
+ *
+ * mmap_sem in write mode is required in order to block all operations
+ * that could modify pagetables and free pages without need of
+ * altering the vma layout (for example populate_range() with
+ * nonlinear vmas). It's also needed in write mode to avoid new
+ * anon_vmas to be associated with existing vmas.
+ *
+ * A single task can't take more than one mm_take_all_locks() in a row
+ * or it would deadlock.
+ *
+ * The LSB in anon_vma->head.next and the AS_MM_ALL_LOCKS bitflag in
+ * mapping->flags avoid to take the same lock twice, if more than one
+ * vma in this mm is backed by the same anon_vma or address_space.
+ *
+ * We can take all the locks in random order because the VM code
+ * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
+ * takes more than one of them in a row. Secondly we're protected
+ * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
+ *
+ * mm_take_all_locks() and mm_drop_all_locks are expensive operations
+ * that may have to take thousand of locks.
+ *
+ * mm_take_all_locks() can fail if it's interrupted by signals.
+ */
+int mm_take_all_locks(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+	int ret = -EINTR;
+
+	BUG_ON(down_read_trylock(&mm->mmap_sem));
+
+	mutex_lock(&mm_all_locks_mutex);
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (signal_pending(current))
+			goto out_unlock;
+		if (vma->anon_vma)
+			vm_lock_anon_vma(vma->anon_vma);
+		if (vma->vm_file && vma->vm_file->f_mapping)
+			vm_lock_mapping(vma->vm_file->f_mapping);
+	}
+	ret = 0;
+
+out_unlock:
+	if (ret)
+		mm_drop_all_locks(mm);
+
+	return ret;
+}
+
+static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
+{
+	if (test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+		/*
+		 * The LSB of head.next can't change to 0 from under
+		 * us because we hold the mm_all_locks_mutex.
+		 *
+		 * We must however clear the bitflag before unlocking
+		 * the vma so the users using the anon_vma->head will
+		 * never see our bitflag.
+		 *
+		 * No need of atomic instructions here, head.next
+		 * can't change from under us until we release the
+		 * anon_vma->lock.
+		 */
+		if (!__test_and_clear_bit(0, (unsigned long *)
+					  &anon_vma->head.next))
+			BUG();
+		spin_unlock(&anon_vma->lock);
+	}
+}
+
+static void vm_unlock_mapping(struct address_space *mapping)
+{
+	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+		/*
+		 * AS_MM_ALL_LOCKS can't change to 0 from under us
+		 * because we hold the mm_all_locks_mutex.
+		 */
+		spin_unlock(&mapping->i_mmap_lock);
+		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
+					&mapping->flags))
+			BUG();
+	}
+}
+
+/*
+ * The mmap_sem cannot be released by the caller until
+ * mm_drop_all_locks() returns.
+ */
+void mm_drop_all_locks(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+
+	BUG_ON(down_read_trylock(&mm->mmap_sem));
+	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (vma->anon_vma)
+			vm_unlock_anon_vma(vma->anon_vma);
+		if (vma->vm_file && vma->vm_file->f_mapping)
+			vm_unlock_mapping(vma->vm_file->f_mapping);
+	}
+
+	mutex_unlock(&mm_all_locks_mutex);
+}
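
The documentation block in the code above fixes the calling contract for mm_take_all_locks()/mm_drop_all_locks(): the caller must already hold mmap_sem for writing and may not release it until every per-vma lock has been dropped again. The following sketch of that calling pattern is not part of this commit; the function name example_take_all_locks() is made up for illustration, while down_write()/up_write() on mm->mmap_sem are the standard primitives of this kernel era for taking the semaphore in write mode (the real user of this interface in 2.6.27 is the mmu notifier registration path).

#include <linux/mm.h>		/* mm_take_all_locks(), mm_drop_all_locks() */
#include <linux/rwsem.h>	/* down_write(), up_write() */

/* Hypothetical caller, shown only to illustrate the locking contract. */
static int example_take_all_locks(struct mm_struct *mm)
{
	int ret;

	/* The contract requires mmap_sem held in write mode first ... */
	down_write(&mm->mmap_sem);

	ret = mm_take_all_locks(mm);
	if (ret)
		goto out;	/* -EINTR: a signal was pending */

	/*
	 * Every anon_vma->lock and i_mmap_lock of this mm is now held,
	 * so no pte/vma teardown can race with whatever the caller sets
	 * up here (e.g. attaching an mmu notifier to the mm).
	 */

	mm_drop_all_locks(mm);
out:
	/* ... and may only be released after mm_drop_all_locks(). */
	up_write(&mm->mmap_sem);
	return ret;
}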