author | Peter Zijlstra <peterz@infradead.org> | 2011-06-27 19:18:09 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-06-27 21:00:13 -0400
commit | 9b679320a5fbf46454011e5c62e0b8991b0956d1 (patch) |
tree | 49ea7be5e8bb669ef99533a18c73555e2882efc5 /mm |
parent | aa2c96d6f329e66cc59352b0f12e8f04e6a9593b (diff) |
mm/memory-failure.c: fix spinlock vs mutex order
We cannot take a mutex while holding a spinlock: tasklist_lock is a
spinning rwlock, and mutex_lock() may sleep. Flip the order so the
sleeping locks (anon_vma's mutex, mapping->i_mmap_mutex) are taken
before tasklist_lock, and fix the locking-order documentation in
mm/rmap.c to match.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
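
Why this matters: tasklist_lock is a rwlock_t, a spinning lock that disables preemption while held, whereas anon_vma's lock and mapping->i_mmap_mutex are sleeping mutexes, so taking either of them inside the rwlock can sleep in atomic context. A minimal kernel-style sketch of the before/after pattern (my illustration, not code from the patch; example_mutex and both function names are hypothetical):

```c
#include <linux/mutex.h>
#include <linux/sched.h>	/* declares: extern rwlock_t tasklist_lock; */
#include <linux/spinlock.h>

static DEFINE_MUTEX(example_mutex);	/* hypothetical sleeping lock */

static void broken_order(void)
{
	read_lock(&tasklist_lock);	/* spinning lock: atomic context */
	mutex_lock(&example_mutex);	/* BUG: may sleep while atomic */
	mutex_unlock(&example_mutex);
	read_unlock(&tasklist_lock);
}

static void fixed_order(void)
{
	mutex_lock(&example_mutex);	/* sleeping lock taken first */
	read_lock(&tasklist_lock);	/* spinning lock nests inside it */
	read_unlock(&tasklist_lock);
	mutex_unlock(&example_mutex);
}
```

With CONFIG_DEBUG_ATOMIC_SLEEP enabled, the broken variant trips the might_sleep() check inside mutex_lock() and emits a "sleeping function called from invalid context" warning.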
Diffstat (limited to 'mm')

 mm/memory-failure.c | 21 ++++++---------------
 mm/rmap.c           |  5 ++---
 2 files changed, 8 insertions(+), 18 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index eac0ba561491..740c4f52059c 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -391,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	struct task_struct *tsk;
 	struct anon_vma *av;
 
-	read_lock(&tasklist_lock);
 	av = page_lock_anon_vma(page);
 	if (av == NULL)	/* Not actually mapped anymore */
-		goto out;
+		return;
+
+	read_lock(&tasklist_lock);
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;
 
@@ -408,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 			add_to_kill(tsk, page, vma, to_kill, tkc);
 		}
 	}
-	page_unlock_anon_vma(av);
-out:
 	read_unlock(&tasklist_lock);
+	page_unlock_anon_vma(av);
 }
 
 /*
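Taken together, the two hunks above give collect_procs_anon() the locking skeleton below (abridged by me; the elided loop body is the unchanged anon_vma_chain walk). Moving read_lock() after the NULL check also makes the out: label unnecessary, since the early-return path no longer holds any lock:

```c
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			       struct to_kill **tkc)
{
	struct task_struct *tsk;
	struct anon_vma *av;

	av = page_lock_anon_vma(page);	/* sleeping lock taken first */
	if (av == NULL)			/* not actually mapped anymore */
		return;			/* no locks held: plain return */

	read_lock(&tasklist_lock);	/* spinning lock nests inside */
	for_each_process(tsk) {
		/* ... walk the anon_vma_chain, add_to_kill() ... */
	}
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma(av);	/* release in reverse order */
}
```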
@@ -424,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	struct prio_tree_iter iter;
 	struct address_space *mapping = page->mapping;
 
-	/*
-	 * A note on the locking order between the two locks.
-	 * We don't rely on this particular order.
-	 * If you have some other code that needs a different order
-	 * feel free to switch them around. Or add a reverse link
-	 * from mm_struct to task_struct, then this could be all
-	 * done without taking tasklist_lock and looping over all tasks.
-	 */
-
-	read_lock(&tasklist_lock);
 	mutex_lock(&mapping->i_mmap_mutex);
+	read_lock(&tasklist_lock);
 	for_each_process(tsk) {
 		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
@@ -454,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 			add_to_kill(tsk, page, vma, to_kill, tkc);
 		}
 	}
-	mutex_unlock(&mapping->i_mmap_mutex);
 	read_unlock(&tasklist_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 }
 
 /*
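collect_procs_file() receives the mirror-image fix. Its resulting shape, abridged the same way:

```c
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			       struct to_kill **tkc)
{
	struct task_struct *tsk;
	struct prio_tree_iter iter;
	struct address_space *mapping = page->mapping;

	mutex_lock(&mapping->i_mmap_mutex);	/* sleeping lock first */
	read_lock(&tasklist_lock);		/* then the spinning rwlock */
	for_each_process(tsk) {
		/* ... walk the i_mmap prio tree, add_to_kill() ... */
	}
	read_unlock(&tasklist_lock);		/* unlock in reverse order */
	mutex_unlock(&mapping->i_mmap_mutex);
}
```

In both functions the unlock order is the reverse of the lock order, so the spinning section is fully nested inside the sleeping lock.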
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -38,9 +38,8 @@
  *                         in arch-dependent flush_dcache_mmap_lock,
  *                         within inode_wb_list_lock in __sync_single_inode)
  *
- * (code doesn't rely on that order so it could be switched around)
- * ->tasklist_lock
- *   anon_vma->mutex      (memory_failure, collect_procs_anon)
+ * anon_vma->mutex,mapping->i_mutex      (memory_failure, collect_procs_anon)
+ *   ->tasklist_lock
  *   pte map lock
  */
 
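
The mm/rmap.c comment records this order so that every path agrees on it: two paths that take the same pair of locks in opposite orders can deadlock even though each is individually correct. A schematic ABBA interleaving, purely illustrative (neither column below exists verbatim in this code):

```c
/*
 * CPU 0 (old collect_procs_anon order)   CPU 1 (documented order)
 *
 * read_lock(&tasklist_lock);             mutex_lock(&av->mutex);
 * mutex_lock(&av->mutex);                write_lock(&tasklist_lock);
 *     ^ blocks: CPU 1 holds it               ^ spins: CPU 0 is a reader
 *
 * CPU 0 blocks holding the read lock; CPU 1 spins waiting for readers
 * to drain. Neither can proceed: a classic ABBA deadlock.
 */
```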