Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 10
1 file changed, 5 insertions, 5 deletions
@@ -2502,15 +2502,15 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem);
+		mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
 		/*
 		 * We can safely modify head.next after taking the
-		 * anon_vma->root->lock. If some other vma in this mm shares
+		 * anon_vma->root->mutex. If some other vma in this mm shares
 		 * the same anon_vma we won't take it again.
 		 *
 		 * No need of atomic instructions here, head.next
 		 * can't change from under us thanks to the
-		 * anon_vma->root->lock.
+		 * anon_vma->root->mutex.
 		 */
 		if (__test_and_set_bit(0, (unsigned long *)
 				       &anon_vma->root->head.next))
@@ -2559,7 +2559,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_mutex or anon_vma->lock outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
@@ -2615,7 +2615,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 		 *
 		 * No need of atomic instructions here, head.next
 		 * can't change from under us until we release the
-		 * anon_vma->root->lock.
+		 * anon_vma->root->mutex.
 		 */
 		if (!__test_and_clear_bit(0, (unsigned long *)
 					  &anon_vma->root->head.next))
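
For readers unfamiliar with the pattern the first and last hunks touch, below is a minimal user-space sketch of the mark-once locking scheme used around these sites: several vmas may share one anon_vma root, so the root is locked only the first time it is reached and the low bit of head.next records that it is already held. The struct, function names, and pthread mutex here are stand-ins, not the kernel's code; the non-atomic bit handling is safe only under the assumption that an outer lock (mm_all_locks_mutex in the kernel) serializes the whole walk.

/*
 * Sketch only: illustrates the "lock once, mark with the pointer LSB"
 * pattern, not the kernel implementation.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct root {
	pthread_mutex_t mutex;	/* stands in for anon_vma->root->mutex */
	uintptr_t head_next;	/* LSB doubles as the "already locked" flag */
};

/* Returns true if this call actually took the lock (first visit). */
static bool lock_root_once(struct root *r)
{
	if (r->head_next & 1)		/* an earlier vma already locked it */
		return false;
	pthread_mutex_lock(&r->mutex);
	r->head_next |= 1;		/* non-atomic set is fine: the outer lock keeps it stable */
	return true;
}

static void unlock_root_once(struct root *r)
{
	if (r->head_next & 1) {
		r->head_next &= ~(uintptr_t)1;
		pthread_mutex_unlock(&r->mutex);
	}
}

int main(void)
{
	struct root r = { .mutex = PTHREAD_MUTEX_INITIALIZER, .head_next = 0 };

	/* Two vmas sharing the same root: only the first visit locks it. */
	printf("first visit locked:  %d\n", lock_root_once(&r));
	printf("second visit locked: %d\n", lock_root_once(&r));
	unlock_root_once(&r);
	return 0;
}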