 include/linux/huge_mm.h |  2
 include/linux/rmap.h    | 17
 mm/huge_memory.c        |  6
 mm/ksm.c                |  6
 mm/memory-failure.c     |  4
 mm/migrate.c            |  2
 mm/mmap.c               |  2
 mm/mremap.c             |  2
 mm/rmap.c               | 48
 9 files changed, 50 insertions(+), 39 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 027ad04ef3a8..0d1208c0bdc4 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -102,7 +102,7 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
 #define wait_split_huge_page(__anon_vma, __pmd)			\
 	do {							\
 		pmd_t *____pmd = (__pmd);			\
-		anon_vma_lock(__anon_vma);			\
+		anon_vma_lock_write(__anon_vma);		\
 		anon_vma_unlock(__anon_vma);			\
 		BUG_ON(pmd_trans_splitting(*____pmd) ||		\
 		       pmd_trans_huge(*____pmd));		\
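The hunk above also shows the lock-cycle idiom this macro relies on: taking the (now write-mode) anon_vma lock and immediately dropping it does no work itself, but it cannot complete until any concurrent holder — a splitter that holds the lock while the pmd is marked splitting — has finished. A minimal userspace model of that idiom, with a POSIX rwlock standing in for the kernel rwsem (the thread, flag, and sleeps are purely illustrative assumptions):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int split_in_progress;

static void *splitter(void *arg)
{
	pthread_rwlock_wrlock(&lock);	/* splitter works under the write lock */
	split_in_progress = 1;
	usleep(100000);			/* pretend to split the huge page */
	split_in_progress = 0;
	pthread_rwlock_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, splitter, NULL);
	usleep(10000);			/* let the splitter take the lock first */

	/* The wait_split_huge_page() idiom: lock + unlock as a barrier. */
	pthread_rwlock_wrlock(&lock);
	pthread_rwlock_unlock(&lock);

	/* Once we got through the lock, the splitter must be done. */
	printf("split_in_progress = %d\n", split_in_progress);	/* prints 0 */
	pthread_join(t, NULL);
	return 0;
}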
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index f3f41d242e25..c20635c527a9 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -118,7 +118,7 @@ static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 	up_write(&anon_vma->root->rwsem);
 }
 
-static inline void anon_vma_lock(struct anon_vma *anon_vma)
+static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
 	down_write(&anon_vma->root->rwsem);
 }
@@ -128,6 +128,17 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
 	up_write(&anon_vma->root->rwsem);
 }
 
+static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
+{
+	down_read(&anon_vma->root->rwsem);
+}
+
+static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
+{
+	up_read(&anon_vma->root->rwsem);
+}
+
+
 /*
  * anon_vma helper functions.
  */
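With the read-side helpers in place, rmap walkers can traverse the same anon_vma chain in parallel, while writers (which mutate the interval tree) stay exclusive — the scalability point of this change. A sketch of the same helper set in userspace, with struct anon_vma reduced to just the lock and a POSIX rwlock modeling the rwsem (names mirror the kernel's, but this is not kernel code):

#include <pthread.h>
#include <stdio.h>

struct anon_vma {
	pthread_rwlock_t rwsem;
};

static void anon_vma_lock_write(struct anon_vma *av)  { pthread_rwlock_wrlock(&av->rwsem); }
static void anon_vma_unlock(struct anon_vma *av)      { pthread_rwlock_unlock(&av->rwsem); }
static void anon_vma_lock_read(struct anon_vma *av)   { pthread_rwlock_rdlock(&av->rwsem); }
static void anon_vma_unlock_read(struct anon_vma *av) { pthread_rwlock_unlock(&av->rwsem); }

static struct anon_vma av = { PTHREAD_RWLOCK_INITIALIZER };

static void *reader(void *name)
{
	anon_vma_lock_read(&av);	/* both readers may hold this at once */
	printf("%s walking the rmap\n", (char *)name);
	anon_vma_unlock_read(&av);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, reader, "reader 1");
	pthread_create(&t2, NULL, reader, "reader 2");
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	anon_vma_lock_write(&av);	/* writers still serialize everyone */
	anon_vma_unlock(&av);
	return 0;
}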
@@ -220,8 +231,8 @@ int try_to_munlock(struct page *);
 /*
  * Called by memory-failure.c to kill processes.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page);
-void page_unlock_anon_vma(struct anon_vma *anon_vma);
+struct anon_vma *page_lock_anon_vma_read(struct page *page);
+void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
 /*
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index acd37fe55eb7..a24c9cb9c83e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1549,7 +1549,7 @@ int split_huge_page(struct page *page)
 	int ret = 1;
 
 	BUG_ON(!PageAnon(page));
-	anon_vma = page_lock_anon_vma(page);
+	anon_vma = page_lock_anon_vma_read(page);
 	if (!anon_vma)
 		goto out;
 	ret = 0;
@@ -1562,7 +1562,7 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(PageCompound(page));
 out_unlock:
-	page_unlock_anon_vma(anon_vma);
+	page_unlock_anon_vma_read(anon_vma);
 out:
 	return ret;
 }
@@ -2074,7 +2074,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
 		goto out;
 
-	anon_vma_lock(vma->anon_vma);
+	anon_vma_lock_write(vma->anon_vma);
 
 	pte = pte_offset_map(pmd, address);
 	ptl = pte_lockptr(mm, pmd);
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1634,7 +1634,7 @@ again:
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		anon_vma_lock(anon_vma);
+		anon_vma_lock_write(anon_vma);
 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
 					       0, ULONG_MAX) {
 			vma = vmac->vma;
@@ -1688,7 +1688,7 @@ again:
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		anon_vma_lock(anon_vma);
+		anon_vma_lock_write(anon_vma);
 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
 					       0, ULONG_MAX) {
 			vma = vmac->vma;
@@ -1741,7 +1741,7 @@ again:
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		anon_vma_lock(anon_vma);
+		anon_vma_lock_write(anon_vma);
 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
 					       0, ULONG_MAX) {
 			vma = vmac->vma;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ddb68a169e45..f2cd830f66c0 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -402,7 +402,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	struct anon_vma *av;
 	pgoff_t pgoff;
 
-	av = page_lock_anon_vma(page);
+	av = page_lock_anon_vma_read(page);
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
 
@@ -423,7 +423,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 		}
 	}
 	read_unlock(&tasklist_lock);
-	page_unlock_anon_vma(av);
+	page_unlock_anon_vma_read(av);
 }
 
 /*
diff --git a/mm/migrate.c b/mm/migrate.c
index f24e9cc49cc4..6e46485f014c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -754,7 +754,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	 */
 	if (PageAnon(page)) {
 		/*
-		 * Only page_lock_anon_vma() understands the subtleties of
+		 * Only page_lock_anon_vma_read() understands the subtleties of
 		 * getting a hold on an anon_vma from outside one of its mms.
 		 */
 		anon_vma = page_get_anon_vma(page);
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -602,7 +602,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 	if (anon_vma) {
 		VM_BUG_ON(adjust_next && next->anon_vma &&
 			  anon_vma != next->anon_vma);
-		anon_vma_lock(anon_vma);
+		anon_vma_lock_write(anon_vma);
 		anon_vma_interval_tree_pre_update_vma(vma);
 		if (adjust_next)
 			anon_vma_interval_tree_pre_update_vma(next);
diff --git a/mm/mremap.c b/mm/mremap.c
index 1b61c2d3307a..3dabd170753a 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -104,7 +104,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		}
 		if (vma->anon_vma) {
 			anon_vma = vma->anon_vma;
-			anon_vma_lock(anon_vma);
+			anon_vma_lock_write(anon_vma);
 		}
 	}
 
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -87,24 +87,24 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
 	VM_BUG_ON(atomic_read(&anon_vma->refcount));
 
 	/*
-	 * Synchronize against page_lock_anon_vma() such that
+	 * Synchronize against page_lock_anon_vma_read() such that
 	 * we can safely hold the lock without the anon_vma getting
 	 * freed.
 	 *
 	 * Relies on the full mb implied by the atomic_dec_and_test() from
 	 * put_anon_vma() against the acquire barrier implied by
-	 * mutex_trylock() from page_lock_anon_vma(). This orders:
+	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
 	 *
-	 * page_lock_anon_vma()		VS	put_anon_vma()
-	 *   mutex_trylock()			  atomic_dec_and_test()
+	 * page_lock_anon_vma_read()	VS	put_anon_vma()
+	 *   down_read_trylock()		  atomic_dec_and_test()
 	 *   LOCK				  MB
-	 *   atomic_read()			  mutex_is_locked()
+	 *   atomic_read()			  rwsem_is_locked()
 	 *
 	 * LOCK should suffice since the actual taking of the lock must
 	 * happen _before_ what follows.
 	 */
 	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
-		anon_vma_lock(anon_vma);
+		anon_vma_lock_write(anon_vma);
 		anon_vma_unlock(anon_vma);
 	}
 
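The comment block above encodes a handshake: the last put_anon_vma() does a full barrier in atomic_dec_and_test() before the freer checks rwsem_is_locked(), while a racing page_lock_anon_vma_read() acquires the lock (via down_read_trylock()) before re-checking the refcount. If the freer sees the lock held, it cycles the write lock once so it cannot return while a late reader is still inside. A compressed C11/pthreads model of just that handshake — the refcount and rwsem here are userspace stand-ins, and since POSIX has no rwsem_is_locked(), this model drains unconditionally:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_rwlock_t rwsem = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int refcount = 1;

/* Reader side, like page_lock_anon_vma_read()'s fast path. */
static bool lock_read_if_live(void)
{
	if (pthread_rwlock_tryrdlock(&rwsem) != 0)	/* like down_read_trylock() */
		return false;				/* fall back to refcount path */
	if (atomic_load(&refcount) == 0) {		/* raced with the last put */
		pthread_rwlock_unlock(&rwsem);
		return false;
	}
	return true;
}

/* Freer side, like anon_vma_free(): wait out any late reader. */
static void free_object(void)
{
	if (atomic_fetch_sub(&refcount, 1) == 1) {	/* full barrier, like atomic_dec_and_test() */
		pthread_rwlock_wrlock(&rwsem);		/* drain a reader that won the trylock race */
		pthread_rwlock_unlock(&rwsem);
		/* no reader can still be inside; safe to free */
	}
}

int main(void)
{
	if (lock_read_if_live())
		pthread_rwlock_unlock(&rwsem);	/* reader done */
	free_object();				/* last put: drains, then frees */
	return 0;
}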
@@ -146,7 +146,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
 	 * allocate a new one.
 	 *
 	 * Anon-vma allocations are very subtle, because we may have
-	 * optimistically looked up an anon_vma in page_lock_anon_vma()
+	 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 	 * and that may actually touch the spinlock even in the newly
 	 * allocated vma (it depends on RCU to make sure that the
 	 * anon_vma isn't actually destroyed).
@@ -181,7 +181,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 		allocated = anon_vma;
 	}
 
-	anon_vma_lock(anon_vma);
+	anon_vma_lock_write(anon_vma);
 	/* page_table_lock to protect against threads */
 	spin_lock(&mm->page_table_lock);
 	if (likely(!vma->anon_vma)) {
@@ -306,7 +306,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 	get_anon_vma(anon_vma->root);
 	/* Mark this anon_vma as the one where our new (COWed) pages go. */
 	vma->anon_vma = anon_vma;
-	anon_vma_lock(anon_vma);
+	anon_vma_lock_write(anon_vma);
 	anon_vma_chain_link(vma, avc, anon_vma);
 	anon_vma_unlock(anon_vma);
 
@@ -442,7 +442,7 @@ out:
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_lock_anon_vma_read(struct page *page)
 {
 	struct anon_vma *anon_vma = NULL;
 	struct anon_vma *root_anon_vma;
@@ -457,14 +457,14 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 	root_anon_vma = ACCESS_ONCE(anon_vma->root);
-	if (down_write_trylock(&root_anon_vma->rwsem)) {
+	if (down_read_trylock(&root_anon_vma->rwsem)) {
 		/*
 		 * If the page is still mapped, then this anon_vma is still
 		 * its anon_vma, and holding the mutex ensures that it will
 		 * not go away, see anon_vma_free().
 		 */
 		if (!page_mapped(page)) {
-			up_write(&root_anon_vma->rwsem);
+			up_read(&root_anon_vma->rwsem);
 			anon_vma = NULL;
 		}
 		goto out;
@@ -484,15 +484,15 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 
 	/* we pinned the anon_vma, its safe to sleep */
 	rcu_read_unlock();
-	anon_vma_lock(anon_vma);
+	anon_vma_lock_read(anon_vma);
 
 	if (atomic_dec_and_test(&anon_vma->refcount)) {
 		/*
 		 * Oops, we held the last refcount, release the lock
 		 * and bail -- can't simply use put_anon_vma() because
-		 * we'll deadlock on the anon_vma_lock() recursion.
+		 * we'll deadlock on the anon_vma_lock_write() recursion.
 		 */
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_read(anon_vma);
 		__put_anon_vma(anon_vma);
 		anon_vma = NULL;
 	}
@@ -504,9 +504,9 @@ out:
 	return anon_vma;
 }
 
-void page_unlock_anon_vma(struct anon_vma *anon_vma)
+void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 {
-	anon_vma_unlock(anon_vma);
+	anon_vma_unlock_read(anon_vma);
 }
 
 /*
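Every caller converted below follows the same shape: a NULL return means the page is no longer mapped anonymous, and a non-NULL return means the caller holds the root rwsem in read mode until the matching unlock. A sketch of that calling convention, with toy stand-in types and stubs so it compiles and links as a userspace program (in the kernel, the real definitions live in include/linux/rmap.h and mm/rmap.c):

#include <stdio.h>

/* Toy stand-ins, not the kernel structures. */
struct page { int mapped; };
struct anon_vma { int dummy; };

static struct anon_vma fake_av;

static struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	return page->mapped ? &fake_av : NULL;	/* NULL: no longer mapped */
}

static void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	(void)anon_vma;		/* the kernel would up_read() the root rwsem */
}

/* The shape shared by page_referenced_anon(), try_to_unmap_anon()
 * and collect_procs_anon() after this change. */
static int walk_page_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = page_lock_anon_vma_read(page);

	if (!anon_vma)
		return 0;	/* not mapped any more: nothing to do */

	/* ... walk the rmap under the shared lock ... */

	page_unlock_anon_vma_read(anon_vma);
	return 1;
}

int main(void)
{
	struct page p = { .mapped = 1 };

	printf("walked: %d\n", walk_page_anon_vma(&p));
	return 0;
}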
@@ -732,7 +732,7 @@ static int page_referenced_anon(struct page *page,
 	struct anon_vma_chain *avc;
 	int referenced = 0;
 
-	anon_vma = page_lock_anon_vma(page);
+	anon_vma = page_lock_anon_vma_read(page);
 	if (!anon_vma)
 		return referenced;
 
@@ -754,7 +754,7 @@ static int page_referenced_anon(struct page *page,
 			break;
 	}
 
-	page_unlock_anon_vma(anon_vma);
+	page_unlock_anon_vma_read(anon_vma);
 	return referenced;
 }
 
@@ -1474,7 +1474,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 
-	anon_vma = page_lock_anon_vma(page);
+	anon_vma = page_lock_anon_vma_read(page);
 	if (!anon_vma)
 		return ret;
 
@@ -1501,7 +1501,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			break;
 	}
 
-	page_unlock_anon_vma(anon_vma);
+	page_unlock_anon_vma_read(anon_vma);
 	return ret;
 }
 
@@ -1696,7 +1696,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	int ret = SWAP_AGAIN;
 
 	/*
-	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
+	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
 	 * because that depends on page_mapped(); but not all its usages
 	 * are holding mmap_sem. Users without mmap_sem are required to
 	 * take a reference count to prevent the anon_vma disappearing
@@ -1704,7 +1704,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma = page_anon_vma(page);
 	if (!anon_vma)
 		return ret;
-	anon_vma_lock(anon_vma);
+	anon_vma_lock_read(anon_vma);
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
@@ -1712,7 +1712,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 		if (ret != SWAP_AGAIN)
 			break;
 	}
-	anon_vma_unlock(anon_vma);
+	anon_vma_unlock_read(anon_vma);
 	return ret;
 }
 
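The rmap_walk_anon() hunks above show the loop that now runs under the shared lock: take the read lock, visit each VMA attached to the anon_vma, call the callback, and stop early when it returns anything other than SWAP_AGAIN. A self-contained userspace model of that loop — a flat list and integer return codes stand in for the kernel's interval tree and SWAP_* constants:

#include <pthread.h>
#include <stdio.h>

#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1

struct vm_area_struct {
	unsigned long start;
	struct vm_area_struct *next;
};

struct anon_vma {
	pthread_rwlock_t rwsem;
	struct vm_area_struct *vmas;	/* flat list instead of an rb tree */
};

static int rmap_walk_anon(struct anon_vma *av,
			  int (*rmap_one)(struct vm_area_struct *, void *),
			  void *arg)
{
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	pthread_rwlock_rdlock(&av->rwsem);	/* anon_vma_lock_read() */
	for (vma = av->vmas; vma; vma = vma->next) {
		ret = rmap_one(vma, arg);
		if (ret != SWAP_AGAIN)
			break;			/* callback asked to stop */
	}
	pthread_rwlock_unlock(&av->rwsem);	/* anon_vma_unlock_read() */
	return ret;
}

static int print_one(struct vm_area_struct *vma, void *arg)
{
	(void)arg;
	printf("vma at %#lx\n", vma->start);
	return SWAP_AGAIN;
}

int main(void)
{
	struct vm_area_struct b = { 0x20000, NULL }, a = { 0x10000, &b };
	struct anon_vma av = { PTHREAD_RWLOCK_INITIALIZER, &a };

	rmap_walk_anon(&av, print_one, NULL);
	return 0;
}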