Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1162,7 +1162,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                 if (vma->vm_flags & VM_LOCKED)
                         goto out_mlock;
 
-                if (TTU_ACTION(flags) == TTU_MUNLOCK)
+                if (flags & TTU_MUNLOCK)
                         goto out_unmap;
         }
         if (!(flags & TTU_IGNORE_ACCESS)) {
@@ -1230,7 +1230,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                          * pte. do_swap_page() will wait until the migration
                          * pte is removed and then restart fault handling.
                          */
-                        BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
+                        BUG_ON(!(flags & TTU_MIGRATION));
                         entry = make_migration_entry(page, pte_write(pteval));
                 }
                 swp_pte = swp_entry_to_pte(entry);
@@ -1239,7 +1239,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                 set_pte_at(mm, address, pte, swp_pte);
                 BUG_ON(pte_file(*pte));
         } else if (IS_ENABLED(CONFIG_MIGRATION) &&
-                   (TTU_ACTION(flags) == TTU_MIGRATION)) {
+                   (flags & TTU_MIGRATION)) {
                 /* Establish migration entry for a file page */
                 swp_entry_t entry;
                 entry = make_migration_entry(page, pte_write(pteval));
@@ -1252,7 +1252,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
 out_unmap:
         pte_unmap_unlock(pte, ptl);
-        if (ret != SWAP_FAIL && TTU_ACTION(flags) != TTU_MUNLOCK)
+        if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
                 mmu_notifier_invalidate_page(mm, address);
 out:
         return ret;
@@ -1539,7 +1539,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
          * locking requirements of exec(), migration skips
          * temporary VMAs until after exec() completes.
          */
-        if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page))
+        if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
                 rwc.invalid_vma = invalid_migration_vma;
 
         ret = rmap_walk(page, &rwc);
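
Every hunk above replaces an equality test on the decoded action, TTU_ACTION(flags) == X, with a plain bitwise test, flags & X. Below is a minimal, self-contained C sketch of the before/after flag layouts this implies. The concrete enum values, the TTU_ACTION_MASK name, and the OLD_/NEW_ prefixes are illustrative assumptions, not taken from this diff; only TTU_ACTION(), TTU_MUNLOCK and TTU_MIGRATION appear in the hunks themselves.

/*
 * Minimal before/after sketch of the ttu_flags encoding implied by this
 * diff.  The values and the TTU_ACTION_MASK name are assumptions for
 * illustration only.
 */
#include <stdio.h>

/* Before: one action encoded in the low byte, decoded with a mask. */
#define TTU_ACTION_MASK 0xff
#define TTU_ACTION(x)   ((x) & TTU_ACTION_MASK)
enum old_ttu_action { OLD_TTU_UNMAP = 0, OLD_TTU_MIGRATION = 1, OLD_TTU_MUNLOCK = 2 };

/* After: each action is its own bit, so a single AND test suffices. */
enum new_ttu_action { NEW_TTU_UNMAP = 1, NEW_TTU_MIGRATION = 2, NEW_TTU_MUNLOCK = 4 };

int main(void)
{
        unsigned int old_flags = OLD_TTU_MUNLOCK;       /* high bits hold modifiers */
        unsigned int new_flags = NEW_TTU_MUNLOCK;

        /* Old style: compare the decoded action for equality. */
        printf("old munlock? %d\n", TTU_ACTION(old_flags) == OLD_TTU_MUNLOCK);

        /* New style: plain bitwise test, as on the right-hand side above. */
        printf("new munlock? %d\n", !!(new_flags & NEW_TTU_MUNLOCK));
        return 0;
}

Note that the bitwise form is only equivalent to the old equality check when each action occupies its own power-of-two bit: with the old 0/1/2-style encoding, flags & TTU_MUNLOCK would also match any action whose low bits overlap, and a zero-valued action could never test true.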