aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKonstantin Khlebnikov <koct9i@gmail.com>2014-06-04 19:10:52 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-04 19:54:12 -0400
commitdaa5ba768b9e15da8867824d2f1e8d455f1acac2 (patch)
tree03bbfb4962a7f7c3a95284f4c5e79a102898be71
parent3d92860f979f725a9c10c2fc26c0415a4332adbf (diff)
mm/rmap.c: cleanup ttu_flags
Transform action part of ttu_flags into individual bits. These flags aren't part of any user-space visible api or even trace events. Signed-off-by: Konstantin Khlebnikov <koct9i@gmail.com> Cc: Rik van Riel <riel@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/rmap.h7
-rw-r--r--mm/rmap.c10
2 files changed, 8 insertions, 9 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 9be55c7617da..be574506e6a9 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -72,10 +72,9 @@ struct anon_vma_chain {
72}; 72};
73 73
74enum ttu_flags { 74enum ttu_flags {
75 TTU_UNMAP = 0, /* unmap mode */ 75 TTU_UNMAP = 1, /* unmap mode */
76 TTU_MIGRATION = 1, /* migration mode */ 76 TTU_MIGRATION = 2, /* migration mode */
77 TTU_MUNLOCK = 2, /* munlock mode */ 77 TTU_MUNLOCK = 4, /* munlock mode */
78 TTU_ACTION_MASK = 0xff,
79 78
80 TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ 79 TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
81 TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ 80 TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
diff --git a/mm/rmap.c b/mm/rmap.c
index ab74290d185d..ea8e20d75b29 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1162,7 +1162,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1162 if (vma->vm_flags & VM_LOCKED) 1162 if (vma->vm_flags & VM_LOCKED)
1163 goto out_mlock; 1163 goto out_mlock;
1164 1164
1165 if (TTU_ACTION(flags) == TTU_MUNLOCK) 1165 if (flags & TTU_MUNLOCK)
1166 goto out_unmap; 1166 goto out_unmap;
1167 } 1167 }
1168 if (!(flags & TTU_IGNORE_ACCESS)) { 1168 if (!(flags & TTU_IGNORE_ACCESS)) {
@@ -1230,7 +1230,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1230 * pte. do_swap_page() will wait until the migration 1230 * pte. do_swap_page() will wait until the migration
1231 * pte is removed and then restart fault handling. 1231 * pte is removed and then restart fault handling.
1232 */ 1232 */
1233 BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); 1233 BUG_ON(!(flags & TTU_MIGRATION));
1234 entry = make_migration_entry(page, pte_write(pteval)); 1234 entry = make_migration_entry(page, pte_write(pteval));
1235 } 1235 }
1236 swp_pte = swp_entry_to_pte(entry); 1236 swp_pte = swp_entry_to_pte(entry);
@@ -1239,7 +1239,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1239 set_pte_at(mm, address, pte, swp_pte); 1239 set_pte_at(mm, address, pte, swp_pte);
1240 BUG_ON(pte_file(*pte)); 1240 BUG_ON(pte_file(*pte));
1241 } else if (IS_ENABLED(CONFIG_MIGRATION) && 1241 } else if (IS_ENABLED(CONFIG_MIGRATION) &&
1242 (TTU_ACTION(flags) == TTU_MIGRATION)) { 1242 (flags & TTU_MIGRATION)) {
1243 /* Establish migration entry for a file page */ 1243 /* Establish migration entry for a file page */
1244 swp_entry_t entry; 1244 swp_entry_t entry;
1245 entry = make_migration_entry(page, pte_write(pteval)); 1245 entry = make_migration_entry(page, pte_write(pteval));
@@ -1252,7 +1252,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1252 1252
1253out_unmap: 1253out_unmap:
1254 pte_unmap_unlock(pte, ptl); 1254 pte_unmap_unlock(pte, ptl);
1255 if (ret != SWAP_FAIL && TTU_ACTION(flags) != TTU_MUNLOCK) 1255 if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
1256 mmu_notifier_invalidate_page(mm, address); 1256 mmu_notifier_invalidate_page(mm, address);
1257out: 1257out:
1258 return ret; 1258 return ret;
@@ -1539,7 +1539,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
1539 * locking requirements of exec(), migration skips 1539 * locking requirements of exec(), migration skips
1540 * temporary VMAs until after exec() completes. 1540 * temporary VMAs until after exec() completes.
1541 */ 1541 */
1542 if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page)) 1542 if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
1543 rwc.invalid_vma = invalid_migration_vma; 1543 rwc.invalid_vma = invalid_migration_vma;
1544 1544
1545 ret = rmap_walk(page, &rwc); 1545 ret = rmap_walk(page, &rwc);