author      Andi Kleen <andi@firstfloor.org>    2009-09-16 05:50:10 -0400
committer   Andi Kleen <ak@linux.intel.com>     2009-09-16 05:50:10 -0400
commit      14fa31b89c5ae79e4131da41761378a6df674352 (patch)
tree        c6c79e89e0aa0b2efeaf657d4715250a406ab699
parent      a6e04aa92965565968573a220a35b4e907385697 (diff)
HWPOISON: Use bitmask/action code for try_to_unmap behaviour
try_to_unmap currently has multiple modes (migration, munlock, normal unmap)
which are selected by magic flag variables. The logic is not very
straightforward, because each of these flags changes multiple behaviours
(e.g. migration turns off aging, not only sets up migration ptes, etc.),
and the different flags interact in magic ways.

A later patch in this series adds another mode to try_to_unmap, so
this quickly becomes unmanageable.

Replace the different flags with an action code (migration, munlock, unmap)
and some additional flags as modifiers (ignore mlock, ignore aging).
This makes the logic more straightforward and allows easier extension
to new behaviours. Change all the callers to declare what they want to do.

This patch is supposed to be a nop in behaviour. If anyone can prove
it is not, that would be a bug.
Cc: Lee.Schermerhorn@hp.com
Cc: npiggin@suse.de
Signed-off-by: Andi Kleen <ak@linux.intel.com>
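
To see the scheme in isolation: the sketch below copies the enum and the
TTU_ACTION() macro exactly as the include/linux/rmap.h hunk below defines
them into a standalone userspace program, and checks that one action code
in the low byte composes with independent modifier bits above it. The
main()/assert() harness is illustrative scaffolding only; the mapping
comments restate the call-site conversions made by this patch.

    #include <assert.h>

    /* Copied from the include/linux/rmap.h hunk of this patch. */
    enum ttu_flags {
            TTU_UNMAP = 0,                  /* unmap mode */
            TTU_MIGRATION = 1,              /* migration mode */
            TTU_MUNLOCK = 2,                /* munlock mode */
            TTU_ACTION_MASK = 0xff,

            TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
            TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
    };
    #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

    int main(void)
    {
            /* What used to be try_to_unmap(page, 1) in migration becomes: */
            enum ttu_flags f = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;

            /* Modifiers live above the action byte, so decoding is a mask,
             * not a guess about how several magic flags interact. */
            assert(TTU_ACTION(f) == TTU_MIGRATION);
            assert(f & TTU_IGNORE_MLOCK);
            assert(f & TTU_IGNORE_ACCESS);

            /* What used to be try_to_unmap(page, 0) in vmscan becomes plain
             * TTU_UNMAP: no modifiers, so mlock is respected and aging stays on. */
            assert(TTU_ACTION(TTU_UNMAP) == TTU_UNMAP);
            assert(!(TTU_UNMAP & (TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS)));
            return 0;
    }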
-rw-r--r--   include/linux/rmap.h   13
-rw-r--r--   mm/migrate.c            2
-rw-r--r--   mm/rmap.c              40
-rw-r--r--   mm/vmscan.c             2
4 files changed, 36 insertions, 21 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 8dff2ffab82c..4c4a2d4d289e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -85,7 +85,18 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
  */
 int page_referenced(struct page *, int is_locked,
 			struct mem_cgroup *cnt, unsigned long *vm_flags);
-int try_to_unmap(struct page *, int ignore_refs);
+enum ttu_flags {
+	TTU_UNMAP = 0,			/* unmap mode */
+	TTU_MIGRATION = 1,		/* migration mode */
+	TTU_MUNLOCK = 2,		/* munlock mode */
+	TTU_ACTION_MASK = 0xff,
+
+	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
+	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
+};
+#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
+
+int try_to_unmap(struct page *, enum ttu_flags flags);
 
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
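
The message above says a later patch in this series adds another mode; under
this layout that is a one-line change to the enum. A hypothetical sketch of
such an extension (the TTU_IGNORE_HWPOISON name, value, and comment are
assumptions for illustration, not part of this diff):

    /* A new behaviour is either one more action code in the low byte or,
     * as here, one more modifier bit above it; existing callers and the
     * TTU_ACTION() decoding are untouched. */
    enum ttu_flags {
            TTU_UNMAP = 0,
            TTU_MIGRATION = 1,
            TTU_MUNLOCK = 2,
            TTU_ACTION_MASK = 0xff,

            TTU_IGNORE_MLOCK = (1 << 8),
            TTU_IGNORE_ACCESS = (1 << 9),
            TTU_IGNORE_HWPOISON = (1 << 10),        /* assumed follow-up bit */
    };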
diff --git a/mm/migrate.c b/mm/migrate.c
index 939888f9ddab..e3a0cd3859a9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -669,7 +669,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	}
 
 	/* Establish migration ptes or remove ptes */
-	try_to_unmap(page, 1);
+	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
 	if (!page_mapped(page))
 		rc = move_to_new_page(newpage, page);
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -774,7 +774,7 @@ void page_remove_rmap(struct page *page)
  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  */
 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
-				int migration)
+				enum ttu_flags flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -796,11 +796,13 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
 	 */
-	if (!migration) {
+	if (!(flags & TTU_IGNORE_MLOCK)) {
 		if (vma->vm_flags & VM_LOCKED) {
 			ret = SWAP_MLOCK;
 			goto out_unmap;
 		}
+	}
+	if (!(flags & TTU_IGNORE_ACCESS)) {
 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
 			ret = SWAP_FAIL;
 			goto out_unmap;
@@ -840,12 +842,12 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * pte. do_swap_page() will wait until the migration
 			 * pte is removed and then restart fault handling.
 			 */
-			BUG_ON(!migration);
+			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
 			entry = make_migration_entry(page, pte_write(pteval));
 		}
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
-	} else if (PAGE_MIGRATION && migration) {
+	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
@@ -1014,12 +1016,13 @@ static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * 'LOCKED.
  */
-static int try_to_unmap_anon(struct page *page, int unlock, int migration)
+static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 {
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
 	unsigned int mlocked = 0;
 	int ret = SWAP_AGAIN;
+	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
 
 	if (MLOCK_PAGES && unlikely(unlock))
 		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
@@ -1035,7 +1038,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration)
 				continue;  /* must visit all unlocked vmas */
 			ret = SWAP_MLOCK;	/* saw at least one mlocked vma */
 		} else {
-			ret = try_to_unmap_one(page, vma, migration);
+			ret = try_to_unmap_one(page, vma, flags);
 			if (ret == SWAP_FAIL || !page_mapped(page))
 				break;
 		}
@@ -1059,8 +1062,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration)
 /**
  * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
  * @page: the page to unmap/unlock
- * @unlock: request for unlock rather than unmap [unlikely]
- * @migration: unmapping for migration - ignored if @unlock
+ * @flags: action and flags
  *
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the address_space struct it points to.
@@ -1072,7 +1074,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration)
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * 'LOCKED.
 */
-static int try_to_unmap_file(struct page *page, int unlock, int migration)
+static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 {
 	struct address_space *mapping = page->mapping;
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1084,6 +1086,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
 	unsigned int mlocked = 0;
+	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
 
 	if (MLOCK_PAGES && unlikely(unlock))
 		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
@@ -1096,7 +1099,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
 				continue;	/* must visit all vmas */
 			ret = SWAP_MLOCK;
 		} else {
-			ret = try_to_unmap_one(page, vma, migration);
+			ret = try_to_unmap_one(page, vma, flags);
 			if (ret == SWAP_FAIL || !page_mapped(page))
 				goto out;
 		}
@@ -1121,7 +1124,8 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
 			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
 			goto out;		/* no need to look further */
 		}
-		if (!MLOCK_PAGES && !migration && (vma->vm_flags & VM_LOCKED))
+		if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
+		    (vma->vm_flags & VM_LOCKED))
 			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
@@ -1155,7 +1159,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (!MLOCK_PAGES && !migration &&
+			if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
 			    (vma->vm_flags & VM_LOCKED))
 				continue;
 			cursor = (unsigned long) vma->vm_private_data;
@@ -1195,7 +1199,7 @@ out:
 /**
  * try_to_unmap - try to remove all page table mappings to a page
  * @page: the page to get unmapped
- * @migration: migration flag
+ * @flags: action and flags
  *
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path. Caller must hold the page lock.
@@ -1206,16 +1210,16 @@ out:
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
-int try_to_unmap(struct page *page, int migration)
+int try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	int ret;
 
 	BUG_ON(!PageLocked(page));
 
 	if (PageAnon(page))
-		ret = try_to_unmap_anon(page, 0, migration);
+		ret = try_to_unmap_anon(page, flags);
 	else
-		ret = try_to_unmap_file(page, 0, migration);
+		ret = try_to_unmap_file(page, flags);
 	if (ret != SWAP_MLOCK && !page_mapped(page))
 		ret = SWAP_SUCCESS;
 	return ret;
@@ -1240,8 +1244,8 @@ int try_to_munlock(struct page *page)
 	VM_BUG_ON(!PageLocked(page) || PageLRU(page));
 
 	if (PageAnon(page))
-		return try_to_unmap_anon(page, 1, 0);
+		return try_to_unmap_anon(page, TTU_MUNLOCK);
 	else
-		return try_to_unmap_file(page, 1, 0);
+		return try_to_unmap_file(page, TTU_MUNLOCK);
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ba8228e0a806..ab3b0ad3ce52 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -659,7 +659,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (try_to_unmap(page, 0)) {
+			switch (try_to_unmap(page, TTU_UNMAP)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
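
The vmscan hunk above is the consumer side of the contract documented in
try_to_unmap's kerneldoc (SWAP_SUCCESS, SWAP_AGAIN, SWAP_FAIL, SWAP_MLOCK).
A standalone sketch of that dispatch, with local stand-ins for the kernel's
definitions; the disposition strings paraphrase shrink_page_list()'s goto
targets and are illustrative, not kernel code:

    #include <stdio.h>

    /* Local stand-ins for the kernel's return codes (values illustrative). */
    enum { SWAP_SUCCESS, SWAP_AGAIN, SWAP_FAIL, SWAP_MLOCK };

    /* Mirrors the switch in the shrink_page_list() hunk above. */
    static const char *unmap_disposition(int ret)
    {
            switch (ret) {
            case SWAP_FAIL:
                    return "unswappable: reactivate the page";
            case SWAP_AGAIN:
                    return "mappings remain: keep the page and retry later";
            case SWAP_MLOCK:
                    return "mlocked: keep the page off the reclaim lists";
            default:
                    return "fully unmapped: proceed to pageout";
            }
    }

    int main(void)
    {
            printf("%s\n", unmap_disposition(SWAP_MLOCK));
            return 0;
    }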