Diffstat (limited to 'mm/rmap.c')

-rw-r--r--	mm/rmap.c	110
1 file changed, 31 insertions(+), 79 deletions(-)
@@ -788,6 +788,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			ret = SWAP_MLOCK;
 			goto out_unmap;
 		}
+		if (MLOCK_PAGES && TTU_ACTION(flags) == TTU_MUNLOCK)
+			goto out_unmap;
 	}
 	if (!(flags & TTU_IGNORE_ACCESS)) {
 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
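For TTU_MUNLOCK the new early exit above is enough: try_to_unmap_one() only has to learn whether this vma holds the page mlocked, so once past the VM_LOCKED check it can bail out without touching the pte. TTU_ACTION() just masks the action out of the modifier bits; a sketch of the assumed ttu_flags layout (per this era's include/linux/rmap.h, worth verifying against the tree):

	/* action is the low byte of ttu_flags; modifiers live above it */
	enum ttu_flags {
		TTU_UNMAP = 0,			/* unmap mode */
		TTU_MIGRATION = 1,		/* migration mode */
		TTU_MUNLOCK = 2,		/* munlock mode */
		TTU_ACTION_MASK = 0xff,

		TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
		TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	};
	#define TTU_ACTION(x)	((x) & TTU_ACTION_MASK)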
@@ -853,12 +855,22 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	} else
 		dec_mm_counter(mm, file_rss);
 
-
 	page_remove_rmap(page);
 	page_cache_release(page);
 
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
+
+	if (MLOCK_PAGES && ret == SWAP_MLOCK) {
+		ret = SWAP_AGAIN;
+		if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+			if (vma->vm_flags & VM_LOCKED) {
+				mlock_vma_page(page);
+				ret = SWAP_MLOCK;
+			}
+			up_read(&vma->vm_mm->mmap_sem);
+		}
+	}
 out:
 	return ret;
 }
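With the out_unmap block above, mlock recovery now lives in try_to_unmap_one() itself: on a SWAP_MLOCK result it retries under down_read_trylock() of mmap_sem and reports SWAP_MLOCK only when it really mlocked the page, degrading to SWAP_AGAIN when the semaphore is contended. For context, a hedged paraphrase (not a quote) of how the reclaim-side caller of this era, shrink_page_list() in mm/vmscan.c, separates the outcomes:

	switch (try_to_unmap(page, TTU_UNMAP)) {
	case SWAP_FAIL:
		goto activate_locked;	/* unmap failed: keep page active */
	case SWAP_AGAIN:
		goto keep_locked;	/* transient failure: retry later */
	case SWAP_MLOCK:
		goto cull_mlocked;	/* page is mlocked: unevictable LRU */
	case SWAP_SUCCESS:
		;			/* fully unmapped: try to free below */
	}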
@@ -980,23 +992,6 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 	return ret;
 }
 
-/*
- * common handling for pages mapped in VM_LOCKED vmas
- */
-static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
-{
-	int mlocked = 0;
-
-	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
-		if (vma->vm_flags & VM_LOCKED) {
-			mlock_vma_page(page);
-			mlocked++;	/* really mlocked the page */
-		}
-		up_read(&vma->vm_mm->mmap_sem);
-	}
-	return mlocked;
-}
-
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
@@ -1017,42 +1012,19 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 {
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
-	unsigned int mlocked = 0;
 	int ret = SWAP_AGAIN;
-	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-	if (MLOCK_PAGES && unlikely(unlock))
-		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
 
 	anon_vma = page_lock_anon_vma(page);
 	if (!anon_vma)
 		return ret;
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!((vma->vm_flags & VM_LOCKED) &&
-			      page_mapped_in_vma(page, vma)))
-				continue;  /* must visit all unlocked vmas */
-			ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
-		} else {
-			ret = try_to_unmap_one(page, vma, flags);
-			if (ret == SWAP_FAIL || !page_mapped(page))
-				break;
-		}
-		if (ret == SWAP_MLOCK) {
-			mlocked = try_to_mlock_page(page, vma);
-			if (mlocked)
-				break;	/* stop if actually mlocked page */
-		}
+		ret = try_to_unmap_one(page, vma, flags);
+		if (ret != SWAP_AGAIN || !page_mapped(page))
+			break;
 	}
 
 	page_unlock_anon_vma(anon_vma);
-
-	if (mlocked)
-		ret = SWAP_MLOCK;	/* actually mlocked the page */
-	else if (ret == SWAP_MLOCK)
-		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
-
 	return ret;
 }
 
@@ -1082,42 +1054,27 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	unsigned long max_nl_cursor = 0;
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
-	unsigned int mlocked = 0;
-	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-	if (MLOCK_PAGES && unlikely(unlock))
-		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!((vma->vm_flags & VM_LOCKED) &&
-			      page_mapped_in_vma(page, vma)))
-				continue;	/* must visit all vmas */
-			ret = SWAP_MLOCK;
-		} else {
-			ret = try_to_unmap_one(page, vma, flags);
-			if (ret == SWAP_FAIL || !page_mapped(page))
-				goto out;
-		}
-		if (ret == SWAP_MLOCK) {
-			mlocked = try_to_mlock_page(page, vma);
-			if (mlocked)
-				goto out;	/* stop if actually mlocked page */
-		}
+		ret = try_to_unmap_one(page, vma, flags);
+		if (ret != SWAP_AGAIN || !page_mapped(page))
+			goto out;
 	}
 
 	if (list_empty(&mapping->i_mmap_nonlinear))
 		goto out;
 
+	/*
+	 * We don't bother to try to find the munlocked page in nonlinears.
+	 * It's costly. Instead, later, page reclaim logic may call
+	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
+	 */
+	if (TTU_ACTION(flags) == TTU_MUNLOCK)
+		goto out;
+
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!(vma->vm_flags & VM_LOCKED))
-				continue;	/* must visit all vmas */
-			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
-			goto out;		/* no need to look further */
-		}
 		if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
 		    (vma->vm_flags & VM_LOCKED))
 			continue;
@@ -1159,10 +1116,9 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 		cursor = (unsigned long) vma->vm_private_data;
 		while ( cursor < max_nl_cursor &&
 			cursor < vma->vm_end - vma->vm_start) {
-			ret = try_to_unmap_cluster(cursor, &mapcount,
-							vma, page);
-			if (ret == SWAP_MLOCK)
-				mlocked = 2;	/* to return below */
+			if (try_to_unmap_cluster(cursor, &mapcount,
+						vma, page) == SWAP_MLOCK)
+				ret = SWAP_MLOCK;
 			cursor += CLUSTER_SIZE;
 			vma->vm_private_data = (void *) cursor;
 			if ((int)mapcount <= 0)
@@ -1183,10 +1139,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 		vma->vm_private_data = NULL;
 out:
 	spin_unlock(&mapping->i_mmap_lock);
-	if (mlocked)
-		ret = SWAP_MLOCK;	/* actually mlocked the page */
-	else if (ret == SWAP_MLOCK)
-		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
 	return ret;
 }
 
@@ -1229,7 +1181,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
  *
  * Return values are:
  *
- * SWAP_SUCCESS	- no vma's holding page mlocked.
+ * SWAP_AGAIN	- no vma is holding page mlocked, or,
  * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
  * SWAP_MLOCK	- page is now mlocked.
  */
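The rewritten comment captures try_to_munlock()'s narrowed contract: SWAP_SUCCESS is gone, and SWAP_AGAIN now covers both "no vma holds the page mlocked" and "couldn't take mmap_sem", with the latter recovered lazily as the nonlinear-hunk comment explains. A caller thus needs only one test; a paraphrased sketch (an illustration, not the mm/mlock.c source) of the munlock path:

	if (try_to_munlock(page) == SWAP_MLOCK) {
		/* a VM_LOCKED vma re-mlocked the page: leave it
		 * on the unevictable list */
	} else {
		/* SWAP_AGAIN: no mlocking vma seen, or mmap_sem was
		 * contended and reclaim recovers PG_mlocked lazily;
		 * the page may go back to a normal LRU */
		putback_lru_page(page);
	}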