author     Hugh Dickins <hugh.dickins@tiscali.co.uk>  2009-12-14 20:58:58 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 11:53:17 -0500
commit     53f79acb6ecb648afd63e0f13deba167f1a934df (patch)
tree       48d9b998fa45c4fe510447d17cdc050f31be5e23 /mm/rmap.c
parent     3ca7b3c5b64d35fe02c35b5d44c2c58b49499fee (diff)
mm: mlocking in try_to_unmap_one
There's contorted mlock/munlock handling in try_to_unmap_anon() and
try_to_unmap_file(), which we'd prefer not to repeat for KSM swapping.
Simplify it by moving it all down into try_to_unmap_one().

One thing is then lost, try_to_munlock()'s distinction between when no vma
holds the page mlocked, and when a vma does mlock it, but we could not get
mmap_sem to set the page flag.  But its only caller takes no interest in
that distinction (and is better testing SWAP_MLOCK anyway), so let's keep
the code simple and return SWAP_AGAIN for both cases.

try_to_unmap_file()'s TTU_MUNLOCK nonlinear handling was particularly
amusing: once unravelled, it turns out to have been choosing between two
different ways of doing the same nothing.  Ah, no, one way was actually
returning SWAP_FAIL when it meant to return SWAP_SUCCESS.

[kosaki.motohiro@jp.fujitsu.com: comment adding to mlocking in try_to_unmap_one]
[akpm@linux-foundation.org: remove test of MLOCK_PAGES]
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
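[Editor's note: for readers new to this area, PG_mlocked marks a page as
unevictable because some VM_LOCKED vma maps it, and try_to_munlock() is the
reverse-map walk that decides whether a page being munlocked is still pinned
by another mlocked vma.  Below is a minimal, runnable userspace demo of the
mlock()/munlock() semantics this machinery maintains -- illustrative context
only, not part of the patch; it may need a raised RLIMIT_MEMLOCK or
privileges to succeed.]

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long pagesize = sysconf(_SC_PAGESIZE);
            char *buf = NULL;

            /* page-aligned buffer so the lock covers exactly one page */
            if (posix_memalign((void **)&buf, pagesize, pagesize))
                    return 1;
            memset(buf, 0, pagesize);

            /* kernel marks the page unevictable (PG_mlocked, set lazily) */
            if (mlock(buf, pagesize) != 0)
                    perror("mlock");
            else
                    puts("page locked: kernel must keep it resident");

            /* the munlock path ends up in try_to_munlock(): the page
             * becomes evictable again only if no other VM_LOCKED vma
             * still maps it */
            if (munlock(buf, pagesize) != 0)
                    perror("munlock");
            else
                    puts("page unlocked: evictable again");

            free(buf);
            return 0;
    }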
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c | 110
1 file changed, 31 insertions(+), 79 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index f06cee48eca..c3d6dc4223a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -788,6 +788,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			ret = SWAP_MLOCK;
 			goto out_unmap;
 		}
+		if (MLOCK_PAGES && TTU_ACTION(flags) == TTU_MUNLOCK)
+			goto out_unmap;
 	}
 	if (!(flags & TTU_IGNORE_ACCESS)) {
 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
@@ -853,12 +855,22 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	} else
 		dec_mm_counter(mm, file_rss);
 
-
 	page_remove_rmap(page);
 	page_cache_release(page);
 
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
+
+	if (MLOCK_PAGES && ret == SWAP_MLOCK) {
+		ret = SWAP_AGAIN;
+		if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+			if (vma->vm_flags & VM_LOCKED) {
+				mlock_vma_page(page);
+				ret = SWAP_MLOCK;
+			}
+			up_read(&vma->vm_mm->mmap_sem);
+		}
+	}
 out:
 	return ret;
 }
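[Editor's note: the down_read_trylock() in the block above is the key trick.
try_to_unmap_one() already holds the page lock, while the mlock paths take
mmap_sem before touching the page, so sleeping on mmap_sem here could invert
that ordering; on trylock failure the code simply reports SWAP_AGAIN and lets
reclaim retry later.  A runnable userspace sketch of the same trylock-or-defer
pattern follows -- every name in it is invented for illustration (compile
with -pthread):]

    #include <pthread.h>
    #include <stdio.h>

    enum { SWAP_AGAIN, SWAP_MLOCK };	/* mimic the rmap return codes */

    static pthread_rwlock_t fake_mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
    static int fake_vm_locked = 1;	/* stand-in for vma->vm_flags & VM_LOCKED */

    /* Mirror of the block added above: the caller already holds another
     * lock (the page lock, in the kernel), so we must not sleep on this
     * one; on failure we report SWAP_AGAIN and the caller retries later. */
    static int check_and_mlock(void)
    {
            int ret = SWAP_AGAIN;

            if (pthread_rwlock_tryrdlock(&fake_mmap_sem) == 0) {
                    if (fake_vm_locked)
                            ret = SWAP_MLOCK;	/* "mlock_vma_page(page)" */
                    pthread_rwlock_unlock(&fake_mmap_sem);
            }
            return ret;
    }

    int main(void)
    {
            printf("check_and_mlock() = %s\n",
                   check_and_mlock() == SWAP_MLOCK ? "SWAP_MLOCK" : "SWAP_AGAIN");
            return 0;
    }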
@@ -980,23 +992,6 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 	return ret;
 }
 
-/*
- * common handling for pages mapped in VM_LOCKED vmas
- */
-static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
-{
-	int mlocked = 0;
-
-	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
-		if (vma->vm_flags & VM_LOCKED) {
-			mlock_vma_page(page);
-			mlocked++;	/* really mlocked the page */
-		}
-		up_read(&vma->vm_mm->mmap_sem);
-	}
-	return mlocked;
-}
-
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
@@ -1017,42 +1012,19 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 {
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
-	unsigned int mlocked = 0;
 	int ret = SWAP_AGAIN;
-	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-	if (MLOCK_PAGES && unlikely(unlock))
-		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
 
 	anon_vma = page_lock_anon_vma(page);
 	if (!anon_vma)
 		return ret;
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!((vma->vm_flags & VM_LOCKED) &&
-			      page_mapped_in_vma(page, vma)))
-				continue;  /* must visit all unlocked vmas */
-			ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
-		} else {
-			ret = try_to_unmap_one(page, vma, flags);
-			if (ret == SWAP_FAIL || !page_mapped(page))
-				break;
-		}
-		if (ret == SWAP_MLOCK) {
-			mlocked = try_to_mlock_page(page, vma);
-			if (mlocked)
-				break;	/* stop if actually mlocked page */
-		}
+		ret = try_to_unmap_one(page, vma, flags);
+		if (ret != SWAP_AGAIN || !page_mapped(page))
+			break;
 	}
 
 	page_unlock_anon_vma(anon_vma);
-
-	if (mlocked)
-		ret = SWAP_MLOCK;	/* actually mlocked the page */
-	else if (ret == SWAP_MLOCK)
-		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
-
 	return ret;
 }
 
@@ -1082,42 +1054,27 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	unsigned long max_nl_cursor = 0;
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
-	unsigned int mlocked = 0;
-	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-	if (MLOCK_PAGES && unlikely(unlock))
-		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!((vma->vm_flags & VM_LOCKED) &&
-			      page_mapped_in_vma(page, vma)))
-				continue;	/* must visit all vmas */
-			ret = SWAP_MLOCK;
-		} else {
-			ret = try_to_unmap_one(page, vma, flags);
-			if (ret == SWAP_FAIL || !page_mapped(page))
-				goto out;
-		}
-		if (ret == SWAP_MLOCK) {
-			mlocked = try_to_mlock_page(page, vma);
-			if (mlocked)
-				goto out;	/* stop if actually mlocked page */
-		}
+		ret = try_to_unmap_one(page, vma, flags);
+		if (ret != SWAP_AGAIN || !page_mapped(page))
+			goto out;
 	}
 
 	if (list_empty(&mapping->i_mmap_nonlinear))
 		goto out;
 
+	/*
+	 * We don't bother to try to find the munlocked page in nonlinears.
+	 * It's costly. Instead, later, page reclaim logic may call
+	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
+	 */
+	if (TTU_ACTION(flags) == TTU_MUNLOCK)
+		goto out;
+
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!(vma->vm_flags & VM_LOCKED))
-				continue;	/* must visit all vmas */
-			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
-			goto out;		/* no need to look further */
-		}
 		if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
 		    (vma->vm_flags & VM_LOCKED))
 			continue;
@@ -1159,10 +1116,9 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 		cursor = (unsigned long) vma->vm_private_data;
 		while ( cursor < max_nl_cursor &&
 			cursor < vma->vm_end - vma->vm_start) {
-			ret = try_to_unmap_cluster(cursor, &mapcount,
-							vma, page);
-			if (ret == SWAP_MLOCK)
-				mlocked = 2;	/* to return below */
+			if (try_to_unmap_cluster(cursor, &mapcount,
+						vma, page) == SWAP_MLOCK)
+				ret = SWAP_MLOCK;
 			cursor += CLUSTER_SIZE;
 			vma->vm_private_data = (void *) cursor;
 			if ((int)mapcount <= 0)
@@ -1183,10 +1139,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 		vma->vm_private_data = NULL;
 out:
 	spin_unlock(&mapping->i_mmap_lock);
-	if (mlocked)
-		ret = SWAP_MLOCK;	/* actually mlocked the page */
-	else if (ret == SWAP_MLOCK)
-		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
 	return ret;
 }
 
@@ -1229,7 +1181,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
  *
  * Return values are:
  *
- *	SWAP_SUCCESS	- no vma's holding page mlocked.
+ *	SWAP_AGAIN	- no vma is holding page mlocked, or,
  *	SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
  *	SWAP_MLOCK	- page is now mlocked.
  */
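[Editor's note: with SWAP_SUCCESS gone from the documentation above, a caller
of try_to_munlock() needs only one test, exactly as the commit message argues.
A runnable mock of that caller-side logic follows -- the mock function and
its parameter are assumptions for illustration, not the actual mm/mlock.c
code of this era:]

    #include <stdio.h>

    enum ttu_ret { SWAP_SUCCESS, SWAP_AGAIN, SWAP_FAIL, SWAP_MLOCK };

    /* Mock of try_to_munlock() under the new convention: SWAP_MLOCK when
     * some VM_LOCKED vma still maps the page; SWAP_AGAIN both when no vma
     * holds it mlocked and when mmap_sem couldn't be taken -- the two
     * cases this patch deliberately stops distinguishing. */
    static enum ttu_ret mock_try_to_munlock(int still_mlocked_somewhere)
    {
            return still_mlocked_somewhere ? SWAP_MLOCK : SWAP_AGAIN;
    }

    int main(void)
    {
            for (int pinned = 0; pinned <= 1; pinned++) {
                    if (mock_try_to_munlock(pinned) == SWAP_MLOCK)
                            puts("page stays unevictable (re-mlocked)");
                    else
                            puts("page goes back to the LRU; reclaim can "
                                 "recover PG_mlocked lazily if we raced");
            }
            return 0;
    }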