author		Hugh Dickins <hugh.dickins@tiscali.co.uk>	2009-12-14 20:58:47 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 11:53:16 -0500
commit		aaa468653b4a0d11c603c48d716f765177a5a9e4
tree		3cb8c9232dd405001fa38d5519937c5e6c6f32ab /mm/swapfile.c
parent		570a335b8e22579e2a51a68136d2b1f907a20eec
swap_info: note SWAP_MAP_SHMEM
While we're fiddling with the swap_map values, let's assign a particular
value to shmem/tmpfs swap pages: their swap counts are never incremented,
and it helps swapoff's try_to_unuse() a little if it can immediately
distinguish those pages from process pages.

Since we've no use for SWAP_MAP_BAD | COUNT_CONTINUED, we might as well
use that 0xbf value for SWAP_MAP_SHMEM.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
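For context, the first swap_map byte that this series works with packs a
small per-entry reference count together with a few flag values. A sketch
of the relevant constants, modelled on include/linux/swap.h as of this
series (the values besides SWAP_MAP_SHMEM are assumed context here, not
part of this patch):

/*
 * Sketch of the first-swap_map byte encoding, as assumed from the
 * surrounding series; see include/linux/swap.h for the authoritative set.
 */
#define SWAP_MAP_MAX	0x3e	/* max count that fits in the byte itself */
#define SWAP_MAP_BAD	0x3f	/* marks a bad swap slot */
#define SWAP_HAS_CACHE	0x40	/* entry also has a swap-cache page */
#define COUNT_CONTINUED	0x80	/* count continues in a continuation page */

/*
 * SWAP_MAP_BAD | COUNT_CONTINUED (0x3f | 0x80 == 0xbf) is a combination
 * with no other meaning: a bad slot never has its count continued.  So
 * this patch reuses that value to mark entries owned by shmem/tmpfs,
 * whose count is never incremented.
 */
#define SWAP_MAP_SHMEM	0xbf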
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	47
1 file changed, 27 insertions(+), 20 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index cc5e7ebf2d2c..58bec6600167 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -548,6 +548,12 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
 	if (usage == SWAP_HAS_CACHE) {
 		VM_BUG_ON(!has_cache);
 		has_cache = 0;
+	} else if (count == SWAP_MAP_SHMEM) {
+		/*
+		 * Or we could insist on shmem.c using a special
+		 * swap_shmem_free() and free_shmem_swap_and_cache()...
+		 */
+		count = 0;
 	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
 		if (count == COUNT_CONTINUED) {
 			if (swap_count_continued(p, offset, count))
@@ -1031,7 +1037,6 @@ static int try_to_unuse(unsigned int type)
 	swp_entry_t entry;
 	unsigned int i = 0;
 	int retval = 0;
-	int shmem;
 
 	/*
 	 * When searching mms for an entry, a good strategy is to
@@ -1107,17 +1112,18 @@ static int try_to_unuse(unsigned int type)
 
 		/*
 		 * Remove all references to entry.
-		 * Whenever we reach init_mm, there's no address space
-		 * to search, but use it as a reminder to search shmem.
 		 */
-		shmem = 0;
 		swcount = *swap_map;
-		if (swap_count(swcount)) {
-			if (start_mm == &init_mm)
-				shmem = shmem_unuse(entry, page);
-			else
-				retval = unuse_mm(start_mm, entry, page);
+		if (swap_count(swcount) == SWAP_MAP_SHMEM) {
+			retval = shmem_unuse(entry, page);
+			/* page has already been unlocked and released */
+			if (retval < 0)
+				break;
+			continue;
 		}
+		if (swap_count(swcount) && start_mm != &init_mm)
+			retval = unuse_mm(start_mm, entry, page);
+
 		if (swap_count(*swap_map)) {
 			int set_start_mm = (*swap_map >= swcount);
 			struct list_head *p = &start_mm->mmlist;
@@ -1128,7 +1134,7 @@ static int try_to_unuse(unsigned int type)
 			atomic_inc(&new_start_mm->mm_users);
 			atomic_inc(&prev_mm->mm_users);
 			spin_lock(&mmlist_lock);
-			while (swap_count(*swap_map) && !retval && !shmem &&
+			while (swap_count(*swap_map) && !retval &&
 					(p = p->next) != &start_mm->mmlist) {
 				mm = list_entry(p, struct mm_struct, mmlist);
 				if (!atomic_inc_not_zero(&mm->mm_users))
@@ -1142,10 +1148,9 @@ static int try_to_unuse(unsigned int type)
 				swcount = *swap_map;
 				if (!swap_count(swcount)) /* any usage ? */
 					;
-				else if (mm == &init_mm) {
+				else if (mm == &init_mm)
 					set_start_mm = 1;
-					shmem = shmem_unuse(entry, page);
-				} else
+				else
 					retval = unuse_mm(mm, entry, page);
 
 				if (set_start_mm && *swap_map < swcount) {
@@ -1161,13 +1166,6 @@ static int try_to_unuse(unsigned int type)
 			mmput(start_mm);
 			start_mm = new_start_mm;
 		}
-		if (shmem) {
-			/* page has already been unlocked and released */
-			if (shmem > 0)
-				continue;
-			retval = shmem;
-			break;
-		}
 		if (retval) {
 			unlock_page(page);
 			page_cache_release(page);
@@ -2127,6 +2125,15 @@ bad_file:
 }
 
 /*
+ * Help swapoff by noting that swap entry belongs to shmem/tmpfs
+ * (in which case its reference count is never incremented).
+ */
+void swap_shmem_alloc(swp_entry_t entry)
+{
+	__swap_duplicate(entry, SWAP_MAP_SHMEM);
+}
+
+/*
  * increase reference count of swap entry by 1.
  */
 int swap_duplicate(swp_entry_t entry)
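The shmem side of the change is outside this diffstat. The intended caller
of the new helper is shmem_writepage() in mm/shmem.c, which would now
record shmem ownership of the entry instead of taking an ordinary
reference with swap_duplicate(). A hypothetical fragment of that call
site, with the surrounding locking and bookkeeping elided:

	/*
	 * Hypothetical fragment of shmem_writepage(): the companion
	 * change (not shown in this diff) would replace the old
	 * swap_duplicate() call with swap_shmem_alloc(), so the entry's
	 * count becomes SWAP_MAP_SHMEM rather than an ordinary refcount.
	 */
	swap = get_swap_page();
	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		/* ... move page from page cache to swap cache ... */
		swap_shmem_alloc(swap);		/* was: swap_duplicate(swap) */
		/* ... */
		swap_writepage(page, wbc);
		return 0;
	}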