author    Hugh Dickins <hugh.dickins@tiscali.co.uk>  2009-12-14 20:58:47 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 11:53:16 -0500
commit    aaa468653b4a0d11c603c48d716f765177a5a9e4
tree      3cb8c9232dd405001fa38d5519937c5e6c6f32ab
parent    570a335b8e22579e2a51a68136d2b1f907a20eec
swap_info: note SWAP_MAP_SHMEM
While we're fiddling with the swap_map values, let's assign a particular
value to shmem/tmpfs swap pages: their swap counts are never incremented,
and it helps swapoff's try_to_unuse() a little if it can immediately
distinguish those pages from process pages.

Since we've no use for SWAP_MAP_BAD | COUNT_CONTINUED, we might as well
use that 0xbf value for SWAP_MAP_SHMEM.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
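As an aside on the value choice, the following stand-alone check (an illustrative sketch, not part of the patch) shows why 0xbf is free for this use. SWAP_MAP_MAX (0x3e) and SWAP_MAP_BAD (0x3f) are assumed values for this kernel's swap.h; the remaining constants appear in the include/linux/swap.h hunk below.

/*
 * Stand-alone check, not part of the patch: why 0xbf is available.
 * SWAP_MAP_MAX (0x3e) and SWAP_MAP_BAD (0x3f) are assumed values;
 * the other constants come from the include/linux/swap.h hunk below.
 */
#include <assert.h>
#include <stdio.h>

#define SWAP_MAP_MAX	0x3e	/* Max count, in first swap_map (assumed) */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad (assumed) */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */

int main(void)
{
	/* The new marker is exactly the unused SWAP_MAP_BAD | COUNT_CONTINUED. */
	assert(SWAP_MAP_SHMEM == (SWAP_MAP_BAD | COUNT_CONTINUED));

	/* The largest legitimate count byte stays below the marker, */
	/* so a shmem entry can never be mistaken for a process count. */
	assert((SWAP_MAP_MAX | COUNT_CONTINUED) < SWAP_MAP_SHMEM);

	printf("SWAP_MAP_SHMEM = 0x%02x\n", SWAP_MAP_SHMEM);
	return 0;
}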
-rw-r--r--  include/linux/swap.h |  6
-rw-r--r--  mm/shmem.c           | 11
-rw-r--r--  mm/swapfile.c        | 47
3 files changed, 42 insertions(+), 22 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 389e7bd92cca..ac43d87b89b0 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -157,6 +157,7 @@ enum {
 #define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
 #define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
 #define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
+#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
 
 /*
  * The in-memory structure used to track swap areas.
@@ -316,6 +317,7 @@ extern swp_entry_t get_swap_page(void);
 extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
+extern void swap_shmem_alloc(swp_entry_t);
 extern int swap_duplicate(swp_entry_t);
 extern int swapcache_prepare(swp_entry_t);
 extern void swap_free(swp_entry_t);
@@ -394,6 +396,10 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
 	return 0;
 }
 
+static inline void swap_shmem_alloc(swp_entry_t swp)
+{
+}
+
 static inline int swap_duplicate(swp_entry_t swp)
 {
 	return 0;
diff --git a/mm/shmem.c b/mm/shmem.c
index 356dd99566ec..4fb41c83daca 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1017,7 +1017,14 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 		goto out;
 	}
 	mutex_unlock(&shmem_swaplist_mutex);
-out:	return found;	/* 0 or 1 or -ENOMEM */
+	/*
+	 * Can some race bring us here?  We've been holding page lock,
+	 * so I think not; but would rather try again later than BUG()
+	 */
+	unlock_page(page);
+	page_cache_release(page);
+out:
+	return (found < 0) ? found : 0;
 }
 
 /*
@@ -1080,7 +1087,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	else
 		inode = NULL;
 	spin_unlock(&info->lock);
-	swap_duplicate(swap);
+	swap_shmem_alloc(swap);
 	BUG_ON(page_mapped(page));
 	page_cache_release(page);	/* pagecache ref */
 	swap_writepage(page, wbc);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index cc5e7ebf2d2c..58bec6600167 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -548,6 +548,12 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
 	if (usage == SWAP_HAS_CACHE) {
 		VM_BUG_ON(!has_cache);
 		has_cache = 0;
+	} else if (count == SWAP_MAP_SHMEM) {
+		/*
+		 * Or we could insist on shmem.c using a special
+		 * swap_shmem_free() and free_shmem_swap_and_cache()...
+		 */
+		count = 0;
 	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
 		if (count == COUNT_CONTINUED) {
 			if (swap_count_continued(p, offset, count))
@@ -1031,7 +1037,6 @@ static int try_to_unuse(unsigned int type)
 	swp_entry_t entry;
 	unsigned int i = 0;
 	int retval = 0;
-	int shmem;
 
 	/*
 	 * When searching mms for an entry, a good strategy is to
@@ -1107,17 +1112,18 @@ static int try_to_unuse(unsigned int type)
 
 		/*
 		 * Remove all references to entry.
-		 * Whenever we reach init_mm, there's no address space
-		 * to search, but use it as a reminder to search shmem.
 		 */
-		shmem = 0;
 		swcount = *swap_map;
-		if (swap_count(swcount)) {
-			if (start_mm == &init_mm)
-				shmem = shmem_unuse(entry, page);
-			else
-				retval = unuse_mm(start_mm, entry, page);
+		if (swap_count(swcount) == SWAP_MAP_SHMEM) {
+			retval = shmem_unuse(entry, page);
+			/* page has already been unlocked and released */
+			if (retval < 0)
+				break;
+			continue;
 		}
+		if (swap_count(swcount) && start_mm != &init_mm)
+			retval = unuse_mm(start_mm, entry, page);
+
 		if (swap_count(*swap_map)) {
 			int set_start_mm = (*swap_map >= swcount);
 			struct list_head *p = &start_mm->mmlist;
@@ -1128,7 +1134,7 @@ static int try_to_unuse(unsigned int type)
 				atomic_inc(&new_start_mm->mm_users);
 				atomic_inc(&prev_mm->mm_users);
 				spin_lock(&mmlist_lock);
-				while (swap_count(*swap_map) && !retval && !shmem &&
+				while (swap_count(*swap_map) && !retval &&
 						(p = p->next) != &start_mm->mmlist) {
 					mm = list_entry(p, struct mm_struct, mmlist);
 					if (!atomic_inc_not_zero(&mm->mm_users))
@@ -1142,10 +1148,9 @@ static int try_to_unuse(unsigned int type)
 					swcount = *swap_map;
 					if (!swap_count(swcount)) /* any usage ? */
 						;
-					else if (mm == &init_mm) {
+					else if (mm == &init_mm)
 						set_start_mm = 1;
-						shmem = shmem_unuse(entry, page);
-					} else
+					else
 						retval = unuse_mm(mm, entry, page);
 
 					if (set_start_mm && *swap_map < swcount) {
@@ -1161,13 +1166,6 @@ static int try_to_unuse(unsigned int type)
 				mmput(start_mm);
 				start_mm = new_start_mm;
 			}
-			if (shmem) {
-				/* page has already been unlocked and released */
-				if (shmem > 0)
-					continue;
-				retval = shmem;
-				break;
-			}
 			if (retval) {
 				unlock_page(page);
 				page_cache_release(page);
@@ -2127,6 +2125,15 @@ bad_file:
 }
 
 /*
+ * Help swapoff by noting that swap entry belongs to shmem/tmpfs
+ * (in which case its reference count is never incremented).
+ */
+void swap_shmem_alloc(swp_entry_t entry)
+{
+	__swap_duplicate(entry, SWAP_MAP_SHMEM);
+}
+
+/*
 * increase reference count of swap entry by 1.
 */
 int swap_duplicate(swp_entry_t entry)
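To tie the mm/swapfile.c changes together, here is a small user-space model (an illustrative sketch only, not kernel code) of how a first-level swap_map byte is interpreted once SWAP_MAP_SHMEM exists. The swap_count() helper masking off SWAP_HAS_CACHE and the 0x3e value for SWAP_MAP_MAX are assumptions based on this kernel's swapfile.c, and continuation handling is deliberately simplified.

/*
 * Illustrative model only: classifies a first-level swap_map byte the
 * way the code above does after this patch.  Continuation maps are not
 * walked; a continued count is just reported as such.
 */
#include <stdio.h>

#define SWAP_MAP_MAX	0x3e	/* assumed */
#define SWAP_HAS_CACHE	0x40
#define COUNT_CONTINUED	0x80
#define SWAP_MAP_SHMEM	0xbf

static unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* drop the swapcache flag */
}

static void describe(unsigned char ent)
{
	unsigned char count = swap_count(ent);

	if (count == SWAP_MAP_SHMEM)
		printf("0x%02x: shmem/tmpfs entry, count never incremented\n", ent);
	else if (count & COUNT_CONTINUED)
		printf("0x%02x: count continued in extra swap_map pages\n", ent);
	else
		printf("0x%02x: %u process references%s\n", ent, count,
		       (ent & SWAP_HAS_CACHE) ? ", plus swapcache" : "");
}

int main(void)
{
	describe(SWAP_MAP_SHMEM);			/* shmem marker */
	describe(3 | SWAP_HAS_CACHE);			/* 3 ptes + swapcache */
	describe(SWAP_MAP_MAX | COUNT_CONTINUED);	/* overflowed count */
	return 0;
}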