author	Hugh Dickins <hugh@veritas.com>	2009-01-06 17:39:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-06 18:59:06 -0500
commit	c60aa176c6de82703f064082b909496fc4fee956 (patch)
tree	2a6e58bda416af2c87f470023041f7049dcf1167 /mm
parent	20137a490f397d9c01fc9fadd83a8d198bda4477 (diff)
swapfile: swap allocation cycle if nonrot
Though attempting to find free clusters (Andrea), swap allocation has
always restarted its searches from the beginning of the swap area (sct),
to reduce seek times between swap pages, by not scattering them all over
the partition.

But on a solidstate swap device, seeks are cheap, and block remapping to
level the wear may be limited by zones: in that case it's better to cycle
around the whole partition.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Joern Engel <joern@logfs.org>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Donjun Shin <djshin90@gmail.com>
Cc: Tejun Heo <teheo@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
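[Editor's illustration, not part of the commit] A minimal userspace sketch of
the restart policy described above, under toy assumptions: swap_map,
cluster_next and lowest_bit stand in for fields of struct swap_info_struct,
and scan_swap_map_model() is a hypothetical simplification that ignores the
clustering, locking and latency limiting done by the real scan_swap_map().

	/*
	 * Toy model (not kernel code): a rotating disk restarts its search
	 * from the lowest free slot to keep swap packed together, while a
	 * solid-state device carries on from wherever the previous
	 * allocation ended, cycling around the whole area.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	#define NSLOTS 16			/* slot 0 reserved, as in real swap */

	static unsigned char swap_map[NSLOTS];	/* nonzero: slot in use */
	static unsigned int cluster_next = 10;	/* where the last search left off */
	static unsigned int lowest_bit = 1;	/* first usable slot */

	static int scan_swap_map_model(bool solidstate)
	{
		unsigned int scan_base = solidstate ? cluster_next : lowest_bit;
		unsigned int offset = scan_base;
		unsigned int i;

		for (i = 0; i < NSLOTS; i++) {
			if (!swap_map[offset]) {
				swap_map[offset] = 1;
				cluster_next = offset + 1;
				if (cluster_next >= NSLOTS)
					cluster_next = lowest_bit;
				return (int)offset;
			}
			offset++;
			if (offset >= NSLOTS)
				offset = lowest_bit;	/* wrap, skipping slot 0 */
		}
		return -1;				/* area is full */
	}

	int main(void)
	{
		printf("solid-state: slot %d\n", scan_swap_map_model(true));	/* 10 */
		printf("rotating:    slot %d\n", scan_swap_map_model(false));	/* 1 */
		return 0;
	}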
Diffstat (limited to 'mm')
 mm/swapfile.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 46 insertions(+), 4 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b0f56603b9be..763210732b5f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -169,6 +169,7 @@ static int wait_for_discard(void *word)
 static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 {
 	unsigned long offset;
+	unsigned long scan_base;
 	unsigned long last_in_cluster = 0;
 	int latency_ration = LATENCY_LIMIT;
 	int found_free_cluster = 0;
@@ -181,10 +182,11 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 	 * all over the entire swap partition, so that we reduce
 	 * overall disk seek times between swap pages.  -- sct
 	 * But we do now try to find an empty cluster.  -Andrea
+	 * And we let swap pages go all over an SSD partition.  Hugh
 	 */
 
 	si->flags += SWP_SCANNING;
-	offset = si->cluster_next;
+	scan_base = offset = si->cluster_next;
 
 	if (unlikely(!si->cluster_nr--)) {
 		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
@@ -206,7 +208,16 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 		}
 		spin_unlock(&swap_lock);
 
-		offset = si->lowest_bit;
+		/*
+		 * If seek is expensive, start searching for new cluster from
+		 * start of partition, to minimize the span of allocated swap.
+		 * But if seek is cheap, search from our current position, so
+		 * that swap is allocated from all over the partition: if the
+		 * Flash Translation Layer only remaps within limited zones,
+		 * we don't want to wear out the first zone too quickly.
+		 */
+		if (!(si->flags & SWP_SOLIDSTATE))
+			scan_base = offset = si->lowest_bit;
 		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 
 		/* Locate the first empty (unaligned) cluster */
@@ -228,6 +239,27 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 		}
 
 		offset = si->lowest_bit;
+		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
+
+		/* Locate the first empty (unaligned) cluster */
+		for (; last_in_cluster < scan_base; offset++) {
+			if (si->swap_map[offset])
+				last_in_cluster = offset + SWAPFILE_CLUSTER;
+			else if (offset == last_in_cluster) {
+				spin_lock(&swap_lock);
+				offset -= SWAPFILE_CLUSTER - 1;
+				si->cluster_next = offset;
+				si->cluster_nr = SWAPFILE_CLUSTER - 1;
+				found_free_cluster = 1;
+				goto checks;
+			}
+			if (unlikely(--latency_ration < 0)) {
+				cond_resched();
+				latency_ration = LATENCY_LIMIT;
+			}
+		}
+
+		offset = scan_base;
 		spin_lock(&swap_lock);
 		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 		si->lowest_alloc = 0;
@@ -239,7 +271,7 @@ checks:
 	if (!si->highest_bit)
 		goto no_page;
 	if (offset > si->highest_bit)
-		offset = si->lowest_bit;
+		scan_base = offset = si->lowest_bit;
 	if (si->swap_map[offset])
 		goto scan;
 
@@ -323,8 +355,18 @@ scan:
 			latency_ration = LATENCY_LIMIT;
 		}
 	}
+	offset = si->lowest_bit;
+	while (++offset < scan_base) {
+		if (!si->swap_map[offset]) {
+			spin_lock(&swap_lock);
+			goto checks;
+		}
+		if (unlikely(--latency_ration < 0)) {
+			cond_resched();
+			latency_ration = LATENCY_LIMIT;
+		}
+	}
 	spin_lock(&swap_lock);
-	goto checks;
 
 no_page:
 	si->flags -= SWP_SCANNING;
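[Editor's illustration, not part of the commit] The last hunk gives the slow
scan a wrap-around shape once scan_base can start mid-partition: it searches
from the failing offset up to highest_bit, then resumes from the bottom of the
area until it comes back round to scan_base. The toy function below only
illustrates that visiting order; show_scan_order(), lowest_bit, highest_bit
are hypothetical names, and the kernel loop's locking, latency limit and exact
boundary handling are deliberately left out.

	#include <stdio.h>

	/*
	 * Print the order in which a wrap-around scan visits slots: first
	 * from the restart point up to the top of the area, then from the
	 * bottom back up to just before the restart point, so each slot is
	 * considered once before giving up.
	 */
	static void show_scan_order(unsigned int lowest_bit,
				    unsigned int highest_bit,
				    unsigned int scan_base)
	{
		unsigned int offset;

		for (offset = scan_base; offset <= highest_bit; offset++)
			printf("%u ", offset);
		for (offset = lowest_bit; offset < scan_base; offset++)
			printf("%u ", offset);
		printf("\n");
	}

	int main(void)
	{
		show_scan_order(1, 7, 5);	/* prints: 5 6 7 1 2 3 4 */
		return 0;
	}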