summaryrefslogtreecommitdiffstats
path: root/mm/swapfile.c
diff options
context:
space:
mode:
authorChen Yucong <slaoub@gmail.com>2014-06-04 19:10:57 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-04 19:54:12 -0400
commit50088c440910730baf3248acfad2c846fb3eea77 (patch)
tree2a1b426e134e17869862fb87f7f35f92da86983c /mm/swapfile.c
parent100873d7a777b67ad35197c5a998b5e778f8bf3f (diff)
mm/swapfile.c: delete the "last_in_cluster < scan_base" loop in the body of scan_swap_map()
Via commit ebc2a1a69111 ("swap: make cluster allocation per-cpu"), we can see that all SWP_SOLIDSTATE "seek is cheap" (SSD) cases have already gone to the si->cluster_info scan_swap_map_try_ssd_cluster() route. As a result, the "last_in_cluster < scan_base" loop in the body of scan_swap_map() has become a dead code snippet and should be deleted. This patch deletes the redundant loop, as Hugh and Shaohua suggested. [hughd@google.com: fix comment, simplify code] Signed-off-by: Chen Yucong <slaoub@gmail.com> Cc: Shaohua Li <shli@kernel.org> Acked-by: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--mm/swapfile.c29
1 file changed, 3 insertions, 26 deletions
diff --git a/mm/swapfile.c b/mm/swapfile.c
index beeeef8a1b2d..4c524f7bd0bf 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -523,13 +523,10 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
523 /* 523 /*
524 * If seek is expensive, start searching for new cluster from 524 * If seek is expensive, start searching for new cluster from
525 * start of partition, to minimize the span of allocated swap. 525 * start of partition, to minimize the span of allocated swap.
526 * But if seek is cheap, search from our current position, so 526 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
527 * that swap is allocated from all over the partition: if the 527 * case, just handled by scan_swap_map_try_ssd_cluster() above.
528 * Flash Translation Layer only remaps within limited zones,
529 * we don't want to wear out the first zone too quickly.
530 */ 528 */
531 if (!(si->flags & SWP_SOLIDSTATE)) 529 scan_base = offset = si->lowest_bit;
532 scan_base = offset = si->lowest_bit;
533 last_in_cluster = offset + SWAPFILE_CLUSTER - 1; 530 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
534 531
535 /* Locate the first empty (unaligned) cluster */ 532 /* Locate the first empty (unaligned) cluster */
@@ -549,26 +546,6 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
549 } 546 }
550 } 547 }
551 548
552 offset = si->lowest_bit;
553 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
554
555 /* Locate the first empty (unaligned) cluster */
556 for (; last_in_cluster < scan_base; offset++) {
557 if (si->swap_map[offset])
558 last_in_cluster = offset + SWAPFILE_CLUSTER;
559 else if (offset == last_in_cluster) {
560 spin_lock(&si->lock);
561 offset -= SWAPFILE_CLUSTER - 1;
562 si->cluster_next = offset;
563 si->cluster_nr = SWAPFILE_CLUSTER - 1;
564 goto checks;
565 }
566 if (unlikely(--latency_ration < 0)) {
567 cond_resched();
568 latency_ration = LATENCY_LIMIT;
569 }
570 }
571
572 offset = scan_base; 549 offset = scan_base;
573 spin_lock(&si->lock); 550 spin_lock(&si->lock);
574 si->cluster_nr = SWAPFILE_CLUSTER - 1; 551 si->cluster_nr = SWAPFILE_CLUSTER - 1;