about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorHugh Dickins <hugh@veritas.com>2009-01-06 17:39:50 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2009-01-06 18:59:05 -0500
commitebebbbe904634b0ca1c674457b399f68db5e05b1 (patch)
tree168c6c1193580e40ba1916ebaff822e15e8ee186 /mm
parent81e33971271ec8603fe696731ff9967afb99e729 (diff)
swapfile: rearrange scan and swap_info
Before making functional changes, rearrange scan_swap_map() to simplify
subsequent diffs.  Actually, there is one functional change in there:
leave cluster_nr negative while scanning for a new cluster - resetting it
early increased the likelihood that when we have difficulty finding a free
cluster, another task may come in and try doing exactly the same - just a
waste of cpu.

Before making functional changes, rearrange struct swap_info_struct
slightly: flags will be needed as an unsigned long (for wait_on_bit), next
is a good int to pair with prio, old_block_size is uninteresting so shift
it to the end.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/swapfile.c66
1 file changed, 37 insertions(+), 29 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 85ff603385c3..4d9855f86e7d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -89,7 +89,8 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 
 static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 {
-	unsigned long offset, last_in_cluster;
+	unsigned long offset;
+	unsigned long last_in_cluster;
 	int latency_ration = LATENCY_LIMIT;
 
 	/*
@@ -103,10 +104,13 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 	 */
 
 	si->flags += SWP_SCANNING;
-	if (unlikely(!si->cluster_nr)) {
-		si->cluster_nr = SWAPFILE_CLUSTER - 1;
-		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER)
-			goto lowest;
+	offset = si->cluster_next;
+
+	if (unlikely(!si->cluster_nr--)) {
+		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
+			si->cluster_nr = SWAPFILE_CLUSTER - 1;
+			goto checks;
+		}
 		spin_unlock(&swap_lock);
 
 		offset = si->lowest_bit;
@@ -118,43 +122,47 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 				last_in_cluster = offset + SWAPFILE_CLUSTER;
 			else if (offset == last_in_cluster) {
 				spin_lock(&swap_lock);
-				si->cluster_next = offset-SWAPFILE_CLUSTER+1;
-				goto cluster;
+				offset -= SWAPFILE_CLUSTER - 1;
+				si->cluster_next = offset;
+				si->cluster_nr = SWAPFILE_CLUSTER - 1;
+				goto checks;
 			}
 			if (unlikely(--latency_ration < 0)) {
 				cond_resched();
 				latency_ration = LATENCY_LIMIT;
 			}
 		}
+
+		offset = si->lowest_bit;
 		spin_lock(&swap_lock);
-		goto lowest;
+		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 	}
 
-	si->cluster_nr--;
-cluster:
-	offset = si->cluster_next;
-	if (offset > si->highest_bit)
-lowest:		offset = si->lowest_bit;
-checks:	if (!(si->flags & SWP_WRITEOK))
+checks:
+	if (!(si->flags & SWP_WRITEOK))
 		goto no_page;
 	if (!si->highest_bit)
 		goto no_page;
-	if (!si->swap_map[offset]) {
-		if (offset == si->lowest_bit)
-			si->lowest_bit++;
-		if (offset == si->highest_bit)
-			si->highest_bit--;
-		si->inuse_pages++;
-		if (si->inuse_pages == si->pages) {
-			si->lowest_bit = si->max;
-			si->highest_bit = 0;
-		}
-		si->swap_map[offset] = 1;
-		si->cluster_next = offset + 1;
-		si->flags -= SWP_SCANNING;
-		return offset;
-	}
+	if (offset > si->highest_bit)
+		offset = si->lowest_bit;
+	if (si->swap_map[offset])
+		goto scan;
+
+	if (offset == si->lowest_bit)
+		si->lowest_bit++;
+	if (offset == si->highest_bit)
+		si->highest_bit--;
+	si->inuse_pages++;
+	if (si->inuse_pages == si->pages) {
+		si->lowest_bit = si->max;
+		si->highest_bit = 0;
+	}
+	si->swap_map[offset] = 1;
+	si->cluster_next = offset + 1;
+	si->flags -= SWP_SCANNING;
+	return offset;
 
+scan:
 	spin_unlock(&swap_lock);
 	while (++offset <= si->highest_bit) {
 		if (!si->swap_map[offset]) {
@@ -167,7 +175,7 @@ checks:	if (!(si->flags & SWP_WRITEOK))
 		}
 	}
 	spin_lock(&swap_lock);
-	goto lowest;
+	goto checks;
 
 no_page:
 	si->flags -= SWP_SCANNING;