author	Shaohua Li <shli@kernel.org>	2013-09-11 17:20:28 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-11 18:57:15 -0400
commit	2a8f9449343260373398d59228a62a4332ea513a (patch)
tree	76c6ddf2a99d9dc7519585ba65c9883005908286 /mm/swapfile.c
parent	15ca220e1a63af06e000691e4ae1beaba5430c32 (diff)
swap: change block allocation algorithm for SSD
I'm using a fast SSD for swap. scan_swap_map() sometimes uses up to 20~30% CPU time (when a cluster is hard to find, the CPU time can be up to 80%), which becomes a bottleneck. scan_swap_map() scans a byte array to search for a 256-page cluster, which is very slow.

Here I introduce a simple algorithm to search for a cluster. Since we only care about 256-page clusters, we can just use a counter to track whether a cluster is free. Every 256 pages use one int to store the counter. If the counter of a cluster is 0, the cluster is free. All free clusters are added to a list, so searching for a cluster is very efficient. With this, the scan_swap_map() overhead disappears.

This might help low-end SD card swap too, because if the cluster is aligned, SD firmware can do flash erase more efficiently.

We only enable the algorithm for SSD. Hard disk swap isn't fast enough, and the algorithm has a downside for it which might introduce a regression (see below).

The patch slightly changes which cluster is chosen: it always adds a free cluster to the list tail. This can help wear leveling for low-end SSD too. And if no cluster is found, scan_swap_map() will search from the end of the last free cluster, which is random. For SSD, this isn't a problem at all.

Another downside is that the cluster must be aligned to 256 pages, which reduces the chance of finding a cluster. I would expect this isn't a big problem for SSD because there is no seek penalty (and this is the reason I only enable the algorithm for SSD).

Signed-off-by: Shaohua Li <shli@fusionio.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Kyungmin Park <kmpark@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rafael Aquini <aquini@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
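To make the idea concrete, below is a minimal standalone C sketch (userspace only; the names and the test harness are illustrative, not the kernel's) of the structure this patch builds: one usage counter per 256-page cluster plus a free list threaded through cluster indices. Allocating a cluster becomes an O(1) pop from the list head, and a cluster whose counter drops back to zero is appended at the tail, mirroring what inc_cluster_info_page()/dec_cluster_info_page() do in the diff. In the real patch the counter and the list link share the flags/data fields of struct swap_cluster_info.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define CLUSTER_PAGES	256
#define NIL	((unsigned int)-1)	/* stands in for CLUSTER_FLAG_NEXT_NULL */

struct toy_cluster {
	unsigned int count;	/* pages currently in use in this cluster */
	unsigned int next;	/* next free cluster; valid only while free */
};

static struct toy_cluster *clusters;
static unsigned int free_head = NIL, free_tail = NIL;

/* Append a newly freed cluster at the tail, as dec_cluster_info_page() does. */
static void free_list_add_tail(unsigned int idx)
{
	clusters[idx].next = NIL;
	if (free_head == NIL)
		free_head = idx;
	else
		clusters[free_tail].next = idx;
	free_tail = idx;
}

/* O(1) pop of the list head: the fast path scan_swap_map() gains for SSD. */
static int alloc_free_cluster(void)
{
	unsigned int idx = free_head;

	if (idx == NIL)
		return -1;	/* no aligned free cluster; fall back to scanning */
	free_head = clusters[idx].next;
	if (free_head == NIL)
		free_tail = NIL;
	return (int)idx;
}

/* One page in the cluster was freed; a zero counter puts the cluster back. */
static void free_page_in_cluster(unsigned int idx)
{
	assert(clusters[idx].count > 0);
	if (--clusters[idx].count == 0)
		free_list_add_tail(idx);
}

int main(void)
{
	unsigned int nr_clusters = 4, i;

	clusters = calloc(nr_clusters, sizeof(*clusters));
	for (i = 0; i < nr_clusters; i++)
		free_list_add_tail(i);		/* all clusters start out free */

	int idx = alloc_free_cluster();		/* grabs cluster 0 in O(1) */
	clusters[idx].count = CLUSTER_PAGES;	/* pretend it filled up */
	printf("allocated cluster %d\n", idx);

	for (i = 0; i < CLUSTER_PAGES; i++)
		free_page_in_cluster(idx);	/* drains it; re-queued at tail */
	printf("free list: head %u, tail %u\n", free_head, free_tail);
	free(clusters);
	return 0;
}

Tail insertion is deliberate: the most recently freed cluster is reused last, which spreads writes across the device and helps wear leveling, as the commit message notes.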
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	288
1 file changed, 250 insertions(+), 38 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6ef2d15c5fe3..d1fbeb486de5 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -184,6 +184,134 @@ static int wait_for_discard(void *word)
 #define SWAPFILE_CLUSTER	256
 #define LATENCY_LIMIT		256
 
+static inline void cluster_set_flag(struct swap_cluster_info *info,
+	unsigned int flag)
+{
+	info->flags = flag;
+}
+
+static inline unsigned int cluster_count(struct swap_cluster_info *info)
+{
+	return info->data;
+}
+
+static inline void cluster_set_count(struct swap_cluster_info *info,
+				     unsigned int c)
+{
+	info->data = c;
+}
+
+static inline void cluster_set_count_flag(struct swap_cluster_info *info,
+					 unsigned int c, unsigned int f)
+{
+	info->flags = f;
+	info->data = c;
+}
+
+static inline unsigned int cluster_next(struct swap_cluster_info *info)
+{
+	return info->data;
+}
+
+static inline void cluster_set_next(struct swap_cluster_info *info,
+				    unsigned int n)
+{
+	info->data = n;
+}
+
+static inline void cluster_set_next_flag(struct swap_cluster_info *info,
+					 unsigned int n, unsigned int f)
+{
+	info->flags = f;
+	info->data = n;
+}
+
+static inline bool cluster_is_free(struct swap_cluster_info *info)
+{
+	return info->flags & CLUSTER_FLAG_FREE;
+}
+
+static inline bool cluster_is_null(struct swap_cluster_info *info)
+{
+	return info->flags & CLUSTER_FLAG_NEXT_NULL;
+}
+
+static inline void cluster_set_null(struct swap_cluster_info *info)
+{
+	info->flags = CLUSTER_FLAG_NEXT_NULL;
+	info->data = 0;
+}
+
+/*
+ * The cluster corresponding to page_nr will be used. The cluster will be
+ * removed from the free cluster list and its usage counter will be increased.
+ */
+static void inc_cluster_info_page(struct swap_info_struct *p,
+	struct swap_cluster_info *cluster_info, unsigned long page_nr)
+{
+	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+
+	if (!cluster_info)
+		return;
+	if (cluster_is_free(&cluster_info[idx])) {
+		VM_BUG_ON(cluster_next(&p->free_cluster_head) != idx);
+		cluster_set_next_flag(&p->free_cluster_head,
+			cluster_next(&cluster_info[idx]), 0);
+		if (cluster_next(&p->free_cluster_tail) == idx) {
+			cluster_set_null(&p->free_cluster_tail);
+			cluster_set_null(&p->free_cluster_head);
+		}
+		cluster_set_count_flag(&cluster_info[idx], 0, 0);
+	}
+
+	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
+	cluster_set_count(&cluster_info[idx],
+		cluster_count(&cluster_info[idx]) + 1);
+}
+
+/*
+ * The cluster corresponding to page_nr decreases one usage. If the usage
+ * counter becomes 0, which means no page in the cluster is in use, we can
+ * optionally discard the cluster and add it to the free cluster list.
+ */
+static void dec_cluster_info_page(struct swap_info_struct *p,
+	struct swap_cluster_info *cluster_info, unsigned long page_nr)
+{
+	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+
+	if (!cluster_info)
+		return;
+
+	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
+	cluster_set_count(&cluster_info[idx],
+		cluster_count(&cluster_info[idx]) - 1);
+
+	if (cluster_count(&cluster_info[idx]) == 0) {
+		cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
+		if (cluster_is_null(&p->free_cluster_head)) {
+			cluster_set_next_flag(&p->free_cluster_head, idx, 0);
+			cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
+		} else {
+			unsigned int tail = cluster_next(&p->free_cluster_tail);
+			cluster_set_next(&cluster_info[tail], idx);
+			cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
+		}
+	}
+}
+
+/*
+ * It's possible for scan_swap_map() to use a free cluster in the middle of
+ * the free cluster list. Avoid such abuse to prevent list corruption.
+ */
+static inline bool scan_swap_map_recheck_cluster(struct swap_info_struct *si,
+	unsigned long offset)
+{
+	offset /= SWAPFILE_CLUSTER;
+	return !cluster_is_null(&si->free_cluster_head) &&
+		offset != cluster_next(&si->free_cluster_head) &&
+		cluster_is_free(&si->cluster_info[offset]);
+}
+
 static unsigned long scan_swap_map(struct swap_info_struct *si,
 				   unsigned char usage)
 {
@@ -225,6 +353,25 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
 		si->lowest_alloc = si->max;
 		si->highest_alloc = 0;
 	}
+check_cluster:
+	if (!cluster_is_null(&si->free_cluster_head)) {
+		offset = cluster_next(&si->free_cluster_head) *
+					SWAPFILE_CLUSTER;
+		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
+		si->cluster_next = offset;
+		si->cluster_nr = SWAPFILE_CLUSTER - 1;
+		found_free_cluster = 1;
+		goto checks;
+	} else if (si->cluster_info) {
+		/*
+		 * Checking for a free cluster is fast enough, so we can do
+		 * the check every time.
+		 */
+		si->cluster_nr = 0;
+		si->lowest_alloc = 0;
+		goto checks;
+	}
+
 	spin_unlock(&si->lock);
 
 	/*
@@ -285,6 +432,8 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
 	}
 
 checks:
+	if (scan_swap_map_recheck_cluster(si, offset))
+		goto check_cluster;
 	if (!(si->flags & SWP_WRITEOK))
 		goto no_page;
 	if (!si->highest_bit)
@@ -317,6 +466,7 @@ checks:
 		si->highest_bit = 0;
 	}
 	si->swap_map[offset] = usage;
+	inc_cluster_info_page(si, si->cluster_info, offset);
 	si->cluster_next = offset + 1;
 	si->flags -= SWP_SCANNING;
 
@@ -600,6 +750,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
 
 	/* free if no reference */
 	if (!usage) {
+		dec_cluster_info_page(p, p->cluster_info, offset);
 		if (offset < p->lowest_bit)
 			p->lowest_bit = offset;
 		if (offset > p->highest_bit)
@@ -1524,7 +1675,8 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
 }
 
 static void _enable_swap_info(struct swap_info_struct *p, int prio,
-				unsigned char *swap_map)
+				unsigned char *swap_map,
+				struct swap_cluster_info *cluster_info)
 {
 	int i, prev;
 
@@ -1533,6 +1685,7 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
 	else
 		p->prio = --least_priority;
 	p->swap_map = swap_map;
+	p->cluster_info = cluster_info;
 	p->flags |= SWP_WRITEOK;
 	atomic_long_add(p->pages, &nr_swap_pages);
 	total_swap_pages += p->pages;
@@ -1553,12 +1706,13 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
 
 static void enable_swap_info(struct swap_info_struct *p, int prio,
 				unsigned char *swap_map,
+				struct swap_cluster_info *cluster_info,
 				unsigned long *frontswap_map)
 {
 	frontswap_init(p->type, frontswap_map);
 	spin_lock(&swap_lock);
 	spin_lock(&p->lock);
-	_enable_swap_info(p, prio, swap_map);
+	_enable_swap_info(p, prio, swap_map, cluster_info);
 	spin_unlock(&p->lock);
 	spin_unlock(&swap_lock);
 }
@@ -1567,7 +1721,7 @@ static void reinsert_swap_info(struct swap_info_struct *p)
 {
 	spin_lock(&swap_lock);
 	spin_lock(&p->lock);
-	_enable_swap_info(p, p->prio, p->swap_map);
+	_enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
 	spin_unlock(&p->lock);
 	spin_unlock(&swap_lock);
 }
@@ -1576,6 +1730,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 {
 	struct swap_info_struct *p = NULL;
 	unsigned char *swap_map;
+	struct swap_cluster_info *cluster_info;
 	unsigned long *frontswap_map;
 	struct file *swap_file, *victim;
 	struct address_space *mapping;
@@ -1675,6 +1830,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 	p->max = 0;
 	swap_map = p->swap_map;
 	p->swap_map = NULL;
+	cluster_info = p->cluster_info;
+	p->cluster_info = NULL;
 	p->flags = 0;
 	frontswap_map = frontswap_map_get(p);
 	frontswap_map_set(p, NULL);
@@ -1683,6 +1840,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 	frontswap_invalidate_area(type);
 	mutex_unlock(&swapon_mutex);
 	vfree(swap_map);
+	vfree(cluster_info);
 	vfree(frontswap_map);
 	/* Destroy swap account information */
 	swap_cgroup_swapoff(type);
@@ -2000,15 +2158,21 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
 static int setup_swap_map_and_extents(struct swap_info_struct *p,
 					union swap_header *swap_header,
 					unsigned char *swap_map,
+					struct swap_cluster_info *cluster_info,
 					unsigned long maxpages,
 					sector_t *span)
 {
 	int i;
 	unsigned int nr_good_pages;
 	int nr_extents;
+	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
+	unsigned long idx = p->cluster_next / SWAPFILE_CLUSTER;
 
 	nr_good_pages = maxpages - 1;	/* omit header page */
 
+	cluster_set_null(&p->free_cluster_head);
+	cluster_set_null(&p->free_cluster_tail);
+
 	for (i = 0; i < swap_header->info.nr_badpages; i++) {
 		unsigned int page_nr = swap_header->info.badpages[i];
 		if (page_nr == 0 || page_nr > swap_header->info.last_page)
@@ -2016,11 +2180,25 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
 		if (page_nr < maxpages) {
 			swap_map[page_nr] = SWAP_MAP_BAD;
 			nr_good_pages--;
+			/*
+			 * Haven't marked the cluster free yet, so no list
+			 * operation is involved.
+			 */
+			inc_cluster_info_page(p, cluster_info, page_nr);
 		}
 	}
 
+	/* Haven't marked the cluster free yet, so no list operation is involved */
+	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
+		inc_cluster_info_page(p, cluster_info, i);
+
 	if (nr_good_pages) {
 		swap_map[0] = SWAP_MAP_BAD;
+		/*
+		 * Haven't marked the cluster free yet, so no list
+		 * operation is involved.
+		 */
+		inc_cluster_info_page(p, cluster_info, 0);
 		p->max = maxpages;
 		p->pages = nr_good_pages;
 		nr_extents = setup_swap_extents(p, span);
@@ -2033,6 +2211,30 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
 		return -EINVAL;
 	}
 
+	if (!cluster_info)
+		return nr_extents;
+
+	for (i = 0; i < nr_clusters; i++) {
+		if (!cluster_count(&cluster_info[idx])) {
+			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
+			if (cluster_is_null(&p->free_cluster_head)) {
+				cluster_set_next_flag(&p->free_cluster_head,
+								idx, 0);
+				cluster_set_next_flag(&p->free_cluster_tail,
+								idx, 0);
+			} else {
+				unsigned int tail;
+
+				tail = cluster_next(&p->free_cluster_tail);
+				cluster_set_next(&cluster_info[tail], idx);
+				cluster_set_next_flag(&p->free_cluster_tail,
+								idx, 0);
+			}
+		}
+		idx++;
+		if (idx == nr_clusters)
+			idx = 0;
+	}
 	return nr_extents;
 }
 
@@ -2064,6 +2266,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	sector_t span;
 	unsigned long maxpages;
 	unsigned char *swap_map = NULL;
+	struct swap_cluster_info *cluster_info = NULL;
 	unsigned long *frontswap_map = NULL;
 	struct page *page = NULL;
 	struct inode *inode = NULL;
@@ -2137,13 +2340,28 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		error = -ENOMEM;
 		goto bad_swap;
 	}
+	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+		p->flags |= SWP_SOLIDSTATE;
+		/*
+		 * select a random position to start with to help SSD wear
+		 * leveling
+		 */
+		p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
+
+		cluster_info = vzalloc(DIV_ROUND_UP(maxpages,
+			SWAPFILE_CLUSTER) * sizeof(*cluster_info));
+		if (!cluster_info) {
+			error = -ENOMEM;
+			goto bad_swap;
+		}
+	}
 
 	error = swap_cgroup_swapon(p->type, maxpages);
 	if (error)
 		goto bad_swap;
 
 	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
-		maxpages, &span);
+		cluster_info, maxpages, &span);
 	if (unlikely(nr_extents < 0)) {
 		error = nr_extents;
 		goto bad_swap;
@@ -2152,40 +2370,33 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	if (frontswap_enabled)
 		frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
 
-	if (p->bdev) {
-		if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
-			p->flags |= SWP_SOLIDSTATE;
-			p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
-		}
-
-		if ((swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
-			/*
-			 * When discard is enabled for swap with no particular
-			 * policy flagged, we set all swap discard flags here in
-			 * order to sustain backward compatibility with older
-			 * swapon(8) releases.
-			 */
-			p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
-				     SWP_PAGE_DISCARD);
-
-			/*
-			 * By flagging sys_swapon, a sysadmin can tell us to
-			 * either do single-time area discards only, or to just
-			 * perform discards for released swap page-clusters.
-			 * Now it's time to adjust the p->flags accordingly.
-			 */
-			if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
-				p->flags &= ~SWP_PAGE_DISCARD;
-			else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
-				p->flags &= ~SWP_AREA_DISCARD;
-
-			/* issue a swapon-time discard if it's still required */
-			if (p->flags & SWP_AREA_DISCARD) {
-				int err = discard_swap(p);
-				if (unlikely(err))
-					pr_err("swapon: discard_swap(%p): %d\n",
-						p, err);
-			}
+	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
+		/*
+		 * When discard is enabled for swap with no particular
+		 * policy flagged, we set all swap discard flags here in
+		 * order to sustain backward compatibility with older
+		 * swapon(8) releases.
+		 */
+		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
+			     SWP_PAGE_DISCARD);
+
+		/*
+		 * By flagging sys_swapon, a sysadmin can tell us to
+		 * either do single-time area discards only, or to just
+		 * perform discards for released swap page-clusters.
+		 * Now it's time to adjust the p->flags accordingly.
+		 */
+		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
+			p->flags &= ~SWP_PAGE_DISCARD;
+		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
+			p->flags &= ~SWP_AREA_DISCARD;
+
+		/* issue a swapon-time discard if it's still required */
+		if (p->flags & SWP_AREA_DISCARD) {
+			int err = discard_swap(p);
+			if (unlikely(err))
+				pr_err("swapon: discard_swap(%p): %d\n",
+					p, err);
+		}
 	}
 
@@ -2194,7 +2405,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	if (swap_flags & SWAP_FLAG_PREFER)
 		prio =
 		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
-	enable_swap_info(p, prio, swap_map, frontswap_map);
+	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
 
 	pr_info("Adding %uk swap on %s.  "
 		"Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
@@ -2226,6 +2437,7 @@ bad_swap:
 	p->flags = 0;
 	spin_unlock(&swap_lock);
 	vfree(swap_map);
+	vfree(cluster_info);
 	if (swap_file) {
 		if (inode && S_ISREG(inode->i_mode)) {
 			mutex_unlock(&inode->i_mutex);