Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--	mm/swap_state.c	| 79
1 file changed, 68 insertions(+), 11 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e6f15f8ca2af..e76ace30d436 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -63,6 +63,8 @@ unsigned long total_swapcache_pages(void)
 	return ret;
 }
 
+static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
+
 void show_swap_cache_info(void)
 {
 	printk("%lu pages in swap cache\n", total_swapcache_pages());
@@ -83,9 +85,9 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
 	int error;
 	struct address_space *address_space;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(PageSwapCache(page));
-	VM_BUG_ON(!PageSwapBacked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(PageSwapCache(page), page);
+	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
 	page_cache_get(page);
 	SetPageSwapCache(page);
@@ -139,9 +141,9 @@ void __delete_from_swap_cache(struct page *page)
 	swp_entry_t entry;
 	struct address_space *address_space;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(!PageSwapCache(page));
-	VM_BUG_ON(PageWriteback(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+	VM_BUG_ON_PAGE(PageWriteback(page), page);
 
 	entry.val = page_private(page);
 	address_space = swap_address_space(entry);
@@ -165,8 +167,8 @@ int add_to_swap(struct page *page, struct list_head *list)
 	swp_entry_t entry;
 	int err;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(!PageUptodate(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageUptodate(page), page);
 
 	entry = get_swap_page();
 	if (!entry.val)
@@ -286,8 +288,11 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 
 	page = find_get_page(swap_address_space(entry), entry.val);
 
-	if (page)
+	if (page) {
 		INC_CACHE_INFO(find_success);
+		if (TestClearPageReadahead(page))
+			atomic_inc(&swapin_readahead_hits);
+	}
 
 	INC_CACHE_INFO(find_total);
 	return page;
@@ -389,6 +394,50 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	return found_page;
 }
 
+static unsigned long swapin_nr_pages(unsigned long offset)
+{
+	static unsigned long prev_offset;
+	unsigned int pages, max_pages, last_ra;
+	static atomic_t last_readahead_pages;
+
+	max_pages = 1 << ACCESS_ONCE(page_cluster);
+	if (max_pages <= 1)
+		return 1;
+
+	/*
+	 * This heuristic has been found to work well on both sequential and
+	 * random loads, swapping to hard disk or to SSD: please don't ask
+	 * what the "+ 2" means, it just happens to work well, that's all.
+	 */
+	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
+	if (pages == 2) {
+		/*
+		 * We can have no readahead hits to judge by: but must not get
+		 * stuck here forever, so check for an adjacent offset instead
+		 * (and don't even bother to check whether swap type is same).
+		 */
+		if (offset != prev_offset + 1 && offset != prev_offset - 1)
+			pages = 1;
+		prev_offset = offset;
+	} else {
+		unsigned int roundup = 4;
+		while (roundup < pages)
+			roundup <<= 1;
+		pages = roundup;
+	}
+
+	if (pages > max_pages)
+		pages = max_pages;
+
+	/* Don't shrink readahead too fast */
+	last_ra = atomic_read(&last_readahead_pages) / 2;
+	if (pages < last_ra)
+		pages = last_ra;
+	atomic_set(&last_readahead_pages, pages);
+
+	return pages;
+}
+
 /**
  * swapin_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory
@@ -412,11 +461,16 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr)
 {
 	struct page *page;
-	unsigned long offset = swp_offset(entry);
+	unsigned long entry_offset = swp_offset(entry);
+	unsigned long offset = entry_offset;
 	unsigned long start_offset, end_offset;
-	unsigned long mask = (1UL << page_cluster) - 1;
+	unsigned long mask;
 	struct blk_plug plug;
 
+	mask = swapin_nr_pages(offset) - 1;
+	if (!mask)
+		goto skip;
+
 	/* Read a page_cluster sized and aligned cluster around offset. */
 	start_offset = offset & ~mask;
 	end_offset = offset | mask;
@@ -430,10 +484,13 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 					gfp_mask, vma, addr);
 		if (!page)
 			continue;
+		if (offset != entry_offset)
+			SetPageReadahead(page);
 		page_cache_release(page);
 	}
 	blk_finish_plug(&plug);
 
 	lru_add_drain();	/* Push any new pages onto the LRU now */
+skip:
 	return read_swap_cache_async(entry, gfp_mask, vma, addr);
 }
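
The window-sizing heuristic added above is easier to see in action outside the kernel. Below is a minimal userspace C sketch of swapin_nr_pages() and the cluster alignment done in swapin_readahead(); it is a model, not kernel code. The names readahead_hits, last_readahead and PAGE_CLUSTER are hypothetical stand-ins for swapin_readahead_hits, last_readahead_pages and page_cluster, and the atomics are dropped since this model is single-threaded.

/*
 * Userspace model of the swapin readahead window heuristic.
 * Compile with: cc -o model model.c
 */
#include <stdio.h>

#define PAGE_CLUSTER 3			/* default: 2^3 = 8 pages max */

static unsigned long prev_offset;
static unsigned int readahead_hits = 4;	/* mirrors ATOMIC_INIT(4) */
static unsigned int last_readahead;

static unsigned int swapin_nr_pages_model(unsigned long offset)
{
	unsigned int pages, max_pages = 1u << PAGE_CLUSTER;

	if (max_pages <= 1)
		return 1;

	/* Consume the hit count; "+ 2" is the empirical fudge factor. */
	pages = readahead_hits + 2;
	readahead_hits = 0;

	if (pages == 2) {
		/* No hits: read 1 page unless the fault looks sequential. */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		/* Round up to a power of two, starting from 4. */
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Never drop below half of the previous window. */
	if (pages < last_readahead / 2)
		pages = last_readahead / 2;
	last_readahead = pages;

	return pages;
}

int main(void)
{
	/*
	 * Mixed workload: the first fault uses the initial hit count,
	 * random faults decay the window, adjacent faults (23,24,25)
	 * keep it from collapsing all the way to one page.
	 */
	unsigned long offsets[] = { 100, 517, 23, 24, 25, 900 };
	for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		unsigned int pages = swapin_nr_pages_model(offsets[i]);
		unsigned long mask = (unsigned long)pages - 1;

		/*
		 * Same alignment as swapin_readahead(): a pages-sized,
		 * pages-aligned cluster around the faulting offset.
		 * In the kernel, mask == 0 skips readahead entirely.
		 */
		printf("offset %4lu -> window %u pages [%lu..%lu]\n",
		       offsets[i], pages, offsets[i] & ~mask,
		       offsets[i] | mask);
	}
	return 0;
}

In the kernel, the feedback loop closes in lookup_swap_cache(): every page brought in speculatively (offset != entry_offset) gets SetPageReadahead(), and when such a page is later actually faulted in, TestClearPageReadahead() turns that into a swapin_readahead_hits increment, growing the next window. Misses leave the counter at zero, so the window decays by at most half per fault, and a one-page window (mask == 0) bypasses the readahead loop via the new skip: label.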