Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--   mm/swap_state.c | 47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b52635601dfe..668a80422630 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
+#include <linux/swapops.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
@@ -368,3 +369,49 @@ struct page *read_swap_cache_async(swp_entry_t entry,
 	page_cache_release(new_page);
 	return found_page;
 }
+
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @vma: user vma this address belongs to
+ * @addr: target address for mempolicy
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * Primitive swap readahead code. We simply read an aligned block of
+ * (1 << page_cluster) entries in the swap area. This method is chosen
+ * because it doesn't cost us any seek time. We also make sure to queue
+ * the 'original' request together with the readahead ones...
+ *
+ * This has been extended to use the NUMA policies from the mm triggering
+ * the readahead.
+ *
+ * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ */
+struct page *swapin_readahead(swp_entry_t entry,
+			struct vm_area_struct *vma, unsigned long addr)
+{
+	int nr_pages;
+	struct page *page;
+	unsigned long offset;
+	unsigned long end_offset;
+
+	/*
+	 * Get starting offset for readaround, and number of pages to read.
+	 * Adjust starting address by readbehind (for NUMA interleave case)?
+	 * No, it's very unlikely that swap layout would follow vma layout,
+	 * more likely that neighbouring swap pages came from the same node:
+	 * so use the same "addr" to choose the same node for each swap read.
+	 */
+	nr_pages = valid_swaphandles(entry, &offset);
+	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
+		/* Ok, do the async read-ahead now */
+		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
+						vma, addr);
+		if (!page)
+			break;
+		page_cache_release(page);
+	}
+	lru_add_drain();	/* Push any new pages onto the LRU now */
+	return read_swap_cache_async(entry, vma, addr);
+}
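
The kernel-doc above describes the readahead window as "an aligned block of (1 << page_cluster) entries", but the helper that actually computes it, valid_swaphandles(), lives elsewhere in the swap code and is not part of this diff. The standalone C sketch below only illustrates the aligned-block arithmetic that wording implies; the variable names, the example offset, and the page_cluster value of 3 (the usual /proc/sys/vm/page-cluster default) are assumptions for illustration, not the kernel's implementation.

/*
 * Illustrative userspace sketch (not kernel code) of the readahead
 * window implied by "an aligned block of (1 << page_cluster) entries":
 * round the faulting swap offset down to a block boundary and cover
 * the 1 << page_cluster offsets that follow.  Offset 0 is skipped
 * because the first page of a swap area holds the swap header.
 */
#include <stdio.h>

int main(void)
{
	unsigned long target = 1234;	/* hypothetical faulting swap offset */
	unsigned int page_cluster = 3;	/* assumed readahead order (8 pages) */

	/* Round the target down to a (1 << page_cluster)-aligned base. */
	unsigned long base = (target >> page_cluster) << page_cluster;
	unsigned long end = base + (1UL << page_cluster);

	if (base == 0)
		base = 1;		/* offset 0 is the swap header */

	printf("readahead window for offset %lu: [%lu, %lu)\n",
	       target, base, end);
	for (unsigned long off = base; off < end; off++)
		printf("  would queue read for swap offset %lu%s\n",
		       off, off == target ? "  (original request)" : "");
	return 0;
}

In the patch itself, each offset in that window is passed to read_swap_cache_async(), which queues the I/O without waiting for it, and the final call after the loop returns the page for the original entry, so the requested page is always included alongside the readahead.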