author     Hugh Dickins <hugh@veritas.com>    2008-02-05 01:28:41 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-05 12:44:14 -0500
commit     46017e954826ac59e91df76341a3f76b45467847 (patch)
tree       711a35e3936118665d0eac2afeef8758b4f4e95f    /mm/memory.c
parent     c4cc6d07b2f465fbf5efd99bbe772a49c515f3f2 (diff)
swapin_readahead: move and rearrange args
swapin_readahead has never sat well in mm/memory.c: move it to mm/swap_state.c
beside its kindred read_swap_cache_async.  Why were its args in a different
order?  Rearrange them.  And since it was always followed by a
read_swap_cache_async of the target page, fold that in and return struct page*.
Then CONFIG_SWAP=n no longer needs valid_swaphandles and read_swap_cache_async
stubs.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  45
1 file changed, 1 insertion(+), 44 deletions(-)
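The diffstat above covers only mm/memory.c, so the mm/swap_state.c half of the
move is not shown here.  Reconstructed from the body removed below and from the
new call in do_swap_page, the relocated helper would look roughly like the
following sketch; its exact formatting and placement inside mm/swap_state.c are
assumptions, not part of this diff.

/*
 * Sketch only: swapin_readahead() after the move into mm/swap_state.c,
 * with its args reordered to match read_swap_cache_async() and the final
 * read of the target page folded in so the caller gets the page back.
 */
struct page *swapin_readahead(swp_entry_t entry,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/* Read an aligned block of (1 << page_cluster) slots around entry. */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Queue the async read-ahead for each neighbouring slot */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	/* Finally read (or find) the target page itself and return it */
	return read_swap_cache_async(entry, vma, addr);
}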
diff --git a/mm/memory.c b/mm/memory.c
index 1d803c2d0184..ccc9403d5352 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1980,48 +1980,6 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 	return 0;
 }
 
-/**
- * swapin_readahead - swap in pages in hope we need them soon
- * @entry: swap entry of this memory
- * @addr: address to start
- * @vma: user vma this addresses belong to
- *
- * Primitive swap readahead code. We simply read an aligned block of
- * (1 << page_cluster) entries in the swap area. This method is chosen
- * because it doesn't cost us any seek time. We also make sure to queue
- * the 'original' request together with the readahead ones...
- *
- * This has been extended to use the NUMA policies from the mm triggering
- * the readahead.
- *
- * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
- */
-void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
-{
-	int nr_pages;
-	struct page *page;
-	unsigned long offset;
-	unsigned long end_offset;
-
-	/*
-	 * Get starting offset for readaround, and number of pages to read.
-	 * Adjust starting address by readbehind (for NUMA interleave case)?
-	 * No, it's very unlikely that swap layout would follow vma layout,
-	 * more likely that neighbouring swap pages came from the same node:
-	 * so use the same "addr" to choose the same node for each swap read.
-	 */
-	nr_pages = valid_swaphandles(entry, &offset);
-	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
-		/* Ok, do the async read-ahead now */
-		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
-						vma, addr);
-		if (!page)
-			break;
-		page_cache_release(page);
-	}
-	lru_add_drain();	/* Push any new pages onto the LRU now */
-}
-
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -2049,8 +2007,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	page = lookup_swap_cache(entry);
 	if (!page) {
 		grab_swap_token(); /* Contend for token _before_ read-in */
-		swapin_readahead(entry, address, vma);
-		page = read_swap_cache_async(entry, vma, address);
+		page = swapin_readahead(entry, vma, address);
 		if (!page) {
 			/*
 			 * Back out if somebody else faulted in this pte
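With swapin_readahead now living beside read_swap_cache_async and returning the
target page itself, do_swap_page no longer calls read_swap_cache_async directly,
so a CONFIG_SWAP=n build only needs a stub for swapin_readahead rather than for
valid_swaphandles and read_swap_cache_async.  A minimal sketch of such a stub in
include/linux/swap.h, assuming the usual !CONFIG_SWAP inline-stub pattern (the
exact stub is not part of this diff):

#ifndef CONFIG_SWAP
/* Sketch: with no swap configured there is nothing to read ahead */
static inline struct page *swapin_readahead(swp_entry_t entry,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}
#endif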