Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c   47
1 file changed, 14 insertions(+), 33 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 1b8ca160f1d0..1d803c2d0184 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1998,45 +1998,26 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
  */
 void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
 {
-#ifdef CONFIG_NUMA
-        struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
-#endif
-        int i, num;
-        struct page *new_page;
+        int nr_pages;
+        struct page *page;
         unsigned long offset;
+        unsigned long end_offset;
 
         /*
-         * Get the number of handles we should do readahead io to.
+         * Get starting offset for readaround, and number of pages to read.
+         * Adjust starting address by readbehind (for NUMA interleave case)?
+         * No, it's very unlikely that swap layout would follow vma layout,
+         * more likely that neighbouring swap pages came from the same node:
+         * so use the same "addr" to choose the same node for each swap read.
          */
-        num = valid_swaphandles(entry, &offset);
-        for (i = 0; i < num; offset++, i++) {
+        nr_pages = valid_swaphandles(entry, &offset);
+        for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
                 /* Ok, do the async read-ahead now */
-                new_page = read_swap_cache_async(swp_entry(swp_type(entry),
-                                                offset), vma, addr);
-                if (!new_page)
+                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
+                                                vma, addr);
+                if (!page)
                         break;
-                page_cache_release(new_page);
-#ifdef CONFIG_NUMA
-                /*
-                 * Find the next applicable VMA for the NUMA policy.
-                 */
-                addr += PAGE_SIZE;
-                if (addr == 0)
-                        vma = NULL;
-                if (vma) {
-                        if (addr >= vma->vm_end) {
-                                vma = next_vma;
-                                next_vma = vma ? vma->vm_next : NULL;
-                        }
-                        if (vma && addr < vma->vm_start)
-                                vma = NULL;
-                } else {
-                        if (next_vma && addr >= next_vma->vm_start) {
-                                vma = next_vma;
-                                next_vma = vma->vm_next;
-                        }
-                }
-#endif
+                page_cache_release(page);
         }
         lru_add_drain();        /* Push any new pages onto the LRU now */
 }
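
For reference, here is a minimal user-space sketch of the loop shape this patch introduces: a helper picks a window of swap offsets around the faulting entry (as valid_swaphandles() does in the kernel), and the loop walks [offset, offset + nr_pages), stopping at the first failed read. The helper names pick_window() and start_async_read() are hypothetical stand-ins for illustration only, not kernel APIs.

#include <stdio.h>

#define READAROUND_PAGES 8UL

/* Hypothetical stand-in for valid_swaphandles(): choose an aligned
 * window of swap offsets around 'target' and report its length. */
static unsigned long pick_window(unsigned long target, unsigned long *start)
{
        *start = (target / READAROUND_PAGES) * READAROUND_PAGES;
        return READAROUND_PAGES;
}

/* Hypothetical stand-in for read_swap_cache_async(): returns 0 on failure. */
static int start_async_read(unsigned long offset)
{
        printf("readaround: offset %lu\n", offset);
        return 1;
}

int main(void)
{
        unsigned long offset, end_offset, nr_pages;

        nr_pages = pick_window(42, &offset);
        for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
                if (!start_async_read(offset))
                        break;  /* stop readaround at the first failure */
        }
        return 0;
}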