-rw-r--r--	mm/memory.c	47
-rw-r--r--	mm/shmem.c	43
2 files changed, 26 insertions(+), 64 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 1b8ca160f1d0..1d803c2d0184 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1998,45 +1998,26 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
  */
 void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
 {
-#ifdef CONFIG_NUMA
-	struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
-#endif
-	int i, num;
-	struct page *new_page;
+	int nr_pages;
+	struct page *page;
 	unsigned long offset;
+	unsigned long end_offset;
 
 	/*
-	 * Get the number of handles we should do readahead io to.
+	 * Get starting offset for readaround, and number of pages to read.
+	 * Adjust starting address by readbehind (for NUMA interleave case)?
+	 * No, it's very unlikely that swap layout would follow vma layout,
+	 * more likely that neighbouring swap pages came from the same node:
+	 * so use the same "addr" to choose the same node for each swap read.
 	 */
-	num = valid_swaphandles(entry, &offset);
-	for (i = 0; i < num; offset++, i++) {
+	nr_pages = valid_swaphandles(entry, &offset);
+	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
 		/* Ok, do the async read-ahead now */
-		new_page = read_swap_cache_async(swp_entry(swp_type(entry),
-							offset), vma, addr);
-		if (!new_page)
+		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
+						vma, addr);
+		if (!page)
 			break;
-		page_cache_release(new_page);
-#ifdef CONFIG_NUMA
-		/*
-		 * Find the next applicable VMA for the NUMA policy.
-		 */
-		addr += PAGE_SIZE;
-		if (addr == 0)
-			vma = NULL;
-		if (vma) {
-			if (addr >= vma->vm_end) {
-				vma = next_vma;
-				next_vma = vma ? vma->vm_next : NULL;
-			}
-			if (vma && addr < vma->vm_start)
-				vma = NULL;
-		} else {
-			if (next_vma && addr >= next_vma->vm_start) {
-				vma = next_vma;
-				next_vma = vma->vm_next;
-			}
-		}
-#endif
+		page_cache_release(page);
 	}
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 }
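
Note on the hunk above: the per-page VMA walk (which tried to track the mempolicy
of each successive address during readahead) is dropped in favour of a plain offset
loop, and every readahead page is read using the original faulting address, for the
reason spelled out in the new comment. A minimal user-space sketch of the new loop
shape, with a hypothetical stand-in for valid_swaphandles(), might look like:

#include <stdio.h>

/*
 * Hypothetical stand-in for valid_swaphandles(): report a starting
 * swap offset and how many slots around it are worth reading.
 */
static int example_swaphandles(unsigned long *offset)
{
	*offset = 100;
	return 8;
}

int main(void)
{
	unsigned long offset, end_offset;
	int nr_pages;

	/* Same shape as the rewritten swapin_readahead() loop above */
	nr_pages = example_swaphandles(&offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++)
		printf("would start async read of swap slot %lu\n", offset);
	return 0;
}
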
diff --git a/mm/shmem.c b/mm/shmem.c
index 51b3d6ccddab..88c6685f16b7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1025,53 +1025,34 @@ out:
 	return err;
 }
 
-static struct page *shmem_swapin_async(struct shared_policy *p,
+static struct page *shmem_swapin(struct shmem_inode_info *info,
 				swp_entry_t entry, unsigned long idx)
 {
-	struct page *page;
 	struct vm_area_struct pvma;
+	struct page *page;
 
 	/* Create a pseudo vma that just contains the policy */
-	memset(&pvma, 0, sizeof(struct vm_area_struct));
-	pvma.vm_end = PAGE_SIZE;
+	pvma.vm_start = 0;
 	pvma.vm_pgoff = idx;
-	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
+	pvma.vm_ops = NULL;
+	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+	swapin_readahead(entry, 0, &pvma);
 	page = read_swap_cache_async(entry, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
 }
 
-static struct page *shmem_swapin(struct shmem_inode_info *info,
-				 swp_entry_t entry, unsigned long idx)
-{
-	struct shared_policy *p = &info->policy;
-	int i, num;
-	struct page *page;
-	unsigned long offset;
-
-	num = valid_swaphandles(entry, &offset);
-	for (i = 0; i < num; offset++, i++) {
-		page = shmem_swapin_async(p,
-				swp_entry(swp_type(entry), offset), idx);
-		if (!page)
-			break;
-		page_cache_release(page);
-	}
-	lru_add_drain();	/* Push any new pages onto the LRU now */
-	return shmem_swapin_async(p, entry, idx);
-}
-
-static struct page *
-shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
-		 unsigned long idx)
+static struct page *shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
+			unsigned long idx)
 {
 	struct vm_area_struct pvma;
 	struct page *page;
 
-	memset(&pvma, 0, sizeof(struct vm_area_struct));
-	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+	/* Create a pseudo vma that just contains the policy */
+	pvma.vm_start = 0;
 	pvma.vm_pgoff = idx;
-	pvma.vm_end = PAGE_SIZE;
+	pvma.vm_ops = NULL;
+	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 	page = alloc_page_vma(gfp, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
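
Note on the shmem hunks: shmem now gets its readaround by calling the common
swapin_readahead() on the pseudo vma, so the open-coded valid_swaphandles() loop
and shmem_swapin_async() go away. Both remaining helpers build the pseudo vma by
initialising only the handful of fields the policy and allocation paths look at
(vm_start, vm_pgoff, vm_ops, vm_policy) instead of memset()ing the whole structure.
A small user-space sketch of that "fill only what the callee reads" pattern, with
hypothetical names standing in for vm_area_struct and the policy lookup, might be:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical cut-down stand-in for vm_area_struct */
struct pseudo_vma {
	unsigned long vm_start;
	unsigned long vm_pgoff;
	void *vm_ops;
	void *vm_policy;
	char other_fields[128];	/* never touched by the callee below */
};

/* Hypothetical callee: reads only the fields the caller initialised */
static void alloc_for(struct pseudo_vma *pvma)
{
	printf("pgoff=%lu policy=%p\n", pvma->vm_pgoff, pvma->vm_policy);
}

int main(void)
{
	struct pseudo_vma pvma;	/* deliberately not memset() */

	pvma.vm_start = 0;
	pvma.vm_pgoff = 42;
	pvma.vm_ops = NULL;
	pvma.vm_policy = NULL;	/* real code: mpol_shared_policy_lookup() */
	alloc_for(&pvma);
	return 0;
}
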