-rw-r--r--  include/linux/swap.h    6
-rw-r--r--  mm/memory.c              3
-rw-r--r--  mm/shmem.c              28
-rw-r--r--  mm/swap_state.c         18
-rw-r--r--  mm/swapfile.c            3
5 files changed, 30 insertions, 28 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 9fa1aef1b82c..16fd1209e9fa 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -228,9 +228,9 @@ extern int move_from_swap_cache(struct page *, unsigned long,
 extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t);
-extern struct page *read_swap_cache_async(swp_entry_t,
+extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr);
-extern struct page *swapin_readahead(swp_entry_t,
+extern struct page *swapin_readahead(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr);
 
 /* linux/mm/swapfile.c */
@@ -306,7 +306,7 @@ static inline void swap_free(swp_entry_t swp)
 {
 }
 
-static inline struct page *swapin_readahead(swp_entry_t swp,
+static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr)
 {
 	return NULL;
diff --git a/mm/memory.c b/mm/memory.c
index ccc9403d5352..bc137751da7f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2007,7 +2007,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	page = lookup_swap_cache(entry);
 	if (!page) {
 		grab_swap_token(); /* Contend for token _before_ read-in */
-		page = swapin_readahead(entry, vma, address);
+		page = swapin_readahead(entry,
+					GFP_HIGHUSER_MOVABLE, vma, address);
 		if (!page) {
 			/*
 			 * Back out if somebody else faulted in this pte
diff --git a/mm/shmem.c b/mm/shmem.c
index 3a22a8f79331..55b696aa3ddd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1025,8 +1025,8 @@ out:
 	return err;
 }
 
-static struct page *shmem_swapin(struct shmem_inode_info *info,
-				 swp_entry_t entry, unsigned long idx)
+static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+			struct shmem_inode_info *info, unsigned long idx)
 {
 	struct vm_area_struct pvma;
 	struct page *page;
@@ -1036,13 +1036,13 @@ static struct page *shmem_swapin(struct shmem_inode_info *info,
 	pvma.vm_pgoff = idx;
 	pvma.vm_ops = NULL;
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
-	page = swapin_readahead(entry, &pvma, 0);
+	page = swapin_readahead(entry, gfp, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
 }
 
-static struct page *shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
-				     unsigned long idx)
+static struct page *shmem_alloc_page(gfp_t gfp,
+			struct shmem_inode_info *info, unsigned long idx)
 {
 	struct vm_area_struct pvma;
 	struct page *page;
@@ -1063,14 +1063,14 @@ static inline int shmem_parse_mpol(char *value, int *policy,
 	return 1;
 }
 
-static inline struct page *
-shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
+static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+			struct shmem_inode_info *info, unsigned long idx)
 {
-	return swapin_readahead(entry, NULL, 0);
+	return swapin_readahead(entry, gfp, NULL, 0);
 }
 
-static inline struct page *
-shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
+static inline struct page *shmem_alloc_page(gfp_t gfp,
+			struct shmem_inode_info *info, unsigned long idx)
 {
 	return alloc_page(gfp);
 }
@@ -1093,6 +1093,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 	struct page *swappage;
 	swp_entry_t *entry;
 	swp_entry_t swap;
+	gfp_t gfp;
 	int error;
 
 	if (idx >= SHMEM_MAX_INDEX)
@@ -1117,6 +1118,7 @@ repeat:
 	error = 0;
 	if (sgp == SGP_QUICK)
 		goto failed;
+	gfp = mapping_gfp_mask(mapping);
 
 	spin_lock(&info->lock);
 	shmem_recalc_inode(inode);
@@ -1139,7 +1141,7 @@ repeat:
 			*type |= VM_FAULT_MAJOR;
 		}
 		spin_unlock(&info->lock);
-		swappage = shmem_swapin(info, swap, idx);
+		swappage = shmem_swapin(swap, gfp, info, idx);
 		if (!swappage) {
 			spin_lock(&info->lock);
 			entry = shmem_swp_alloc(info, idx, sgp);
@@ -1251,9 +1253,7 @@ repeat:
 
 		if (!filepage) {
 			spin_unlock(&info->lock);
-			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
-						    info,
-						    idx);
+			filepage = shmem_alloc_page(gfp, info, idx);
 			if (!filepage) {
 				shmem_unacct_blocks(info->flags, 1);
 				shmem_free_blocks(inode, 1);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 668a80422630..e7875642e2cf 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -96,7 +96,8 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
 	return error;
 }
 
-static int add_to_swap_cache(struct page *page, swp_entry_t entry)
+static int add_to_swap_cache(struct page *page, swp_entry_t entry,
+				gfp_t gfp_mask)
 {
 	int error;
 
@@ -106,7 +107,7 @@ static int add_to_swap_cache(struct page *page, swp_entry_t entry)
 		return -ENOENT;
 	}
 	SetPageLocked(page);
-	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
+	error = __add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL);
 	/*
 	 * Anon pages are already on the LRU, we don't run lru_cache_add here.
 	 */
@@ -318,7 +319,7 @@ struct page * lookup_swap_cache(swp_entry_t entry)
  * A failure return means that either the page allocation failed or that
  * the swap entry is no longer in use.
  */
-struct page *read_swap_cache_async(swp_entry_t entry,
+struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr)
 {
 	struct page *found_page, *new_page = NULL;
@@ -338,8 +339,7 @@ struct page *read_swap_cache_async(swp_entry_t entry,
 		 * Get a new page to read into from swap.
 		 */
 		if (!new_page) {
-			new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
-								vma, addr);
+			new_page = alloc_page_vma(gfp_mask, vma, addr);
 			if (!new_page)
 				break;		/* Out of memory */
 		}
@@ -354,7 +354,7 @@ struct page *read_swap_cache_async(swp_entry_t entry,
 		 * the just freed swap entry for an existing page.
 		 * May fail (-ENOMEM) if radix-tree node allocation failed.
 		 */
-		err = add_to_swap_cache(new_page, entry);
+		err = add_to_swap_cache(new_page, entry, gfp_mask);
 		if (!err) {
 			/*
 			 * Initiate read into locked page and return.
@@ -388,7 +388,7 @@ struct page *read_swap_cache_async(swp_entry_t entry,
  *
  * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
  */
-struct page *swapin_readahead(swp_entry_t entry,
+struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr)
 {
 	int nr_pages;
@@ -407,11 +407,11 @@ struct page *swapin_readahead(swp_entry_t entry,
 	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
 		/* Ok, do the async read-ahead now */
 		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
-						vma, addr);
+						gfp_mask, vma, addr);
 		if (!page)
 			break;
 		page_cache_release(page);
 	}
 	lru_add_drain();	/* Push any new pages onto the LRU now */
-	return read_swap_cache_async(entry, vma, addr);
+	return read_swap_cache_async(entry, gfp_mask, vma, addr);
 }
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f071648e1360..ab93505dfbf4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -730,7 +730,8 @@ static int try_to_unuse(unsigned int type)
 		 */
 		swap_map = &si->swap_map[i];
 		entry = swp_entry(type, i);
-		page = read_swap_cache_async(entry, NULL, 0);
+		page = read_swap_cache_async(entry,
+					GFP_HIGHUSER_MOVABLE, NULL, 0);
 		if (!page) {
 			/*
 			 * Either swap_duplicate() failed because entry