aboutsummaryrefslogtreecommitdiffstats
path: root/mm/shmem.c
diff options
context:
space:
mode:
authorHugh Dickins <hugh@veritas.com>2008-02-05 01:28:42 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-05 12:44:14 -0500
commit02098feaa42b2e0087fbbe6c6ab9a23e4653b16a (patch)
tree494eaf13f204c9384d4316202fd76cd1b5d960ad /mm/shmem.c
parent46017e954826ac59e91df76341a3f76b45467847 (diff)
swapin needs gfp_mask for loop on tmpfs
Building in a filesystem on a loop device on a tmpfs file can hang when swapping, the loop thread caught in that infamous throttle_vm_writeout. In theory this is a long standing problem, which I've either never seen in practice, or long ago suppressed the recollection, after discounting my load and my tmpfs size as unrealistically high. But now, with the new aops, it has become easy to hang on one machine. Loop used to grab_cache_page before the old prepare_write to tmpfs, which seems to have been enough to free up some memory for any swapin needed; but the new write_begin lets tmpfs find or allocate the page (much nicer, since grab_cache_page missed tmpfs pages in swapcache). When allocating a fresh page, tmpfs respects loop's mapping_gfp_mask, which has __GFP_IO|__GFP_FS stripped off, and throttle_vm_writeout is designed to break out when __GFP_IO or __GFP_FS is unset; but when tmpfs swaps in, read_swap_cache_async allocates with GFP_HIGHUSER_MOVABLE regardless of the mapping_gfp_mask - hence the hang. So, pass gfp_mask down the line from shmem_getpage to shmem_swapin to swapin_readahead to read_swap_cache_async to add_to_swap_cache. Signed-off-by: Hugh Dickins <hugh@veritas.com> Acked-by: Rik van Riel <riel@redhat.com> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--mm/shmem.c28
1 files changed, 14 insertions, 14 deletions
diff --git a/mm/shmem.c b/mm/shmem.c
index 3a22a8f79331..55b696aa3ddd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1025,8 +1025,8 @@ out:
1025 return err; 1025 return err;
1026} 1026}
1027 1027
1028static struct page *shmem_swapin(struct shmem_inode_info *info, 1028static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
1029 swp_entry_t entry, unsigned long idx) 1029 struct shmem_inode_info *info, unsigned long idx)
1030{ 1030{
1031 struct vm_area_struct pvma; 1031 struct vm_area_struct pvma;
1032 struct page *page; 1032 struct page *page;
@@ -1036,13 +1036,13 @@ static struct page *shmem_swapin(struct shmem_inode_info *info,
1036 pvma.vm_pgoff = idx; 1036 pvma.vm_pgoff = idx;
1037 pvma.vm_ops = NULL; 1037 pvma.vm_ops = NULL;
1038 pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); 1038 pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
1039 page = swapin_readahead(entry, &pvma, 0); 1039 page = swapin_readahead(entry, gfp, &pvma, 0);
1040 mpol_free(pvma.vm_policy); 1040 mpol_free(pvma.vm_policy);
1041 return page; 1041 return page;
1042} 1042}
1043 1043
1044static struct page *shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, 1044static struct page *shmem_alloc_page(gfp_t gfp,
1045 unsigned long idx) 1045 struct shmem_inode_info *info, unsigned long idx)
1046{ 1046{
1047 struct vm_area_struct pvma; 1047 struct vm_area_struct pvma;
1048 struct page *page; 1048 struct page *page;
@@ -1063,14 +1063,14 @@ static inline int shmem_parse_mpol(char *value, int *policy,
1063 return 1; 1063 return 1;
1064} 1064}
1065 1065
1066static inline struct page * 1066static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
1067shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx) 1067 struct shmem_inode_info *info, unsigned long idx)
1068{ 1068{
1069 return swapin_readahead(entry, NULL, 0); 1069 return swapin_readahead(entry, gfp, NULL, 0);
1070} 1070}
1071 1071
1072static inline struct page * 1072static inline struct page *shmem_alloc_page(gfp_t gfp,
1073shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx) 1073 struct shmem_inode_info *info, unsigned long idx)
1074{ 1074{
1075 return alloc_page(gfp); 1075 return alloc_page(gfp);
1076} 1076}
@@ -1093,6 +1093,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
1093 struct page *swappage; 1093 struct page *swappage;
1094 swp_entry_t *entry; 1094 swp_entry_t *entry;
1095 swp_entry_t swap; 1095 swp_entry_t swap;
1096 gfp_t gfp;
1096 int error; 1097 int error;
1097 1098
1098 if (idx >= SHMEM_MAX_INDEX) 1099 if (idx >= SHMEM_MAX_INDEX)
@@ -1117,6 +1118,7 @@ repeat:
1117 error = 0; 1118 error = 0;
1118 if (sgp == SGP_QUICK) 1119 if (sgp == SGP_QUICK)
1119 goto failed; 1120 goto failed;
1121 gfp = mapping_gfp_mask(mapping);
1120 1122
1121 spin_lock(&info->lock); 1123 spin_lock(&info->lock);
1122 shmem_recalc_inode(inode); 1124 shmem_recalc_inode(inode);
@@ -1139,7 +1141,7 @@ repeat:
1139 *type |= VM_FAULT_MAJOR; 1141 *type |= VM_FAULT_MAJOR;
1140 } 1142 }
1141 spin_unlock(&info->lock); 1143 spin_unlock(&info->lock);
1142 swappage = shmem_swapin(info, swap, idx); 1144 swappage = shmem_swapin(swap, gfp, info, idx);
1143 if (!swappage) { 1145 if (!swappage) {
1144 spin_lock(&info->lock); 1146 spin_lock(&info->lock);
1145 entry = shmem_swp_alloc(info, idx, sgp); 1147 entry = shmem_swp_alloc(info, idx, sgp);
@@ -1251,9 +1253,7 @@ repeat:
1251 1253
1252 if (!filepage) { 1254 if (!filepage) {
1253 spin_unlock(&info->lock); 1255 spin_unlock(&info->lock);
1254 filepage = shmem_alloc_page(mapping_gfp_mask(mapping), 1256 filepage = shmem_alloc_page(gfp, info, idx);
1255 info,
1256 idx);
1257 if (!filepage) { 1257 if (!filepage) {
1258 shmem_unacct_blocks(info->flags, 1); 1258 shmem_unacct_blocks(info->flags, 1);
1259 shmem_free_blocks(inode, 1); 1259 shmem_free_blocks(inode, 1);