author	Hugh Dickins <hughd@google.com>	2011-07-25 20:12:34 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-25 23:57:11 -0400
commit	68da9f055755ee2609a1686722e6d6a7980019ee (patch)
tree	9f2810f28a7852b0ed3390f7fa0d29979a2997c2 /mm/shmem.c
parent	71f0e07a605fad1fb6b288e4dc1dd8dfa78f4872 (diff)
tmpfs: pass gfp to shmem_getpage_gfp
Make shmem_getpage() a wrapper, passing mapping_gfp_mask() down to shmem_getpage_gfp(), which in turn passes gfp down to shmem_swp_alloc().

Change shmem_read_mapping_page_gfp() to use shmem_getpage_gfp() in the CONFIG_SHMEM case; but leave tiny !SHMEM using read_cache_page_gfp(). Add a BUG_ON() in case anyone happens to call this on a non-shmem mapping; though we might later want to let that case route to read_cache_page_gfp().

It annoys me to have these two almost-redundant args, gfp and fault_type: I can't find a better way; but initialize fault_type only in shmem_fault().

Note that before, read_cache_page_gfp() was allocating i915_gem's pages with __GFP_NORETRY as intended; but the corresponding swap vector pages got allocated without it, leaving a small possibility of OOM.

Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
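[Editor's sketch, not part of the patch] As a rough illustration of what the gfp plumbing buys a caller, a shmem-backed object can now pass a best-effort mask through shmem_read_mapping_page_gfp() and have it honoured by the swap-vector allocations as well as the data pages. The obj_inode and index names below are hypothetical, and any real driver's flag mix may differ:

	struct address_space *mapping = obj_inode->i_mapping;	/* hypothetical inode */
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
	struct page *page;

	/* with this patch, gfp reaches shmem_swp_alloc() too, not just the page cache */
	page = shmem_read_mapping_page_gfp(mapping, index, gfp);
	if (IS_ERR(page))
		return PTR_ERR(page);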
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	67
1 file changed, 44 insertions(+), 23 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index f96614526d1c..f6c94ba87808 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -127,8 +127,15 @@ static unsigned long shmem_default_max_inodes(void)
 }
 #endif
 
-static int shmem_getpage(struct inode *inode, unsigned long idx,
-			struct page **pagep, enum sgp_type sgp, int *type);
+static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
+	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
+
+static inline int shmem_getpage(struct inode *inode, pgoff_t index,
+	struct page **pagep, enum sgp_type sgp, int *fault_type)
+{
+	return shmem_getpage_gfp(inode, index, pagep, sgp,
+			mapping_gfp_mask(inode->i_mapping), fault_type);
+}
 
 static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 {
@@ -404,10 +411,12 @@ static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, uns
  * @info:	info structure for the inode
  * @index:	index of the page to find
  * @sgp:	check and recheck i_size? skip allocation?
+ * @gfp:	gfp mask to use for any page allocation
  *
  * If the entry does not exist, allocate it.
  */
-static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
+static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info,
+			unsigned long index, enum sgp_type sgp, gfp_t gfp)
 {
 	struct inode *inode = &info->vfs_inode;
 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
@@ -435,7 +444,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 	}
 
 	spin_unlock(&info->lock);
-	page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
+	page = shmem_dir_alloc(gfp);
 	spin_lock(&info->lock);
 
 	if (!page) {
@@ -1225,14 +1234,14 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
 #endif
 
 /*
- * shmem_getpage - either get the page from swap or allocate a new one
+ * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
  *
  * If we allocate a new one we do not mark it dirty. That's up to the
  * vm. If we swap it in we mark it dirty since we also free the swap
  * entry since a page cannot live in both the swap and page cache
  */
-static int shmem_getpage(struct inode *inode, unsigned long idx,
-			struct page **pagep, enum sgp_type sgp, int *type)
+static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
+	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
@@ -1242,15 +1251,11 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 	struct page *prealloc_page = NULL;
 	swp_entry_t *entry;
 	swp_entry_t swap;
-	gfp_t gfp;
 	int error;
 
 	if (idx >= SHMEM_MAX_INDEX)
 		return -EFBIG;
 
-	if (type)
-		*type = 0;
-
 	/*
 	 * Normally, filepage is NULL on entry, and either found
 	 * uptodate immediately, or allocated and zeroed, or read
@@ -1264,13 +1269,12 @@ repeat:
 	filepage = find_lock_page(mapping, idx);
 	if (filepage && PageUptodate(filepage))
 		goto done;
-	gfp = mapping_gfp_mask(mapping);
 	if (!filepage) {
 		/*
 		 * Try to preload while we can wait, to not make a habit of
 		 * draining atomic reserves; but don't latch on to this cpu.
 		 */
-		error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
+		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
 		if (error)
 			goto failed;
 		radix_tree_preload_end();
@@ -1290,7 +1294,7 @@ repeat:
 
 	spin_lock(&info->lock);
 	shmem_recalc_inode(inode);
-	entry = shmem_swp_alloc(info, idx, sgp);
+	entry = shmem_swp_alloc(info, idx, sgp, gfp);
 	if (IS_ERR(entry)) {
 		spin_unlock(&info->lock);
 		error = PTR_ERR(entry);
@@ -1305,12 +1309,12 @@ repeat:
 		shmem_swp_unmap(entry);
 		spin_unlock(&info->lock);
 		/* here we actually do the io */
-		if (type)
-			*type |= VM_FAULT_MAJOR;
+		if (fault_type)
+			*fault_type |= VM_FAULT_MAJOR;
 		swappage = shmem_swapin(swap, gfp, info, idx);
 		if (!swappage) {
 			spin_lock(&info->lock);
-			entry = shmem_swp_alloc(info, idx, sgp);
+			entry = shmem_swp_alloc(info, idx, sgp, gfp);
 			if (IS_ERR(entry))
 				error = PTR_ERR(entry);
 			else {
@@ -1461,7 +1465,7 @@ repeat:
 			SetPageSwapBacked(filepage);
 		}
 
-		entry = shmem_swp_alloc(info, idx, sgp);
+		entry = shmem_swp_alloc(info, idx, sgp, gfp);
 		if (IS_ERR(entry))
 			error = PTR_ERR(entry);
 		else {
@@ -1539,7 +1543,7 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 	int error;
-	int ret;
+	int ret = VM_FAULT_LOCKED;
 
 	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
 		return VM_FAULT_SIGBUS;
@@ -1547,11 +1551,12 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
 	if (error)
 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
+
 	if (ret & VM_FAULT_MAJOR) {
 		count_vm_event(PGMAJFAULT);
 		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 	}
-	return ret | VM_FAULT_LOCKED;
+	return ret;
 }
 
 #ifdef CONFIG_NUMA
@@ -3162,13 +3167,29 @@ int shmem_zero_setup(struct vm_area_struct *vma)
  * suit tmpfs, since it may have pages in swapcache, and needs to find those
  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
  *
- * Provide a stub for those callers to start using now, then later
- * flesh it out to call shmem_getpage() with additional gfp mask, when
- * shmem_file_splice_read() is added and shmem_readpage() is removed.
+ * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
+ * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
  */
 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 					pgoff_t index, gfp_t gfp)
 {
+#ifdef CONFIG_SHMEM
+	struct inode *inode = mapping->host;
+	struct page *page = NULL;
+	int error;
+
+	BUG_ON(mapping->a_ops != &shmem_aops);
+	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
+	if (error)
+		page = ERR_PTR(error);
+	else
+		unlock_page(page);
+	return page;
+#else
+	/*
+	 * The tiny !SHMEM case uses ramfs without swap
+	 */
 	return read_cache_page_gfp(mapping, index, gfp);
+#endif
 }
 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
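[Editor's sketch, not part of the patch] A minimal usage note for the CONFIG_SHMEM path above, with a hypothetical caller ('mapping' and 'index' stand for whatever the driver tracks): the page comes back unlocked but with a reference held, so the caller drops it when done.

	page = shmem_read_mapping_page_gfp(mapping, index, mapping_gfp_mask(mapping));
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... copy to/from the page, map it, etc. ... */
	page_cache_release(page);	/* drop the reference the lookup took */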