author     Michal Hocko <mhocko@suse.com>                      2015-11-06 19:28:49 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>      2015-11-06 20:50:42 -0500
commit     c62d25556be6c965dc14288e796a576e8e39a7e9 (patch)
tree       d82b41091f4dd8d378e2e2c841c90093da1485e0  /mm/readahead.c
parent     89903327607232de32f05100cf03f9390b858e0b (diff)
mm, fs: introduce mapping_gfp_constraint()
There are many places which use mapping_gfp_mask to restrict a more generic
gfp mask which would be used for allocations which are not directly related
to the page cache but are performed in the same context.

Let's introduce a helper function which makes the restriction explicit and
easier to track.  This patch doesn't introduce any functional changes.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Michal Hocko <mhocko@suse.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
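For context, the diff below only covers the mm/readahead.c callers; the helper
itself is added elsewhere in the full patch (in include/linux/pagemap.h).  A
minimal sketch of what the new call expands to, assuming the existing
mapping_gfp_mask() accessor, is:

static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
					   gfp_t gfp_mask)
{
	/* Restrict the caller's gfp mask to what the mapping allows. */
	return mapping_gfp_mask(mapping) & gfp_mask;
}

So mapping_gfp_constraint(mapping, GFP_KERNEL) is equivalent to the open-coded
GFP_KERNEL & mapping_gfp_mask(mapping) it replaces below, just with the intent
made explicit at the call site.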
Diffstat (limited to 'mm/readahead.c')
-rw-r--r--   mm/readahead.c   4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 998ad592f408..ba22d7fe0afb 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -90,7 +90,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 		page = list_to_page(pages);
 		list_del(&page->lru);
 		if (add_to_page_cache_lru(page, mapping, page->index,
-				GFP_KERNEL & mapping_gfp_mask(mapping))) {
+				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
@@ -128,7 +128,7 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 		struct page *page = list_to_page(pages);
 		list_del(&page->lru);
 		if (!add_to_page_cache_lru(page, mapping, page->index,
-				GFP_KERNEL & mapping_gfp_mask(mapping))) {
+				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
 			mapping->a_ops->readpage(filp, page);
 		}
 		page_cache_release(page);