diff options
author | Wang Sheng-Hui <shhuiw@gmail.com> | 2014-08-06 19:07:26 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-06 21:01:20 -0400 |
commit | fed400a181447ba975d40e1df5e0d555eae51795 (patch) | |
tree | 479962a83347082a9ad6c1a57e0e4e9baf392c9c /mm | |
parent | 9a95f3cf7b33d66fa64727cff8cd2f2a9d09f335 (diff) |
mm/shmem.c: remove the unused gfp arg to shmem_add_to_page_cache()
The gfp arg is not used in shmem_add_to_page_cache. Remove this unused
arg.
Signed-off-by: Wang Sheng-Hui <shhuiw@gmail.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/shmem.c | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/mm/shmem.c b/mm/shmem.c index fe15d96c3166..302d1cf7ad07 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -293,7 +293,7 @@ static bool shmem_confirm_swap(struct address_space *mapping, | |||
293 | */ | 293 | */ |
294 | static int shmem_add_to_page_cache(struct page *page, | 294 | static int shmem_add_to_page_cache(struct page *page, |
295 | struct address_space *mapping, | 295 | struct address_space *mapping, |
296 | pgoff_t index, gfp_t gfp, void *expected) | 296 | pgoff_t index, void *expected) |
297 | { | 297 | { |
298 | int error; | 298 | int error; |
299 | 299 | ||
@@ -666,7 +666,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, | |||
666 | */ | 666 | */ |
667 | if (!error) | 667 | if (!error) |
668 | error = shmem_add_to_page_cache(*pagep, mapping, index, | 668 | error = shmem_add_to_page_cache(*pagep, mapping, index, |
669 | GFP_NOWAIT, radswap); | 669 | radswap); |
670 | if (error != -ENOMEM) { | 670 | if (error != -ENOMEM) { |
671 | /* | 671 | /* |
672 | * Truncation and eviction use free_swap_and_cache(), which | 672 | * Truncation and eviction use free_swap_and_cache(), which |
@@ -1112,7 +1112,7 @@ repeat: | |||
1112 | gfp & GFP_RECLAIM_MASK); | 1112 | gfp & GFP_RECLAIM_MASK); |
1113 | if (!error) { | 1113 | if (!error) { |
1114 | error = shmem_add_to_page_cache(page, mapping, index, | 1114 | error = shmem_add_to_page_cache(page, mapping, index, |
1115 | gfp, swp_to_radix_entry(swap)); | 1115 | swp_to_radix_entry(swap)); |
1116 | /* | 1116 | /* |
1117 | * We already confirmed swap under page lock, and make | 1117 | * We already confirmed swap under page lock, and make |
1118 | * no memory allocation here, so usually no possibility | 1118 | * no memory allocation here, so usually no possibility |
@@ -1175,7 +1175,7 @@ repeat: | |||
1175 | error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK); | 1175 | error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK); |
1176 | if (!error) { | 1176 | if (!error) { |
1177 | error = shmem_add_to_page_cache(page, mapping, index, | 1177 | error = shmem_add_to_page_cache(page, mapping, index, |
1178 | gfp, NULL); | 1178 | NULL); |
1179 | radix_tree_preload_end(); | 1179 | radix_tree_preload_end(); |
1180 | } | 1180 | } |
1181 | if (error) { | 1181 | if (error) { |