Diffstat (limited to 'mm/shmem.c')
 mm/shmem.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 0493e4d0bcaa..e49181d9d893 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -93,8 +93,11 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 	 * The above definition of ENTRIES_PER_PAGE, and the use of
 	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
 	 * might be reconsidered if it ever diverges from PAGE_SIZE.
+	 *
+	 * __GFP_MOVABLE is masked out as swap vectors cannot move
 	 */
-	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
+	return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+			PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
 static inline void shmem_dir_free(struct page *page)
@@ -372,7 +375,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 	}
 
 	spin_unlock(&info->lock);
-	page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
+	page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
 	if (page)
 		set_page_private(page, 0);
 	spin_lock(&info->lock);
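
For readers less familiar with gfp flags, here is a minimal standalone sketch of the flag arithmetic the patched shmem_dir_alloc() performs: __GFP_MOVABLE is cleared so the directory page is not placed among movable pages (per the patch comment, swap vectors cannot move), and __GFP_ZERO is folded in so callers such as shmem_swp_alloc() in the second hunk no longer pass it themselves. The flag values below are illustrative stand-ins, not the real constants from include/linux/gfp.h, and shmem_dir_gfp() is a hypothetical helper for demonstration only.

	/*
	 * Sketch only: stand-in flag values, not the kernel's gfp.h definitions.
	 */
	#include <stdio.h>

	typedef unsigned int gfp_t;

	#define __GFP_HIGHMEM	0x02u	/* stand-in value */
	#define __GFP_MOVABLE	0x08u	/* stand-in value */
	#define __GFP_ZERO	0x100u	/* stand-in value */

	/* Mirrors the mask-and-or done in the patched shmem_dir_alloc() */
	static gfp_t shmem_dir_gfp(gfp_t gfp_mask)
	{
		return (gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO;
	}

	int main(void)
	{
		gfp_t in = __GFP_HIGHMEM | __GFP_MOVABLE;	/* e.g. a mapping's gfp mask */
		gfp_t out = shmem_dir_gfp(in);

		printf("in  = %#x\n", in);	/* 0xa:   highmem | movable */
		printf("out = %#x\n", out);	/* 0x102: highmem | zero, movable cleared */
		return 0;
	}

Folding __GFP_ZERO into the helper also keeps the zeroing policy in one place rather than at every call site, which is why the second hunk drops it from the caller.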