Diffstat (limited to 'mm')
 mm/page_alloc.c | 10 +++++++---
 mm/shmem.c      |  4 ++--
 mm/slab.c       |  2 ++
 mm/slub.c       |  3 +++
 4 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d575a3ee8dd8..29f4de1423c9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -172,7 +172,10 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 
 static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
 {
-	return ((gfp_flags & __GFP_MOVABLE) != 0);
+	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+
+	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
+			((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
 #else
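
[editor's note: the new gfpflags_to_migratetype() packs the two mobility bits into a small index: __GFP_MOVABLE selects bit 1 and __GFP_RECLAIMABLE bit 0, so the result enumerates unmovable (0), reclaimable (1) and movable (2), and the WARN_ON catches callers that set both bits at once. A minimal userspace sketch of that mapping follows; the flag bit values and the MIGRATE_* ordering are illustrative stand-ins implied by the arithmetic, not taken from the kernel headers.]

#include <assert.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_RECLAIMABLE ((gfp_t)0x80000u)  /* assumed bit value */
#define __GFP_MOVABLE     ((gfp_t)0x100000u) /* assumed bit value */
#define GFP_MOVABLE_MASK  (__GFP_RECLAIMABLE | __GFP_MOVABLE)

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE };

static int gfpflags_to_migratetype(gfp_t gfp_flags)
{
	/* Setting both mobility bits is a caller bug; the patch WARNs here. */
	assert((gfp_flags & GFP_MOVABLE_MASK) != GFP_MOVABLE_MASK);

	/* __GFP_MOVABLE is bit 1 of the index, __GFP_RECLAIMABLE bit 0. */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	printf("%d\n", gfpflags_to_migratetype(0));                 /* 0 = MIGRATE_UNMOVABLE   */
	printf("%d\n", gfpflags_to_migratetype(__GFP_RECLAIMABLE)); /* 1 = MIGRATE_RECLAIMABLE */
	printf("%d\n", gfpflags_to_migratetype(__GFP_MOVABLE));     /* 2 = MIGRATE_MOVABLE     */
	return 0;
}
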
@@ -676,8 +679,9 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE] = { MIGRATE_MOVABLE },
-	[MIGRATE_MOVABLE] = { MIGRATE_UNMOVABLE },
+	[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
+	[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
 };
 
 /*
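
[editor's note: the widened fallbacks table gives each migrate type an ordered preference list to raid when its own free lists are exhausted: unmovable requests steal from reclaimable pageblocks before movable ones, and movable requests do the reverse. A sketch of how an allocator-side walk might consult the table; find_fallback() and the stubbed list_empty() are hypothetical stand-ins for the kernel's actual free-list scan, not its code.]

#include <stdbool.h>
#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES };

static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
	[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
};

/* Pretend only the movable free list still has pages. */
static bool list_empty(int migratetype)
{
	return migratetype != MIGRATE_MOVABLE;
}

/* Try the preferred list first, then its fallbacks in table order. */
static int find_fallback(int preferred)
{
	if (!list_empty(preferred))
		return preferred;
	for (int i = 0; i < MIGRATE_TYPES - 1; i++)
		if (!list_empty(fallbacks[preferred][i]))
			return fallbacks[preferred][i];
	return -1;	/* nothing left */
}

int main(void)
{
	/* Unmovable tries reclaimable first; it is empty here, so the
	 * request lands on the movable list (prints 2). */
	printf("fallback -> %d\n", find_fallback(MIGRATE_UNMOVABLE));
	return 0;
}
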
diff --git a/mm/shmem.c b/mm/shmem.c
index 855b93b3637c..76ecbac0d55b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -95,9 +95,9 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
 	 * might be reconsidered if it ever diverges from PAGE_SIZE.
 	 *
-	 * __GFP_MOVABLE is masked out as swap vectors cannot move
+	 * Mobility flags are masked out as swap vectors cannot move
 	 */
-	return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
 			PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
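
[editor's note: the shmem change assumes GFP_MOVABLE_MASK covers both mobility bits, so clearing it pins the directory pages to the unmovable type no matter what the caller passed in gfp_mask. A tiny sketch of the masking arithmetic, reusing the illustrative constants from the earlier example.]

#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_RECLAIMABLE ((gfp_t)0x80000u)  /* assumed */
#define __GFP_MOVABLE     ((gfp_t)0x100000u) /* assumed */
#define __GFP_ZERO        ((gfp_t)0x8000u)   /* assumed */
#define GFP_MOVABLE_MASK  (__GFP_RECLAIMABLE | __GFP_MOVABLE)

int main(void)
{
	gfp_t gfp_mask = __GFP_MOVABLE;	/* a movable caller */
	gfp_t dir_mask = (gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO;

	/* No mobility bit survives, so the page is grouped as unmovable. */
	printf("mobility bits left: %#x\n", dir_mask & GFP_MOVABLE_MASK);
	return 0;
}
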
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1643,6 +1643,8 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 #endif
 
 	flags |= cachep->gfpflags;
+	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+		flags |= __GFP_RECLAIMABLE;
 
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
 	if (!page)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1055,6 +1055,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (s->flags & SLAB_CACHE_DMA)
 		flags |= SLUB_DMA;
 
+	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+		flags |= __GFP_RECLAIMABLE;
+
 	if (node == -1)
 		page = alloc_pages(flags, s->order);
 	else
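
[editor's note: both slab allocators now translate the existing SLAB_RECLAIM_ACCOUNT cache flag into __GFP_RECLAIMABLE at page-allocation time, so slabs whose objects the kernel can shrink under pressure end up grouped on reclaimable pageblocks. A small sketch of that propagation, with the flag values assumed rather than taken from the kernel headers.]

#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_RECLAIMABLE    ((gfp_t)0x80000u)		/* assumed */
#define SLAB_RECLAIM_ACCOUNT ((unsigned long)0x20000ul)	/* assumed */

struct kmem_cache {
	unsigned long flags;	/* cache creation flags */
};

/* Mirror of the hunks above: fold the cache flag into the gfp mask. */
static gfp_t slab_gfp(const struct kmem_cache *s, gfp_t flags)
{
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;
	return flags;
}

int main(void)
{
	struct kmem_cache reclaimable_cache = { .flags = SLAB_RECLAIM_ACCOUNT };
	struct kmem_cache plain_cache = { .flags = 0 };

	printf("%#x\n", slab_gfp(&reclaimable_cache, 0)); /* gains __GFP_RECLAIMABLE */
	printf("%#x\n", slab_gfp(&plain_cache, 0));       /* unchanged               */
	return 0;
}
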