Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c     8
-rw-r--r--  mm/slub.c     5
-rw-r--r--  mm/vmalloc.c  5
3 files changed, 10 insertions, 8 deletions
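The hunks below replace the old catch-all GFP_LEVEL_MASK with purpose-specific mask categories. For orientation, a sketch of how those categories are grouped in include/linux/gfp.h; the exact flag composition shown here is an assumption drawn from kernel headers of that era, not quoted from this diff:

/* Sketch only -- the authoritative definitions live in include/linux/gfp.h. */

/* Flags that control page allocator reclaim behaviour. */
#define GFP_RECLAIM_MASK	(__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
				 __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
				 __GFP_NORETRY|__GFP_NOMEMALLOC)

/* Flags that constrain where an allocation may be placed. */
#define GFP_CONSTRAINT_MASK	(__GFP_HARDWALL|__GFP_THISNODE)

/* Flags that must never reach a slab allocator. */
#define GFP_SLAB_BUG_MASK	(__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)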
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2746,9 +2746,9 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
+	BUG_ON(flags & GFP_SLAB_BUG_MASK);
+	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
-	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
 	l3 = cachep->nodelists[nodeid];
@@ -2785,7 +2785,7 @@ static int cache_grow(struct kmem_cache *cachep,
 
 	/* Get slab management. */
 	slabp = alloc_slabmgmt(cachep, objp, offset,
-			local_flags & ~GFP_THISNODE, nodeid);
+			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
 	if (!slabp)
 		goto opps1;
 
@@ -3225,7 +3225,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 
 	zonelist = &NODE_DATA(slab_node(current->mempolicy))
 			->node_zonelists[gfp_zone(flags)];
-	local_flags = (flags & GFP_LEVEL_MASK);
+	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
 retry:
 	/*
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1088,12 +1088,13 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *last;
 	void *p;
 
-	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
+	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 
 	if (flags & __GFP_WAIT)
 		local_irq_enable();
 
-	page = allocate_slab(s, flags & GFP_LEVEL_MASK, node);
+	page = allocate_slab(s,
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 	if (!page)
 		goto out;
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3cee76a8c9f0..2e01af365848 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -190,7 +190,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
 	if (unlikely(!size))
 		return NULL;
 
-	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
+	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
+
 	if (unlikely(!area))
 		return NULL;
 
@@ -439,7 +440,7 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
+				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
 				node);
 	}
 	area->pages = pages;
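Taken together, the hunks follow one pattern for any allocator that forwards a caller's gfp flags into its own internal allocations: trap flags that make no sense for the allocator, then pass on only the reclaim and constraint categories. A minimal sketch of that pattern, assuming the mask definitions above; the helper name is made up for illustration and is not part of this commit:

/* Illustrative only: sanitize caller-supplied flags before reusing them
 * for an internal allocation. */
static inline gfp_t internal_alloc_gfp(gfp_t flags)
{
	/* Zone modifiers such as __GFP_HIGHMEM must never reach slab. */
	BUG_ON(flags & GFP_SLAB_BUG_MASK);

	/* Forward only reclaim behaviour and placement constraints. */
	return flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK);
}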