-rw-r--r--	include/linux/gfp.h	| 20
-rw-r--r--	mm/slab.c	|  8
-rw-r--r--	mm/slub.c	|  5
-rw-r--r--	mm/vmalloc.c	|  5
4 files changed, 23 insertions, 15 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 12a90a191c11..da8aa872eb6e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -53,13 +53,6 @@ struct vm_area_struct;
 #define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
-/* if you forget to add the bitmask here kernel will crash, period */
-#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
-			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
-			__GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
-			__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE| \
-			__GFP_MOVABLE)
-
 /* This equals 0, but use constants in case they ever change */
 #define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
 /* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
@@ -86,6 +79,19 @@ struct vm_area_struct;
 #define GFP_THISNODE	((__force gfp_t)0)
 #endif
 
+/* This mask makes up all the page movable related flags */
+#define GFP_MOVABLE_MASK (__GFP_MOVABLE)
+
+/* Control page allocator reclaim behavior */
+#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
+			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
+			__GFP_NORETRY|__GFP_NOMEMALLOC)
+
+/* Control allocation constraints */
+#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
+
+/* Do not use these with a slab allocator */
+#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
 /* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
    platforms, used as appropriate on others */
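
Note (not part of the patch): the hunks above split the old catch-all GFP_LEVEL_MASK into purpose-specific masks. Below is a minimal, self-contained C sketch of how the split is meant to be used by code that forwards flags to a nested allocation; all bit values, and the helper nested_gfp(), are placeholders for illustration only, not the kernel's real definitions.

/* Illustrative sketch only -- placeholder bit values, not the kernel's. */
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_WAIT	((gfp_t)0x01)	/* placeholder: reclaim behaviour */
#define __GFP_IO	((gfp_t)0x02)	/* placeholder: reclaim behaviour */
#define __GFP_HARDWALL	((gfp_t)0x04)	/* placeholder: allocation constraint */
#define __GFP_THISNODE	((gfp_t)0x08)	/* placeholder: allocation constraint */
#define __GFP_HIGHMEM	((gfp_t)0x10)	/* placeholder: never valid for slab */

#define GFP_RECLAIM_MASK	(__GFP_WAIT | __GFP_IO)
#define GFP_CONSTRAINT_MASK	(__GFP_HARDWALL | __GFP_THISNODE)
#define GFP_SLAB_BUG_MASK	(__GFP_HIGHMEM)

/* Keep only the bits a nested allocation should inherit from its caller. */
static gfp_t nested_gfp(gfp_t flags)
{
	return flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK);
}

int main(void)
{
	gfp_t flags = __GFP_WAIT | __GFP_THISNODE | __GFP_HIGHMEM;

	printf("bits passed down: %#x\n", nested_gfp(flags));
	printf("bits a slab allocator rejects: %#x\n", flags & GFP_SLAB_BUG_MASK);
	return 0;
}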
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2746,9 +2746,9 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
+	BUG_ON(flags & GFP_SLAB_BUG_MASK);
+	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
-	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
 	l3 = cachep->nodelists[nodeid];
@@ -2785,7 +2785,7 @@ static int cache_grow(struct kmem_cache *cachep,
 
 	/* Get slab management. */
 	slabp = alloc_slabmgmt(cachep, objp, offset,
-			local_flags & ~GFP_THISNODE, nodeid);
+			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
 	if (!slabp)
 		goto opps1;
 
@@ -3225,7 +3225,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 
 	zonelist = &NODE_DATA(slab_node(current->mempolicy))
 			->node_zonelists[gfp_zone(flags)];
-	local_flags = (flags & GFP_LEVEL_MASK);
+	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
 retry:
 	/*
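
Note (not part of the patch): both cache_grow() above and new_slab() below follow the same pattern: trap disallowed bits once with BUG_ON(), then carry only reclaim and constraint bits, stripping the constraint bits again before any nested allocation that must not inherit node placement. A hedged standalone sketch of that pattern, with placeholder mask values and hypothetical helper names, is:

/* Illustrative sketch only -- placeholder values and hypothetical helpers. */
#include <assert.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define GFP_RECLAIM_MASK	((gfp_t)0x0003)	/* placeholder value */
#define GFP_CONSTRAINT_MASK	((gfp_t)0x000c)	/* placeholder value */
#define GFP_SLAB_BUG_MASK	((gfp_t)0x0010)	/* placeholder value */

/* Hypothetical helper mirroring the new cache_grow()/new_slab() entry checks:
 * reject invalid bits (BUG_ON in the kernel), keep reclaim + constraint bits. */
static gfp_t slab_local_flags(gfp_t flags)
{
	assert(!(flags & GFP_SLAB_BUG_MASK));
	return flags & (GFP_CONSTRAINT_MASK | GFP_RECLAIM_MASK);
}

/* Hypothetical helper mirroring the alloc_slabmgmt() call: the nested
 * management allocation does not inherit placement constraints. */
static gfp_t slab_mgmt_flags(gfp_t local_flags)
{
	return local_flags & ~GFP_CONSTRAINT_MASK;
}

int main(void)
{
	gfp_t local = slab_local_flags((gfp_t)0x0005);	/* one reclaim + one constraint bit */

	printf("local flags: %#x, management flags: %#x\n",
	       local, slab_mgmt_flags(local));
	return 0;
}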
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1088,12 +1088,13 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *last;
 	void *p;
 
-	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
+	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 
 	if (flags & __GFP_WAIT)
 		local_irq_enable();
 
-	page = allocate_slab(s, flags & GFP_LEVEL_MASK, node);
+	page = allocate_slab(s,
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 	if (!page)
 		goto out;
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3cee76a8c9f0..2e01af365848 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -190,7 +190,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
 	if (unlikely(!size))
 		return NULL;
 
-	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
+	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
+
 	if (unlikely(!area))
 		return NULL;
 
@@ -439,7 +440,7 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
+				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
 				node);
 	}
 	area->pages = pages;
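
Note (not part of the patch): in the vmalloc.c hunks above, the internal bookkeeping allocations now forward only the caller's reclaim behaviour, not its placement constraints, and the pages[] array additionally requests zeroed memory. A small illustrative sketch of the resulting flag computation, with placeholder values and a hypothetical helper name:

/* Illustrative sketch only -- placeholder values, not kernel code. */
#include <stdio.h>

typedef unsigned int gfp_t;

#define GFP_RECLAIM_MASK	((gfp_t)0x0003)	/* placeholder value */
#define __GFP_ZERO		((gfp_t)0x0100)	/* placeholder value */

/* Hypothetical helper: flags vmalloc would hand to kmalloc_node() for its
 * pages[] array -- reclaim bits from the caller, plus a zeroed allocation. */
static gfp_t pages_array_gfp(gfp_t gfp_mask)
{
	return (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
}

int main(void)
{
	printf("pages[] gfp: %#x\n", pages_array_gfp((gfp_t)0x00ff));
	return 0;
}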