Diffstat (limited to 'include/linux/slub_def.h')
 -rw-r--r--  include/linux/slub_def.h | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5e6d3d634d5b..57deecc79d52 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -71,6 +71,7 @@ struct kmem_cache {
 
 	/* Allocation and freeing of slabs */
 	int objects;		/* Number of objects in slab */
+	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
 	int inuse;		/* Offset to metadata */
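The new allocflags field caches the gfp flags that every slab allocation for this cache should carry. A minimal sketch of how such a field might be precomputed once at cache-setup time (illustrative only, assuming a helper of this shape; the real logic lives in mm/slub.c and is not part of this header diff):

	/* Hypothetical helper: derive the per-cache gfp flags once, so the
	 * allocation path can pass s->allocflags straight to the page
	 * allocator instead of recomputing them on every slab refill. */
	static void sketch_set_allocflags(struct kmem_cache *s, int order)
	{
		s->allocflags = 0;
		if (order)				/* multi-page slab: needs a compound page */
			s->allocflags |= __GFP_COMP;
		if (s->flags & SLAB_CACHE_DMA)		/* DMA cache: allocate from ZONE_DMA */
			s->allocflags |= GFP_DMA;
	}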
@@ -110,7 +111,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
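Growing the array to PAGE_SHIFT + 1 entries follows from the kmalloc() change below: constant-size requests up to a full page (2^PAGE_SHIFT bytes) are now served from slab caches, so an index equal to PAGE_SHIFT must be valid. A hedged illustration of the power-of-two index mapping assumed here (not the kernel's actual kmalloc_index()):

	/* Illustration only: a request is rounded up to the next power of
	 * two, 2^i, and served from kmalloc_caches[i].  Permitting sizes up
	 * to PAGE_SIZE means i can reach PAGE_SHIFT, hence the need for
	 * PAGE_SHIFT + 1 array slots. */
	static inline int sketch_kmalloc_index(size_t size)
	{
		int i = 0;

		while (((size_t)1 << i) < size)
			i++;
		return i;	/* i == PAGE_SHIFT when size == PAGE_SIZE */
	}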
@@ -188,12 +189,16 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+}
+
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE / 2)
-			return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+		if (size > PAGE_SIZE)
+			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
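With this hunk, a compile-time-constant request larger than one page no longer goes through the slab layer at all: it inlines to kmalloc_large(), which asks the page allocator for a compound allocation of get_order(size) pages. Requests up to and including PAGE_SIZE (previously only up to PAGE_SIZE / 2) still resolve to a slab cache via kmalloc_slab(). A brief usage sketch; the sizes are arbitrary examples:

	void *small, *large;

	small = kmalloc(PAGE_SIZE, GFP_KERNEL);		/* now served by a slab cache */
	large = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);	/* inlines to kmalloc_large():
							 * an order-1 compound page,
							 * since get_order(2 * PAGE_SIZE) == 1 */
	kfree(small);
	kfree(large);		/* compound (non-slab) pages are freed as whole pages */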
@@ -214,7 +219,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 			if (!s)
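The node-aware path gets the same limit bump: a constant-size, non-DMA request up to a full page picks a kmalloc slab cache and allocates from it on the requested NUMA node, while non-constant sizes and DMA requests take the non-inlined path. A small usage sketch, with the node choice as an arbitrary example:

	/* Page-sized, node-local buffer: constant size <= PAGE_SIZE and no
	 * SLUB_DMA flag, so this resolves to a per-node slab allocation. */
	void *buf = kmalloc_node(PAGE_SIZE, GFP_KERNEL, numa_node_id());

	kfree(buf);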
