author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-15 00:24:02 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-15 00:24:02 -0500
commit		f527cf405017e60ceb28f84e2d60ab16fc34f209 (patch)
tree		eadf0bfa385dad2e76a27d9a01cdcb22bad0efc1 /include
parent		cead99dcf48eeaaac0a1ececff9c979756b79294 (diff)
parent		331dc558fa020451ff773973cee855fd721aa88e (diff)
Merge branch 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm
* 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
  slub: Support 4k kmallocs again to compensate for page allocator slowness
  slub: Fallback to kmalloc_large for failing higher order allocs
  slub: Determine gfpflags once and not every time a slab is allocated
  make slub.c:slab_address() static
  slub: kmalloc page allocator pass-through cleanup
  slab: avoid double initialization & do initialization in 1 place
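What the first and last two slub changes mean for callers: a compile-time-constant kmalloc() size is now served from kmalloc_caches[] up to a full page (the array gained one slot, restoring the 4k cache), and anything larger passes straight through to the page allocator via the new kmalloc_large() inline. Below is a minimal userspace sketch of that dispatch, not part of the commit; it assumes 4k pages, and toy_kmalloc_index() is a hypothetical stand-in for the kernel's kmalloc_index(), which additionally has 96- and 192-byte caches.

/*
 * Sketch: model of SLUB's constant-size kmalloc() dispatch after this merge.
 * Sizes up to PAGE_SIZE map to a power-of-two slab cache; larger sizes fall
 * through to the page allocator (kmalloc_large() in the real code).
 */
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumes 4k pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int toy_kmalloc_index(unsigned long size)
{
	int i;

	for (i = 3; i <= PAGE_SHIFT; i++)	/* 8-byte .. PAGE_SIZE caches */
		if (size <= (1UL << i))
			return i;
	return -1;				/* too big: page allocator */
}

int main(void)
{
	unsigned long sizes[] = { 32, 2048, 2049, 4096, 4097, 16384 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int idx = toy_kmalloc_index(sizes[i]);

		if (idx >= 0)
			printf("%5lu bytes -> kmalloc_caches[%d] (%lu-byte objects)\n",
			       sizes[i], idx, 1UL << idx);
		else
			printf("%5lu bytes -> kmalloc_large() pass-through\n",
			       sizes[i]);
	}
	return 0;
}

Before this merge the cutoff was PAGE_SIZE / 2, so the 2049- and 4096-byte cases above would have gone to the page allocator rather than a slab cache.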
Diffstat (limited to 'include')
-rw-r--r--	include/linux/slub_def.h	15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5e6d3d634d5b..57deecc79d52 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -71,6 +71,7 @@ struct kmem_cache {
 
 	/* Allocation and freeing of slabs */
 	int objects;		/* Number of objects in slab */
+	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
 	int inuse;		/* Offset to metadata */
@@ -110,7 +111,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -188,12 +189,16 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+}
+
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE / 2)
-			return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+		if (size > PAGE_SIZE)
+			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
@@ -214,7 +219,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
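On the pass-through side, kmalloc_large() hands get_order(size) to __get_free_pages(), so large allocations are rounded up to a power-of-two number of pages. A standalone sketch of that rounding, not part of the commit; it assumes 4k pages, and toy_get_order() is a hypothetical reimplementation of the kernel's get_order():

/*
 * Sketch: what get_order() does to the sizes that now reach kmalloc_large().
 */
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumes 4k pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int toy_get_order(unsigned long size)
{
	unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { PAGE_SIZE + 1, 8192, 12288, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int order = toy_get_order(sizes[i]);

		printf("kmalloc(%lu) -> order-%d page allocation (%lu bytes)\n",
		       sizes[i], order, PAGE_SIZE << order);
	}
	return 0;
}

The __GFP_COMP in kmalloc_large() makes the result a compound page, which is what lets the free path recognize a page-allocator allocation and release the whole order at once.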