aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2006-12-13 03:34:24 -0500
committerLinus Torvalds <torvalds@woody.osdl.org>2006-12-13 12:05:49 -0500
commit55935a34a428a1497e3b37982e2782c09c6f914d (patch)
tree270e68db0a0c8819986fd5150d942812a02d8f42 /include
parent2e892f43ccb602e8ffad73396a1000f2040c9e0b (diff)
[PATCH] More slab.h cleanups
More cleanups for slab.h:

1. Remove tabs from weird locations as suggested by Pekka.
2. Drop the check for NUMA and SLAB_DEBUG from the fallback section as
   suggested by Pekka.
3. Uses static inline for the fallback defs as also suggested by Pekka.
4. Make kmem_ptr_valid take a const * argument.
5. Separate the NUMA fallback definitions from the kmalloc_track fallback
   definitions.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include')
-rw-r--r--include/linux/slab.h44
1 file changed, 24 insertions, 20 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index e7a9c6b42412..1ef822e31c77 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -20,11 +20,11 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_DEBUG_INITIAL	0x00000200UL	/* DEBUG: Call constructor (as verifier) */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* Force alignment even if debuggin is active */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
@@ -34,9 +34,9 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */

/* Flags passed to a constructor functions */
#define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* If not set, then deconstructor */
#define SLAB_CTOR_ATOMIC	0x002UL		/* Tell constructor it can't sleep */
#define SLAB_CTOR_VERIFY	0x004UL		/* Tell constructor it's a verify call */

/*
 * struct kmem_cache related prototypes
@@ -55,7 +55,7 @@ void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
-int kmem_ptr_validate(struct kmem_cache *cachep, void *ptr);
+int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
@@ -93,19 +93,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting
  * the appropriate general cache at compile time.
  */
+
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
 #else
-
 /*
  * Fallback definitions for an allocator not wanting to provide
  * its own optimized kmalloc definitions (like SLOB).
  */
 
-#if defined(CONFIG_NUMA) || defined(CONFIG_DEBUG_SLAB)
-#error "SLAB fallback definitions not usable for NUMA or Slab debug"
-#endif
-
 /**
  * kmalloc - allocate memory
  * @size: how many bytes of memory are required.
@@ -151,7 +147,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  *
  * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
  */
-void *kmalloc(size_t size, gfp_t flags)
+static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	return __kmalloc(size, flags);
 }
@@ -161,12 +157,24 @@ void *kmalloc(size_t size, gfp_t flags)
  * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-void *kzalloc(size_t size, gfp_t flags)
+static inline void *kzalloc(size_t size, gfp_t flags)
 {
 	return __kzalloc(size, flags);
 }
 #endif
 
+#ifndef CONFIG_NUMA
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return kmalloc(size, flags);
+}
+
+static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __kmalloc(size, flags);
+}
+#endif /* !CONFIG_NUMA */
+
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead
@@ -208,12 +216,8 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 #define kmalloc_node_track_caller(size, flags, node) \
 	kmalloc_track_caller(size, flags)
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kmalloc(size, flags);
-}
+#endif /* DEBUG_SLAB */
 
-#endif /* !CONFIG_NUMA */
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SLAB_H */
 