-rw-r--r--   include/linux/slab.h | 44
-rw-r--r--   mm/slab.c            |  2
-rw-r--r--   mm/slob.c            |  2
3 files changed, 26 insertions, 22 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index e7a9c6b42412..1ef822e31c77 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -20,11 +20,11 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
  */
 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
 #define SLAB_DEBUG_INITIAL 0x00000200UL /* DEBUG: Call constructor (as verifier) */
 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
 #define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* Force alignment even if debuggin is active */
 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
@@ -34,9 +34,9 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
 
 /* Flags passed to a constructor functions */
 #define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */
 #define SLAB_CTOR_ATOMIC 0x002UL /* Tell constructor it can't sleep */
 #define SLAB_CTOR_VERIFY 0x004UL /* Tell constructor it's a verify call */
 
 /*
  * struct kmem_cache related prototypes
@@ -55,7 +55,7 @@ void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
-int kmem_ptr_validate(struct kmem_cache *cachep, void *ptr);
+int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
@@ -93,19 +93,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting
  * the appropriate general cache at compile time.
  */
+
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
 #else
-
 /*
  * Fallback definitions for an allocator not wanting to provide
  * its own optimized kmalloc definitions (like SLOB).
  */
 
-#if defined(CONFIG_NUMA) || defined(CONFIG_DEBUG_SLAB)
-#error "SLAB fallback definitions not usable for NUMA or Slab debug"
-#endif
-
 /**
  * kmalloc - allocate memory
  * @size: how many bytes of memory are required.
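The comment at the top of the hunk above refers to converting constant-size kmalloc() calls into kmem_cache_alloc() calls on a general cache chosen at compile time; that is what an allocator's own slab_def.h provides and what the generic fallback below does not attempt. A rough, hypothetical sketch of that idea (the cache names and size cut-offs are invented for illustration, not the real slab_def.h code):

#include <linux/slab.h>

/* Hypothetical general caches standing in for the real per-size kmalloc caches. */
extern struct kmem_cache *malloc_cache_32, *malloc_cache_64;

static inline void *kmalloc_sketch(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		/* Size known at compile time: the compiler folds this chain
		 * of tests away and emits a direct kmem_cache_alloc() call
		 * on the matching general cache. */
		if (size <= 32)
			return kmem_cache_alloc(malloc_cache_32, flags);
		if (size <= 64)
			return kmem_cache_alloc(malloc_cache_64, flags);
		/* ... further fixed-size caches ... */
	}
	/* Variable size: fall back to the generic path. */
	return __kmalloc(size, flags);
}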
@@ -151,7 +147,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  *
  * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
  */
-void *kmalloc(size_t size, gfp_t flags)
+static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	return __kmalloc(size, flags);
 }
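For reference, a minimal usage sketch of the interface documented by the kerneldoc above (foo_state and foo_create() are hypothetical driver code, not part of the patch); GFP_KERNEL may sleep, GFP_ATOMIC may not, per the flag list in that comment:

#include <linux/slab.h>

struct foo_state {
	int users;
	char name[16];
};

static struct foo_state *foo_create(void)
{
	struct foo_state *s = kmalloc(sizeof(*s), GFP_KERNEL);	/* may sleep */

	if (!s)
		return NULL;	/* allocation failure */
	s->users = 0;
	s->name[0] = '\0';
	return s;
}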
@@ -161,12 +157,24 @@ void *kmalloc(size_t size, gfp_t flags)
  * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-void *kzalloc(size_t size, gfp_t flags)
+static inline void *kzalloc(size_t size, gfp_t flags)
 {
 	return __kzalloc(size, flags);
 }
 #endif
 
+#ifndef CONFIG_NUMA
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return kmalloc(size, flags);
+}
+
+static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __kmalloc(size, flags);
+}
+#endif /* !CONFIG_NUMA */
+
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead
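A usage sketch for the !CONFIG_NUMA fallbacks added above (rx_ring and rx_ring_alloc() are hypothetical, not from the patch): callers can pass a preferred node unconditionally, and on non-NUMA builds kmalloc_node() simply degenerates to kmalloc() with the node argument ignored.

#include <linux/errno.h>
#include <linux/slab.h>

struct rx_ring {
	void *buf;
};

static int rx_ring_alloc(struct rx_ring *ring, int node)
{
	/* Node-local on NUMA kernels; plain kmalloc() otherwise. */
	ring->buf = kmalloc_node(2048, GFP_KERNEL, node);
	if (!ring->buf)
		return -ENOMEM;
	return 0;
}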
@@ -208,12 +216,8 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 #define kmalloc_node_track_caller(size, flags, node) \
 	kmalloc_track_caller(size, flags)
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kmalloc(size, flags);
-}
+#endif /* DEBUG_SLAB */
 
-#endif /* !CONFIG_NUMA */
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SLAB_H */
 
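The kmalloc_track_caller()/kmalloc_node_track_caller() machinery touched by the last hunk exists for small allocation wrappers, so that slab leak tracking records the wrapper's caller as the allocation site. A hedged sketch of the calling pattern (my_memdup() is hypothetical):

#include <linux/slab.h>
#include <linux/string.h>

static void *my_memdup(const void *src, size_t len, gfp_t gfp)
{
	/* With CONFIG_DEBUG_SLAB the allocation is attributed to the
	 * caller of my_memdup() rather than to my_memdup() itself;
	 * otherwise this behaves like a plain kmalloc(). */
	void *p = kmalloc_track_caller(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}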
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3541,7 +3541,7 @@ EXPORT_SYMBOL(kmem_cache_zalloc);
  *
  * Currently only used for dentry validation.
  */
-int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
+int fastcall kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
 {
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long min_addr = PAGE_OFFSET;
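The comment above notes that kmem_ptr_validate() is currently only used for dentry validation; a hedged sketch of that calling pattern follows (struct foo and foo_ptr_plausible() are hypothetical). The SLOB version in the next hunk is a stub that always returns 0, so a zero return means only "do not trust this pointer".

#include <linux/slab.h>

struct foo {
	int id;
};

static int foo_ptr_plausible(struct kmem_cache *foo_cache, const struct foo *candidate)
{
	/* Non-zero only if 'candidate' looks like it points into a slab
	 * object belonging to 'foo_cache'; the pointer itself is never
	 * dereferenced, and thanks to the const-qualified parameter a
	 * read-only pointer can be passed without a cast. */
	return kmem_ptr_validate(foo_cache, candidate);
}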
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -334,7 +334,7 @@ int kmem_cache_shrink(struct kmem_cache *d)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-int kmem_ptr_validate(struct kmem_cache *a, void *b)
+int kmem_ptr_validate(struct kmem_cache *a, const void *b)
 {
 	return 0;
 }