diff options
author | Christoph Lameter <clameter@sgi.com> | 2007-07-17 07:03:29 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-17 13:23:01 -0400 |
commit | 81cda6626178cd55297831296ba8ecedbfd8b52d (patch) | |
tree | fa35a6a04db63080bbeb42f33f4b4a891b7fc96c /include | |
parent | ce15fea8274acca06daa1674322d37a7d3f0036b (diff) |
Slab allocators: Cleanup zeroing allocations
It now becomes easy to support the zeroing allocations with generic inline
functions in slab.h. Provide inline definitions to allow the continued use of
kzalloc, kmem_cache_zalloc, etc., but remove the other definitions of zeroing
functions from the slab allocators and util.c.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/slab.h | 77 | ||||
-rw-r--r-- | include/linux/slab_def.h | 30 | ||||
-rw-r--r-- | include/linux/slub_def.h | 13 |
3 files changed, 46 insertions, 74 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h index 0289ec89300a..0e1d0daef6a2 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -55,7 +55,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, | |||
55 | void (*)(void *, struct kmem_cache *, unsigned long)); | 55 | void (*)(void *, struct kmem_cache *, unsigned long)); |
56 | void kmem_cache_destroy(struct kmem_cache *); | 56 | void kmem_cache_destroy(struct kmem_cache *); |
57 | int kmem_cache_shrink(struct kmem_cache *); | 57 | int kmem_cache_shrink(struct kmem_cache *); |
58 | void *kmem_cache_zalloc(struct kmem_cache *, gfp_t); | ||
59 | void kmem_cache_free(struct kmem_cache *, void *); | 58 | void kmem_cache_free(struct kmem_cache *, void *); |
60 | unsigned int kmem_cache_size(struct kmem_cache *); | 59 | unsigned int kmem_cache_size(struct kmem_cache *); |
61 | const char *kmem_cache_name(struct kmem_cache *); | 60 | const char *kmem_cache_name(struct kmem_cache *); |
@@ -91,11 +90,37 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); | |||
91 | /* | 90 | /* |
92 | * Common kmalloc functions provided by all allocators | 91 | * Common kmalloc functions provided by all allocators |
93 | */ | 92 | */ |
94 | void *__kzalloc(size_t, gfp_t); | ||
95 | void * __must_check krealloc(const void *, size_t, gfp_t); | 93 | void * __must_check krealloc(const void *, size_t, gfp_t); |
96 | void kfree(const void *); | 94 | void kfree(const void *); |
97 | size_t ksize(const void *); | 95 | size_t ksize(const void *); |
98 | 96 | ||
97 | /* | ||
98 | * Allocator specific definitions. These are mainly used to establish optimized | ||
99 | * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by | ||
100 | * selecting the appropriate general cache at compile time. | ||
101 | * | ||
102 | * Allocators must define at least: | ||
103 | * | ||
104 | * kmem_cache_alloc() | ||
105 | * __kmalloc() | ||
106 | * kmalloc() | ||
107 | * | ||
108 | * Those wishing to support NUMA must also define: | ||
109 | * | ||
110 | * kmem_cache_alloc_node() | ||
111 | * kmalloc_node() | ||
112 | * | ||
113 | * See each allocator definition file for additional comments and | ||
114 | * implementation notes. | ||
115 | */ | ||
116 | #ifdef CONFIG_SLUB | ||
117 | #include <linux/slub_def.h> | ||
118 | #elif defined(CONFIG_SLOB) | ||
119 | #include <linux/slob_def.h> | ||
120 | #else | ||
121 | #include <linux/slab_def.h> | ||
122 | #endif | ||
123 | |||
99 | /** | 124 | /** |
100 | * kcalloc - allocate memory for an array. The memory is set to zero. | 125 | * kcalloc - allocate memory for an array. The memory is set to zero. |
101 | * @n: number of elements. | 126 | * @n: number of elements. |
@@ -151,37 +176,9 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) | |||
151 | { | 176 | { |
152 | if (n != 0 && size > ULONG_MAX / n) | 177 | if (n != 0 && size > ULONG_MAX / n) |
153 | return NULL; | 178 | return NULL; |
154 | return __kzalloc(n * size, flags); | 179 | return __kmalloc(n * size, flags | __GFP_ZERO); |
155 | } | 180 | } |
156 | 181 | ||
157 | /* | ||
158 | * Allocator specific definitions. These are mainly used to establish optimized | ||
159 | * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by | ||
160 | * selecting the appropriate general cache at compile time. | ||
161 | * | ||
162 | * Allocators must define at least: | ||
163 | * | ||
164 | * kmem_cache_alloc() | ||
165 | * __kmalloc() | ||
166 | * kmalloc() | ||
167 | * kzalloc() | ||
168 | * | ||
169 | * Those wishing to support NUMA must also define: | ||
170 | * | ||
171 | * kmem_cache_alloc_node() | ||
172 | * kmalloc_node() | ||
173 | * | ||
174 | * See each allocator definition file for additional comments and | ||
175 | * implementation notes. | ||
176 | */ | ||
177 | #ifdef CONFIG_SLUB | ||
178 | #include <linux/slub_def.h> | ||
179 | #elif defined(CONFIG_SLOB) | ||
180 | #include <linux/slob_def.h> | ||
181 | #else | ||
182 | #include <linux/slab_def.h> | ||
183 | #endif | ||
184 | |||
185 | #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB) | 182 | #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB) |
186 | /** | 183 | /** |
187 | * kmalloc_node - allocate memory from a specific node | 184 | * kmalloc_node - allocate memory from a specific node |
@@ -255,5 +252,23 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); | |||
255 | 252 | ||
256 | #endif /* DEBUG_SLAB */ | 253 | #endif /* DEBUG_SLAB */ |
257 | 254 | ||
255 | /* | ||
256 | * Shortcuts | ||
257 | */ | ||
258 | static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) | ||
259 | { | ||
260 | return kmem_cache_alloc(k, flags | __GFP_ZERO); | ||
261 | } | ||
262 | |||
263 | /** | ||
264 | * kzalloc - allocate memory. The memory is set to zero. | ||
265 | * @size: how many bytes of memory are required. | ||
266 | * @flags: the type of memory to allocate (see kmalloc). | ||
267 | */ | ||
268 | static inline void *kzalloc(size_t size, gfp_t flags) | ||
269 | { | ||
270 | return kmalloc(size, flags | __GFP_ZERO); | ||
271 | } | ||
272 | |||
258 | #endif /* __KERNEL__ */ | 273 | #endif /* __KERNEL__ */ |
259 | #endif /* _LINUX_SLAB_H */ | 274 | #endif /* _LINUX_SLAB_H */ |
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 16e814ffab8d..32bdc2ffd715 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
@@ -58,36 +58,6 @@ found: | |||
58 | return __kmalloc(size, flags); | 58 | return __kmalloc(size, flags); |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline void *kzalloc(size_t size, gfp_t flags) | ||
62 | { | ||
63 | if (__builtin_constant_p(size)) { | ||
64 | int i = 0; | ||
65 | |||
66 | if (!size) | ||
67 | return ZERO_SIZE_PTR; | ||
68 | |||
69 | #define CACHE(x) \ | ||
70 | if (size <= x) \ | ||
71 | goto found; \ | ||
72 | else \ | ||
73 | i++; | ||
74 | #include "kmalloc_sizes.h" | ||
75 | #undef CACHE | ||
76 | { | ||
77 | extern void __you_cannot_kzalloc_that_much(void); | ||
78 | __you_cannot_kzalloc_that_much(); | ||
79 | } | ||
80 | found: | ||
81 | #ifdef CONFIG_ZONE_DMA | ||
82 | if (flags & GFP_DMA) | ||
83 | return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep, | ||
84 | flags); | ||
85 | #endif | ||
86 | return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags); | ||
87 | } | ||
88 | return __kzalloc(size, flags); | ||
89 | } | ||
90 | |||
91 | #ifdef CONFIG_NUMA | 61 | #ifdef CONFIG_NUMA |
92 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); | 62 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); |
93 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 63 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index bae11111458f..07f7e4cbcee3 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -179,19 +179,6 @@ static inline void *kmalloc(size_t size, gfp_t flags) | |||
179 | return __kmalloc(size, flags); | 179 | return __kmalloc(size, flags); |
180 | } | 180 | } |
181 | 181 | ||
182 | static inline void *kzalloc(size_t size, gfp_t flags) | ||
183 | { | ||
184 | if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) { | ||
185 | struct kmem_cache *s = kmalloc_slab(size); | ||
186 | |||
187 | if (!s) | ||
188 | return ZERO_SIZE_PTR; | ||
189 | |||
190 | return kmem_cache_zalloc(s, flags); | ||
191 | } else | ||
192 | return __kzalloc(size, flags); | ||
193 | } | ||
194 | |||
195 | #ifdef CONFIG_NUMA | 182 | #ifdef CONFIG_NUMA |
196 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 183 | void *__kmalloc_node(size_t size, gfp_t flags, int node); |
197 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 184 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |