author     Christoph Lameter <clameter@sgi.com>                2007-07-17 07:03:29 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-17 13:23:01 -0400
commit     81cda6626178cd55297831296ba8ecedbfd8b52d
tree       fa35a6a04db63080bbeb42f33f4b4a891b7fc96c
parent     ce15fea8274acca06daa1674322d37a7d3f0036b
Slab allocators: Cleanup zeroing allocations
It now becomes easy to support the zeroing allocations with generic inline
functions in slab.h. Provide inline definitions that allow the continued use of
kzalloc, kmem_cache_zalloc, etc., but remove the other definitions of zeroing
functions from the slab allocators and util.c.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
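[Editorial illustration, not part of the original commit: the sketch below is a small
self-contained userspace C program that models the pattern this patch relies on. The
demo_* names and DEMO_GFP_ZERO are made up, not kernel APIs. The idea is that the
zeroing variants become trivial generic wrappers that OR a "zero the object" flag into
the gfp mask, so only the underlying allocation path has to clear memory; this is why
the per-allocator kzalloc()/kmem_cache_zalloc() implementations can simply be deleted.]

/*
 * Userspace sketch of the __GFP_ZERO-style pattern (made-up names, not
 * kernel code): one flag handled in one place, zeroing wrappers are generic.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_GFP_ZERO 0x1u		/* stand-in for __GFP_ZERO */

static void *demo_kmalloc(size_t size, unsigned int flags)
{
	void *p = malloc(size);

	/* The single place where the zero flag is honored. */
	if (p && (flags & DEMO_GFP_ZERO))
		memset(p, 0, size);
	return p;
}

/* Generic zeroing wrapper, analogous to the new inline kzalloc(). */
static inline void *demo_kzalloc(size_t size, unsigned int flags)
{
	return demo_kmalloc(size, flags | DEMO_GFP_ZERO);
}

int main(void)
{
	int *v = demo_kzalloc(4 * sizeof(*v), 0);

	if (!v)
		return 1;
	printf("%d %d %d %d\n", v[0], v[1], v[2], v[3]);	/* prints: 0 0 0 0 */
	free(v);
	return 0;
}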
 include/linux/slab.h     | 77
 include/linux/slab_def.h | 30
 include/linux/slub_def.h | 13
 mm/slab.c                | 17
 mm/slob.c                | 10
 mm/slub.c                | 11
 mm/util.c                | 14
 7 files changed, 46 insertions(+), 126 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0289ec89300a..0e1d0daef6a2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -55,7 +55,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 			void (*)(void *, struct kmem_cache *, unsigned long));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
-void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
@@ -91,11 +90,37 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void *__kzalloc(size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
 size_t ksize(const void *);
 
+/*
+ * Allocator specific definitions. These are mainly used to establish optimized
+ * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
+ * selecting the appropriate general cache at compile time.
+ *
+ * Allocators must define at least:
+ *
+ *	kmem_cache_alloc()
+ *	__kmalloc()
+ *	kmalloc()
+ *
+ * Those wishing to support NUMA must also define:
+ *
+ *	kmem_cache_alloc_node()
+ *	kmalloc_node()
+ *
+ * See each allocator definition file for additional comments and
+ * implementation notes.
+ */
+#ifdef CONFIG_SLUB
+#include <linux/slub_def.h>
+#elif defined(CONFIG_SLOB)
+#include <linux/slob_def.h>
+#else
+#include <linux/slab_def.h>
+#endif
+
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
  * @n: number of elements.
@@ -151,37 +176,9 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 {
 	if (n != 0 && size > ULONG_MAX / n)
 		return NULL;
-	return __kzalloc(n * size, flags);
+	return __kmalloc(n * size, flags | __GFP_ZERO);
 }
 
-/*
- * Allocator specific definitions. These are mainly used to establish optimized
- * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
- * selecting the appropriate general cache at compile time.
- *
- * Allocators must define at least:
- *
- *	kmem_cache_alloc()
- *	__kmalloc()
- *	kmalloc()
- *	kzalloc()
- *
- * Those wishing to support NUMA must also define:
- *
- *	kmem_cache_alloc_node()
- *	kmalloc_node()
- *
- * See each allocator definition file for additional comments and
- * implementation notes.
- */
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#elif defined(CONFIG_SLOB)
-#include <linux/slob_def.h>
-#else
-#include <linux/slab_def.h>
-#endif
-
 #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
 /**
  * kmalloc_node - allocate memory from a specific node
@@ -255,5 +252,23 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 
 #endif /* DEBUG_SLAB */
 
+/*
+ * Shortcuts
+ */
+static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
+{
+	return kmem_cache_alloc(k, flags | __GFP_ZERO);
+}
+
+/**
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+static inline void *kzalloc(size_t size, gfp_t flags)
+{
+	return kmalloc(size, flags | __GFP_ZERO);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 16e814ffab8d..32bdc2ffd715 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -58,36 +58,6 @@ found:
 	return __kmalloc(size, flags);
 }
 
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-		{
-			extern void __you_cannot_kzalloc_that_much(void);
-			__you_cannot_kzalloc_that_much();
-		}
-found:
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
-						flags);
-#endif
-		return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
-	}
-	return __kzalloc(size, flags);
-}
-
 #ifdef CONFIG_NUMA
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index bae11111458f..07f7e4cbcee3 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -179,19 +179,6 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 	return __kmalloc(size, flags);
 }
 
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
-
-		if (!s)
-			return ZERO_SIZE_PTR;
-
-		return kmem_cache_zalloc(s, flags);
-	} else
-		return __kzalloc(size, flags);
-}
-
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3590,23 +3590,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 /**
- * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
- * @cache: The cache to allocate from.
- * @flags: See kmalloc().
- *
- * Allocate an object from this cache and set the allocated memory to zero.
- * The flags are only relevant if the cache has no available objects.
- */
-void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
-{
-	void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
-	if (ret)
-		memset(ret, 0, obj_size(cache));
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
-/**
  * kmem_ptr_validate - check if an untrusted pointer might
  *  be a slab entry.
  * @cachep: the cache we're checking against
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -543,16 +543,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
-{
-	void *ret = kmem_cache_alloc(c, flags);
-	if (ret)
-		memset(ret, 0, c->size);
-
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
 static void __kmem_cache_free(void *b, int size)
 {
 	if (size < PAGE_SIZE)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2706,17 +2706,6 @@ err:
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
-void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
-{
-	void *x;
-
-	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
-	if (x)
-		memset(x, 0, s->objsize);
-	return x;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
 #ifdef CONFIG_SMP
 /*
  * Use the cpu notifier to insure that the cpu slabs are flushed when
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -5,20 +5,6 @@
 #include <asm/uaccess.h>
 
 /**
- * __kzalloc - allocate memory. The memory is set to zero.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- */
-void *__kzalloc(size_t size, gfp_t flags)
-{
-	void *ret = kmalloc_track_caller(size, flags);
-	if (ret)
-		memset(ret, 0, size);
-	return ret;
-}
-EXPORT_SYMBOL(__kzalloc);
-
-/*
  * kstrdup - allocate space for and copy an existing string
  *
  * @s: the string to duplicate