author		Christoph Lameter <clameter@sgi.com>	2006-12-13 03:34:23 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-13 12:05:49 -0500
commit		2e892f43ccb602e8ffad73396a1000f2040c9e0b (patch)
tree		2f799810eccebeb5d432daed93ed9654238887b6
parent		872225ca77519a243d7e19270b062b0ac53418d8 (diff)
[PATCH] Cleanup slab headers / API to allow easy addition of new slab allocators
This is a response to an earlier discussion on linux-mm about splitting
slab.h components per allocator. Patch is against 2.6.19-git11. See
http://marc.theaimsgroup.com/?l=linux-mm&m=116469577431008&w=2

This patch cleans up the slab header definitions. We define the common
functions of slob and slab in slab.h and put the extra definitions needed
for slab's kmalloc implementations in <linux/slab_def.h>.

In order to get a greater set of common functions we add several empty
functions to slob.c and also rename slob's kmalloc to __kmalloc.

Slob does not need any special definitions since we introduce a fallback
case. If there is no need for a slab implementation to provide its own
kmalloc mess^H^H^Hacros then we simply fall back to __kmalloc functions.
That is sufficient for SLOB.

Sort the functions in slab.h according to their functionality. First the
functions operating on struct kmem_cache *, then the kmalloc related
functions, followed by special debug and fallback definitions.

Also redo a lot of comments.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
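In outline, the layering the patch establishes in <linux/slab.h> looks like
this (a condensed sketch, not the verbatim patched header):

/*
 * Sketch of the new header layering. The common API is declared once;
 * an allocator may supply optimized inline kmalloc definitions via its
 * own header, otherwise the generic fallback is used.
 */
void *__kmalloc(size_t size, gfp_t flags);	/* every allocator provides this */
void *__kzalloc(size_t size, gfp_t flags);

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>	/* SLAB's inline, compile-time-dispatched kmalloc() */
#else
/* Fallback for allocators (like SLOB) without their own kmalloc macros. */
void *kmalloc(size_t size, gfp_t flags)
{
	return __kmalloc(size, flags);
}
#endif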
-rw-r--r--	include/linux/slab.h		306
-rw-r--r--	include/linux/slab_def.h	100
-rw-r--r--	mm/slob.c			 16
3 files changed, 223 insertions, 199 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2271886744f8..e7a9c6b42412 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1,7 +1,9 @@
 /*
- * linux/include/linux/slab.h
- * Written by Mark Hemment, 1996.
- * (markhe@nextd.demon.co.uk)
+ * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
+ *
+ * (C) SGI 2006, Christoph Lameter <clameter@sgi.com>
+ * Cleaned up and restructured to ease the addition of alternative
+ * implementations of SLAB allocators.
  */
 
 #ifndef _LINUX_SLAB_H
@@ -10,64 +12,99 @@
 #ifdef __KERNEL__
 
 #include <linux/gfp.h>
-#include <linux/init.h>
 #include <linux/types.h>
-#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
-#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
-#include <linux/compiler.h>
 
-/* kmem_cache_t exists for legacy reasons and is not used by code in mm */
 typedef struct kmem_cache kmem_cache_t __deprecated;
 
-/* flags to pass to kmem_cache_create().
- * The first 3 are only valid when the allocator as been build
- * SLAB_DEBUG_SUPPORT.
+/*
+ * Flags to pass to kmem_cache_create().
+ * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
  */
-#define SLAB_DEBUG_FREE		0x00000100UL	/* Peform (expensive) checks on free */
-#define SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
-#define SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
-#define SLAB_POISON		0x00000800UL	/* Poison objects */
-#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on a h/w cache lines */
-#define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
-#define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
-#define SLAB_STORE_USER		0x00010000UL	/* store the last owner for bug hunting */
-#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* track pages allocated to indicate
-						   what is reclaimable later*/
-#define SLAB_PANIC		0x00040000UL	/* panic if kmem_cache_create() fails */
-#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* defer freeing pages to RCU */
+#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
+#define SLAB_DEBUG_INITIAL	0x00000200UL	/* DEBUG: Call constructor (as verifier) */
+#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
+#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
+#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
+#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
+#define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* Force alignment even if debugging is active */
+#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
+#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
+#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
+#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 
-/* flags passed to a constructor func */
-#define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then deconstructor */
-#define SLAB_CTOR_ATOMIC	0x002UL		/* tell constructor it can't sleep */
-#define	SLAB_CTOR_VERIFY	0x004UL		/* tell constructor it's a verify call */
+/* Flags passed to constructor functions */
+#define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* If not set, then deconstructor */
+#define SLAB_CTOR_ATOMIC	0x002UL		/* Tell constructor it can't sleep */
+#define	SLAB_CTOR_VERIFY	0x004UL		/* Tell constructor it's a verify call */
 
-#ifndef CONFIG_SLOB
-
-/* prototypes */
-extern void __init kmem_cache_init(void);
+/*
+ * struct kmem_cache related prototypes
+ */
+void __init kmem_cache_init(void);
+extern int slab_is_available(void);
 
-extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 			unsigned long,
 			void (*)(void *, struct kmem_cache *, unsigned long),
 			void (*)(void *, struct kmem_cache *, unsigned long));
-extern void kmem_cache_destroy(struct kmem_cache *);
-extern int kmem_cache_shrink(struct kmem_cache *);
-extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-extern void kmem_cache_free(struct kmem_cache *, void *);
-extern unsigned int kmem_cache_size(struct kmem_cache *);
-extern const char *kmem_cache_name(struct kmem_cache *);
+void kmem_cache_destroy(struct kmem_cache *);
+int kmem_cache_shrink(struct kmem_cache *);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
+void kmem_cache_free(struct kmem_cache *, void *);
+unsigned int kmem_cache_size(struct kmem_cache *);
+const char *kmem_cache_name(struct kmem_cache *);
+int kmem_ptr_validate(struct kmem_cache *cachep, void *ptr);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+					gfp_t flags, int node)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
+#endif
+
+/*
+ * Common kmalloc functions provided by all allocators
+ */
+void *__kmalloc(size_t, gfp_t);
+void *__kzalloc(size_t, gfp_t);
+void kfree(const void *);
+unsigned int ksize(const void *);
+
+/**
+ * kcalloc - allocate memory for an array. The memory is set to zero.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate.
+ */
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+	if (n != 0 && size > ULONG_MAX / n)
+		return NULL;
+	return __kzalloc(n * size, flags);
+}
 
-/* Size description struct for general caches. */
-struct cache_sizes {
-	size_t		 cs_size;
-	struct kmem_cache	*cs_cachep;
-	struct kmem_cache	*cs_dmacachep;
-};
-extern struct cache_sizes malloc_sizes[];
+/*
+ * Allocator specific definitions. These are mainly used to establish optimized
+ * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting
+ * the appropriate general cache at compile time.
+ */
+#ifdef CONFIG_SLAB
+#include <linux/slab_def.h>
+#else
 
-extern void *__kmalloc(size_t, gfp_t);
+/*
+ * Fallback definitions for an allocator not wanting to provide
+ * its own optimized kmalloc definitions (like SLOB).
+ */
+
+#if defined(CONFIG_NUMA) || defined(CONFIG_DEBUG_SLAB)
+#error "SLAB fallback definitions not usable for NUMA or Slab debug"
+#endif
 
 /**
  * kmalloc - allocate memory
@@ -114,29 +151,22 @@ extern void *__kmalloc(size_t, gfp_t);
  *
  * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
  */
-static inline void *kmalloc(size_t size, gfp_t flags)
+void *kmalloc(size_t size, gfp_t flags)
 {
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-		{
-			extern void __you_cannot_kmalloc_that_much(void);
-			__you_cannot_kmalloc_that_much();
-		}
-found:
-		return kmem_cache_alloc((flags & GFP_DMA) ?
-			malloc_sizes[i].cs_dmacachep :
-			malloc_sizes[i].cs_cachep, flags);
-	}
 	return __kmalloc(size, flags);
 }
 
+/**
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+void *kzalloc(size_t size, gfp_t flags)
+{
+	return __kzalloc(size, flags);
+}
+#endif
+
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead
@@ -145,89 +175,16 @@ found:
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-#ifndef CONFIG_DEBUG_SLAB
-#define kmalloc_track_caller(size, flags) \
-	__kmalloc(size, flags)
-#else
+#ifdef CONFIG_DEBUG_SLAB
 extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
-#endif
-
-extern void *__kzalloc(size_t, gfp_t);
-
-/**
- * kzalloc - allocate memory. The memory is set to zero.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- */
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-		{
-			extern void __you_cannot_kzalloc_that_much(void);
-			__you_cannot_kzalloc_that_much();
-		}
-found:
-		return kmem_cache_zalloc((flags & GFP_DMA) ?
-			malloc_sizes[i].cs_dmacachep :
-			malloc_sizes[i].cs_cachep, flags);
-	}
-	return __kzalloc(size, flags);
-}
-
-/**
- * kcalloc - allocate memory for an array. The memory is set to zero.
- * @n: number of elements.
- * @size: element size.
- * @flags: the type of memory to allocate.
- */
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
-{
-	if (n != 0 && size > ULONG_MAX / n)
-		return NULL;
-	return kzalloc(n * size, flags);
-}
-
-extern void kfree(const void *);
-extern unsigned int ksize(const void *);
-extern int slab_is_available(void);
+#else
+#define kmalloc_track_caller(size, flags) \
+	__kmalloc(size, flags)
+#endif /* DEBUG_SLAB */
 
 #ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-		{
-			extern void __you_cannot_kmalloc_that_much(void);
-			__you_cannot_kmalloc_that_much();
-		}
-found:
-		return kmem_cache_alloc_node((flags & GFP_DMA) ?
-			malloc_sizes[i].cs_dmacachep :
-			malloc_sizes[i].cs_cachep, flags, node);
-	}
-	return __kmalloc_node(size, flags, node);
-}
-
 /*
  * kmalloc_node_track_caller is a special version of kmalloc_node that
  * records the calling function of the routine calling it for slab leak
@@ -236,70 +193,27 @@ found:
  * standard allocator where we care about the real place the memory
  * allocation request comes from.
  */
-#ifndef CONFIG_DEBUG_SLAB
-#define kmalloc_node_track_caller(size, flags, node) \
-	__kmalloc_node(size, flags, node)
-#else
+#ifdef CONFIG_DEBUG_SLAB
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
 			__builtin_return_address(0))
+#else
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node(size, flags, node)
 #endif
+
 #else /* CONFIG_NUMA */
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
-					gfp_t flags, int node)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kmalloc(size, flags);
-}
 
 #define kmalloc_node_track_caller(size, flags, node) \
 	kmalloc_track_caller(size, flags)
-#endif
 
-extern int FASTCALL(kmem_cache_reap(int));
-extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr));
-
-#else /* CONFIG_SLOB */
-
-/* SLOB allocator routines */
-
-void kmem_cache_init(void);
-struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
-	unsigned long,
-	void (*)(void *, struct kmem_cache *, unsigned long),
-	void (*)(void *, struct kmem_cache *, unsigned long));
-void kmem_cache_destroy(struct kmem_cache *c);
-void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
-void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-void kmem_cache_free(struct kmem_cache *c, void *b);
-const char *kmem_cache_name(struct kmem_cache *);
-void *kmalloc(size_t size, gfp_t flags);
-void *__kzalloc(size_t size, gfp_t flags);
-void kfree(const void *m);
-unsigned int ksize(const void *m);
-unsigned int kmem_cache_size(struct kmem_cache *c);
-
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __kzalloc(n * size, flags);
+	return kmalloc(size, flags);
 }
 
-#define kmem_cache_shrink(d) (0)
-#define kmem_cache_reap(a)
-#define kmem_ptr_validate(a, b) (0)
-#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
-#define kmalloc_node(s, f, n) kmalloc(s, f)
-#define kzalloc(s, f) __kzalloc(s, f)
-#define kmalloc_track_caller kmalloc
-
-#define kmalloc_node_track_caller kmalloc_node
-
-#endif /* CONFIG_SLOB */
-
+#endif /* !CONFIG_NUMA */
 #endif /* __KERNEL__ */
-
 #endif /* _LINUX_SLAB_H */
+
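A note on the kcalloc() that now lives in the common part of slab.h: the
guard "n != 0 && size > ULONG_MAX / n" rejects array requests whose n * size
would wrap, because unsigned multiplication overflows exactly when size
exceeds ULONG_MAX / n (integer division rounding down). A standalone
userspace sketch, for illustration only:

#include <limits.h>
#include <stdio.h>

/* Mirrors the overflow test used by the new kcalloc(). */
static int mul_would_overflow(unsigned long n, unsigned long size)
{
	return n != 0 && size > ULONG_MAX / n;
}

int main(void)
{
	/* 2 * (ULONG_MAX / 2) still fits: prints 0. */
	printf("%d\n", mul_would_overflow(2, ULONG_MAX / 2));
	/* One byte more per element would wrap: prints 1. */
	printf("%d\n", mul_would_overflow(2, ULONG_MAX / 2 + 1));
	return 0;
}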
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
new file mode 100644
index 000000000000..4b463e66ddea
--- /dev/null
+++ b/include/linux/slab_def.h
@@ -0,0 +1,100 @@
+#ifndef _LINUX_SLAB_DEF_H
+#define _LINUX_SLAB_DEF_H
+
+/*
+ * Definitions unique to the original Linux SLAB allocator.
+ *
+ * What we provide here is a way to optimize the frequent kmalloc
+ * calls in the kernel by selecting the appropriate general cache
+ * if kmalloc was called with a size that can be established at
+ * compile time.
+ */
+
+#include <linux/init.h>
+#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
+#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
+#include <linux/compiler.h>
+
+/* Size description struct for general caches. */
+struct cache_sizes {
+	size_t		 cs_size;
+	struct kmem_cache	*cs_cachep;
+	struct kmem_cache	*cs_dmacachep;
+};
+extern struct cache_sizes malloc_sizes[];
+
+static inline void *kmalloc(size_t size, gfp_t flags)
+{
+	if (__builtin_constant_p(size)) {
+		int i = 0;
+#define CACHE(x) \
+		if (size <= x) \
+			goto found; \
+		else \
+			i++;
+#include "kmalloc_sizes.h"
+#undef CACHE
+		{
+			extern void __you_cannot_kmalloc_that_much(void);
+			__you_cannot_kmalloc_that_much();
+		}
+found:
+		return kmem_cache_alloc((flags & GFP_DMA) ?
+			malloc_sizes[i].cs_dmacachep :
+			malloc_sizes[i].cs_cachep, flags);
+	}
+	return __kmalloc(size, flags);
+}
+
+static inline void *kzalloc(size_t size, gfp_t flags)
+{
+	if (__builtin_constant_p(size)) {
+		int i = 0;
+#define CACHE(x) \
+		if (size <= x) \
+			goto found; \
+		else \
+			i++;
+#include "kmalloc_sizes.h"
+#undef CACHE
+		{
+			extern void __you_cannot_kzalloc_that_much(void);
+			__you_cannot_kzalloc_that_much();
+		}
+found:
+		return kmem_cache_zalloc((flags & GFP_DMA) ?
+			malloc_sizes[i].cs_dmacachep :
+			malloc_sizes[i].cs_cachep, flags);
+	}
+	return __kzalloc(size, flags);
+}
+
+#ifdef CONFIG_NUMA
+extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
+
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	if (__builtin_constant_p(size)) {
+		int i = 0;
+#define CACHE(x) \
+		if (size <= x) \
+			goto found; \
+		else \
+			i++;
+#include "kmalloc_sizes.h"
+#undef CACHE
+		{
+			extern void __you_cannot_kmalloc_that_much(void);
+			__you_cannot_kmalloc_that_much();
+		}
+found:
+		return kmem_cache_alloc_node((flags & GFP_DMA) ?
+			malloc_sizes[i].cs_dmacachep :
+			malloc_sizes[i].cs_cachep, flags, node);
+	}
+	return __kmalloc_node(size, flags, node);
+}
+
+#endif	/* CONFIG_NUMA */
+
+#endif	/* _LINUX_SLAB_DEF_H */
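The CACHE() chain in kmalloc() above is generated by including
kmalloc_sizes.h; when the requested size is a compile-time constant, the
whole chain folds away and the call becomes a single kmem_cache_alloc()
on a fixed general cache. A standalone userspace sketch of that dispatch
trick, with made-up cache sizes (needs -O1 or higher for
__builtin_constant_p() to see through the inline):

#include <stdio.h>

static inline int pick_cache(unsigned long size)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) if (size <= x) goto found; else i++;
		CACHE(32) CACHE(64) CACHE(128) CACHE(256)
#undef CACHE
		return -1;		/* larger than any general cache */
found:
		return i;		/* index resolved at compile time */
	}
	return -2;			/* runtime size: fall back to __kmalloc() */
}

int main(void)
{
	/* With -O2 this folds to "return 2" (the 128-byte cache). */
	printf("cache index for a 100-byte object: %d\n", pick_cache(100));
	return 0;
}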
diff --git a/mm/slob.c b/mm/slob.c
index 542394184a58..b90091c4b08c 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -157,7 +157,7 @@ static int fastcall find_order(int size)
 	return order;
 }
 
-void *kmalloc(size_t size, gfp_t gfp)
+void *__kmalloc(size_t size, gfp_t gfp)
 {
 	slob_t *m;
 	bigblock_t *bb;
@@ -186,8 +186,7 @@ void *kmalloc(size_t size, gfp_t gfp)
 	slob_free(bb, sizeof(bigblock_t));
 	return 0;
 }
-
-EXPORT_SYMBOL(kmalloc);
+EXPORT_SYMBOL(__kmalloc);
 
 void kfree(const void *block)
 {
@@ -329,6 +328,17 @@ EXPORT_SYMBOL(kmem_cache_name);
 static struct timer_list slob_timer = TIMER_INITIALIZER(
 	(void (*)(unsigned long))kmem_cache_init, 0, 0);
 
+int kmem_cache_shrink(struct kmem_cache *d)
+{
+	return 0;
+}
+EXPORT_SYMBOL(kmem_cache_shrink);
+
+int kmem_ptr_validate(struct kmem_cache *a, void *b)
+{
+	return 0;
+}
+
 void kmem_cache_init(void)
 {
 	void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1);
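With these stubs in place, SLOB exports the same common API as SLAB, so a
caller never needs to know which allocator is configured. A hypothetical
caller, for illustration only (names and sizes are made up; the
kmem_cache_create() signature matches the prototypes in the patched slab.h):

#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *foo_cache;

static int foo_setup(void)
{
	/* name, size, align, flags, ctor, dtor */
	foo_cache = kmem_cache_create("foo", 128, 0, 0, NULL, NULL);
	if (!foo_cache)
		return -ENOMEM;
	kmem_cache_shrink(foo_cache);	/* a no-op returning 0 under SLOB */
	return 0;
}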