aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorMatt Mackall <mpm@selenic.com>2006-01-08 04:01:45 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-01-08 23:13:41 -0500
commit10cef6029502915bdb3cf0821d425cf9dc30c817 (patch)
tree2c9dfef95d58b64dcf4cdf3c32b18164928b438e /include
parent30992c97ae9d01b17374fbfab76a869fb4bba500 (diff)
[PATCH] slob: introduce the SLOB allocator
configurable replacement for slab allocator This adds a CONFIG_SLAB option under CONFIG_EMBEDDED. When CONFIG_SLAB is disabled, the kernel falls back to using the 'SLOB' allocator. SLOB is a traditional K&R/UNIX allocator with a SLAB emulation layer, similar to the original Linux kmalloc allocator that SLAB replaced. It's significantly smaller code and is more memory efficient. But like all similar allocators, it scales poorly and suffers from fragmentation more than SLAB, so it's only appropriate for small systems. It's been tested extensively in the Linux-tiny tree. I've also stress-tested it with make -j 8 compiles on a 3G SMP+PREEMPT box (not recommended). Here's a comparison for otherwise identical builds, showing SLOB saving nearly half a megabyte of RAM: $ size vmlinux* text data bss dec hex filename 3336372 529360 190812 4056544 3de5e0 vmlinux-slab 3323208 527948 190684 4041840 3dac70 vmlinux-slob $ size mm/{slab,slob}.o text data bss dec hex filename 13221 752 48 14021 36c5 mm/slab.o 1896 52 8 1956 7a4 mm/slob.o /proc/meminfo: SLAB SLOB delta MemTotal: 27964 kB 27980 kB +16 kB MemFree: 24596 kB 25092 kB +496 kB Buffers: 36 kB 36 kB 0 kB Cached: 1188 kB 1188 kB 0 kB SwapCached: 0 kB 0 kB 0 kB Active: 608 kB 600 kB -8 kB Inactive: 808 kB 812 kB +4 kB HighTotal: 0 kB 0 kB 0 kB HighFree: 0 kB 0 kB 0 kB LowTotal: 27964 kB 27980 kB +16 kB LowFree: 24596 kB 25092 kB +496 kB SwapTotal: 0 kB 0 kB 0 kB SwapFree: 0 kB 0 kB 0 kB Dirty: 4 kB 12 kB +8 kB Writeback: 0 kB 0 kB 0 kB Mapped: 560 kB 556 kB -4 kB Slab: 1756 kB 0 kB -1756 kB CommitLimit: 13980 kB 13988 kB +8 kB Committed_AS: 4208 kB 4208 kB 0 kB PageTables: 28 kB 28 kB 0 kB VmallocTotal: 1007312 kB 1007312 kB 0 kB VmallocUsed: 48 kB 48 kB 0 kB VmallocChunk: 1007264 kB 1007264 kB 0 kB (this work has been sponsored in part by CELF) From: Ingo Molnar <mingo@elte.hu> Fix 32-bitness bugs in mm/slob.c. 
Signed-off-by: Matt Mackall <mpm@selenic.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include')
-rw-r--r--include/linux/slab.h35
1 file changed, 35 insertions, 0 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d1ea4051b996..1fb77a9cc148 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -53,6 +53,8 @@ typedef struct kmem_cache kmem_cache_t;
 53  53	#define SLAB_CTOR_ATOMIC	0x002UL	/* tell constructor it can't sleep */
 54  54	#define SLAB_CTOR_VERIFY	0x004UL	/* tell constructor it's a verify call */
 55  55	
56#ifndef CONFIG_SLOB
57
 56  58	/* prototypes */
 57  59	extern void __init kmem_cache_init(void);
 58  60	
@@ -134,6 +136,39 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
134 136	extern int FASTCALL(kmem_cache_reap(int));
135 137	extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
136 138	
139#else /* CONFIG_SLOB */
140
141/* SLOB allocator routines */
142
143void kmem_cache_init(void);
144struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags);
145struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
146 unsigned long,
147 void (*)(void *, struct kmem_cache *, unsigned long),
148 void (*)(void *, struct kmem_cache *, unsigned long));
149int kmem_cache_destroy(struct kmem_cache *c);
150void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
151void kmem_cache_free(struct kmem_cache *c, void *b);
152const char *kmem_cache_name(struct kmem_cache *);
153void *kmalloc(size_t size, gfp_t flags);
154void *kzalloc(size_t size, gfp_t flags);
155void kfree(const void *m);
156unsigned int ksize(const void *m);
157unsigned int kmem_cache_size(struct kmem_cache *c);
158
159static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
160{
161 return kzalloc(n * size, flags);
162}
163
164#define kmem_cache_shrink(d) (0)
165#define kmem_cache_reap(a)
166#define kmem_ptr_validate(a, b) (0)
167#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
168#define kmalloc_node(s, f, n) kmalloc(s, f)
169
170#endif /* CONFIG_SLOB */
171
137 172	/* System wide caches */
138 173	extern kmem_cache_t *vm_area_cachep;
139 174	extern kmem_cache_t *names_cachep;