-rw-r--r--	init/Kconfig	 7
-rw-r--r--	mm/slob.c	52
2 files changed, 47 insertions(+), 12 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index 4e009fde4b69..9264895ab331 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -577,14 +577,11 @@ config SLUB
 	   and has enhanced diagnostics.
 
 config SLOB
-#
-#	SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported
-#
-	depends on EMBEDDED && !SMP && !SPARSEMEM
+	depends on EMBEDDED && !SPARSEMEM
 	bool "SLOB (Simple Allocator)"
 	help
 	   SLOB replaces the SLAB allocator with a drastically simpler
-	   allocator. SLOB is more space efficient that SLAB but does not
+	   allocator. SLOB is more space efficient than SLAB but does not
 	   scale well (single lock for all operations) and is also highly
 	   susceptible to fragmentation. SLUB can accomplish a higher object
 	   density. It is usually better to use SLUB instead of SLOB.
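
With the !SMP dependency dropped, SLOB becomes selectable on SMP kernels. A hypothetical .config fragment that this change newly permits (assuming SPARSEMEM stays disabled):

CONFIG_EMBEDDED=y
CONFIG_SMP=y
CONFIG_SLOB=y
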
diff --git a/mm/slob.c b/mm/slob.c
index c6933bc19bcd..57bb72ed0d46 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/timer.h>
+#include <linux/rcupdate.h>
 
 struct slob_block {
 	int units;
@@ -53,6 +54,16 @@ struct bigblock {
 };
 typedef struct bigblock bigblock_t;
 
+/*
+ * struct slob_rcu is inserted at the tail of allocated slob blocks, which
+ * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * the block using call_rcu.
+ */
+struct slob_rcu {
+	struct rcu_head head;
+	int size;
+};
+
 static slob_t arena = { .next = &arena, .units = 1 };
 static slob_t *slobfree = &arena;
 static bigblock_t *bigblocks;
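
As an aside, the footer arithmetic this comment describes can be checked in isolation. A minimal user-space sketch (the 128-byte object size and the rcu_head stand-in are assumptions, not part of the patch; the kernel's struct rcu_head differs only in detail):

#include <stdio.h>
#include <stdlib.h>

/* stand-in for the kernel's struct rcu_head, for layout purposes only */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

struct slob_rcu {
	struct rcu_head head;
	int size;
};

int main(void)
{
	size_t object = 128;	/* hypothetical object size */
	/* kmem_cache_create() grows c->size to leave room for the footer */
	size_t size = object + sizeof(struct slob_rcu);
	char *b = malloc(size);

	/* kmem_cache_free() places the footer at the tail of the block */
	struct slob_rcu *rcu = (struct slob_rcu *)(b + size - sizeof(struct slob_rcu));
	rcu->size = (int)size;

	/* kmem_rcu_free() walks back from the rcu footer to the block start */
	char *start = (char *)rcu - (rcu->size - sizeof(struct slob_rcu));
	printf("recovered block start: %s\n", start == b ? "ok" : "BUG");
	free(b);
	return 0;
}
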
@@ -266,6 +277,7 @@ size_t ksize(const void *block)
 
 struct kmem_cache {
 	unsigned int size, align;
+	unsigned long flags;
 	const char *name;
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
 	void (*dtor)(void *, struct kmem_cache *, unsigned long);
@@ -283,6 +295,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	if (c) {
 		c->name = name;
 		c->size = size;
+		if (flags & SLAB_DESTROY_BY_RCU) {
+			BUG_ON(dtor);
+			/* leave room for rcu footer at the end of object */
+			c->size += sizeof(struct slob_rcu);
+		}
+		c->flags = flags;
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
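
From the caller's side, an RCU-freed cache is requested at creation time. A sketch against the six-argument kmem_cache_create() of this era (the object type and names are hypothetical; note that dtor must be NULL, since a destructor cannot safely run while RCU readers may still dereference the object):

#include <linux/slab.h>

struct my_obj {
	int key;
	atomic_t refcnt;
};

static struct kmem_cache *my_cache;

static int __init my_module_init(void)
{
	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
				     SLAB_DESTROY_BY_RCU, NULL, NULL);
	if (!my_cache)
		return -ENOMEM;
	return 0;
}

Under SLOB, each object from such a cache is sizeof(struct slob_rcu) larger than requested, since the footer is carried per object rather than per slab page.
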
@@ -328,15 +346,35 @@ void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_zalloc);
 
-void kmem_cache_free(struct kmem_cache *c, void *b)
+static void __kmem_cache_free(void *b, int size)
 {
-	if (c->dtor)
-		c->dtor(b, c, 0);
-
-	if (c->size < PAGE_SIZE)
-		slob_free(b, c->size);
+	if (size < PAGE_SIZE)
+		slob_free(b, size);
 	else
-		free_pages((unsigned long)b, get_order(c->size));
+		free_pages((unsigned long)b, get_order(size));
+}
+
+static void kmem_rcu_free(struct rcu_head *head)
+{
+	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+
+	__kmem_cache_free(b, slob_rcu->size);
+}
+
+void kmem_cache_free(struct kmem_cache *c, void *b)
+{
+	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+		struct slob_rcu *slob_rcu;
+		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+		INIT_RCU_HEAD(&slob_rcu->head);
+		slob_rcu->size = c->size;
+		call_rcu(&slob_rcu->head, kmem_rcu_free);
+	} else {
+		if (c->dtor)
+			c->dtor(b, c, 0);
+		__kmem_cache_free(b, c->size);
+	}
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
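A note on the semantics this buys: SLAB_DESTROY_BY_RCU guarantees only that the underlying memory is not handed back to the page allocator until a grace period has elapsed; the object itself may be reused for a new allocation from the same cache at any time. Lockless readers must therefore revalidate identity after lookup. A rough sketch with hypothetical names (my_table, hash, and my_obj are not from this patch):

static struct my_obj *my_obj_get(int key)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = rcu_dereference(my_table[hash(key)]);	/* hypothetical table */
	/*
	 * The memory is type-stable under rcu_read_lock(), but the object
	 * may have been freed and recycled: re-check the key and take a
	 * reference that fails on a dead object.
	 */
	if (obj && obj->key == key && atomic_inc_not_zero(&obj->refcnt)) {
		rcu_read_unlock();
		return obj;
	}
	rcu_read_unlock();
	return NULL;
}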