author	Christoph Lameter <cl@linux.com>	2012-11-28 11:23:07 -0500
committer	Pekka Enberg <penberg@kernel.org>	2012-12-11 05:14:27 -0500
commit	dffb4d605c23110e3ad54b8c9f244a8235c013c2 (patch)
tree	ba69e37ef8c5c69ea4ae68b2f3679ab77697613a /mm/slub.c
parent	45530c4474d258b822e2639c786606d8257aad8b (diff)
slub: Use statically allocated kmem_cache boot structure for bootstrap
Simplify bootstrap by statically allocating two kmem_cache structures. These are freed after bootup is complete. This allows us to no longer worry about calculating the sizes of kmem_cache structures during bootstrap.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
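The underlying pattern is to set up the allocator's own descriptors in static storage first (when nothing can be allocated yet), and then, once the allocator is able to serve requests, copy them into properly allocated objects and abandon the static copies. The following is a minimal userspace sketch of that idea only; the names (struct cache, cache_init, bootstrap, boot_cache) are hypothetical stand-ins and are not the kernel's kmem_cache API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for struct kmem_cache: a descriptor the
 * allocator itself needs before it can allocate anything. */
struct cache {
        const char *name;
        size_t object_size;
};

/* Stage 1: fill in a caller-provided (here: static) descriptor.
 * Nothing is allocated dynamically at this point. */
static void cache_init(struct cache *c, const char *name, size_t size)
{
        c->name = name;
        c->object_size = size;
}

/* Stage 2: once allocation works, move the descriptor into properly
 * allocated storage and stop using the static copy. */
static struct cache *bootstrap(const struct cache *static_cache)
{
        struct cache *c = malloc(sizeof(*c));

        if (!c)
                abort();
        memcpy(c, static_cache, sizeof(*c));
        return c;
}

int main(void)
{
        static struct cache boot_cache;         /* static boot structure */
        struct cache *cache;

        cache_init(&boot_cache, "cache", 64);   /* early: static storage only */
        cache = bootstrap(&boot_cache);         /* later: real allocation */

        printf("%s: %zu bytes per object\n", cache->name, cache->object_size);
        free(cache);
        return 0;
}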
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	67
1 file changed, 20 insertions(+), 47 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 1be172c157c3..c82453ac812a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -176,8 +176,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #define __OBJECT_POISON		0x80000000UL /* Poison object */
 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
 
-static int kmem_size = sizeof(struct kmem_cache);
-
 #ifdef CONFIG_SMP
 static struct notifier_block slab_notifier;
 #endif
@@ -3634,15 +3632,16 @@ static int slab_memory_callback(struct notifier_block *self,
 
 /*
  * Used for early kmem_cache structures that were allocated using
- * the page allocator
+ * the page allocator. Allocate them properly then fix up the pointers
+ * that may be pointing to the wrong kmem_cache structure.
  */
 
-static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 {
 	int node;
+	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
-	list_add(&s->list, &slab_caches);
-	s->refcount = -1;
+	memcpy(s, static_cache, kmem_cache->object_size);
 
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3658,70 +3657,44 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
 #endif
 		}
 	}
+	list_add(&s->list, &slab_caches);
+	return s;
 }
 
 void __init kmem_cache_init(void)
 {
+	static __initdata struct kmem_cache boot_kmem_cache,
+		boot_kmem_cache_node;
 	int i;
-	int caches = 0;
-	struct kmem_cache *temp_kmem_cache;
-	int order;
-	struct kmem_cache *temp_kmem_cache_node;
-	unsigned long kmalloc_size;
+	int caches = 2;
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
 
-	kmem_size = offsetof(struct kmem_cache, node) +
-			nr_node_ids * sizeof(struct kmem_cache_node *);
-
-	/* Allocate two kmem_caches from the page allocator */
-	kmalloc_size = ALIGN(kmem_size, cache_line_size());
-	order = get_order(2 * kmalloc_size);
-	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
+	kmem_cache_node = &boot_kmem_cache_node;
+	kmem_cache = &boot_kmem_cache;
 
-	/*
-	 * Must first have the slab cache available for the allocations of the
-	 * struct kmem_cache_node's. There is special bootstrap code in
-	 * kmem_cache_open for slab_state == DOWN.
-	 */
-	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
-
-	kmem_cache_node->name = "kmem_cache_node";
-	kmem_cache_node->size = kmem_cache_node->object_size =
-					sizeof(struct kmem_cache_node);
-	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache_node, "kmem_cache_node",
+		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
 
-	temp_kmem_cache = kmem_cache;
-	kmem_cache->name = "kmem_cache";
-	kmem_cache->size = kmem_cache->object_size = kmem_size;
-	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache, "kmem_cache",
+			offsetof(struct kmem_cache, node) +
+				nr_node_ids * sizeof(struct kmem_cache_node *),
+			SLAB_HWCACHE_ALIGN);
 
-	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
+	kmem_cache = bootstrap(&boot_kmem_cache);
 
 	/*
 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
 	 * kmem_cache_node is separately allocated so no need to
 	 * update any list pointers.
 	 */
-	temp_kmem_cache_node = kmem_cache_node;
-
-	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
-
-	kmem_cache_bootstrap_fixup(kmem_cache_node);
-
-	caches++;
-	kmem_cache_bootstrap_fixup(kmem_cache);
-	caches++;
-	/* Free temporary boot structure */
-	free_pages((unsigned long)temp_kmem_cache, order);
+	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
 
 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
 