path: root/mm/slub.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-18 13:56:07 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-18 13:56:07 -0500
commit	ae664dba2724e59ddd66291b895f7370e28b9a7a (patch)
tree	d6e214bdc9999bcb8b0a067053aa6934cfd9d60e /mm/slub.c
parent	a2faf2fc534f57ba26bc4d613795236ed4f5fb1c (diff)
parent	08afe22c68d8c07e8e31ee6491c37f36199ba14b (diff)
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB changes from Pekka Enberg:
 "This contains preparational work from Christoph Lameter and Glauber
  Costa for SLAB memcg and cleanups and improvements from Ezequiel
  Garcia and Joonsoo Kim.

  Please note that the SLOB cleanup commit from Arnd Bergmann already
  appears in your tree but I had also merged it myself which is why it
  shows up in the shortlog."

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  mm/sl[aou]b: Common alignment code
  slab: Use the new create_boot_cache function to simplify bootstrap
  slub: Use statically allocated kmem_cache boot structure for bootstrap
  mm, sl[au]b: create common functions for boot slab creation
  slab: Simplify bootstrap
  slub: Use correct cpu_slab on dead cpu
  mm: fix slab.c kernel-doc warnings
  mm/slob: use min_t() to compare ARCH_SLAB_MINALIGN
  slab: Ignore internal flags in cache creation
  mm/slob: Use free_page instead of put_page for page-size kmalloc allocations
  mm/sl[aou]b: Move common kmem_cache_size() to slab.h
  mm/slob: Use object_size field in kmem_cache_size()
  mm/slob: Drop usage of page->private for storing page-sized allocations
  slub: Commonize slab_cache field in struct page
  sl[au]b: Process slabinfo_show in common code
  mm/sl[au]b: Move print_slabinfo_header to slab_common.c
  mm/sl[au]b: Move slabinfo processing to slab_common.c
  slub: remove one code path and reduce lock contention in __slab_free()
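The SLUB hunks below replace the old page-allocator-based early boot (temporary kmem_cache copies plus kmem_cache_bootstrap_fixup()) with statically allocated boot structures that create_boot_cache() initialises and bootstrap() later copies into properly allocated kmem_cache objects, fixing up page->slab_cache back-pointers. What follows is a minimal userspace sketch of that pattern only, not kernel code: the struct and helper names mirror the diff, but the sizes and the malloc-based allocation are purely illustrative assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for struct kmem_cache. */
struct cache {
	const char *name;
	size_t object_size;
};

/* Stand-in for create_boot_cache(): set up a statically allocated boot cache. */
static void create_boot_cache(struct cache *c, const char *name, size_t size)
{
	c->name = name;
	c->object_size = size;
}

/*
 * Stand-in for bootstrap(): once a working allocator exists, copy the
 * static boot structure into properly allocated storage.  The real
 * kernel function additionally walks the partial/full lists and
 * repoints page->slab_cache at the new structure.
 */
static struct cache *bootstrap(struct cache *static_cache)
{
	struct cache *c = malloc(sizeof(*c));

	memcpy(c, static_cache, sizeof(*c));
	return c;
}

int main(void)
{
	/* Mirrors boot_kmem_cache / boot_kmem_cache_node in kmem_cache_init(). */
	static struct cache boot_kmem_cache, boot_kmem_cache_node;
	struct cache *kmem_cache, *kmem_cache_node;

	create_boot_cache(&boot_kmem_cache_node, "kmem_cache_node", 64);
	create_boot_cache(&boot_kmem_cache, "kmem_cache", 192);

	kmem_cache = bootstrap(&boot_kmem_cache);
	kmem_cache_node = bootstrap(&boot_kmem_cache_node);

	printf("%s: %zu bytes\n", kmem_cache->name, kmem_cache->object_size);
	printf("%s: %zu bytes\n", kmem_cache_node->name, kmem_cache_node->object_size);

	free(kmem_cache);
	free(kmem_cache_node);
	return 0;
}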
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	301
1 file changed, 70 insertions(+), 231 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 487f0bdd53c0..87f9f32bf0cd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -112,9 +112,6 @@
  * the fast path and disables lockless freelists.
  */
 
-#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_DEBUG_FREE)
-
 static inline int kmem_cache_debug(struct kmem_cache *s)
 {
 #ifdef CONFIG_SLUB_DEBUG
@@ -179,8 +176,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #define __OBJECT_POISON 0x80000000UL /* Poison object */
 #define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
 
-static int kmem_size = sizeof(struct kmem_cache);
-
 #ifdef CONFIG_SMP
 static struct notifier_block slab_notifier;
 #endif
@@ -1092,11 +1087,11 @@ static noinline struct kmem_cache_node *free_debug_processing(
 	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
 		goto out;
 
-	if (unlikely(s != page->slab)) {
+	if (unlikely(s != page->slab_cache)) {
 		if (!PageSlab(page)) {
 			slab_err(s, page, "Attempt to free object(0x%p) "
 				"outside of slab", object);
-		} else if (!page->slab) {
+		} else if (!page->slab_cache) {
 			printk(KERN_ERR
 				"SLUB <none>: no slab for object 0x%p.\n",
 						object);
@@ -1357,7 +1352,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		goto out;
 
 	inc_slabs_node(s, page_to_nid(page), page->objects);
-	page->slab = s;
+	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
 		SetPageSlabPfmemalloc(page);
@@ -1424,7 +1419,7 @@ static void rcu_free_slab(struct rcu_head *h)
 	else
 		page = container_of((struct list_head *)h, struct page, lru);
 
-	__free_slab(page->slab, page);
+	__free_slab(page->slab_cache, page);
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
@@ -1872,12 +1867,14 @@ redo:
 /*
  * Unfreeze all the cpu partial slabs.
  *
- * This function must be called with interrupt disabled.
+ * This function must be called with interrupts disabled
+ * for the cpu using c (or some other guarantee must be there
+ * to guarantee no concurrent accesses).
  */
-static void unfreeze_partials(struct kmem_cache *s)
+static void unfreeze_partials(struct kmem_cache *s,
+		struct kmem_cache_cpu *c)
 {
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
-	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
 	struct page *page, *discard_page = NULL;
 
 	while ((page = c->partial)) {
@@ -1963,7 +1960,7 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 	 * set to the per node partial list.
 	 */
 	local_irq_save(flags);
-	unfreeze_partials(s);
+	unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
 	local_irq_restore(flags);
 	oldpage = NULL;
 	pobjects = 0;
@@ -2006,7 +2003,7 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 		if (c->page)
 			flush_slab(s, c);
 
-		unfreeze_partials(s);
+		unfreeze_partials(s, c);
 	}
 }
 
@@ -2459,7 +2456,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	void *prior;
 	void **object = (void *)x;
 	int was_frozen;
-	int inuse;
 	struct page new;
 	unsigned long counters;
 	struct kmem_cache_node *n = NULL;
@@ -2472,13 +2468,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 
 	do {
+		if (unlikely(n)) {
+			spin_unlock_irqrestore(&n->list_lock, flags);
+			n = NULL;
+		}
 		prior = page->freelist;
 		counters = page->counters;
 		set_freepointer(s, object, prior);
 		new.counters = counters;
 		was_frozen = new.frozen;
 		new.inuse--;
-		if ((!new.inuse || !prior) && !was_frozen && !n) {
+		if ((!new.inuse || !prior) && !was_frozen) {
 
 			if (!kmem_cache_debug(s) && !prior)
 
@@ -2503,7 +2503,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 			}
 		}
-		inuse = new.inuse;
 
 	} while (!cmpxchg_double_slab(s, page,
 		prior, counters,
@@ -2529,25 +2528,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 	}
 
+	if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+		goto slab_empty;
+
 	/*
-	 * was_frozen may have been set after we acquired the list_lock in
-	 * an earlier loop. So we need to check it here again.
+	 * Objects left in the slab. If it was not on the partial list before
+	 * then add it.
 	 */
-	if (was_frozen)
-		stat(s, FREE_FROZEN);
-	else {
-		if (unlikely(!inuse && n->nr_partial > s->min_partial))
-			goto slab_empty;
-
-		/*
-		 * Objects left in the slab. If it was not on the partial list before
-		 * then add it.
-		 */
-		if (unlikely(!prior)) {
-			remove_full(s, page);
-			add_partial(n, page, DEACTIVATE_TO_TAIL);
-			stat(s, FREE_ADD_PARTIAL);
-		}
+	if (kmem_cache_debug(s) && unlikely(!prior)) {
+		remove_full(s, page);
+		add_partial(n, page, DEACTIVATE_TO_TAIL);
+		stat(s, FREE_ADD_PARTIAL);
 	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return;
@@ -2623,9 +2614,9 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
-	if (kmem_cache_debug(s) && page->slab != s) {
+	if (kmem_cache_debug(s) && page->slab_cache != s) {
 		pr_err("kmem_cache_free: Wrong slab cache. %s but object"
-			" is from %s\n", page->slab->name, s->name);
+			" is from %s\n", page->slab_cache->name, s->name);
 		WARN_ON_ONCE(1);
 		return;
 	}
@@ -2769,32 +2760,6 @@ static inline int calculate_order(int size, int reserved)
 	return -ENOSYS;
 }
 
-/*
- * Figure out what the alignment of the objects will be.
- */
-static unsigned long calculate_alignment(unsigned long flags,
-		unsigned long align, unsigned long size)
-{
-	/*
-	 * If the user wants hardware cache aligned objects then follow that
-	 * suggestion if the object is sufficiently large.
-	 *
-	 * The hardware cache alignment cannot override the specified
-	 * alignment though. If that is greater then use it.
-	 */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		unsigned long ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-		align = max(align, ralign);
-	}
-
-	if (align < ARCH_SLAB_MINALIGN)
-		align = ARCH_SLAB_MINALIGN;
-
-	return ALIGN(align, sizeof(void *));
-}
-
 static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
@@ -2928,7 +2893,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
 	unsigned long size = s->object_size;
-	unsigned long align = s->align;
 	int order;
 
 	/*
@@ -3000,19 +2964,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 #endif
 
 	/*
-	 * Determine the alignment based on various parameters that the
-	 * user specified and the dynamic determination of cache line size
-	 * on bootup.
-	 */
-	align = calculate_alignment(flags, align, s->object_size);
-	s->align = align;
-
-	/*
 	 * SLUB stores one object immediately after another beginning from
 	 * offset 0. In order to align the objects we have to simply size
 	 * each object to conform to the alignment.
 	 */
-	size = ALIGN(size, align);
+	size = ALIGN(size, s->align);
 	s->size = size;
 	if (forced_order >= 0)
 		order = forced_order;
@@ -3041,7 +2997,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	s->max = s->oo;
 
 	return !!oo_objects(s->oo);
-
 }
 
 static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
@@ -3127,15 +3082,6 @@ error:
 	return -EINVAL;
 }
 
-/*
- * Determine the size of a slab object
- */
-unsigned int kmem_cache_size(struct kmem_cache *s)
-{
-	return s->object_size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
 						const char *text)
 {
@@ -3261,32 +3207,6 @@ static int __init setup_slub_nomerge(char *str)
 
 __setup("slub_nomerge", setup_slub_nomerge);
 
-static struct kmem_cache *__init create_kmalloc_cache(const char *name,
-						int size, unsigned int flags)
-{
-	struct kmem_cache *s;
-
-	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-
-	s->name = name;
-	s->size = s->object_size = size;
-	s->align = ARCH_KMALLOC_MINALIGN;
-
-	/*
-	 * This function is called with IRQs disabled during early-boot on
-	 * single CPU so there's no need to take slab_mutex here.
-	 */
-	if (kmem_cache_open(s, flags))
-		goto panic;
-
-	list_add(&s->list, &slab_caches);
-	return s;
-
-panic:
-	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
-	return NULL;
-}
-
 /*
  * Conversion table for small slabs sizes / 8 to the index in the
  * kmalloc array. This is necessary for slabs < 192 since we have non power
@@ -3424,7 +3344,7 @@ size_t ksize(const void *object)
 		return PAGE_SIZE << compound_order(page);
 	}
 
-	return slab_ksize(page->slab);
+	return slab_ksize(page->slab_cache);
 }
 EXPORT_SYMBOL(ksize);
 
@@ -3449,8 +3369,8 @@ bool verify_mem_not_deleted(const void *x)
 	}
 
 	slab_lock(page);
-	if (on_freelist(page->slab, page, object)) {
-		object_err(page->slab, page, object, "Object is on free-list");
+	if (on_freelist(page->slab_cache, page, object)) {
+		object_err(page->slab_cache, page, object, "Object is on free-list");
 		rv = false;
 	} else {
 		rv = true;
@@ -3481,7 +3401,7 @@ void kfree(const void *x)
 		__free_pages(page, compound_order(page));
 		return;
 	}
-	slab_free(page->slab, page, object, _RET_IP_);
+	slab_free(page->slab_cache, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3676,15 +3596,16 @@ static int slab_memory_callback(struct notifier_block *self,
 
 /*
  * Used for early kmem_cache structures that were allocated using
- * the page allocator
+ * the page allocator. Allocate them properly then fix up the pointers
+ * that may be pointing to the wrong kmem_cache structure.
  */
 
-static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 {
 	int node;
+	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
-	list_add(&s->list, &slab_caches);
-	s->refcount = -1;
+	memcpy(s, static_cache, kmem_cache->object_size);
 
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3692,78 +3613,52 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
 
 		if (n) {
 			list_for_each_entry(p, &n->partial, lru)
-				p->slab = s;
+				p->slab_cache = s;
 
 #ifdef CONFIG_SLUB_DEBUG
 			list_for_each_entry(p, &n->full, lru)
-				p->slab = s;
+				p->slab_cache = s;
 #endif
 		}
 	}
+	list_add(&s->list, &slab_caches);
+	return s;
 }
 
 void __init kmem_cache_init(void)
 {
+	static __initdata struct kmem_cache boot_kmem_cache,
+		boot_kmem_cache_node;
 	int i;
-	int caches = 0;
-	struct kmem_cache *temp_kmem_cache;
-	int order;
-	struct kmem_cache *temp_kmem_cache_node;
-	unsigned long kmalloc_size;
+	int caches = 2;
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
 
-	kmem_size = offsetof(struct kmem_cache, node) +
-		nr_node_ids * sizeof(struct kmem_cache_node *);
-
-	/* Allocate two kmem_caches from the page allocator */
-	kmalloc_size = ALIGN(kmem_size, cache_line_size());
-	order = get_order(2 * kmalloc_size);
-	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
-
-	/*
-	 * Must first have the slab cache available for the allocations of the
-	 * struct kmem_cache_node's. There is special bootstrap code in
-	 * kmem_cache_open for slab_state == DOWN.
-	 */
-	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
+	kmem_cache_node = &boot_kmem_cache_node;
+	kmem_cache = &boot_kmem_cache;
 
-	kmem_cache_node->name = "kmem_cache_node";
-	kmem_cache_node->size = kmem_cache_node->object_size =
-				sizeof(struct kmem_cache_node);
-	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache_node, "kmem_cache_node",
+		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
 
-	temp_kmem_cache = kmem_cache;
-	kmem_cache->name = "kmem_cache";
-	kmem_cache->size = kmem_cache->object_size = kmem_size;
-	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache, "kmem_cache",
+			offsetof(struct kmem_cache, node) +
+				nr_node_ids * sizeof(struct kmem_cache_node *),
+			SLAB_HWCACHE_ALIGN);
 
-	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
+	kmem_cache = bootstrap(&boot_kmem_cache);
 
 	/*
 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
 	 * kmem_cache_node is separately allocated so no need to
 	 * update any list pointers.
 	 */
-	temp_kmem_cache_node = kmem_cache_node;
-
-	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
-
-	kmem_cache_bootstrap_fixup(kmem_cache_node);
-
-	caches++;
-	kmem_cache_bootstrap_fixup(kmem_cache);
-	caches++;
-	/* Free temporary boot structure */
-	free_pages((unsigned long)temp_kmem_cache, order);
+	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
 
 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
 
@@ -3964,6 +3859,10 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 	if (err)
 		return err;
 
+	/* Mutex is not taken during early boot */
+	if (slab_state <= UP)
+		return 0;
+
 	mutex_unlock(&slab_mutex);
 	err = sysfs_slab_add(s);
 	mutex_lock(&slab_mutex);
@@ -5265,13 +5164,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 {
 	int err;
 	const char *name;
-	int unmergeable;
+	int unmergeable = slab_unmergeable(s);
 
-	if (slab_state < FULL)
-		/* Defer until later */
-		return 0;
-
-	unmergeable = slab_unmergeable(s);
 	if (unmergeable) {
 		/*
 		 * Slabcache can never be merged so we can use the name proper.
@@ -5405,49 +5299,14 @@ __initcall(slab_sysfs_init);
  * The /proc/slabinfo ABI
  */
 #ifdef CONFIG_SLABINFO
-static void print_slabinfo_header(struct seq_file *m)
-{
-	seq_puts(m, "slabinfo - version: 2.1\n");
-	seq_puts(m, "# name <active_objs> <num_objs> <object_size> "
-		 "<objperslab> <pagesperslab>");
-	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
-	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
-	seq_putc(m, '\n');
-}
-
-static void *s_start(struct seq_file *m, loff_t *pos)
-{
-	loff_t n = *pos;
-
-	mutex_lock(&slab_mutex);
-	if (!n)
-		print_slabinfo_header(m);
-
-	return seq_list_start(&slab_caches, *pos);
-}
-
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
-	return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
-	mutex_unlock(&slab_mutex);
-}
-
-static int s_show(struct seq_file *m, void *p)
+void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
 {
 	unsigned long nr_partials = 0;
 	unsigned long nr_slabs = 0;
-	unsigned long nr_inuse = 0;
 	unsigned long nr_objs = 0;
 	unsigned long nr_free = 0;
-	struct kmem_cache *s;
 	int node;
 
-	s = list_entry(p, struct kmem_cache, list);
-
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
 
@@ -5460,41 +5319,21 @@ static int s_show(struct seq_file *m, void *p)
 		nr_free += count_partial(n, count_free);
 	}
 
-	nr_inuse = nr_objs - nr_free;
-
-	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
-		   nr_objs, s->size, oo_objects(s->oo),
-		   (1 << oo_order(s->oo)));
-	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
-	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
-		   0UL);
-	seq_putc(m, '\n');
-	return 0;
+	sinfo->active_objs = nr_objs - nr_free;
+	sinfo->num_objs = nr_objs;
+	sinfo->active_slabs = nr_slabs;
+	sinfo->num_slabs = nr_slabs;
+	sinfo->objects_per_slab = oo_objects(s->oo);
+	sinfo->cache_order = oo_order(s->oo);
 }
 
-static const struct seq_operations slabinfo_op = {
-	.start = s_start,
-	.next = s_next,
-	.stop = s_stop,
-	.show = s_show,
-};
-
-static int slabinfo_open(struct inode *inode, struct file *file)
+void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
 {
-	return seq_open(file, &slabinfo_op);
 }
 
-static const struct file_operations proc_slabinfo_operations = {
-	.open = slabinfo_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
-
-static int __init slab_proc_init(void)
+ssize_t slabinfo_write(struct file *file, const char __user *buffer,
+		       size_t count, loff_t *ppos)
 {
-	proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
-	return 0;
+	return -EIO;
 }
-module_init(slab_proc_init);
 #endif /* CONFIG_SLABINFO */
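A note on the final slabinfo hunks: per the shortlog ("mm/sl[au]b: Move slabinfo processing to slab_common.c"), the /proc/slabinfo seq_file machinery now lives in common code, and SLUB only fills a struct slabinfo via get_slabinfo() (plus the slabinfo_show_stats()/slabinfo_write() hooks). The following is a rough userspace sketch of that division of labour only; the trimmed-down struct mirrors the fields set in the diff, while the counter values and the output format are invented for illustration.

#include <stdio.h>

/* Trimmed-down version of the struct slabinfo the common code consumes. */
struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

/* Allocator-specific part: fill in the counters (cf. get_slabinfo() above). */
static void get_slabinfo(struct slabinfo *sinfo)
{
	sinfo->num_objs = 128;		/* invented numbers, illustration only */
	sinfo->active_objs = 96;
	sinfo->num_slabs = 4;
	sinfo->active_slabs = 4;
	sinfo->objects_per_slab = 32;
	sinfo->cache_order = 1;
}

/* Common part: format one slabinfo-style line from the filled-in struct. */
static void show_slab(const char *name)
{
	struct slabinfo sinfo;

	get_slabinfo(&sinfo);
	printf("%-17s %6lu %6lu : slabdata %6lu %6lu\n", name,
	       sinfo.active_objs, sinfo.num_objs,
	       sinfo.active_slabs, sinfo.num_slabs);
}

int main(void)
{
	show_slab("kmalloc-64");
	return 0;
}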