author	Christoph Lameter <cl@linux.com>	2012-07-06 16:25:11 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-07-09 05:13:35 -0400
commit	97d06609158e61f6bdf538c4a6788e2de492236f (patch)
tree	fa3f57ff3e2d3f4f866d84dd9d634ade43941be8 /mm
parent	039363f38bfe5f6281e9eae5e0518b11577d9d50 (diff)
mm, sl[aou]b: Common definition for boot state of the slab allocators
All allocators have some sort of support for the bootstrap status. Set up a
common definition for the boot states and make all slab allocators use that
definition.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	45
-rw-r--r--	mm/slab.h	29
-rw-r--r--	mm/slab_common.c	9
-rw-r--r--	mm/slob.c	14
-rw-r--r--	mm/slub.c	21
5 files changed, 62 insertions, 56 deletions
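The net effect of the patch is that SLAB, SLOB and SLUB now share one bootstrap
state machine (enum slab_state in mm/slab.h) and a single slab_is_available()
helper in mm/slab_common.c. A minimal sketch of the usual caller-side pattern
this enables — illustrative only, not part of this patch; boot_alloc() is a
hypothetical helper and the alloc_bootmem() fallback is just one common
pre-slab choice:

	#include <linux/slab.h>		/* kmalloc(), slab_is_available() */
	#include <linux/bootmem.h>	/* alloc_bootmem() */

	/* Allocate during early boot, whether or not slab is up yet. */
	static void * __init boot_alloc(size_t size)
	{
		if (slab_is_available())	/* i.e. slab_state >= UP */
			return kmalloc(size, GFP_KERNEL);

		/* Slab not bootstrapped yet: fall back to the boot allocator. */
		return alloc_bootmem(size);
	}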
diff --git a/mm/slab.c b/mm/slab.c
index 10c821e492bf..59a466b85b0f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -87,6 +87,7 @@
  */
 
 #include <linux/slab.h>
+#include "slab.h"
 #include <linux/mm.h>
 #include <linux/poison.h>
 #include <linux/swap.h>
@@ -565,27 +566,6 @@ static struct kmem_cache cache_cache = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-	NONE,
-	PARTIAL_AC,
-	PARTIAL_L3,
-	EARLY,
-	LATE,
-	FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-	return g_cpucache_up >= EARLY;
-}
-
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -651,7 +631,7 @@ static void init_node_lock_keys(int q)
 {
 	struct cache_sizes *s = malloc_sizes;
 
-	if (g_cpucache_up < LATE)
+	if (slab_state < UP)
 		return;
 
 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1649,14 +1629,14 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	g_cpucache_up = EARLY;
+	slab_state = UP;
 }
 
 void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
-	g_cpucache_up = LATE;
+	slab_state = UP;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
@@ -1668,6 +1648,9 @@ void __init kmem_cache_init_late(void)
 		BUG();
 	mutex_unlock(&cache_chain_mutex);
 
+	/* Done! */
+	slab_state = FULL;
+
 	/*
 	 * Register a cpu startup notifier callback that initializes
 	 * cpu_cache_get for all new cpus
@@ -1699,7 +1682,7 @@ static int __init cpucache_init(void)
 		start_cpu_timer(cpu);
 
 	/* Done! */
-	g_cpucache_up = FULL;
+	slab_state = FULL;
 	return 0;
 }
 __initcall(cpucache_init);
@@ -2167,10 +2150,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 
 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (g_cpucache_up >= LATE)
+	if (slab_state >= FULL)
 		return enable_cpucache(cachep, gfp);
 
-	if (g_cpucache_up == NONE) {
+	if (slab_state == DOWN) {
 		/*
 		 * Note: the first kmem_cache_create must create the cache
 		 * that's used by kmalloc(24), otherwise the creation of
@@ -2185,16 +2168,16 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		 */
 		set_up_list3s(cachep, SIZE_AC);
 		if (INDEX_AC == INDEX_L3)
-			g_cpucache_up = PARTIAL_L3;
+			slab_state = PARTIAL_L3;
 		else
-			g_cpucache_up = PARTIAL_AC;
+			slab_state = PARTIAL_ARRAYCACHE;
 	} else {
 		cachep->array[smp_processor_id()] =
 			kmalloc(sizeof(struct arraycache_init), gfp);
 
-		if (g_cpucache_up == PARTIAL_AC) {
+		if (slab_state == PARTIAL_ARRAYCACHE) {
 			set_up_list3s(cachep, SIZE_L3);
-			g_cpucache_up = PARTIAL_L3;
+			slab_state = PARTIAL_L3;
 		} else {
 			int node;
 			for_each_online_node(node) {
diff --git a/mm/slab.h b/mm/slab.h
new file mode 100644
index 000000000000..f9a9815cdc82
--- /dev/null
+++ b/mm/slab.h
@@ -0,0 +1,29 @@
+#ifndef MM_SLAB_H
+#define MM_SLAB_H
+/*
+ * Internal slab definitions
+ */
+
+/*
+ * State of the slab allocator.
+ *
+ * This is used to describe the states of the allocator during bootup.
+ * Allocators use this to gradually bootstrap themselves. Most allocators
+ * have the problem that the structures used for managing slab caches are
+ * allocated from slab caches themselves.
+ */
+enum slab_state {
+	DOWN,			/* No slab functionality yet */
+	PARTIAL,		/* SLUB: kmem_cache_node available */
+	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
+	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
+	UP,			/* Slab caches usable but not all extras yet */
+	FULL			/* Everything is working */
+};
+
+extern enum slab_state slab_state;
+
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *));
+
+#endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 80412beb67cc..ca1aaf69a1f5 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -16,6 +16,10 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 
+#include "slab.h"
+
+enum slab_state slab_state;
+
 /*
  * kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
@@ -66,3 +70,8 @@ out:
 	return s;
 }
 EXPORT_SYMBOL(kmem_cache_create);
+
+int slab_is_available(void)
+{
+	return slab_state >= UP;
+}
diff --git a/mm/slob.c b/mm/slob.c
index d63923d549ec..0111e0dece93 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -59,6 +59,8 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include "slab.h"
+
 #include <linux/mm.h>
 #include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
@@ -531,6 +533,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 		c->align = align;
 
 		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+		c->refcount = 1;
 	}
 	return c;
 }
@@ -616,19 +619,12 @@ int kmem_cache_shrink(struct kmem_cache *d)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-static unsigned int slob_ready __read_mostly;
-
-int slab_is_available(void)
-{
-	return slob_ready;
-}
-
 void __init kmem_cache_init(void)
 {
-	slob_ready = 1;
+	slab_state = UP;
 }
 
 void __init kmem_cache_init_late(void)
 {
-	/* Nothing to do */
+	slab_state = FULL;
 }
diff --git a/mm/slub.c b/mm/slub.c
index 6551cc9a51f8..4c385164d9f7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
+#include "slab.h"
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmemcheck.h>
@@ -182,13 +183,6 @@ static int kmem_size = sizeof(struct kmem_cache);
 static struct notifier_block slab_notifier;
 #endif
 
-static enum {
-	DOWN,		/* No slab functionality available */
-	PARTIAL,	/* Kmem_cache_node works */
-	UP,		/* Everything works but does not show up in sysfs */
-	SYSFS		/* Sysfs up */
-} slab_state = DOWN;
-
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
@@ -237,11 +231,6 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
  * Core slab cache functions
  *******************************************************************/
 
-int slab_is_available(void)
-{
-	return slab_state >= UP;
-}
-
 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 {
 	return s->node[node];
@@ -5274,7 +5263,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	const char *name;
 	int unmergeable;
 
-	if (slab_state < SYSFS)
+	if (slab_state < FULL)
 		/* Defer until later */
 		return 0;
 
@@ -5319,7 +5308,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
 static void sysfs_slab_remove(struct kmem_cache *s)
 {
-	if (slab_state < SYSFS)
+	if (slab_state < FULL)
 		/*
 		 * Sysfs has not been setup yet so no need to remove the
 		 * cache from sysfs.
@@ -5347,7 +5336,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
 {
 	struct saved_alias *al;
 
-	if (slab_state == SYSFS) {
+	if (slab_state == FULL) {
 		/*
 		 * If we have a leftover link then remove it.
 		 */
@@ -5380,7 +5369,7 @@ static int __init slab_sysfs_init(void)
 		return -ENOSYS;
 	}
 
-	slab_state = SYSFS;
+	slab_state = FULL;
 
 	list_for_each_entry(s, &slab_caches, list) {
 		err = sysfs_slab_add(s);