author		Christoph Lameter <cl@linux.com>	2012-07-06 16:25:11 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-07-09 05:13:35 -0400
commit		97d06609158e61f6bdf538c4a6788e2de492236f (patch)
tree		fa3f57ff3e2d3f4f866d84dd9d634ade43941be8 /mm/slab.c
parent		039363f38bfe5f6281e9eae5e0518b11577d9d50 (diff)
mm, sl[aou]b: Common definition for boot state of the slab allocators
All allocators have some sort of support for the bootstrap status.
Set up a common definition for the boot states and make all slab
allocators use that definition.
Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
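
The shared definition itself lives outside this mm/slab.c-only diff, in the new mm/slab.h header. A sketch of the common state enum, reconstructed from the states referenced in the hunks below (the per-state comments are editorial, not quoted from the commit):

/* Sketch of the shared boot state for SLAB, SLOB and SLUB (mm/slab.h) */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};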
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	45
1 file changed, 14 insertions(+), 31 deletions(-)
@@ -87,6 +87,7 @@
  */
 
 #include	<linux/slab.h>
+#include	"slab.h"
 #include	<linux/mm.h>
 #include	<linux/poison.h>
 #include	<linux/swap.h>
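The new include is what gives slab.c access to the shared state. At minimum the header would need to declare something like the following (an assumption about mm/slab.h's contents, which this diff does not show):

/* Presumed declaration in mm/slab.h (not part of this diff) */
extern enum slab_state slab_state;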
@@ -565,27 +566,6 @@ static struct kmem_cache cache_cache = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-	NONE,
-	PARTIAL_AC,
-	PARTIAL_L3,
-	EARLY,
-	LATE,
-	FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-	return g_cpucache_up >= EARLY;
-}
-
 #ifdef CONFIG_LOCKDEP
 
 /*
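slab_is_available() disappears from slab.c here, but callers elsewhere in the kernel still need it, so it presumably moves to common code and is rephrased in terms of the shared state. A sketch of the relocated helper, assuming it lands in mm/slab_common.c:

/* Sketch: lets boot code ask whether the slab allocator is usable yet */
int slab_is_available(void)
{
	return slab_state >= UP;	/* UP replaces SLAB's old EARLY threshold */
}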
@@ -651,7 +631,7 @@ static void init_node_lock_keys(int q)
 {
 	struct cache_sizes *s = malloc_sizes;
 
-	if (g_cpucache_up < LATE)
+	if (slab_state < UP)
 		return;
 
 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
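The old SLAB-private states map onto the shared ones roughly as follows (an editorial reconstruction from the substitutions in this diff, not text from the commit):

/*
 * NONE        -> DOWN
 * PARTIAL_AC  -> PARTIAL_ARRAYCACHE
 * PARTIAL_L3  -> PARTIAL_L3
 * EARLY, LATE -> UP    (both collapse; FULL is instead set at the
 *                       end of kmem_cache_init_late, see below)
 * FULL        -> FULL
 */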
@@ -1649,14 +1629,14 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	g_cpucache_up = EARLY;
+	slab_state = UP;
 }
 
 void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
-	g_cpucache_up = LATE;
+	slab_state = UP;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
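Once kmem_cache_init() sets slab_state = UP, slab_is_available() starts returning true, which is what lets early boot code choose between the slab allocator and the boot-time allocator. A hypothetical caller, for illustration only (boot_alloc is not a real kernel function):

/* Hypothetical early-boot allocation helper */
static void *boot_alloc(size_t size)
{
	if (slab_is_available())
		return kmalloc(size, GFP_NOWAIT);	/* slab is up; must not sleep yet */
	return alloc_bootmem(size);			/* fall back to the bootmem allocator */
}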
@@ -1668,6 +1648,9 @@ void __init kmem_cache_init_late(void)
 			BUG();
 	mutex_unlock(&cache_chain_mutex);
 
+	/* Done! */
+	slab_state = FULL;
+
 	/*
 	 * Register a cpu startup notifier callback that initializes
 	 * cpu_cache_get for all new cpus
@@ -1699,7 +1682,7 @@ static int __init cpucache_init(void)
 		start_cpu_timer(cpu);
 
 	/* Done! */
-	g_cpucache_up = FULL;
+	slab_state = FULL;
 	return 0;
 }
 __initcall(cpucache_init);
@@ -2167,10 +2150,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 
 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (g_cpucache_up >= LATE)
+	if (slab_state >= FULL)
 		return enable_cpucache(cachep, gfp);
 
-	if (g_cpucache_up == NONE) {
+	if (slab_state == DOWN) {
 		/*
 		 * Note: the first kmem_cache_create must create the cache
 		 * that's used by kmalloc(24), otherwise the creation of
@@ -2185,16 +2168,16 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		 */
 		set_up_list3s(cachep, SIZE_AC);
 		if (INDEX_AC == INDEX_L3)
-			g_cpucache_up = PARTIAL_L3;
+			slab_state = PARTIAL_L3;
 		else
-			g_cpucache_up = PARTIAL_AC;
+			slab_state = PARTIAL_ARRAYCACHE;
 	} else {
 		cachep->array[smp_processor_id()] =
 			kmalloc(sizeof(struct arraycache_init), gfp);
 
-		if (g_cpucache_up == PARTIAL_AC) {
+		if (slab_state == PARTIAL_ARRAYCACHE) {
 			set_up_list3s(cachep, SIZE_L3);
-			g_cpucache_up = PARTIAL_L3;
+			slab_state = PARTIAL_L3;
 		} else {
 			int node;
 			for_each_online_node(node) {
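Taken together, the two setup_cpu_cache() hunks encode SLAB's bootstrap state machine in terms of the shared states. A condensed editorial sketch of the progression (pseudocode, not the literal function body):

/* Condensed view of SLAB bootstrap under the shared slab_state */
if (slab_state >= FULL)
	return enable_cpucache(cachep, gfp);	/* normal path once booted */

if (slab_state == DOWN) {
	/* first cache: per-cpu arrays come from static bootstrap storage */
	slab_state = (INDEX_AC == INDEX_L3) ? PARTIAL_L3 : PARTIAL_ARRAYCACHE;
} else if (slab_state == PARTIAL_ARRAYCACHE) {
	/* arraycache cache exists, so list structures can now be kmalloc'ed */
	slab_state = PARTIAL_L3;
}
/* kmem_cache_init() then sets UP; kmem_cache_init_late() sets FULL */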