author	Glauber Costa <glommer@parallels.com>	2012-06-20 16:59:18 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-07-02 06:56:59 -0400
commit	a164f89628fa813a2b012ec033625e9e507c29bb (patch)
tree	5da295dece37926ab5da8d018373a80ab8388bb9 /mm/slab.c
parent	d97d476b1bb11e24268a6bac8214f9bc58716b45 (diff)
slab: move FULL state transition to an initcall
During kmem_cache_init_late(), we transition to the LATE state and, after some more work, to the FULL state, its last state.

This is quite different from slub, which only transitions to its last state (previously SYSFS) in a (late)initcall, after a lot more of the kernel is ready.

This means that in slab we have no way of taking actions that depend on the initialization of other pieces of the kernel that are supposed to start well after kmem_cache_init_late(), such as cgroups initialization.

To achieve more consistency in this behavior, this patch leaves kmem_cache_init_late() at the LATE state and moves the FULL transition to an initcall. In my analysis, setup_cpu_cache() should be happy to test for >= LATE instead of == FULL; it has also passed the tests I've made. We then only mark the FULL state after the reap timers are in place, meaning that no further setup is expected.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
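For context, g_cpucache_up is an ordered enum, so a ">=" comparison reads as "this stage of bootstrap or later". The standalone sketch below only illustrates that ordering property; the state names other than NONE, LATE and FULL are placeholders, not the exact mm/slab.c definition.

#include <stdio.h>

/* Placeholder bootstrap states in boot order; only NONE, LATE and FULL
 * appear in the patch, the others are illustrative stand-ins. */
enum cache_state { NONE, PARTIAL, EARLY, LATE, FULL };

static enum cache_state g_cpucache_up = NONE;

/* Mirrors the new check in setup_cpu_cache(): anything at LATE or
 * beyond (including FULL, set later by the initcall) takes the
 * enable_cpucache() path. */
static int ready_for_enable_cpucache(void)
{
	return g_cpucache_up >= LATE;
}

int main(void)
{
	g_cpucache_up = LATE;	/* as after kmem_cache_init_late() */
	printf("LATE ready: %d\n", ready_for_enable_cpucache());

	g_cpucache_up = FULL;	/* as after the cpucache_init() initcall */
	printf("FULL ready: %d\n", ready_for_enable_cpucache());
	return 0;
}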
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 8b7cb802a754..105f188d14a3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1668,9 +1668,6 @@ void __init kmem_cache_init_late(void)
 			BUG();
 	mutex_unlock(&cache_chain_mutex);
 
-	/* Done! */
-	g_cpucache_up = FULL;
-
 	/*
 	 * Register a cpu startup notifier callback that initializes
 	 * cpu_cache_get for all new cpus
@@ -1700,6 +1697,9 @@ static int __init cpucache_init(void)
 	 */
 	for_each_online_cpu(cpu)
 		start_cpu_timer(cpu);
+
+	/* Done! */
+	g_cpucache_up = FULL;
 	return 0;
 }
 __initcall(cpucache_init);
@@ -2167,7 +2167,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 
 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (g_cpucache_up == FULL)
+	if (g_cpucache_up >= LATE)
 		return enable_cpucache(cachep, gfp);
 
 	if (g_cpucache_up == NONE) {
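Assembled from the second hunk above, the tail of cpucache_init() reads roughly as follows after the patch. Lines not visible in the hunk (the function opening, the cpu declaration, and the loop comment) are paraphrased assumptions rather than quotes of mm/slab.c.

static int __init cpucache_init(void)
{
	int cpu;	/* assumed declaration, not shown in the hunk */

	/*
	 * Start the per-cpu reap timers on every online cpu
	 * (comment paraphrased; exact wording not shown in the hunk).
	 */
	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);

	/* Done! */
	g_cpucache_up = FULL;
	return 0;
}
__initcall(cpucache_init);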