Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	41
1 file changed, 30 insertions(+), 11 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index f46b65d124e5..18e3164de09a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -304,6 +304,12 @@ struct kmem_list3 {
 };
 
 /*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
+/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS	(3 * MAX_NUMNODES)
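
SLAB_GFP_BOOT_MASK is defined in the gfp header rather than in mm/slab.c, so its value does not appear in this diff; the comment above states the intent. Below is a stand-alone sketch of that masking idea, with bit values and names invented for the demo rather than taken from the kernel:

/*
 * Stand-alone sketch of the boot-time gfp masking idea; the bit values and
 * names here are invented for the example and are not kernel definitions.
 */
#include <assert.h>

typedef unsigned int gfp_t;

#define DEMO_GFP_WAIT	0x10u			/* "caller may sleep" bit (assumed) */
#define DEMO_GFP_KERNEL	(DEMO_GFP_WAIT | 0x01u)	/* assumed composition */

static gfp_t demo_boot_mask = (gfp_t)~DEMO_GFP_WAIT;	/* plays the role of slab_gfp_mask */

int main(void)
{
	/* Early in boot the mask strips the sleeping bit, so a GFP_KERNEL-style
	 * request can never cause the allocator to re-enable interrupts. */
	assert(!((DEMO_GFP_KERNEL & demo_boot_mask) & DEMO_GFP_WAIT));

	/* Once interrupts are on, the mask is widened and flags pass through. */
	demo_boot_mask = (gfp_t)~0u;
	assert((DEMO_GFP_KERNEL & demo_boot_mask) == DEMO_GFP_KERNEL);
	return 0;
}

The same single AND is what the __cache_alloc() hunks later in this patch apply on every allocation, which is why the mask can be lifted with a plain store once boot has progressed far enough.
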
@@ -753,6 +759,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -761,7 +768,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -1625,19 +1632,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
 
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
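
The hunk above ends kmem_cache_init() at the new EARLY state and moves both the gfp-mask lift and the head-array resizing into kmem_cache_init_late(), which the boot code is expected to call once interrupts are enabled (that call site lives in init/main.c, outside this diffstat). A minimal stand-alone sketch of the state progression the earlier slab_is_available() change depends on; the enum mirrors the patch, everything else is demo scaffolding:

#include <assert.h>

/* Mirrors the enum from the earlier hunk; the transitions below stand in
 * for kmem_cache_init() and kmem_cache_init_late(). */
static enum { NONE, PARTIAL_AC, PARTIAL_L3, EARLY, FULL } g_cpucache_up;

static int slab_is_available(void)
{
	return g_cpucache_up >= EARLY;
}

int main(void)
{
	assert(!slab_is_available());	/* before kmem_cache_init() */

	g_cpucache_up = EARLY;		/* end of kmem_cache_init() */
	assert(slab_is_available());	/* slab users may now allocate */

	g_cpucache_up = FULL;		/* end of kmem_cache_init_late() */
	assert(slab_is_available());	/* still true after the late phase */
	return 0;
}

Because EARLY is declared immediately below FULL, the ">= EARLY" test never becomes false again once the late phase runs, so callers cannot observe the allocator flipping back to unavailable.
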
@@ -2102,7 +2117,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		for_each_online_node(node) {
 			cachep->nodelists[node] =
 				kmalloc_node(sizeof(struct kmem_list3),
-						GFP_KERNEL, node);
+						gfp, node);
 			BUG_ON(!cachep->nodelists[node]);
 			kmem_list3_init(cachep->nodelists[node]);
 		}
@@ -3354,6 +3369,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
@@ -3434,6 +3451,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))