author     Pekka Enberg <penberg@cs.helsinki.fi>   2009-06-12 08:58:59 -0400
committer  Pekka Enberg <penberg@cs.helsinki.fi>   2009-06-12 11:53:58 -0400
commit     8429db5c6336083594036c30f49401405d536911 (patch)
tree       00ab1aa8e222f384bac0e66b7da195279b1c7698 /mm
parent     7e85ee0c1d15ca5f8bff0f514f158eba1742dd87 (diff)
slab: setup cpu caches later on when interrupts are enabled
Fixes the following boot-time warning:

  [    0.000000] ------------[ cut here ]------------
  [    0.000000] WARNING: at kernel/smp.c:369 smp_call_function_many+0x56/0x1bc()
  [    0.000000] Hardware name:
  [    0.000000] Modules linked in:
  [    0.000000] Pid: 0, comm: swapper Not tainted 2.6.30 #492
  [    0.000000] Call Trace:
  [    0.000000]  [<ffffffff8149e021>] ? _spin_unlock+0x4f/0x5c
  [    0.000000]  [<ffffffff8108f11b>] ? smp_call_function_many+0x56/0x1bc
  [    0.000000]  [<ffffffff81061764>] warn_slowpath_common+0x7c/0xa9
  [    0.000000]  [<ffffffff810617a5>] warn_slowpath_null+0x14/0x16
  [    0.000000]  [<ffffffff8108f11b>] smp_call_function_many+0x56/0x1bc
  [    0.000000]  [<ffffffff810f3e00>] ? do_ccupdate_local+0x0/0x54
  [    0.000000]  [<ffffffff810f3e00>] ? do_ccupdate_local+0x0/0x54
  [    0.000000]  [<ffffffff8108f2be>] smp_call_function+0x3d/0x68
  [    0.000000]  [<ffffffff810f3e00>] ? do_ccupdate_local+0x0/0x54
  [    0.000000]  [<ffffffff81066fd8>] on_each_cpu+0x31/0x7c
  [    0.000000]  [<ffffffff810f64f5>] do_tune_cpucache+0x119/0x454
  [    0.000000]  [<ffffffff81087080>] ? lockdep_init_map+0x94/0x10b
  [    0.000000]  [<ffffffff818133b0>] ? kmem_cache_init+0x421/0x593
  [    0.000000]  [<ffffffff810f69cf>] enable_cpucache+0x68/0xad
  [    0.000000]  [<ffffffff818133c3>] kmem_cache_init+0x434/0x593
  [    0.000000]  [<ffffffff8180987c>] ? mem_init+0x156/0x161
  [    0.000000]  [<ffffffff817f8aae>] start_kernel+0x1cc/0x3b9
  [    0.000000]  [<ffffffff817f829a>] x86_64_start_reservations+0xaa/0xae
  [    0.000000]  [<ffffffff817f837f>] x86_64_start_kernel+0xe1/0xe8
  [    0.000000] ---[ end trace 4eaa2a86a8e2da22 ]---

Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
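Background: the trace shows kmem_cache_init() -> enable_cpucache() -> do_tune_cpucache() -> on_each_cpu() -> smp_call_function() running while start_kernel() still has interrupts disabled, which is exactly what kernel/smp.c warns about. The patch introduces an EARLY state so kmem_cache_init() stops short of resizing the per-CPU head arrays and defers that work to kmem_cache_init_late(), whose call site was added by the parent commit (7e85ee0c1d15) after interrupts are enabled. The sketch below is a heavily abbreviated illustration of that call ordering, not the real body of start_kernel(); everything not shown is elided.

asmlinkage void __init start_kernel(void)
{
	/*
	 * ... early setup with interrupts disabled; kmem_cache_init()
	 * runs in here and leaves g_cpucache_up == EARLY, so
	 * slab_is_available() is already true ...
	 */

	local_irq_enable();

	/*
	 * Interrupts are on: kmem_cache_init_late() may widen
	 * slab_gfp_mask and resize the head arrays via
	 * enable_cpucache() -> do_tune_cpucache() -> on_each_cpu().
	 */
	kmem_cache_init_late();

	/* ... console_init() and the rest of boot ... */
}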
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  37
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 453efcb1c980..18e3164de09a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -759,6 +759,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -767,7 +768,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -1631,19 +1632,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
 
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
@@ -1660,14 +1669,6 @@ void __init kmem_cache_init(void)
 	 */
 }
 
-void __init kmem_cache_init_late(void)
-{
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
-}
-
 static int __init cpucache_init(void)
 {
 	int cpu;
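One consequence of the slab_is_available() change above is that boot code can allocate from the slab as soon as kmem_cache_init() has finished (g_cpucache_up >= EARLY) instead of waiting for the FULL state; the slab layer keeps masking allocation flags through slab_gfp_mask until kmem_cache_init_late() runs. A minimal, hypothetical caller to illustrate that -- early_table_alloc() and its sizes are made up for illustration and are not part of this patch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical early-boot helper -- illustration only, not from this patch. */
static int __init early_table_alloc(void)
{
	void *table;

	/* True once g_cpucache_up >= EARLY, i.e. right after kmem_cache_init(). */
	if (!slab_is_available())
		return -EAGAIN;

	/*
	 * GFP_NOWAIT makes the "no sleeping yet" intent explicit; until
	 * kmem_cache_init_late() widens slab_gfp_mask, the slab layer
	 * masks the flags anyway.
	 */
	table = kmalloc(1024, GFP_NOWAIT);
	if (!table)
		return -ENOMEM;

	/* ... use the table ... */

	kfree(table);
	return 0;
}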