about · summary · refs · log · tree · commit · diff · stats
path: root/mm/slub.c
diff options
context:
space:
mode:
author    root <root@programming.kicks-ass.net>  2008-01-08 02:20:28 -0500
committer Christoph Lameter <clameter@sgi.com>   2008-02-04 13:56:02 -0500
commit    ba84c73c7ae21fc891a3c2576fa3be42752fce53 (patch)
tree      d6e569ad26cf604fc16617388dd314e4b5e47fcb /mm/slub.c
parent    064287807c9dd64688084d34c6748a326b5f3ec8 (diff)
SLUB: Do not upset lockdep
inconsistent {softirq-on-W} -> {in-softirq-W} usage.
swapper/0 [HC0[0]:SC1[1]:HE0:SE0] takes:
 (&n->list_lock){-+..}, at: [<ffffffff802935c1>] add_partial+0x31/0xa0
{softirq-on-W} state was registered at:
  [<ffffffff80259fb8>] __lock_acquire+0x3e8/0x1140
  [<ffffffff80259838>] debug_check_no_locks_freed+0x188/0x1a0
  [<ffffffff8025ad65>] lock_acquire+0x55/0x70
  [<ffffffff802935c1>] add_partial+0x31/0xa0
  [<ffffffff805c76de>] _spin_lock+0x1e/0x30
  [<ffffffff802935c1>] add_partial+0x31/0xa0
  [<ffffffff80296f9c>] kmem_cache_open+0x1cc/0x330
  [<ffffffff805c7984>] _spin_unlock_irq+0x24/0x30
  [<ffffffff802974f4>] create_kmalloc_cache+0x64/0xf0
  [<ffffffff80295640>] init_alloc_cpu_cpu+0x70/0x90
  [<ffffffff8080ada5>] kmem_cache_init+0x65/0x1d0
  [<ffffffff807f1b4e>] start_kernel+0x23e/0x350
  [<ffffffff807f112d>] _sinittext+0x12d/0x140
  [<ffffffffffffffff>] 0xffffffffffffffff

This change isn't really necessary for correctness, but it prevents
lockdep from getting upset and then disabling itself.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 8 ++++++++
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a660834416ac..3f056677fa8f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1999,6 +1999,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 {
 	struct page *page;
 	struct kmem_cache_node *n;
+	unsigned long flags;
 
 	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
 
@@ -2023,7 +2024,14 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 #endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
+	/*
+	 * lockdep requires consistent irq usage for each lock
+	 * so even though there cannot be a race this early in
+	 * the boot sequence, we still disable irqs.
+	 */
+	local_irq_save(flags);
 	add_partial(n, page, 0);
+	local_irq_restore(flags);
 	return n;
 }
 