author		Tejun Heo <tj@kernel.org>	2009-08-14 01:41:02 -0400
committer	Tejun Heo <tj@kernel.org>	2009-08-14 01:45:31 -0400
commit		384be2b18a5f9475eab9ca2bdfa95cc1a04ef59c (patch)
tree		04c93f391a1b65c8bf8d7ba8643c07d26c26590a	/mm/slab.c
parent		a76761b621bcd8336065c4fe3a74f046858bc34c (diff)
parent		142d44b0dd6741a64a7bdbe029110e7c1dcf1d23 (diff)
Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
	arch/sparc/kernel/smp_64.c
	arch/x86/kernel/cpu/perf_counter.c
	arch/x86/kernel/setup_percpu.c
	drivers/cpufreq/cpufreq_ondemand.c
	mm/percpu.c

Conflicts in core and arch percpu code are mostly from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161, which substituted many
num_possible_cpus() calls with nr_cpu_ids.  As the for-next branch has
moved all the first chunk allocators into mm/percpu.c, the changes are
moved from arch code to mm/percpu.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
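The substitution matters because cpu_possible_mask may be sparse:
num_possible_cpus() is the number of set bits in the mask, while
nr_cpu_ids is the highest possible CPU id plus one, which is the value
needed when sizing or indexing per-cpu arrays.  A minimal userspace
sketch of the distinction (the mask value and model_* helpers are
hypothetical stand-ins, not kernel code):

#include <stdio.h>

/*
 * Model a sparse possible-CPU mask with CPUs 0, 2 and 3 present:
 * num_possible_cpus() is the popcount of the mask, nr_cpu_ids is
 * the highest possible CPU id plus one.
 */
#define POSSIBLE_MASK 0x0000000dU	/* bits 0, 2, 3 set */

static unsigned int model_num_possible_cpus(void)
{
	return (unsigned int)__builtin_popcount(POSSIBLE_MASK);
}

static unsigned int model_nr_cpu_ids(void)
{
	return 32u - (unsigned int)__builtin_clz(POSSIBLE_MASK);
}

int main(void)
{
	/* Prints 3 and 4: an array indexed by cpu id needs
	 * nr_cpu_ids slots, not num_possible_cpus() slots, or
	 * indexing cpu 3 would run past the end of the array. */
	printf("num_possible_cpus = %u, nr_cpu_ids = %u\n",
	       model_num_possible_cpus(), model_nr_cpu_ids());
	return 0;
}

Built with gcc, this prints num_possible_cpus = 3, nr_cpu_ids = 4 for
the sparse mask {0, 2, 3}.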
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index e74a16e4ced6..7b5d4deacfcd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1544,9 +1544,6 @@ void __init kmem_cache_init(void)
 	}
 
 	g_cpucache_up = EARLY;
-
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
 }
 
 void __init kmem_cache_init_late(void)
@@ -1563,6 +1560,9 @@ void __init kmem_cache_init_late(void)
 	/* Done! */
 	g_cpucache_up = FULL;
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
 	/*
 	 * Register a cpu startup notifier callback that initializes
 	 * cpu_cache_get for all new cpus
@@ -2547,7 +2547,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 	}
 
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		synchronize_rcu();
+		rcu_barrier();
 
 	__kmem_cache_destroy(cachep);
 	mutex_unlock(&cache_chain_mutex);
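The first two hunks move the init_lock_keys() lockdep annotation out of
kmem_cache_init() and into kmem_cache_init_late(), so it runs only once
the malloc caches are fully set up.  A rough userspace sketch of the
two-phase ordering the move enforces (all names, and the clobbering
behaviour shown, are hypothetical illustration, not the kernel's actual
mechanics):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical model: an annotation applied before setup completes
 * would be clobbered when the late phase rebuilds the object. */
struct cache_model {
	bool ready;		/* completed by the late init phase */
	int  lock_class;	/* 0 = default, 1 = annotated */
};

static struct cache_model malloc_cache;

static void annotate_model(void)	/* init_lock_keys() stand-in */
{
	malloc_cache.lock_class = 1;
}

static void init_early_model(void)	/* kmem_cache_init() analog */
{
	malloc_cache.ready = false;
	/* annotating here would be undone by the rebuild below */
}

static void init_late_model(void)	/* kmem_cache_init_late() analog */
{
	malloc_cache = (struct cache_model){ .ready = true };
	annotate_model();		/* safe: runs after the rebuild */
}

int main(void)
{
	init_early_model();
	init_late_model();
	printf("ready=%d class=%d\n", malloc_cache.ready,
	       malloc_cache.lock_class);
	return 0;
}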
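The last hunk changes kmem_cache_destroy() to use rcu_barrier() instead
of synchronize_rcu() for SLAB_DESTROY_BY_RCU caches: synchronize_rcu()
only waits for a grace period to elapse, whereas rcu_barrier() also
waits for all pending call_rcu() callbacks to finish executing, so
slabs queued for RCU-deferred freeing cannot still be in flight when
the cache itself is torn down.  A small pthreads model of that
difference (all model_* names are hypothetical stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int pending_callbacks;	/* queued via call_rcu(), not yet run */

static void model_call_rcu(void)	/* queue a deferred free */
{
	pthread_mutex_lock(&lock);
	pending_callbacks++;
	pthread_mutex_unlock(&lock);
}

static void model_run_callback(void)	/* callback eventually executes */
{
	pthread_mutex_lock(&lock);
	pending_callbacks--;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void model_synchronize_rcu(void)
{
	/* Waits out a grace period only; queued callbacks may still
	 * be pending when this returns. */
}

static void model_rcu_barrier(void)
{
	/* Additionally waits until every queued callback has run. */
	pthread_mutex_lock(&lock);
	while (pending_callbacks > 0)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	model_call_rcu();
	model_synchronize_rcu();  /* freeing the cache here would race */
	model_run_callback();
	model_rcu_barrier();	  /* safe: all callbacks have finished */
	printf("pending_callbacks = %d\n", pending_callbacks);
	return 0;
}

In the real kernel the callbacks run from RCU's own machinery; the
model only captures why waiting for a grace period alone is not enough
before __kmem_cache_destroy().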