commit     67b6c900dc6dce65478d6fe37b60cd1e65bb80c2 (patch)
author     Dave Hansen <dave.hansen@linux.intel.com>  2014-01-24 10:20:23 -0500
committer  Pekka Enberg <penberg@kernel.org>          2014-01-31 06:41:26 -0500
tree       c6c1b99a3483cd94481d72a35d16fe4243c329cc   /mm/slub.c
parent     433a91ff5fa19e3eb70b12f7056f234aebd09ac2 (diff)
mm: slub: work around unneeded lockdep warning
The slub code does some setup during early boot in early_kmem_cache_node_alloc() with some local data. There is no possible way that another CPU can see this data, so the slub code does not bother to lock it. However, some new lockdep asserts check that add_partial() _always_ has the list_lock held.

Just add the locking, even though it is technically unnecessary.

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
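For context, the kind of assertion the commit message refers to is lockdep_assert_held() at the top of add_partial(). The sketch below is an illustration only: the function body is elided and its exact shape at this point in the tree is assumed, not quoted from the patch.

	/* Sketch only: add_partial() body elided, shape assumed. */
	static void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
	{
		/*
		 * Fires a lockdep warning whenever the caller does not
		 * hold n->list_lock, even if no other CPU could possibly
		 * touch the partial list at this point.
		 */
		lockdep_assert_held(&n->list_lock);

		/* ... partial-list manipulation elided ... */
	}

To satisfy that assertion, early_kmem_cache_node_alloc() simply wraps its add_partial() call in spin_lock()/spin_unlock() of n->list_lock, as the diff below shows.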
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a99e9e67c60e..432bddf484bb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2890,7 +2890,13 @@ static void early_kmem_cache_node_alloc(int node)
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
+	/*
+	 * the lock is for lockdep's sake, not for any actual
+	 * race protection
+	 */
+	spin_lock(&n->list_lock);
 	add_partial(n, page, DEACTIVATE_TO_HEAD);
+	spin_unlock(&n->list_lock);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)