author	Linus Torvalds <torvalds@linux-foundation.org>	2011-08-04 22:44:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-08-04 22:44:04 -0400
commit	f03683b8fb7e03862d2f1366a16c1b01732a5741 (patch)
tree	af8143877e0d56e6a8206d937027a1fd8d8a9ec1 /mm
parent	7f3bf7cd348cead84f8027b32aa30ea49fa64df5 (diff)
parent	30765b92ada267c5395fc788623cb15233276f5c (diff)
Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  slab, lockdep: Annotate the locks before using them
  lockdep: Clear whole lockdep_map on initialization
  slab, lockdep: Annotate slab -> rcu -> debug_object -> slab
  lockdep: Fix up warning
  lockdep: Fix trace_hardirqs_on_caller()
  futex: Fix regression with read only mappings
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	92
1 file changed, 71 insertions(+), 21 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 95947400702b..6d90a091fdca 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -622,6 +622,51 @@ int slab_is_available(void)
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
+static struct lock_class_key debugobj_l3_key;
+static struct lock_class_key debugobj_alc_key;
+
+static void slab_set_lock_classes(struct kmem_cache *cachep,
+	struct lock_class_key *l3_key, struct lock_class_key *alc_key,
+	int q)
+{
+	struct array_cache **alc;
+	struct kmem_list3 *l3;
+	int r;
+
+	l3 = cachep->nodelists[q];
+	if (!l3)
+		return;
+
+	lockdep_set_class(&l3->list_lock, l3_key);
+	alc = l3->alien;
+	/*
+	 * FIXME: This check for BAD_ALIEN_MAGIC
+	 * should go away when common slab code is taught to
+	 * work even without alien caches.
+	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+	 * for alloc_alien_cache,
+	 */
+	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+		return;
+	for_each_node(r) {
+		if (alc[r])
+			lockdep_set_class(&alc[r]->lock, alc_key);
+	}
+}
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+	int node;
+
+	for_each_online_node(node)
+		slab_set_debugobj_lock_classes_node(cachep, node);
+}
+
 static void init_node_lock_keys(int q)
 {
 	struct cache_sizes *s = malloc_sizes;
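
[Annotation] The new slab_set_lock_classes() above re-keys a cache's per-node list_lock and alien-cache locks with a caller-chosen lockdep class; the debugobj_* variants give the caches used by the debug-objects code their own class, which is what lets lockdep tell the legitimate slab -> rcu -> debug_object -> slab chain apart from genuine recursion on a single lock class. A minimal, self-contained sketch of the same lockdep_set_class() idiom (the demo_* names are illustrative, not from this patch):

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	/* One static key per logical lock class; names are hypothetical. */
	static struct lock_class_key demo_outer_key;
	static struct lock_class_key demo_inner_key;

	static DEFINE_SPINLOCK(demo_outer);
	static DEFINE_SPINLOCK(demo_inner);

	static void demo_annotate(void)
	{
		/*
		 * Re-key the two locks so lockdep tracks them as distinct
		 * classes; taking demo_inner while holding demo_outer is
		 * then not reported as recursive locking of one class.
		 */
		lockdep_set_class(&demo_outer, &demo_outer_key);
		lockdep_set_class(&demo_inner, &demo_inner_key);
	}
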
@@ -630,29 +675,14 @@ static void init_node_lock_keys(int q)
 		return;
 
 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-		struct array_cache **alc;
 		struct kmem_list3 *l3;
-		int r;
 
 		l3 = s->cs_cachep->nodelists[q];
 		if (!l3 || OFF_SLAB(s->cs_cachep))
 			continue;
-		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-		alc = l3->alien;
-		/*
-		 * FIXME: This check for BAD_ALIEN_MAGIC
-		 * should go away when common slab code is taught to
-		 * work even without alien caches.
-		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-		 * for alloc_alien_cache,
-		 */
-		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-			continue;
-		for_each_node(r) {
-			if (alc[r])
-				lockdep_set_class(&alc[r]->lock,
-						&on_slab_alc_key);
-		}
+
+		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+				&on_slab_alc_key, q);
 	}
 }
 
@@ -671,6 +701,14 @@ static void init_node_lock_keys(int q)
 static inline void init_lock_keys(void)
 {
 }
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+}
 #endif
 
 /*
@@ -1264,6 +1302,8 @@ static int __cpuinit cpuup_prepare(long cpu)
 		spin_unlock_irq(&l3->list_lock);
 		kfree(shared);
 		free_alien_cache(alien);
+		if (cachep->flags & SLAB_DEBUG_OBJECTS)
+			slab_set_debugobj_lock_classes_node(cachep, node);
 	}
 	init_node_lock_keys(node);
 
@@ -1626,6 +1666,9 @@ void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
 	list_for_each_entry(cachep, &cache_chain, next)
@@ -1636,9 +1679,6 @@ void __init kmem_cache_init_late(void)
 	/* Done! */
 	g_cpucache_up = FULL;
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/*
 	 * Register a cpu startup notifier callback that initializes
 	 * cpu_cache_get for all new cpus
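
[Annotation] These two hunks move init_lock_keys() from the end of kmem_cache_init_late() to before step 6: resizing the head arrays takes each cache's l3->list_lock, so annotating first means the dependencies lockdep records during the resize are attributed to the final class rather than the default one (this is the "slab, lockdep: Annotate the locks before using them" commit in the merge). The rule in miniature, with hypothetical names:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	static struct lock_class_key demo_key;
	static DEFINE_SPINLOCK(demo_lock);

	static void demo_init(void)
	{
		/* Annotate first, so lockdep sees the final class ... */
		lockdep_set_class(&demo_lock, &demo_key);

		/* ... before the lock's first acquisition is recorded. */
		spin_lock(&demo_lock);
		spin_unlock(&demo_lock);
	}
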
@@ -2426,6 +2466,16 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		goto oops;
 	}
 
+	if (flags & SLAB_DEBUG_OBJECTS) {
+		/*
+		 * Would deadlock through slab_destroy()->call_rcu()->
+		 * debug_object_activate()->kmem_cache_alloc().
+		 */
+		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
+
+		slab_set_debugobj_lock_classes(cachep);
+	}
+
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->next, &cache_chain);
 oops:
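
[Annotation] The WARN_ON_ONCE() above exists because the two flags compose badly: with SLAB_DESTROY_BY_RCU, slab_destroy() frees slabs via call_rcu(), and with SLAB_DEBUG_OBJECTS, activating that rcu_head allocates a tracking object, re-entering the allocator whose locks are already involved. Per the comment in the patch:

	slab_destroy()
	  -> call_rcu()			/* SLAB_DESTROY_BY_RCU */
	    -> debug_object_activate()	/* SLAB_DEBUG_OBJECTS */
	      -> kmem_cache_alloc()	/* back into slab: deadlock */

A hypothetical caller that would now trip the warning (demo_* names are invented; the 3.0-era kmem_cache_create() signature is assumed):

	#include <linux/slab.h>
	#include <linux/rcupdate.h>
	#include <linux/init.h>

	struct demo_obj {
		struct rcu_head rcu;
		int payload;
	};

	static struct kmem_cache *demo_cache;

	static int __init demo_init(void)
	{
		/* Combining these two flags triggers the new WARN_ON_ONCE(). */
		demo_cache = kmem_cache_create("demo_cache",
				sizeof(struct demo_obj), 0,
				SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS, NULL);
		return demo_cache ? 0 : -ENOMEM;
	}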