author		Linus Torvalds <torvalds@linux-foundation.org>	2011-08-04 22:44:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-08-04 22:44:04 -0400
commit		f03683b8fb7e03862d2f1366a16c1b01732a5741 (patch)
tree		af8143877e0d56e6a8206d937027a1fd8d8a9ec1 /mm
parent		7f3bf7cd348cead84f8027b32aa30ea49fa64df5 (diff)
parent		30765b92ada267c5395fc788623cb15233276f5c (diff)
Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
slab, lockdep: Annotate the locks before using them
lockdep: Clear whole lockdep_map on initialization
slab, lockdep: Annotate slab -> rcu -> debug_object -> slab
lockdep: Fix up warning
lockdep: Fix trace_hardirqs_on_caller()
futex: Fix regression with read only mappings
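
Both slab/lockdep annotation patches in this pull build on the same lockdep primitive: every lock belongs to a class, which defaults to the call site that initialized it, and lockdep_set_class() reassigns it using a static struct lock_class_key. A minimal sketch of that primitive (illustrative names, not code from these patches):

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    /* One static key per intended class; lock_class_key objects must
     * have static storage, never the stack. */
    static struct lock_class_key demo_parent_key;
    static struct lock_class_key demo_child_key;

    struct demo_node {
            spinlock_t lock;
    };

    static void demo_node_init(struct demo_node *n, struct lock_class_key *key)
    {
            spin_lock_init(&n->lock);         /* default class: this call site */
            lockdep_set_class(&n->lock, key); /* reclassify before first use */
    }

Two demo_node locks initialized here share one class by default, so nesting one inside the other reads as self-recursion to lockdep; passing distinct keys turns the same nesting into an ordinary parent -> child dependency. The slab hunks below apply exactly this idea to the per-node list_lock and alien-cache locks.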
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	92
1 file changed, 71 insertions(+), 21 deletions(-)
@@ -622,6 +622,51 @@ int slab_is_available(void)
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
+static struct lock_class_key debugobj_l3_key;
+static struct lock_class_key debugobj_alc_key;
+
+static void slab_set_lock_classes(struct kmem_cache *cachep,
+	struct lock_class_key *l3_key, struct lock_class_key *alc_key,
+	int q)
+{
+	struct array_cache **alc;
+	struct kmem_list3 *l3;
+	int r;
+
+	l3 = cachep->nodelists[q];
+	if (!l3)
+		return;
+
+	lockdep_set_class(&l3->list_lock, l3_key);
+	alc = l3->alien;
+	/*
+	 * FIXME: This check for BAD_ALIEN_MAGIC
+	 * should go away when common slab code is taught to
+	 * work even without alien caches.
+	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+	 * for alloc_alien_cache,
+	 */
+	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+		return;
+	for_each_node(r) {
+		if (alc[r])
+			lockdep_set_class(&alc[r]->lock, alc_key);
+	}
+}
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+	int node;
+
+	for_each_online_node(node)
+		slab_set_debugobj_lock_classes_node(cachep, node);
+}
+
 static void init_node_lock_keys(int q)
 {
 	struct cache_sizes *s = malloc_sizes;
@@ -630,29 +675,14 @@ static void init_node_lock_keys(int q)
 		return;
 
 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-		struct array_cache **alc;
 		struct kmem_list3 *l3;
-		int r;
 
 		l3 = s->cs_cachep->nodelists[q];
 		if (!l3 || OFF_SLAB(s->cs_cachep))
 			continue;
-		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-		alc = l3->alien;
-		/*
-		 * FIXME: This check for BAD_ALIEN_MAGIC
-		 * should go away when common slab code is taught to
-		 * work even without alien caches.
-		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-		 * for alloc_alien_cache,
-		 */
-		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-			continue;
-		for_each_node(r) {
-			if (alc[r])
-				lockdep_set_class(&alc[r]->lock,
-						  &on_slab_alc_key);
-		}
+
+		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+				&on_slab_alc_key, q);
 	}
 }
 
@@ -671,6 +701,14 @@ static void init_node_lock_keys(int q)
 static inline void init_lock_keys(void)
 {
 }
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+}
 #endif
 
 /*
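
The empty stubs in this hunk follow the usual kernel configuration idiom: the working versions are compiled under CONFIG_LOCKDEP and no-op versions otherwise, so the call sites added below (cpuup_prepare() and kmem_cache_create()) stay free of #ifdefs. The shape of the idiom, sketched:

    #ifdef CONFIG_LOCKDEP
    static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
    {
            /* real annotation work */
    }
    #else
    static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
    {
            /* compiles away; callers need no guards */
    }
    #endif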
@@ -1264,6 +1302,8 @@ static int __cpuinit cpuup_prepare(long cpu)
 		spin_unlock_irq(&l3->list_lock);
 		kfree(shared);
 		free_alien_cache(alien);
+		if (cachep->flags & SLAB_DEBUG_OBJECTS)
+			slab_set_debugobj_lock_classes_node(cachep, node);
 	}
 	init_node_lock_keys(node);
 
@@ -1626,6 +1666,9 @@ void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
 	list_for_each_entry(cachep, &cache_chain, next)
@@ -1636,9 +1679,6 @@ void __init kmem_cache_init_late(void)
 	/* Done! */
 	g_cpucache_up = FULL;
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/*
 	 * Register a cpu startup notifier callback that initializes
 	 * cpu_cache_get for all new cpus
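
These two hunks are a single move: init_lock_keys() now runs at the top of kmem_cache_init_late(), before the loop that resizes the head arrays, instead of after it. The resize pass already takes each cache's l3->list_lock, and lockdep ties recorded dependencies to whatever class a lock has at acquisition time, so annotating afterwards leaves the early chains filed under the default class. An illustrative sketch of the ordering pitfall (names are made up):

    static struct lock_class_key demo_key;
    static DEFINE_SPINLOCK(demo_lock);

    static void demo_wrong_order(void)
    {
            spin_lock(&demo_lock);   /* first use: chain recorded under the
                                        default (init-site) class */
            spin_unlock(&demo_lock);

            /* Too late: the dependency above stays with the old class,
             * so later reports and orderings become inconsistent. */
            lockdep_set_class(&demo_lock, &demo_key);
    }

Moving the annotation ahead of first use, as the hunks above do for the malloc caches, avoids that split.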
@@ -2426,6 +2466,16 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		goto oops;
 	}
 
+	if (flags & SLAB_DEBUG_OBJECTS) {
+		/*
+		 * Would deadlock through slab_destroy()->call_rcu()->
+		 * debug_object_activate()->kmem_cache_alloc().
+		 */
+		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
+
+		slab_set_debugobj_lock_classes(cachep);
+	}
+
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->next, &cache_chain);
 oops:
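
The new kmem_cache_create() check covers the one case the annotations cannot fix, because it is a real deadlock rather than a false positive: with SLAB_DESTROY_BY_RCU, slab_destroy() defers the free through call_rcu(), and with SLAB_DEBUG_OBJECTS, activating the debug object for that RCU callback can allocate from the slab allocator again, mid-teardown. Hence WARN_ON_ONCE() on the flag combination instead of a lock-class annotation. A sketch of a cache creation that would trip the warning (struct and names are illustrative; in-tree, SLAB_DEBUG_OBJECTS is intended for the caches backing the debugobjects pool):

    #include <linux/slab.h>
    #include <linux/rcupdate.h>
    #include <linux/init.h>

    struct demo_obj {
            struct rcu_head rcu;
            int payload;
    };

    static struct kmem_cache *demo_cachep;

    static int __init demo_init(void)
    {
            /* Trips the new WARN_ON_ONCE(): freeing would recurse via
             * slab_destroy() -> call_rcu() -> debug_object_activate()
             * -> kmem_cache_alloc(). */
            demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
                                            0, SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS,
                                            NULL);
            return demo_cachep ? 0 : -ENOMEM;
    }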