author | Ravikiran G Thirumalai <kiran@scalex86.org> | 2006-09-26 02:31:38 -0400
---|---|---
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-26 11:48:50 -0400
commit | 056c62418cc639bf2fe962c6a6ee56054b838bc7 (patch) |
tree | 1c46080d82b43e406c6475199b9e171c2ea1cd6b /mm/slab.c |
parent | 2ed3a4ef95ef1a13a424378c34ebd9b7e593f212 (diff) |
[PATCH] slab: fix lockdep warnings
Place the alien array cache locks of on-slab malloc caches in a separate
lockdep class. This avoids false positives from lockdep.
[akpm@osdl.org: build fix]
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
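
As context for the fix, here is a minimal sketch of the lockdep technique the patch relies on; the `demo_*` names are hypothetical, not from slab.c. Locks initialized at the same call site share one lockdep class, so legitimately nesting two such locks looks to lockdep like recursive locking; `lockdep_set_class()` moves each locking role into its own `struct lock_class_key`:

```c
#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* One static key per locking role (cf. on_slab_l3_key/on_slab_alc_key). */
static struct lock_class_key demo_outer_key;	/* hypothetical */
static struct lock_class_key demo_inner_key;	/* hypothetical */

struct demo_cache {				/* hypothetical */
	spinlock_t lock;
};

static void demo_cache_init(struct demo_cache *c)
{
	/* Single init site: every demo_cache lock gets its class here,
	 * which is what makes nested acquisition a lockdep false positive. */
	spin_lock_init(&c->lock);
}

static void demo_nest(struct demo_cache *outer, struct demo_cache *inner)
{
	/* Re-class the two roles so lockdep can tell them apart. */
	lockdep_set_class(&outer->lock, &demo_outer_key);
	lockdep_set_class(&inner->lock, &demo_inner_key);

	spin_lock(&outer->lock);
	spin_lock(&inner->lock);	/* distinct class: no warning */
	spin_unlock(&inner->lock);
	spin_unlock(&outer->lock);
}
```

In the patch itself the two roles are the per-node kmem_list3 list_lock and the alien array-cache locks of the malloc caches, annotated once from init_lock_keys() at the end of kmem_cache_init().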
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 57
1 file changed, 43 insertions(+), 14 deletions(-)
@@ -674,6 +674,8 @@ static struct kmem_cache cache_cache = {
 #endif
 };
 
+#define BAD_ALIEN_MAGIC 0x01020304ul
+
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -682,29 +684,53 @@ static struct kmem_cache cache_cache = {
  * The locking for this is tricky in that it nests within the locks
  * of all other slabs in a few places; to deal with this special
  * locking we put on-slab caches into a separate lock-class.
+ *
+ * We set lock class for alien array caches which are up during init.
+ * The lock annotation will be lost if all cpus of a node goes down and
+ * then comes back up during hotplug
  */
-static struct lock_class_key on_slab_key;
+static struct lock_class_key on_slab_l3_key;
+static struct lock_class_key on_slab_alc_key;
+
+static inline void init_lock_keys(void)
 
-static inline void init_lock_keys(struct cache_sizes *s)
 {
 	int q;
+	struct cache_sizes *s = malloc_sizes;
 
-	for (q = 0; q < MAX_NUMNODES; q++) {
-		if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep))
-			continue;
-		lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock,
-				  &on_slab_key);
+	while (s->cs_size != ULONG_MAX) {
+		for_each_node(q) {
+			struct array_cache **alc;
+			int r;
+			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
+			if (!l3 || OFF_SLAB(s->cs_cachep))
+				continue;
+			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+			alc = l3->alien;
+			/*
+			 * FIXME: This check for BAD_ALIEN_MAGIC
+			 * should go away when common slab code is taught to
+			 * work even without alien caches.
+			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+			 * for alloc_alien_cache,
+			 */
+			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+				continue;
+			for_each_node(r) {
+				if (alc[r])
+					lockdep_set_class(&alc[r]->lock,
+						&on_slab_alc_key);
+			}
+		}
+		s++;
 	}
 }
-
 #else
-static inline void init_lock_keys(struct cache_sizes *s)
+static inline void init_lock_keys(void)
 {
 }
 #endif
 
-
-
 /* Guard access to the cache-chain. */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
@@ -1091,7 +1117,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
-	return (struct array_cache **) 0x01020304ul;
+	return (struct array_cache **)BAD_ALIEN_MAGIC;
 }
 
 static inline void free_alien_cache(struct array_cache **ac_ptr)
@@ -1421,7 +1447,6 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 		}
-		init_lock_keys(sizes);
 
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
@@ -1495,6 +1520,10 @@ void __init kmem_cache_init(void)
 		mutex_unlock(&cache_chain_mutex);
 	}
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
+
 	/* Done! */
 	g_cpucache_up = FULL;
 
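
A side note on the BAD_ALIEN_MAGIC hunk: the `!CONFIG_NUMA` stub of alloc_alien_cache() returns a non-NULL poison pointer, and the new init_lock_keys() must check for it before walking the alien caches. A minimal sketch of that sentinel-pointer pattern, with hypothetical `demo_*` names standing in for the slab internals:

```c
/* A recognizable non-NULL poison: an accidental dereference faults
 * loudly instead of silently reading through a NULL check. */
#define DEMO_BAD_MAGIC 0x01020304ul	/* hypothetical name */

struct demo_entry;			/* opaque; never dereferenced */

/* Stub for configurations where no table exists (cf. !CONFIG_NUMA). */
static struct demo_entry **demo_alloc_table(void)
{
	return (struct demo_entry **)DEMO_BAD_MAGIC;
}

/* Every informed consumer must treat the sentinel like "no table". */
static int demo_table_usable(struct demo_entry **tbl)
{
	return tbl && (unsigned long)tbl != DEMO_BAD_MAGIC;
}
```

The FIXME added in init_lock_keys() records the cost of this pattern: every consumer has to know the magic value, which is why the comment asks for the common slab code to eventually work without alien caches at all.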