about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMatthew Wilcox <mawilcox@microsoft.com>2018-01-17 10:14:12 -0500
committerThomas Gleixner <tglx@linutronix.de>2018-01-18 05:56:48 -0500
commit64f29d1bc9fb8196df3d0f1df694245230e208c0 (patch)
treefbd97ed1f1ee7d1e9c23c86b1314135b24fcc5e8
parent1d966eb4d6326a2521073174e9710713e9846e8b (diff)
lockdep: Assign lock keys on registration
Lockdep is assigning lock keys when a lock is looked up. This is unnecessary; if the lock has never been registered then it is known that it is not locked. It also complicates the calling convention. Switch to assigning the lock key in register_lock_class().

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "David S. Miller" <davem@davemloft.net>
Link: https://lkml.kernel.org/r/20180117151414.23686-2-willy@infradead.org
-rw-r--r--kernel/locking/lockdep.c76
1 file changed, 40 insertions, 36 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5fa1324a4f29..472547dd45c3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -647,18 +647,12 @@ static int count_matching_names(struct lock_class *new_class)
647 return count + 1; 647 return count + 1;
648} 648}
649 649
650/*
651 * Register a lock's class in the hash-table, if the class is not present
652 * yet. Otherwise we look it up. We cache the result in the lock object
653 * itself, so actual lookup of the hash should be once per lock object.
654 */
655static inline struct lock_class * 650static inline struct lock_class *
656look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) 651look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
657{ 652{
658 struct lockdep_subclass_key *key; 653 struct lockdep_subclass_key *key;
659 struct hlist_head *hash_head; 654 struct hlist_head *hash_head;
660 struct lock_class *class; 655 struct lock_class *class;
661 bool is_static = false;
662 656
663 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { 657 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
664 debug_locks_off(); 658 debug_locks_off();
@@ -671,24 +665,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
671 } 665 }
672 666
673 /* 667 /*
674 * Static locks do not have their class-keys yet - for them the key 668 * If it is not initialised then it has never been locked,
675 * is the lock object itself. If the lock is in the per cpu area, 669 * so it won't be present in the hash table.
676 * the canonical address of the lock (per cpu offset removed) is
677 * used.
678 */ 670 */
679 if (unlikely(!lock->key)) { 671 if (unlikely(!lock->key))
680 unsigned long can_addr, addr = (unsigned long)lock; 672 return NULL;
681
682 if (__is_kernel_percpu_address(addr, &can_addr))
683 lock->key = (void *)can_addr;
684 else if (__is_module_percpu_address(addr, &can_addr))
685 lock->key = (void *)can_addr;
686 else if (static_obj(lock))
687 lock->key = (void *)lock;
688 else
689 return ERR_PTR(-EINVAL);
690 is_static = true;
691 }
692 673
693 /* 674 /*
694 * NOTE: the class-key must be unique. For dynamic locks, a static 675 * NOTE: the class-key must be unique. For dynamic locks, a static
@@ -720,7 +701,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
720 } 701 }
721 } 702 }
722 703
723 return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL); 704 return NULL;
705}
706
707/*
708 * Static locks do not have their class-keys yet - for them the key is
709 * the lock object itself. If the lock is in the per cpu area, the
710 * canonical address of the lock (per cpu offset removed) is used.
711 */
712static bool assign_lock_key(struct lockdep_map *lock)
713{
714 unsigned long can_addr, addr = (unsigned long)lock;
715
716 if (__is_kernel_percpu_address(addr, &can_addr))
717 lock->key = (void *)can_addr;
718 else if (__is_module_percpu_address(addr, &can_addr))
719 lock->key = (void *)can_addr;
720 else if (static_obj(lock))
721 lock->key = (void *)lock;
722 else {
723 /* Debug-check: all keys must be persistent! */
724 debug_locks_off();
725 pr_err("INFO: trying to register non-static key.\n");
726 pr_err("the code is fine but needs lockdep annotation.\n");
727 pr_err("turning off the locking correctness validator.\n");
728 dump_stack();
729 return false;
730 }
731
732 return true;
724} 733}
725 734
726/* 735/*
@@ -738,18 +747,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
738 DEBUG_LOCKS_WARN_ON(!irqs_disabled()); 747 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
739 748
740 class = look_up_lock_class(lock, subclass); 749 class = look_up_lock_class(lock, subclass);
741 if (likely(!IS_ERR_OR_NULL(class))) 750 if (likely(class))
742 goto out_set_class_cache; 751 goto out_set_class_cache;
743 752
744 /* 753 if (!lock->key) {
745 * Debug-check: all keys must be persistent! 754 if (!assign_lock_key(lock))
746 */ 755 return NULL;
747 if (IS_ERR(class)) { 756 } else if (!static_obj(lock->key)) {
748 debug_locks_off();
749 printk("INFO: trying to register non-static key.\n");
750 printk("the code is fine but needs lockdep annotation.\n");
751 printk("turning off the locking correctness validator.\n");
752 dump_stack();
753 return NULL; 757 return NULL;
754 } 758 }
755 759
@@ -3498,7 +3502,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3498 * Clearly if the lock hasn't been acquired _ever_, we're not 3502 * Clearly if the lock hasn't been acquired _ever_, we're not
3499 * holding it either, so report failure. 3503 * holding it either, so report failure.
3500 */ 3504 */
3501 if (IS_ERR_OR_NULL(class)) 3505 if (!class)
3502 return 0; 3506 return 0;
3503 3507
3504 /* 3508 /*
@@ -4294,7 +4298,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
4294 * If the class exists we look it up and zap it: 4298 * If the class exists we look it up and zap it:
4295 */ 4299 */
4296 class = look_up_lock_class(lock, j); 4300 class = look_up_lock_class(lock, j);
4297 if (!IS_ERR_OR_NULL(class)) 4301 if (class)
4298 zap_class(class); 4302 zap_class(class);
4299 } 4303 }
4300 /* 4304 /*