-rw-r--r--  include/linux/backing-dev.h |  2
-rw-r--r--  include/linux/lockdep.h     |  4
-rw-r--r--  include/linux/srcu.h        |  4
-rw-r--r--  include/net/sock.h          |  4
-rw-r--r--  kernel/locking/lockdep.c    | 89
5 files changed, 53 insertions(+), 50 deletions(-)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e54e7e0033eb..3e4ce54d84ab 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode)
  * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
  * associated wb's list_lock.
  */
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
 {
 #ifdef CONFIG_LOCKDEP
 	WARN_ON_ONCE(debug_locks &&
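Reviewer note: with inode_to_wb() now taking a const pointer, read-only callers no longer need to cast away const. A minimal sketch under that assumption; inode_wb_matches() is a made-up helper, not part of this patch, and the usual rule still applies that the caller holds i_lock, the mapping's tree_lock, or the wb's list_lock:

#include <linux/backing-dev.h>

/* Hypothetical illustration only; not part of the patch. */
static bool inode_wb_matches(const struct inode *inode,
			     const struct bdi_writeback *wb)
{
	/* caller must hold i_lock, tree_lock or wb->list_lock */
	return inode_to_wb(inode) == wb;
}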
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 795634ee5aa5..6fc77d4dbdcd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
 /*
  * Same "read" as for lock_acquire(), except -1 means any.
  */
-extern int lock_is_held_type(struct lockdep_map *lock, int read);
+extern int lock_is_held_type(const struct lockdep_map *lock, int read);
 
-static inline int lock_is_held(struct lockdep_map *lock)
+static inline int lock_is_held(const struct lockdep_map *lock)
 {
 	return lock_is_held_type(lock, -1);
 }
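Reviewer note: the practical effect is that lockdep_is_held()/lockdep_assert_held() can now be used from functions that only hold a const pointer to the containing object. A minimal sketch, assuming a made-up struct counter:

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/types.h>

/* Hypothetical illustration only; not part of the patch. */
struct counter {
	spinlock_t	lock;
	u64		value;
};

static u64 counter_peek(const struct counter *c)
{
	lockdep_assert_held(&c->lock);	/* compiles without casting away const */
	return c->value;
}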
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 62be8966e837..33c1c698df09 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -92,7 +92,7 @@ void synchronize_srcu(struct srcu_struct *sp);
  * relies on normal RCU, it can be called from the CPU which
  * is in the idle loop from an RCU point of view or offline.
  */
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
@@ -101,7 +101,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
 {
 	return 1;
 }
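Reviewer note: the SRCU side is the same pattern; a read-side sanity check can now live in a const-correct helper. A short sketch with made-up names:

#include <linux/srcu.h>

/* Hypothetical illustration only; not part of the patch. */
static int cfg_peek_flag(const struct srcu_struct *sp, const int *flag)
{
	WARN_ON_ONCE(!srcu_read_lock_held(sp));	/* const sp is accepted now */
	return READ_ONCE(*flag);
}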
diff --git a/include/net/sock.h b/include/net/sock.h
index 7a7b14e9628a..c4a424fe6fdd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1445,10 +1445,8 @@ do { \
 } while (0)
 
 #ifdef CONFIG_LOCKDEP
-static inline bool lockdep_sock_is_held(const struct sock *csk)
+static inline bool lockdep_sock_is_held(const struct sock *sk)
 {
-	struct sock *sk = (struct sock *)csk;
-
 	return lockdep_is_held(&sk->sk_lock) ||
 	       lockdep_is_held(&sk->sk_lock.slock);
 }
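Reviewer note: the lockdep.h change above is what lets the (struct sock *) cast in lockdep_sock_is_held() go away. A usage sketch, assuming a made-up helper name (sk_dst_cache itself is a real __rcu field of struct sock):

#include <net/sock.h>

/* Hypothetical illustration only; not part of the patch. */
static struct dst_entry *sk_peek_dst(const struct sock *sk)
{
	return rcu_dereference_protected(sk->sk_dst_cache,
					 lockdep_sock_is_held(sk));
}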
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 521659044719..89b5f83f1969 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -648,18 +648,12 @@ static int count_matching_names(struct lock_class *new_class)
 	return count + 1;
 }
 
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
 static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
 	struct lock_class *class;
-	bool is_static = false;
 
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
@@ -672,24 +666,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	}
 
 	/*
-	 * Static locks do not have their class-keys yet - for them the key
-	 * is the lock object itself. If the lock is in the per cpu area,
-	 * the canonical address of the lock (per cpu offset removed) is
-	 * used.
+	 * If it is not initialised then it has never been locked,
+	 * so it won't be present in the hash table.
 	 */
-	if (unlikely(!lock->key)) {
-		unsigned long can_addr, addr = (unsigned long)lock;
-
-		if (__is_kernel_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (__is_module_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (static_obj(lock))
-			lock->key = (void *)lock;
-		else
-			return ERR_PTR(-EINVAL);
-		is_static = true;
-	}
+	if (unlikely(!lock->key))
+		return NULL;
 
 	/*
 	 * NOTE: the class-key must be unique. For dynamic locks, a static
@@ -721,7 +702,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		}
 	}
 
-	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+	return NULL;
+}
+
+/*
+ * Static locks do not have their class-keys yet - for them the key is
+ * the lock object itself. If the lock is in the per cpu area, the
+ * canonical address of the lock (per cpu offset removed) is used.
+ */
+static bool assign_lock_key(struct lockdep_map *lock)
+{
+	unsigned long can_addr, addr = (unsigned long)lock;
+
+	if (__is_kernel_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (__is_module_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (static_obj(lock))
+		lock->key = (void *)lock;
+	else {
+		/* Debug-check: all keys must be persistent! */
+		debug_locks_off();
+		pr_err("INFO: trying to register non-static key.\n");
+		pr_err("the code is fine but needs lockdep annotation.\n");
+		pr_err("turning off the locking correctness validator.\n");
+		dump_stack();
+		return false;
+	}
+
+	return true;
 }
 
 /*
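Reviewer note: assign_lock_key() keeps the existing behaviour for statically allocated locks, which typically reach it because static initializers such as DEFINE_SPINLOCK() leave dep_map.key NULL. A rough sketch of that path, with made-up names:

#include <linux/spinlock.h>

/* Hypothetical illustration only; not part of the patch. */
static DEFINE_SPINLOCK(stats_lock);	/* static object, dep_map.key starts out NULL */

static void stats_touch(void)
{
	/*
	 * The first acquisition registers the class: assign_lock_key()
	 * sees a static object and uses the lock's own address as the key
	 * (or the canonical, offset-stripped address for a per-cpu lock).
	 */
	spin_lock(&stats_lock);
	spin_unlock(&stats_lock);
}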
@@ -739,18 +748,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
-	if (likely(!IS_ERR_OR_NULL(class)))
+	if (likely(class))
 		goto out_set_class_cache;
 
-	/*
-	 * Debug-check: all keys must be persistent!
-	 */
-	if (IS_ERR(class)) {
-		debug_locks_off();
-		printk("INFO: trying to register non-static key.\n");
-		printk("the code is fine but needs lockdep annotation.\n");
-		printk("turning off the locking correctness validator.\n");
-		dump_stack();
+	if (!lock->key) {
+		if (!assign_lock_key(lock))
+			return NULL;
+	} else if (!static_obj(lock->key)) {
 		return NULL;
 	}
 
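Reviewer note: register_lock_class() now handles the two failure modes separately: a missing key is handed to assign_lock_key() (which prints the "non-static key" splat if the lock lives in neither static nor per-cpu storage), while an existing but non-static key simply bails out. A rough sketch of the case that still triggers the splat, with made-up names:

#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical illustration only; not part of the patch. */
struct widget {
	spinlock_t lock;
};

static struct widget *widget_alloc(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	/*
	 * Forgetting spin_lock_init() here leaves lock.dep_map.key NULL;
	 * the first spin_lock(&w->lock) then reaches assign_lock_key(),
	 * which finds a heap object (neither static nor per-cpu), prints
	 * "trying to register non-static key" and disables lockdep.
	 * spin_lock_init() avoids this by supplying a static
	 * struct lock_class_key from its call site.
	 */
	return w;
}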
@@ -3273,7 +3277,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	return 0;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read);
+static int __lock_is_held(const struct lockdep_map *lock, int read);
 
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3482,13 +3486,14 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	return 0;
 }
 
-static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+static int match_held_lock(const struct held_lock *hlock,
+			   const struct lockdep_map *lock)
 {
 	if (hlock->instance == lock)
 		return 1;
 
 	if (hlock->references) {
-		struct lock_class *class = lock->class_cache[0];
+		const struct lock_class *class = lock->class_cache[0];
 
 		if (!class)
 			class = look_up_lock_class(lock, 0);
@@ -3499,7 +3504,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 	 * Clearly if the lock hasn't been acquired _ever_, we're not
 	 * holding it either, so report failure.
 	 */
-	if (IS_ERR_OR_NULL(class))
+	if (!class)
 		return 0;
 
 	/*
@@ -3724,7 +3729,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	return 1;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read)
+static int __lock_is_held(const struct lockdep_map *lock, int read)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3938,7 +3943,7 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held_type(struct lockdep_map *lock, int read)
+int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -4295,7 +4300,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		 * If the class exists we look it up and zap it:
 		 */
 		class = look_up_lock_class(lock, j);
-		if (!IS_ERR_OR_NULL(class))
+		if (class)
 			zap_class(class);
 	}
 	/*