aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
author	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-26 17:25:52 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-26 17:25:52 -0500
commit1eefdec18eded41833401cfd64749643ff72e7da (patch)
treecf9d35939e239b7d1ed3194bec7f4d51409c2d50 /kernel
parent684019dd1f0092b4ffce4958c84aff0891deac83 (diff)
parent80eb865768703c0f85a0603762742ae1dedf21f0 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The main change in this cycle are initial preparatory bits of dynamic
  lockdep keys support from Bart Van Assche.

  There are also misc changes, a comment cleanup and a data structure
  cleanup"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Clean up comment in nohz_idle_balance()
  locking/lockdep: Stop using RCU primitives to access 'all_lock_classes'
  locking/lockdep: Make concurrent lockdep_reset_lock() calls safe
  locking/lockdep: Remove a superfluous INIT_LIST_HEAD() statement
  locking/lockdep: Introduce lock_class_cache_is_registered()
  locking/lockdep: Inline __lockdep_init_map()
  locking/lockdep: Declare local symbols static
  tools/lib/lockdep/tests: Test the lockdep_reset_lock() implementation
  tools/lib/lockdep: Add dummy print_irqtrace_events() implementation
  tools/lib/lockdep: Rename "trywlock" into "trywrlock"
  tools/lib/lockdep/tests: Run lockdep tests a second time under Valgrind
  tools/lib/lockdep/tests: Improve testing accuracy
  tools/lib/lockdep/tests: Fix shellcheck warnings
  tools/lib/lockdep/tests: Display compiler warning and error messages
  locking/lockdep: Remove ::version from lock_class structure
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/locking/lockdep.c	| 76
-rw-r--r--	kernel/sched/fair.c	| 4
2 files changed, 44 insertions(+), 36 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ef27f98714c0..95932333a48b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -138,6 +138,9 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
138 * get freed - this significantly simplifies the debugging code. 138 * get freed - this significantly simplifies the debugging code.
139 */ 139 */
140unsigned long nr_lock_classes; 140unsigned long nr_lock_classes;
141#ifndef CONFIG_DEBUG_LOCKDEP
142static
143#endif
141struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; 144struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
142 145
143static inline struct lock_class *hlock_class(struct held_lock *hlock) 146static inline struct lock_class *hlock_class(struct held_lock *hlock)
@@ -626,7 +629,8 @@ static int static_obj(void *obj)
626 629
627/* 630/*
628 * To make lock name printouts unique, we calculate a unique 631 * To make lock name printouts unique, we calculate a unique
629 * class->name_version generation counter: 632 * class->name_version generation counter. The caller must hold the graph
633 * lock.
630 */ 634 */
631static int count_matching_names(struct lock_class *new_class) 635static int count_matching_names(struct lock_class *new_class)
632{ 636{
@@ -636,7 +640,7 @@ static int count_matching_names(struct lock_class *new_class)
636 if (!new_class->name) 640 if (!new_class->name)
637 return 0; 641 return 0;
638 642
639 list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) { 643 list_for_each_entry(class, &all_lock_classes, lock_entry) {
640 if (new_class->key - new_class->subclass == class->key) 644 if (new_class->key - new_class->subclass == class->key)
641 return class->name_version; 645 return class->name_version;
642 if (class->name && !strcmp(class->name, new_class->name)) 646 if (class->name && !strcmp(class->name, new_class->name))
@@ -789,7 +793,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
789 class->key = key; 793 class->key = key;
790 class->name = lock->name; 794 class->name = lock->name;
791 class->subclass = subclass; 795 class->subclass = subclass;
792 INIT_LIST_HEAD(&class->lock_entry);
793 INIT_LIST_HEAD(&class->locks_before); 796 INIT_LIST_HEAD(&class->locks_before);
794 INIT_LIST_HEAD(&class->locks_after); 797 INIT_LIST_HEAD(&class->locks_after);
795 class->name_version = count_matching_names(class); 798 class->name_version = count_matching_names(class);
@@ -801,7 +804,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
801 /* 804 /*
802 * Add it to the global list of classes: 805 * Add it to the global list of classes:
803 */ 806 */
804 list_add_tail_rcu(&class->lock_entry, &all_lock_classes); 807 list_add_tail(&class->lock_entry, &all_lock_classes);
805 808
806 if (verbose(class)) { 809 if (verbose(class)) {
807 graph_unlock(); 810 graph_unlock();
@@ -3088,7 +3091,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
3088/* 3091/*
3089 * Initialize a lock instance's lock-class mapping info: 3092 * Initialize a lock instance's lock-class mapping info:
3090 */ 3093 */
3091static void __lockdep_init_map(struct lockdep_map *lock, const char *name, 3094void lockdep_init_map(struct lockdep_map *lock, const char *name,
3092 struct lock_class_key *key, int subclass) 3095 struct lock_class_key *key, int subclass)
3093{ 3096{
3094 int i; 3097 int i;
@@ -3144,12 +3147,6 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
3144 raw_local_irq_restore(flags); 3147 raw_local_irq_restore(flags);
3145 } 3148 }
3146} 3149}
3147
3148void lockdep_init_map(struct lockdep_map *lock, const char *name,
3149 struct lock_class_key *key, int subclass)
3150{
3151 __lockdep_init_map(lock, name, key, subclass);
3152}
3153EXPORT_SYMBOL_GPL(lockdep_init_map); 3150EXPORT_SYMBOL_GPL(lockdep_init_map);
3154 3151
3155struct lock_class_key __lockdep_no_validate__; 3152struct lock_class_key __lockdep_no_validate__;
@@ -4126,6 +4123,9 @@ void lockdep_reset(void)
4126 raw_local_irq_restore(flags); 4123 raw_local_irq_restore(flags);
4127} 4124}
4128 4125
4126/*
4127 * Remove all references to a lock class. The caller must hold the graph lock.
4128 */
4129static void zap_class(struct lock_class *class) 4129static void zap_class(struct lock_class *class)
4130{ 4130{
4131 int i; 4131 int i;
@@ -4142,7 +4142,7 @@ static void zap_class(struct lock_class *class)
4142 * Unhash the class and remove it from the all_lock_classes list: 4142 * Unhash the class and remove it from the all_lock_classes list:
4143 */ 4143 */
4144 hlist_del_rcu(&class->hash_entry); 4144 hlist_del_rcu(&class->hash_entry);
4145 list_del_rcu(&class->lock_entry); 4145 list_del(&class->lock_entry);
4146 4146
4147 RCU_INIT_POINTER(class->key, NULL); 4147 RCU_INIT_POINTER(class->key, NULL);
4148 RCU_INIT_POINTER(class->name, NULL); 4148 RCU_INIT_POINTER(class->name, NULL);
@@ -4204,15 +4204,36 @@ void lockdep_free_key_range(void *start, unsigned long size)
4204 */ 4204 */
4205} 4205}
4206 4206
4207void lockdep_reset_lock(struct lockdep_map *lock) 4207/*
4208 * Check whether any element of the @lock->class_cache[] array refers to a
4209 * registered lock class. The caller must hold either the graph lock or the
4210 * RCU read lock.
4211 */
4212static bool lock_class_cache_is_registered(struct lockdep_map *lock)
4208{ 4213{
4209 struct lock_class *class; 4214 struct lock_class *class;
4210 struct hlist_head *head; 4215 struct hlist_head *head;
4211 unsigned long flags;
4212 int i, j; 4216 int i, j;
4213 int locked; 4217
4218 for (i = 0; i < CLASSHASH_SIZE; i++) {
4219 head = classhash_table + i;
4220 hlist_for_each_entry_rcu(class, head, hash_entry) {
4221 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4222 if (lock->class_cache[j] == class)
4223 return true;
4224 }
4225 }
4226 return false;
4227}
4228
4229void lockdep_reset_lock(struct lockdep_map *lock)
4230{
4231 struct lock_class *class;
4232 unsigned long flags;
4233 int j, locked;
4214 4234
4215 raw_local_irq_save(flags); 4235 raw_local_irq_save(flags);
4236 locked = graph_lock();
4216 4237
4217 /* 4238 /*
4218 * Remove all classes this lock might have: 4239 * Remove all classes this lock might have:
@@ -4229,25 +4250,14 @@ void lockdep_reset_lock(struct lockdep_map *lock)
4229 * Debug check: in the end all mapped classes should 4250 * Debug check: in the end all mapped classes should
4230 * be gone. 4251 * be gone.
4231 */ 4252 */
4232 locked = graph_lock(); 4253 if (unlikely(lock_class_cache_is_registered(lock))) {
4233 for (i = 0; i < CLASSHASH_SIZE; i++) { 4254 if (debug_locks_off_graph_unlock()) {
4234 head = classhash_table + i; 4255 /*
4235 hlist_for_each_entry_rcu(class, head, hash_entry) { 4256 * We all just reset everything, how did it match?
4236 int match = 0; 4257 */
4237 4258 WARN_ON(1);
4238 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4239 match |= class == lock->class_cache[j];
4240
4241 if (unlikely(match)) {
4242 if (debug_locks_off_graph_unlock()) {
4243 /*
4244 * We all just reset everything, how did it match?
4245 */
4246 WARN_ON(1);
4247 }
4248 goto out_restore;
4249 }
4250 } 4259 }
4260 goto out_restore;
4251 } 4261 }
4252 if (locked) 4262 if (locked)
4253 graph_unlock(); 4263 graph_unlock();
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ac855b2f4774..db514993565b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9533,9 +9533,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
9533 return false; 9533 return false;
9534 } 9534 }
9535 9535
9536 /* 9536 /* could be _relaxed() */
9537 * barrier, pairs with nohz_balance_enter_idle(), ensures ...
9538 */
9539 flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); 9537 flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
9540 if (!(flags & NOHZ_KICK_MASK)) 9538 if (!(flags & NOHZ_KICK_MASK))
9541 return false; 9539 return false;