aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/locking
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2017-12-12 06:31:16 -0500
committerIngo Molnar <mingo@kernel.org>2017-12-12 06:38:51 -0500
commite966eaeeb623f09975ef362c2866fae6f86844f9 (patch)
tree8dfcb814c86d24c32ae3338d4dc1dc3aa1017764 /kernel/locking
parentd89c70356acf11b7cf47ca5cfcafae5062a85451 (diff)
locking/lockdep: Remove the cross-release locking checks
This code (CONFIG_LOCKDEP_CROSSRELEASE=y and CONFIG_LOCKDEP_COMPLETIONS=y), while it found a number of old bugs initially, was also causing too many false positives that caused people to disable lockdep - which is arguably a worse overall outcome. If we disable cross-release by default but keep the code upstream then in practice the most likely outcome is that we'll allow the situation to degrade gradually, by allowing entropy to introduce more and more false positives, until it overwhelms maintenance capacity. Another bad side effect was that people were trying to work around the false positives by uglifying/complicating unrelated code. There's a marked difference between annotating locking operations and uglifying good code just due to bad lock debugging code ... This gradual decrease in quality happened to a number of debugging facilities in the kernel, and lockdep is pretty complex already, so we cannot risk this outcome. Either cross-release checking can be done right with no false positives, or it should not be included in the upstream kernel. ( Note that it might make sense to maintain it out of tree and go through the false positives every now and then and see whether new bugs were introduced. ) Cc: Byungchul Park <byungchul.park@lge.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
-rw-r--r--kernel/locking/lockdep.c652
1 files changed, 35 insertions, 617 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 670d8d7d8087..5fa1324a4f29 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -57,10 +57,6 @@
57#define CREATE_TRACE_POINTS 57#define CREATE_TRACE_POINTS
58#include <trace/events/lock.h> 58#include <trace/events/lock.h>
59 59
60#ifdef CONFIG_LOCKDEP_CROSSRELEASE
61#include <linux/slab.h>
62#endif
63
64#ifdef CONFIG_PROVE_LOCKING 60#ifdef CONFIG_PROVE_LOCKING
65int prove_locking = 1; 61int prove_locking = 1;
66module_param(prove_locking, int, 0644); 62module_param(prove_locking, int, 0644);
@@ -75,19 +71,6 @@ module_param(lock_stat, int, 0644);
75#define lock_stat 0 71#define lock_stat 0
76#endif 72#endif
77 73
78#ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
79static int crossrelease_fullstack = 1;
80#else
81static int crossrelease_fullstack;
82#endif
83static int __init allow_crossrelease_fullstack(char *str)
84{
85 crossrelease_fullstack = 1;
86 return 0;
87}
88
89early_param("crossrelease_fullstack", allow_crossrelease_fullstack);
90
91/* 74/*
92 * lockdep_lock: protects the lockdep graph, the hashes and the 75 * lockdep_lock: protects the lockdep graph, the hashes and the
93 * class/list/hash allocators. 76 * class/list/hash allocators.
@@ -740,18 +723,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
740 return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL); 723 return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
741} 724}
742 725
743#ifdef CONFIG_LOCKDEP_CROSSRELEASE
744static void cross_init(struct lockdep_map *lock, int cross);
745static int cross_lock(struct lockdep_map *lock);
746static int lock_acquire_crosslock(struct held_lock *hlock);
747static int lock_release_crosslock(struct lockdep_map *lock);
748#else
749static inline void cross_init(struct lockdep_map *lock, int cross) {}
750static inline int cross_lock(struct lockdep_map *lock) { return 0; }
751static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; }
752static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; }
753#endif
754
755/* 726/*
756 * Register a lock's class in the hash-table, if the class is not present 727 * Register a lock's class in the hash-table, if the class is not present
757 * yet. Otherwise we look it up. We cache the result in the lock object 728 * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1151,41 +1122,22 @@ print_circular_lock_scenario(struct held_lock *src,
1151 printk(KERN_CONT "\n\n"); 1122 printk(KERN_CONT "\n\n");
1152 } 1123 }
1153 1124
1154 if (cross_lock(tgt->instance)) { 1125 printk(" Possible unsafe locking scenario:\n\n");
1155 printk(" Possible unsafe locking scenario by crosslock:\n\n"); 1126 printk(" CPU0 CPU1\n");
1156 printk(" CPU0 CPU1\n"); 1127 printk(" ---- ----\n");
1157 printk(" ---- ----\n"); 1128 printk(" lock(");
1158 printk(" lock("); 1129 __print_lock_name(target);
1159 __print_lock_name(parent); 1130 printk(KERN_CONT ");\n");
1160 printk(KERN_CONT ");\n"); 1131 printk(" lock(");
1161 printk(" lock("); 1132 __print_lock_name(parent);
1162 __print_lock_name(target); 1133 printk(KERN_CONT ");\n");
1163 printk(KERN_CONT ");\n"); 1134 printk(" lock(");
1164 printk(" lock("); 1135 __print_lock_name(target);
1165 __print_lock_name(source); 1136 printk(KERN_CONT ");\n");
1166 printk(KERN_CONT ");\n"); 1137 printk(" lock(");
1167 printk(" unlock("); 1138 __print_lock_name(source);
1168 __print_lock_name(target); 1139 printk(KERN_CONT ");\n");
1169 printk(KERN_CONT ");\n"); 1140 printk("\n *** DEADLOCK ***\n\n");
1170 printk("\n *** DEADLOCK ***\n\n");
1171 } else {
1172 printk(" Possible unsafe locking scenario:\n\n");
1173 printk(" CPU0 CPU1\n");
1174 printk(" ---- ----\n");
1175 printk(" lock(");
1176 __print_lock_name(target);
1177 printk(KERN_CONT ");\n");
1178 printk(" lock(");
1179 __print_lock_name(parent);
1180 printk(KERN_CONT ");\n");
1181 printk(" lock(");
1182 __print_lock_name(target);
1183 printk(KERN_CONT ");\n");
1184 printk(" lock(");
1185 __print_lock_name(source);
1186 printk(KERN_CONT ");\n");
1187 printk("\n *** DEADLOCK ***\n\n");
1188 }
1189} 1141}
1190 1142
1191/* 1143/*
@@ -1211,10 +1163,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1211 curr->comm, task_pid_nr(curr)); 1163 curr->comm, task_pid_nr(curr));
1212 print_lock(check_src); 1164 print_lock(check_src);
1213 1165
1214 if (cross_lock(check_tgt->instance)) 1166 pr_warn("\nbut task is already holding lock:\n");
1215 pr_warn("\nbut now in release context of a crosslock acquired at the following:\n");
1216 else
1217 pr_warn("\nbut task is already holding lock:\n");
1218 1167
1219 print_lock(check_tgt); 1168 print_lock(check_tgt);
1220 pr_warn("\nwhich lock already depends on the new lock.\n\n"); 1169 pr_warn("\nwhich lock already depends on the new lock.\n\n");
@@ -1244,9 +1193,7 @@ static noinline int print_circular_bug(struct lock_list *this,
1244 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1193 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1245 return 0; 1194 return 0;
1246 1195
1247 if (cross_lock(check_tgt->instance)) 1196 if (!save_trace(&this->trace))
1248 this->trace = *trace;
1249 else if (!save_trace(&this->trace))
1250 return 0; 1197 return 0;
1251 1198
1252 depth = get_lock_depth(target); 1199 depth = get_lock_depth(target);
@@ -1850,9 +1797,6 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
1850 if (nest) 1797 if (nest)
1851 return 2; 1798 return 2;
1852 1799
1853 if (cross_lock(prev->instance))
1854 continue;
1855
1856 return print_deadlock_bug(curr, prev, next); 1800 return print_deadlock_bug(curr, prev, next);
1857 } 1801 }
1858 return 1; 1802 return 1;
@@ -2018,31 +1962,26 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
2018 for (;;) { 1962 for (;;) {
2019 int distance = curr->lockdep_depth - depth + 1; 1963 int distance = curr->lockdep_depth - depth + 1;
2020 hlock = curr->held_locks + depth - 1; 1964 hlock = curr->held_locks + depth - 1;
1965
2021 /* 1966 /*
2022 * Only non-crosslock entries get new dependencies added. 1967 * Only non-recursive-read entries get new dependencies
2023 * Crosslock entries will be added by commit later: 1968 * added:
2024 */ 1969 */
2025 if (!cross_lock(hlock->instance)) { 1970 if (hlock->read != 2 && hlock->check) {
1971 int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
1972 if (!ret)
1973 return 0;
1974
2026 /* 1975 /*
2027 * Only non-recursive-read entries get new dependencies 1976 * Stop after the first non-trylock entry,
2028 * added: 1977 * as non-trylock entries have added their
1978 * own direct dependencies already, so this
1979 * lock is connected to them indirectly:
2029 */ 1980 */
2030 if (hlock->read != 2 && hlock->check) { 1981 if (!hlock->trylock)
2031 int ret = check_prev_add(curr, hlock, next, 1982 break;
2032 distance, &trace, save_trace);
2033 if (!ret)
2034 return 0;
2035
2036 /*
2037 * Stop after the first non-trylock entry,
2038 * as non-trylock entries have added their
2039 * own direct dependencies already, so this
2040 * lock is connected to them indirectly:
2041 */
2042 if (!hlock->trylock)
2043 break;
2044 }
2045 } 1983 }
1984
2046 depth--; 1985 depth--;
2047 /* 1986 /*
2048 * End of lock-stack? 1987 * End of lock-stack?
@@ -3292,21 +3231,10 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
3292void lockdep_init_map(struct lockdep_map *lock, const char *name, 3231void lockdep_init_map(struct lockdep_map *lock, const char *name,
3293 struct lock_class_key *key, int subclass) 3232 struct lock_class_key *key, int subclass)
3294{ 3233{
3295 cross_init(lock, 0);
3296 __lockdep_init_map(lock, name, key, subclass); 3234 __lockdep_init_map(lock, name, key, subclass);
3297} 3235}
3298EXPORT_SYMBOL_GPL(lockdep_init_map); 3236EXPORT_SYMBOL_GPL(lockdep_init_map);
3299 3237
3300#ifdef CONFIG_LOCKDEP_CROSSRELEASE
3301void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name,
3302 struct lock_class_key *key, int subclass)
3303{
3304 cross_init(lock, 1);
3305 __lockdep_init_map(lock, name, key, subclass);
3306}
3307EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock);
3308#endif
3309
3310struct lock_class_key __lockdep_no_validate__; 3238struct lock_class_key __lockdep_no_validate__;
3311EXPORT_SYMBOL_GPL(__lockdep_no_validate__); 3239EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3312 3240
@@ -3362,7 +3290,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3362 int chain_head = 0; 3290 int chain_head = 0;
3363 int class_idx; 3291 int class_idx;
3364 u64 chain_key; 3292 u64 chain_key;
3365 int ret;
3366 3293
3367 if (unlikely(!debug_locks)) 3294 if (unlikely(!debug_locks))
3368 return 0; 3295 return 0;
@@ -3411,8 +3338,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3411 3338
3412 class_idx = class - lock_classes + 1; 3339 class_idx = class - lock_classes + 1;
3413 3340
3414 /* TODO: nest_lock is not implemented for crosslock yet. */ 3341 if (depth) {
3415 if (depth && !cross_lock(lock)) {
3416 hlock = curr->held_locks + depth - 1; 3342 hlock = curr->held_locks + depth - 1;
3417 if (hlock->class_idx == class_idx && nest_lock) { 3343 if (hlock->class_idx == class_idx && nest_lock) {
3418 if (hlock->references) { 3344 if (hlock->references) {
@@ -3500,14 +3426,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3500 if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) 3426 if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3501 return 0; 3427 return 0;
3502 3428
3503 ret = lock_acquire_crosslock(hlock);
3504 /*
3505 * 2 means normal acquire operations are needed. Otherwise, it's
3506 * ok just to return with '0:fail, 1:success'.
3507 */
3508 if (ret != 2)
3509 return ret;
3510
3511 curr->curr_chain_key = chain_key; 3429 curr->curr_chain_key = chain_key;
3512 curr->lockdep_depth++; 3430 curr->lockdep_depth++;
3513 check_chain_key(curr); 3431 check_chain_key(curr);
@@ -3745,19 +3663,11 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3745 struct task_struct *curr = current; 3663 struct task_struct *curr = current;
3746 struct held_lock *hlock; 3664 struct held_lock *hlock;
3747 unsigned int depth; 3665 unsigned int depth;
3748 int ret, i; 3666 int i;
3749 3667
3750 if (unlikely(!debug_locks)) 3668 if (unlikely(!debug_locks))
3751 return 0; 3669 return 0;
3752 3670
3753 ret = lock_release_crosslock(lock);
3754 /*
3755 * 2 means normal release operations are needed. Otherwise, it's
3756 * ok just to return with '0:fail, 1:success'.
3757 */
3758 if (ret != 2)
3759 return ret;
3760
3761 depth = curr->lockdep_depth; 3671 depth = curr->lockdep_depth;
3762 /* 3672 /*
3763 * So we're all set to release this lock.. wait what lock? We don't 3673 * So we're all set to release this lock.. wait what lock? We don't
@@ -4675,495 +4585,3 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4675 dump_stack(); 4585 dump_stack();
4676} 4586}
4677EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious); 4587EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
4678
4679#ifdef CONFIG_LOCKDEP_CROSSRELEASE
4680
4681/*
4682 * Crossrelease works by recording a lock history for each thread and
4683 * connecting those historic locks that were taken after the
4684 * wait_for_completion() in the complete() context.
4685 *
4686 * Task-A Task-B
4687 *
4688 * mutex_lock(&A);
4689 * mutex_unlock(&A);
4690 *
4691 * wait_for_completion(&C);
4692 * lock_acquire_crosslock();
4693 * atomic_inc_return(&cross_gen_id);
4694 * |
4695 * | mutex_lock(&B);
4696 * | mutex_unlock(&B);
4697 * |
4698 * | complete(&C);
4699 * `-- lock_commit_crosslock();
4700 *
4701 * Which will then add a dependency between B and C.
4702 */
4703
4704#define xhlock(i) (current->xhlocks[(i) % MAX_XHLOCKS_NR])
4705
4706/*
4707 * Whenever a crosslock is held, cross_gen_id will be increased.
4708 */
4709static atomic_t cross_gen_id; /* Can be wrapped */
4710
4711/*
4712 * Make an entry of the ring buffer invalid.
4713 */
4714static inline void invalidate_xhlock(struct hist_lock *xhlock)
4715{
4716 /*
4717 * Normally, xhlock->hlock.instance must be !NULL.
4718 */
4719 xhlock->hlock.instance = NULL;
4720}
4721
4722/*
4723 * Lock history stacks; we have 2 nested lock history stacks:
4724 *
4725 * HARD(IRQ)
4726 * SOFT(IRQ)
4727 *
4728 * The thing is that once we complete a HARD/SOFT IRQ the future task locks
4729 * should not depend on any of the locks observed while running the IRQ. So
4730 * what we do is rewind the history buffer and erase all our knowledge of that
4731 * temporal event.
4732 */
4733
4734void crossrelease_hist_start(enum xhlock_context_t c)
4735{
4736 struct task_struct *cur = current;
4737
4738 if (!cur->xhlocks)
4739 return;
4740
4741 cur->xhlock_idx_hist[c] = cur->xhlock_idx;
4742 cur->hist_id_save[c] = cur->hist_id;
4743}
4744
4745void crossrelease_hist_end(enum xhlock_context_t c)
4746{
4747 struct task_struct *cur = current;
4748
4749 if (cur->xhlocks) {
4750 unsigned int idx = cur->xhlock_idx_hist[c];
4751 struct hist_lock *h = &xhlock(idx);
4752
4753 cur->xhlock_idx = idx;
4754
4755 /* Check if the ring was overwritten. */
4756 if (h->hist_id != cur->hist_id_save[c])
4757 invalidate_xhlock(h);
4758 }
4759}
4760
4761/*
4762 * lockdep_invariant_state() is used to annotate independence inside a task, to
4763 * make one task look like multiple independent 'tasks'.
4764 *
4765 * Take for instance workqueues; each work is independent of the last. The
4766 * completion of a future work does not depend on the completion of a past work
4767 * (in general). Therefore we must not carry that (lock) dependency across
4768 * works.
4769 *
4770 * This is true for many things; pretty much all kthreads fall into this
4771 * pattern, where they have an invariant state and future completions do not
4772 * depend on past completions. Its just that since they all have the 'same'
4773 * form -- the kthread does the same over and over -- it doesn't typically
4774 * matter.
4775 *
4776 * The same is true for system-calls, once a system call is completed (we've
4777 * returned to userspace) the next system call does not depend on the lock
4778 * history of the previous system call.
4779 *
4780 * The key property for independence, this invariant state, is that it must be
4781 * a point where we hold no locks and have no history. Because if we were to
4782 * hold locks, the restore at _end() would not necessarily recover its history
4783 * entry. Similarly, independence per-definition means it does not depend on
4784 * prior state.
4785 */
4786void lockdep_invariant_state(bool force)
4787{
4788 /*
4789 * We call this at an invariant point, no current state, no history.
4790 * Verify the former, enforce the latter.
4791 */
4792 WARN_ON_ONCE(!force && current->lockdep_depth);
4793 if (current->xhlocks)
4794 invalidate_xhlock(&xhlock(current->xhlock_idx));
4795}
4796
4797static int cross_lock(struct lockdep_map *lock)
4798{
4799 return lock ? lock->cross : 0;
4800}
4801
4802/*
4803 * This is needed to decide the relationship between wrapable variables.
4804 */
4805static inline int before(unsigned int a, unsigned int b)
4806{
4807 return (int)(a - b) < 0;
4808}
4809
4810static inline struct lock_class *xhlock_class(struct hist_lock *xhlock)
4811{
4812 return hlock_class(&xhlock->hlock);
4813}
4814
4815static inline struct lock_class *xlock_class(struct cross_lock *xlock)
4816{
4817 return hlock_class(&xlock->hlock);
4818}
4819
4820/*
4821 * Should we check a dependency with previous one?
4822 */
4823static inline int depend_before(struct held_lock *hlock)
4824{
4825 return hlock->read != 2 && hlock->check && !hlock->trylock;
4826}
4827
4828/*
4829 * Should we check a dependency with next one?
4830 */
4831static inline int depend_after(struct held_lock *hlock)
4832{
4833 return hlock->read != 2 && hlock->check;
4834}
4835
4836/*
4837 * Check if the xhlock is valid, which would be false if,
4838 *
4839 * 1. Has not been used after initialization yet.
4840 * 2. Got invalidated.
4841 *
4842 * Remind hist_lock is implemented as a ring buffer.
4843 */
4844static inline int xhlock_valid(struct hist_lock *xhlock)
4845{
4846 /*
4847 * xhlock->hlock.instance must be !NULL.
4848 */
4849 return !!xhlock->hlock.instance;
4850}
4851
4852/*
4853 * Record a hist_lock entry.
4854 *
4855 * Irq disable is only required.
4856 */
4857static void add_xhlock(struct held_lock *hlock)
4858{
4859 unsigned int idx = ++current->xhlock_idx;
4860 struct hist_lock *xhlock = &xhlock(idx);
4861
4862#ifdef CONFIG_DEBUG_LOCKDEP
4863 /*
4864 * This can be done locklessly because they are all task-local
4865 * state, we must however ensure IRQs are disabled.
4866 */
4867 WARN_ON_ONCE(!irqs_disabled());
4868#endif
4869
4870 /* Initialize hist_lock's members */
4871 xhlock->hlock = *hlock;
4872 xhlock->hist_id = ++current->hist_id;
4873
4874 xhlock->trace.nr_entries = 0;
4875 xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
4876 xhlock->trace.entries = xhlock->trace_entries;
4877
4878 if (crossrelease_fullstack) {
4879 xhlock->trace.skip = 3;
4880 save_stack_trace(&xhlock->trace);
4881 } else {
4882 xhlock->trace.nr_entries = 1;
4883 xhlock->trace.entries[0] = hlock->acquire_ip;
4884 }
4885}
4886
4887static inline int same_context_xhlock(struct hist_lock *xhlock)
4888{
4889 return xhlock->hlock.irq_context == task_irq_context(current);
4890}
4891
4892/*
4893 * This should be lockless as far as possible because this would be
4894 * called very frequently.
4895 */
4896static void check_add_xhlock(struct held_lock *hlock)
4897{
4898 /*
4899 * Record a hist_lock, only in case that acquisitions ahead
4900 * could depend on the held_lock. For example, if the held_lock
4901 * is trylock then acquisitions ahead never depends on that.
4902 * In that case, we don't need to record it. Just return.
4903 */
4904 if (!current->xhlocks || !depend_before(hlock))
4905 return;
4906
4907 add_xhlock(hlock);
4908}
4909
4910/*
4911 * For crosslock.
4912 */
4913static int add_xlock(struct held_lock *hlock)
4914{
4915 struct cross_lock *xlock;
4916 unsigned int gen_id;
4917
4918 if (!graph_lock())
4919 return 0;
4920
4921 xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;
4922
4923 /*
4924 * When acquisitions for a crosslock are overlapped, we use
4925 * nr_acquire to perform commit for them, based on cross_gen_id
4926 * of the first acquisition, which allows to add additional
4927 * dependencies.
4928 *
4929 * Moreover, when no acquisition of a crosslock is in progress,
4930 * we should not perform commit because the lock might not exist
4931 * any more, which might cause incorrect memory access. So we
4932 * have to track the number of acquisitions of a crosslock.
4933 *
4934 * depend_after() is necessary to initialize only the first
4935 * valid xlock so that the xlock can be used on its commit.
4936 */
4937 if (xlock->nr_acquire++ && depend_after(&xlock->hlock))
4938 goto unlock;
4939
4940 gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
4941 xlock->hlock = *hlock;
4942 xlock->hlock.gen_id = gen_id;
4943unlock:
4944 graph_unlock();
4945 return 1;
4946}
4947
4948/*
4949 * Called for both normal and crosslock acquires. Normal locks will be
4950 * pushed on the hist_lock queue. Cross locks will record state and
4951 * stop regular lock_acquire() to avoid being placed on the held_lock
4952 * stack.
4953 *
4954 * Return: 0 - failure;
4955 * 1 - crosslock, done;
4956 * 2 - normal lock, continue to held_lock[] ops.
4957 */
4958static int lock_acquire_crosslock(struct held_lock *hlock)
4959{
4960 /*
4961 * CONTEXT 1 CONTEXT 2
4962 * --------- ---------
4963 * lock A (cross)
4964 * X = atomic_inc_return(&cross_gen_id)
4965 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4966 * Y = atomic_read_acquire(&cross_gen_id)
4967 * lock B
4968 *
4969 * atomic_read_acquire() is for ordering between A and B,
4970 * IOW, A happens before B, when CONTEXT 2 see Y >= X.
4971 *
4972 * Pairs with atomic_inc_return() in add_xlock().
4973 */
4974 hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id);
4975
4976 if (cross_lock(hlock->instance))
4977 return add_xlock(hlock);
4978
4979 check_add_xhlock(hlock);
4980 return 2;
4981}
4982
4983static int copy_trace(struct stack_trace *trace)
4984{
4985 unsigned long *buf = stack_trace + nr_stack_trace_entries;
4986 unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
4987 unsigned int nr = min(max_nr, trace->nr_entries);
4988
4989 trace->nr_entries = nr;
4990 memcpy(buf, trace->entries, nr * sizeof(trace->entries[0]));
4991 trace->entries = buf;
4992 nr_stack_trace_entries += nr;
4993
4994 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
4995 if (!debug_locks_off_graph_unlock())
4996 return 0;
4997
4998 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
4999 dump_stack();
5000
5001 return 0;
5002 }
5003
5004 return 1;
5005}
5006
5007static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
5008{
5009 unsigned int xid, pid;
5010 u64 chain_key;
5011
5012 xid = xlock_class(xlock) - lock_classes;
5013 chain_key = iterate_chain_key((u64)0, xid);
5014 pid = xhlock_class(xhlock) - lock_classes;
5015 chain_key = iterate_chain_key(chain_key, pid);
5016
5017 if (lookup_chain_cache(chain_key))
5018 return 1;
5019
5020 if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context,
5021 chain_key))
5022 return 0;
5023
5024 if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1,
5025 &xhlock->trace, copy_trace))
5026 return 0;
5027
5028 return 1;
5029}
5030
5031static void commit_xhlocks(struct cross_lock *xlock)
5032{
5033 unsigned int cur = current->xhlock_idx;
5034 unsigned int prev_hist_id = xhlock(cur).hist_id;
5035 unsigned int i;
5036
5037 if (!graph_lock())
5038 return;
5039
5040 if (xlock->nr_acquire) {
5041 for (i = 0; i < MAX_XHLOCKS_NR; i++) {
5042 struct hist_lock *xhlock = &xhlock(cur - i);
5043
5044 if (!xhlock_valid(xhlock))
5045 break;
5046
5047 if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
5048 break;
5049
5050 if (!same_context_xhlock(xhlock))
5051 break;
5052
5053 /*
5054 * Filter out the cases where the ring buffer was
5055 * overwritten and the current entry has a bigger
5056 * hist_id than the previous one, which is impossible
5057 * otherwise:
5058 */
5059 if (unlikely(before(prev_hist_id, xhlock->hist_id)))
5060 break;
5061
5062 prev_hist_id = xhlock->hist_id;
5063
5064 /*
5065 * commit_xhlock() returns 0 with graph_lock already
5066 * released if fail.
5067 */
5068 if (!commit_xhlock(xlock, xhlock))
5069 return;
5070 }
5071 }
5072
5073 graph_unlock();
5074}
5075
5076void lock_commit_crosslock(struct lockdep_map *lock)
5077{
5078 struct cross_lock *xlock;
5079 unsigned long flags;
5080
5081 if (unlikely(!debug_locks || current->lockdep_recursion))
5082 return;
5083
5084 if (!current->xhlocks)
5085 return;
5086
5087 /*
5088 * Do commit hist_locks with the cross_lock, only in case that
5089 * the cross_lock could depend on acquisitions after that.
5090 *
5091 * For example, if the cross_lock does not have the 'check' flag
5092 * then we don't need to check dependencies and commit for that.
5093 * Just skip it. In that case, of course, the cross_lock does
5094 * not depend on acquisitions ahead, either.
5095 *
5096 * WARNING: Don't do that in add_xlock() in advance. When an
5097 * acquisition context is different from the commit context,
5098 * invalid(skipped) cross_lock might be accessed.
5099 */
5100 if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock))
5101 return;
5102
5103 raw_local_irq_save(flags);
5104 check_flags(flags);
5105 current->lockdep_recursion = 1;
5106 xlock = &((struct lockdep_map_cross *)lock)->xlock;
5107 commit_xhlocks(xlock);
5108 current->lockdep_recursion = 0;
5109 raw_local_irq_restore(flags);
5110}
5111EXPORT_SYMBOL_GPL(lock_commit_crosslock);
5112
5113/*
5114 * Return: 0 - failure;
5115 * 1 - crosslock, done;
5116 * 2 - normal lock, continue to held_lock[] ops.
5117 */
5118static int lock_release_crosslock(struct lockdep_map *lock)
5119{
5120 if (cross_lock(lock)) {
5121 if (!graph_lock())
5122 return 0;
5123 ((struct lockdep_map_cross *)lock)->xlock.nr_acquire--;
5124 graph_unlock();
5125 return 1;
5126 }
5127 return 2;
5128}
5129
5130static void cross_init(struct lockdep_map *lock, int cross)
5131{
5132 if (cross)
5133 ((struct lockdep_map_cross *)lock)->xlock.nr_acquire = 0;
5134
5135 lock->cross = cross;
5136
5137 /*
5138 * Crossrelease assumes that the ring buffer size of xhlocks
5139 * is aligned with power of 2. So force it on build.
5140 */
5141 BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1));
5142}
5143
5144void lockdep_init_task(struct task_struct *task)
5145{
5146 int i;
5147
5148 task->xhlock_idx = UINT_MAX;
5149 task->hist_id = 0;
5150
5151 for (i = 0; i < XHLOCK_CTX_NR; i++) {
5152 task->xhlock_idx_hist[i] = UINT_MAX;
5153 task->hist_id_save[i] = 0;
5154 }
5155
5156 task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
5157 GFP_KERNEL);
5158}
5159
5160void lockdep_free_task(struct task_struct *task)
5161{
5162 if (task->xhlocks) {
5163 void *tmp = task->xhlocks;
5164 /* Disable crossrelease for current */
5165 task->xhlocks = NULL;
5166 kfree(tmp);
5167 }
5168}
5169#endif