author     Byungchul Park <byungchul.park@lge.com>   2017-08-07 03:12:52 -0400
committer  Ingo Molnar <mingo@kernel.org>            2017-08-10 06:29:07 -0400
commit     b09be676e0ff25bd6d2e7637e26d349f9109ad75
tree       8dc82ceaa001d29bb07a51f5c4814759b303e8e4 /kernel
parent     ce07a9415f266e181a0a33033a5f7138760240a4
locking/lockdep: Implement the 'crossrelease' feature
Lockdep is a runtime locking correctness validator that detects and reports a deadlock, or the possibility of one, by checking dependencies between locks. It is useful because it reports not only actual deadlocks but also deadlocks that have not happened yet, so problems can be fixed before they affect real systems.

However, this facility is only applicable to typical locks such as spinlocks and mutexes, which are normally released within the context in which they were acquired. Synchronization primitives like page locks or completions, which may be released in any context, also create dependencies and can cause a deadlock, so lockdep should track them as well to do a better job. The 'crossrelease' implementation makes these primitives tracked too.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akpm@linux-foundation.org
Cc: boqun.feng@gmail.com
Cc: kernel-team@lge.com
Cc: kirill@shutemov.name
Cc: npiggin@gmail.com
Cc: walken@google.com
Cc: willy@infradead.org
Link: http://lkml.kernel.org/r/1502089981-21272-6-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
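As an illustration of the dependency shape described above (not part of this patch; the lock and function names below are hypothetical, and the annotations that make completions tracked automatically only arrive in follow-up patches), consider a waiter that holds a mutex across a wait and a completer that takes another mutex on its way to signalling:

    #include <linux/mutex.h>
    #include <linux/completion.h>

    static DEFINE_MUTEX(a);                 /* hypothetical mutex A */
    static DEFINE_MUTEX(b);                 /* hypothetical mutex B */
    static DECLARE_COMPLETION(c);           /* hypothetical completion C */

    static void waiter(void)
    {
            mutex_lock(&a);
            /* A is held while acquiring the crosslock C: records A -> C. */
            wait_for_completion(&c);
            mutex_unlock(&a);
    }

    static void completer(void)
    {
            mutex_lock(&b);
            mutex_unlock(&b);
            /* B was taken on the way to complete(): commit records C -> B. */
            complete(&c);
    }

Classic lockdep cannot connect C to B because the completion is "released" by complete() in another context. With crossrelease, the wait side is recorded as a crosslock, and at complete() time the locks taken in the completer's context since the wait began are committed as dependencies of C. Together with A -> C from the waiter, any B -> A dependency elsewhere then closes the cycle and is reported as a deadlock possibility.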
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/exit.c            |   1
 -rw-r--r--  kernel/fork.c            |   4
 -rw-r--r--  kernel/locking/lockdep.c | 508
 -rw-r--r--  kernel/workqueue.c       |   2
4 files changed, 492 insertions, 23 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index c5548faa9f37..fa72d57db747 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -920,6 +920,7 @@ void __noreturn do_exit(long code)
         exit_rcu();
         TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i));
 
+        lockdep_free_task(tsk);
         do_task_dead();
 }
 EXPORT_SYMBOL_GPL(do_exit);
diff --git a/kernel/fork.c b/kernel/fork.c
index 17921b0390b4..cbf2221ee81a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -484,6 +484,8 @@ void __init fork_init(void)
         cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
                           NULL, free_vm_stack_cache);
 #endif
+
+        lockdep_init_task(&init_task);
 }
 
 int __weak arch_dup_task_struct(struct task_struct *dst,
@@ -1691,6 +1693,7 @@ static __latent_entropy struct task_struct *copy_process(
         p->lockdep_depth = 0; /* no locks held yet */
         p->curr_chain_key = 0;
         p->lockdep_recursion = 0;
+        lockdep_init_task(p);
 #endif
 
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -1949,6 +1952,7 @@ bad_fork_cleanup_audit:
 bad_fork_cleanup_perf:
         perf_event_free_task(p);
 bad_fork_cleanup_policy:
+        lockdep_free_task(p);
 #ifdef CONFIG_NUMA
         mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 841828ba35b9..56f69cc53ddc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -58,6 +58,10 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>
 
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+#include <linux/slab.h>
+#endif
+
 #ifdef CONFIG_PROVE_LOCKING
 int prove_locking = 1;
 module_param(prove_locking, int, 0644);
@@ -724,6 +728,18 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
         return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
 }
 
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+static void cross_init(struct lockdep_map *lock, int cross);
+static int cross_lock(struct lockdep_map *lock);
+static int lock_acquire_crosslock(struct held_lock *hlock);
+static int lock_release_crosslock(struct lockdep_map *lock);
+#else
+static inline void cross_init(struct lockdep_map *lock, int cross) {}
+static inline int cross_lock(struct lockdep_map *lock) { return 0; }
+static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; }
+static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; }
+#endif
+
 /*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1795,6 +1811,9 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
                 if (nest)
                         return 2;
 
+                if (cross_lock(prev->instance))
+                        continue;
+
                 return print_deadlock_bug(curr, prev, next);
         }
         return 1;
@@ -1962,30 +1981,36 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                 int distance = curr->lockdep_depth - depth + 1;
                 hlock = curr->held_locks + depth - 1;
                 /*
-                 * Only non-recursive-read entries get new dependencies
-                 * added:
+                 * Only non-crosslock entries get new dependencies added.
+                 * Crosslock entries will be added by commit later:
                  */
-                if (hlock->read != 2 && hlock->check) {
-                        int ret = check_prev_add(curr, hlock, next,
-                                                distance, &trace, save);
-                        if (!ret)
-                                return 0;
-
+                if (!cross_lock(hlock->instance)) {
                         /*
-                         * Stop saving stack_trace if save_trace() was
-                         * called at least once:
+                         * Only non-recursive-read entries get new dependencies
+                         * added:
                          */
-                        if (save && ret == 2)
-                                save = NULL;
+                        if (hlock->read != 2 && hlock->check) {
+                                int ret = check_prev_add(curr, hlock, next,
+                                                        distance, &trace, save);
+                                if (!ret)
+                                        return 0;
 
-                        /*
-                         * Stop after the first non-trylock entry,
-                         * as non-trylock entries have added their
-                         * own direct dependencies already, so this
-                         * lock is connected to them indirectly:
-                         */
-                        if (!hlock->trylock)
-                                break;
+                                /*
+                                 * Stop saving stack_trace if save_trace() was
+                                 * called at least once:
+                                 */
+                                if (save && ret == 2)
+                                        save = NULL;
+
+                                /*
+                                 * Stop after the first non-trylock entry,
+                                 * as non-trylock entries have added their
+                                 * own direct dependencies already, so this
+                                 * lock is connected to them indirectly:
+                                 */
+                                if (!hlock->trylock)
+                                        break;
+                        }
                 }
                 depth--;
                 /*
@@ -3176,7 +3201,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 /*
  * Initialize a lock instance's lock-class mapping info:
  */
-void lockdep_init_map(struct lockdep_map *lock, const char *name,
+static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
                       struct lock_class_key *key, int subclass)
 {
         int i;
@@ -3234,8 +3259,25 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
                 raw_local_irq_restore(flags);
         }
 }
+
+void lockdep_init_map(struct lockdep_map *lock, const char *name,
+                      struct lock_class_key *key, int subclass)
+{
+        cross_init(lock, 0);
+        __lockdep_init_map(lock, name, key, subclass);
+}
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name,
+                      struct lock_class_key *key, int subclass)
+{
+        cross_init(lock, 1);
+        __lockdep_init_map(lock, name, key, subclass);
+}
+EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock);
+#endif
+
 struct lock_class_key __lockdep_no_validate__;
 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
 
@@ -3291,6 +3333,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         int chain_head = 0;
         int class_idx;
         u64 chain_key;
+        int ret;
 
         if (unlikely(!debug_locks))
                 return 0;
@@ -3339,7 +3382,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
         class_idx = class - lock_classes + 1;
 
-        if (depth) {
+        /* TODO: nest_lock is not implemented for crosslock yet. */
+        if (depth && !cross_lock(lock)) {
                 hlock = curr->held_locks + depth - 1;
                 if (hlock->class_idx == class_idx && nest_lock) {
                         if (hlock->references) {
@@ -3427,6 +3471,14 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
                 return 0;
 
+        ret = lock_acquire_crosslock(hlock);
+        /*
+         * 2 means normal acquire operations are needed. Otherwise, it's
+         * ok just to return with '0:fail, 1:success'.
+         */
+        if (ret != 2)
+                return ret;
+
         curr->curr_chain_key = chain_key;
         curr->lockdep_depth++;
         check_chain_key(curr);
@@ -3664,11 +3716,19 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
         struct task_struct *curr = current;
         struct held_lock *hlock;
         unsigned int depth;
-        int i;
+        int ret, i;
 
         if (unlikely(!debug_locks))
                 return 0;
 
+        ret = lock_release_crosslock(lock);
+        /*
+         * 2 means normal release operations are needed. Otherwise, it's
+         * ok just to return with '0:fail, 1:success'.
+         */
+        if (ret != 2)
+                return ret;
+
         depth = curr->lockdep_depth;
         /*
          * So we're all set to release this lock.. wait what lock? We don't
@@ -4532,6 +4592,13 @@ asmlinkage __visible void lockdep_sys_exit(void)
                         curr->comm, curr->pid);
                 lockdep_print_held_locks(curr);
         }
+
+        /*
+         * The lock history for each syscall should be independent. So wipe the
+         * slate clean on return to userspace.
+         */
+        crossrelease_hist_end(XHLOCK_PROC);
+        crossrelease_hist_start(XHLOCK_PROC);
 }
 
 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
@@ -4580,3 +4647,398 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
         dump_stack();
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
+
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+
+/*
+ * Crossrelease works by recording a lock history for each thread and
+ * connecting those historic locks that were taken after the
+ * wait_for_completion() in the complete() context.
+ *
+ *   Task-A                           Task-B
+ *
+ *   mutex_lock(&A);
+ *   mutex_unlock(&A);
+ *
+ *   wait_for_completion(&C);
+ *     lock_acquire_crosslock();
+ *       atomic_inc_return(&cross_gen_id);
+ *                               |
+ *                               |     mutex_lock(&B);
+ *                               |     mutex_unlock(&B);
+ *                               |
+ *                               |     complete(&C);
+ *                               `--     lock_commit_crosslock();
+ *
+ * Which will then add a dependency between B and C.
+ */
+
+#define xhlock(i)         (current->xhlocks[(i) % MAX_XHLOCKS_NR])
+
+/*
+ * Whenever a crosslock is held, cross_gen_id will be increased.
+ */
+static atomic_t cross_gen_id; /* Can be wrapped */
+
+/*
+ * Lock history stacks; we have 3 nested lock history stacks:
+ *
+ * Hard IRQ
+ * Soft IRQ
+ * History / Task
+ *
+ * The thing is that once we complete a (Hard/Soft) IRQ the future task locks
+ * should not depend on any of the locks observed while running the IRQ.
+ *
+ * So what we do is rewind the history buffer and erase all our knowledge of
+ * that temporal event.
+ */
+
+/*
+ * We need this to annotate lock history boundaries. Take for instance
+ * workqueues; each work is independent of the last. The completion of a future
+ * work does not depend on the completion of a past work (in general).
+ * Therefore we must not carry that (lock) dependency across works.
+ *
+ * This is true for many things; pretty much all kthreads fall into this
+ * pattern, where they have an 'idle' state and future completions do not
+ * depend on past completions. It's just that since they all have the 'same'
+ * form -- the kthread does the same over and over -- it doesn't typically
+ * matter.
+ *
+ * The same is true for system-calls, once a system call is completed (we've
+ * returned to userspace) the next system call does not depend on the lock
+ * history of the previous system call.
+ */
+void crossrelease_hist_start(enum xhlock_context_t c)
+{
+        if (current->xhlocks)
+                current->xhlock_idx_hist[c] = current->xhlock_idx;
+}
+
+void crossrelease_hist_end(enum xhlock_context_t c)
+{
+        if (current->xhlocks)
+                current->xhlock_idx = current->xhlock_idx_hist[c];
+}
+
+static int cross_lock(struct lockdep_map *lock)
+{
+        return lock ? lock->cross : 0;
+}
+
+/*
+ * This is needed to decide the relationship between wrappable variables.
+ */
+static inline int before(unsigned int a, unsigned int b)
+{
+        return (int)(a - b) < 0;
+}
+
+static inline struct lock_class *xhlock_class(struct hist_lock *xhlock)
+{
+        return hlock_class(&xhlock->hlock);
+}
+
+static inline struct lock_class *xlock_class(struct cross_lock *xlock)
+{
+        return hlock_class(&xlock->hlock);
+}
+
+/*
+ * Should we check a dependency with previous one?
+ */
+static inline int depend_before(struct held_lock *hlock)
+{
+        return hlock->read != 2 && hlock->check && !hlock->trylock;
+}
+
+/*
+ * Should we check a dependency with next one?
+ */
+static inline int depend_after(struct held_lock *hlock)
+{
+        return hlock->read != 2 && hlock->check;
+}
+
+/*
+ * Check if the xhlock is valid, which would be false if,
+ *
+ *    1. Has not been used after initialization yet.
+ *
+ * Remember that hist_lock is implemented as a ring buffer.
+ */
+static inline int xhlock_valid(struct hist_lock *xhlock)
+{
+        /*
+         * xhlock->hlock.instance must be !NULL.
+         */
+        return !!xhlock->hlock.instance;
+}
+
+/*
+ * Record a hist_lock entry.
+ *
+ * Only irq disable is required.
+ */
+static void add_xhlock(struct held_lock *hlock)
+{
+        unsigned int idx = ++current->xhlock_idx;
+        struct hist_lock *xhlock = &xhlock(idx);
+
+#ifdef CONFIG_DEBUG_LOCKDEP
+        /*
+         * This can be done locklessly because they are all task-local
+         * state, we must however ensure IRQs are disabled.
+         */
+        WARN_ON_ONCE(!irqs_disabled());
+#endif
+
+        /* Initialize hist_lock's members */
+        xhlock->hlock = *hlock;
+
+        xhlock->trace.nr_entries = 0;
+        xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
+        xhlock->trace.entries = xhlock->trace_entries;
+        xhlock->trace.skip = 3;
+        save_stack_trace(&xhlock->trace);
+}
+
+static inline int same_context_xhlock(struct hist_lock *xhlock)
+{
+        return xhlock->hlock.irq_context == task_irq_context(current);
+}
+
+/*
+ * This should be lockless as far as possible because this would be
+ * called very frequently.
+ */
+static void check_add_xhlock(struct held_lock *hlock)
+{
+        /*
+         * Record a hist_lock, only in case that acquisitions ahead
+         * could depend on the held_lock. For example, if the held_lock
+         * is trylock then acquisitions ahead never depend on that.
+         * In that case, we don't need to record it. Just return.
+         */
+        if (!current->xhlocks || !depend_before(hlock))
+                return;
+
+        add_xhlock(hlock);
+}
+
+/*
+ * For crosslock.
+ */
+static int add_xlock(struct held_lock *hlock)
+{
+        struct cross_lock *xlock;
+        unsigned int gen_id;
+
+        if (!graph_lock())
+                return 0;
+
+        xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;
+
+        gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
+        xlock->hlock = *hlock;
+        xlock->hlock.gen_id = gen_id;
+        graph_unlock();
+
+        return 1;
+}
+
+/*
+ * Called for both normal and crosslock acquires. Normal locks will be
+ * pushed on the hist_lock queue. Cross locks will record state and
+ * stop regular lock_acquire() to avoid being placed on the held_lock
+ * stack.
+ *
+ * Return: 0 - failure;
+ *         1 - crosslock, done;
+ *         2 - normal lock, continue to held_lock[] ops.
+ */
+static int lock_acquire_crosslock(struct held_lock *hlock)
+{
+        /*
+         *      CONTEXT 1               CONTEXT 2
+         *      ---------               ---------
+         *      lock A (cross)
+         *      X = atomic_inc_return(&cross_gen_id)
+         *      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+         *                              Y = atomic_read_acquire(&cross_gen_id)
+         *                              lock B
+         *
+         * atomic_read_acquire() is for ordering between A and B,
+         * IOW, A happens before B, when CONTEXT 2 sees Y >= X.
+         *
+         * Pairs with atomic_inc_return() in add_xlock().
+         */
+        hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id);
+
+        if (cross_lock(hlock->instance))
+                return add_xlock(hlock);
+
+        check_add_xhlock(hlock);
+        return 2;
+}
+
+static int copy_trace(struct stack_trace *trace)
+{
+        unsigned long *buf = stack_trace + nr_stack_trace_entries;
+        unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+        unsigned int nr = min(max_nr, trace->nr_entries);
+
+        trace->nr_entries = nr;
+        memcpy(buf, trace->entries, nr * sizeof(trace->entries[0]));
+        trace->entries = buf;
+        nr_stack_trace_entries += nr;
+
+        if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
+                if (!debug_locks_off_graph_unlock())
+                        return 0;
+
+                print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
+                dump_stack();
+
+                return 0;
+        }
+
+        return 1;
+}
+
+static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
+{
+        unsigned int xid, pid;
+        u64 chain_key;
+
+        xid = xlock_class(xlock) - lock_classes;
+        chain_key = iterate_chain_key((u64)0, xid);
+        pid = xhlock_class(xhlock) - lock_classes;
+        chain_key = iterate_chain_key(chain_key, pid);
+
+        if (lookup_chain_cache(chain_key))
+                return 1;
+
+        if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context,
+                                chain_key))
+                return 0;
+
+        if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1,
+                            &xhlock->trace, copy_trace))
+                return 0;
+
+        return 1;
+}
+
+static void commit_xhlocks(struct cross_lock *xlock)
+{
+        unsigned int cur = current->xhlock_idx;
+        unsigned int i;
+
+        if (!graph_lock())
+                return;
+
+        for (i = 0; i < MAX_XHLOCKS_NR; i++) {
+                struct hist_lock *xhlock = &xhlock(cur - i);
+
+                if (!xhlock_valid(xhlock))
+                        break;
+
+                if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
+                        break;
+
+                if (!same_context_xhlock(xhlock))
+                        break;
+
+                /*
+                 * commit_xhlock() returns 0 with graph_lock already
+                 * released if it fails.
+                 */
+                if (!commit_xhlock(xlock, xhlock))
+                        return;
+        }
+
+        graph_unlock();
+}
+
+void lock_commit_crosslock(struct lockdep_map *lock)
+{
+        struct cross_lock *xlock;
+        unsigned long flags;
+
+        if (unlikely(!debug_locks || current->lockdep_recursion))
+                return;
+
+        if (!current->xhlocks)
+                return;
+
+        /*
+         * Do commit hist_locks with the cross_lock, only in case that
+         * the cross_lock could depend on acquisitions after that.
+         *
+         * For example, if the cross_lock does not have the 'check' flag
+         * then we don't need to check dependencies and commit for that.
+         * Just skip it. In that case, of course, the cross_lock does
+         * not depend on acquisitions ahead, either.
+         *
+         * WARNING: Don't do that in add_xlock() in advance. When an
+         * acquisition context is different from the commit context,
+         * invalid(skipped) cross_lock might be accessed.
+         */
+        if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock))
+                return;
+
+        raw_local_irq_save(flags);
+        check_flags(flags);
+        current->lockdep_recursion = 1;
+        xlock = &((struct lockdep_map_cross *)lock)->xlock;
+        commit_xhlocks(xlock);
+        current->lockdep_recursion = 0;
+        raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_commit_crosslock);
+
+/*
+ * Return: 1 - crosslock, done;
+ *         2 - normal lock, continue to held_lock[] ops.
+ */
+static int lock_release_crosslock(struct lockdep_map *lock)
+{
+        return cross_lock(lock) ? 1 : 2;
+}
+
+static void cross_init(struct lockdep_map *lock, int cross)
+{
+        lock->cross = cross;
+
+        /*
+         * Crossrelease assumes that the ring buffer size of xhlocks
+         * is aligned with power of 2. So force it on build.
+         */
+        BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1));
+}
+
+void lockdep_init_task(struct task_struct *task)
+{
+        int i;
+
+        task->xhlock_idx = UINT_MAX;
+
+        for (i = 0; i < XHLOCK_CTX_NR; i++)
+                task->xhlock_idx_hist[i] = UINT_MAX;
+
+        task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
+                                GFP_KERNEL);
+}
+
+void lockdep_free_task(struct task_struct *task)
+{
+        if (task->xhlocks) {
+                void *tmp = task->xhlocks;
+                /* Disable crossrelease for current */
+                task->xhlocks = NULL;
+                kfree(tmp);
+        }
+}
+#endif
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ca937b0c3a96..e86733a8b344 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2093,6 +2093,7 @@ __acquires(&pool->lock)
 
         lock_map_acquire_read(&pwq->wq->lockdep_map);
         lock_map_acquire(&lockdep_map);
+        crossrelease_hist_start(XHLOCK_PROC);
         trace_workqueue_execute_start(work);
         worker->current_func(work);
         /*
@@ -2100,6 +2101,7 @@ __acquires(&pool->lock)
          * point will only record its address.
          */
         trace_workqueue_execute_end(work);
+        crossrelease_hist_end(XHLOCK_PROC);
         lock_map_release(&lockdep_map);
         lock_map_release(&pwq->wq->lockdep_map);
 
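For context, here is a minimal sketch of how a wait/complete style primitive could be wired to the interfaces this patch exports. The primitive (struct my_event) and its helpers are hypothetical, lockdep_init_map_crosslock() and lock_commit_crosslock() require CONFIG_LOCKDEP_CROSSRELEASE=y, and the real annotations for completions arrive in follow-up patches:

    #include <linux/lockdep.h>

    struct my_event {
            struct lockdep_map_cross map;   /* struct lockdep_map is its first member */
            /* ... the primitive's own wait/signal state ... */
    };

    static struct lock_class_key my_event_key;      /* hypothetical key */

    static void my_event_init(struct my_event *e)
    {
            /* Register the map as a crosslock (cross_init(lock, 1) inside). */
            lockdep_init_map_crosslock((struct lockdep_map *)&e->map,
                                       "my_event", &my_event_key, 0);
    }

    static void my_event_wait(struct my_event *e)
    {
            /*
             * The acquire is diverted by lock_acquire_crosslock(): the
             * crosslock records the current cross_gen_id instead of being
             * pushed on the held_locks stack.
             */
            lock_acquire_exclusive((struct lockdep_map *)&e->map, 0, 0,
                                   NULL, _RET_IP_);
            /* ... block until my_event_signal() ... */
            lock_release((struct lockdep_map *)&e->map, 0, _RET_IP_);
    }

    static void my_event_signal(struct my_event *e)
    {
            /*
             * Commit the locks this context has taken since the waiter's
             * generation as dependencies of the crosslock.
             */
            lock_commit_crosslock((struct lockdep_map *)&e->map);
            /* ... wake the waiter ... */
    }

The wait side thus supplies the "acquire" half of a lock, while the signal side supplies the "release" half from whatever context it happens to run in, which is exactly the cross-context case the machinery above is built to track.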