 include/linux/irqflags.h |  4
 include/linux/lockdep.h  |  7
 kernel/locking/lockdep.c | 79
 kernel/workqueue.c       |  9
 4 files changed, 48 insertions(+), 51 deletions(-)
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 9bc050bc81b2..5fdd93bb9300 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -26,7 +26,7 @@
 # define trace_hardirq_enter()		\
 do {					\
 	current->hardirq_context++;	\
-	crossrelease_hist_start(XHLOCK_HARD, 0);\
+	crossrelease_hist_start(XHLOCK_HARD);	\
 } while (0)
 # define trace_hardirq_exit()		\
 do {					\
@@ -36,7 +36,7 @@ do { \
 # define lockdep_softirq_enter()	\
 do {					\
 	current->softirq_context++;	\
-	crossrelease_hist_start(XHLOCK_SOFT, 0);\
+	crossrelease_hist_start(XHLOCK_SOFT);	\
 } while (0)
 # define lockdep_softirq_exit()		\
 do {					\
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 78bb7133abed..bfa8e0b0d6f1 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -551,7 +551,6 @@ struct pin_cookie { };
 enum xhlock_context_t {
 	XHLOCK_HARD,
 	XHLOCK_SOFT,
-	XHLOCK_PROC,
 	XHLOCK_CTX_NR,
 };
 
@@ -580,8 +579,9 @@ extern void lock_commit_crosslock(struct lockdep_map *lock);
 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
 	{ .name = (_name), .key = (void *)(_key), .cross = 0, }
 
-extern void crossrelease_hist_start(enum xhlock_context_t c, bool force);
+extern void crossrelease_hist_start(enum xhlock_context_t c);
 extern void crossrelease_hist_end(enum xhlock_context_t c);
+extern void lockdep_invariant_state(bool force);
 extern void lockdep_init_task(struct task_struct *task);
 extern void lockdep_free_task(struct task_struct *task);
 #else /* !CROSSRELEASE */
@@ -593,8 +593,9 @@ extern void lockdep_free_task(struct task_struct *task);
 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
 	{ .name = (_name), .key = (void *)(_key), }
 
-static inline void crossrelease_hist_start(enum xhlock_context_t c, bool force) {}
+static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
 static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
+static inline void lockdep_invariant_state(bool force) {}
 static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
 #endif /* CROSSRELEASE */
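
[Note: the header changes above split one API into two. The per-IRQ-context history save/restore keeps the crossrelease_hist_start()/crossrelease_hist_end() names but loses the force parameter, which migrates to the new lockdep_invariant_state(). A minimal compilable userspace sketch of the resulting surface; all bodies are stubs and the call sites in main() are hypothetical, purely to show where each call now belongs:]

#include <stdbool.h>

/* Mirrors the post-patch enum: XHLOCK_PROC is gone. */
enum xhlock_context_t { XHLOCK_HARD, XHLOCK_SOFT, XHLOCK_CTX_NR };

/* Save/rewind a history point for an IRQ context; no 'force' flag here. */
static void crossrelease_hist_start(enum xhlock_context_t c) { (void)c; }
static void crossrelease_hist_end(enum xhlock_context_t c)   { (void)c; }

/* Task-internal independence is its own operation and owns 'force'. */
static void lockdep_invariant_state(bool force) { (void)force; }

int main(void)
{
        crossrelease_hist_start(XHLOCK_HARD);   /* irq entry */
        crossrelease_hist_end(XHLOCK_HARD);     /* irq exit: rewind history */
        lockdep_invariant_state(false);         /* e.g. on return to userspace */
        return 0;
}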
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f73ca595b81e..44c8d0d17170 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4623,13 +4623,8 @@ asmlinkage __visible void lockdep_sys_exit(void)
 	/*
 	 * The lock history for each syscall should be independent. So wipe the
 	 * slate clean on return to userspace.
-	 *
-	 * crossrelease_hist_end() works well here even when getting here
-	 * without starting (i.e. just after forking), because it rolls back
-	 * the index to point to the last entry, which is already invalid.
 	 */
-	crossrelease_hist_end(XHLOCK_PROC);
-	crossrelease_hist_start(XHLOCK_PROC, false);
+	lockdep_invariant_state(false);
 }
 
 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
@@ -4723,19 +4718,47 @@ static inline void invalidate_xhlock(struct hist_lock *xhlock)
 }
 
 /*
- * Lock history stacks; we have 3 nested lock history stacks:
+ * Lock history stacks; we have 2 nested lock history stacks:
  *
  *   HARD(IRQ)
  *   SOFT(IRQ)
- *   PROC(ess)
  *
  * The thing is that once we complete a HARD/SOFT IRQ the future task locks
  * should not depend on any of the locks observed while running the IRQ. So
  * what we do is rewind the history buffer and erase all our knowledge of that
  * temporal event.
- *
- * The PROCess one is special though; it is used to annotate independence
- * inside a task.
+ */
+
+void crossrelease_hist_start(enum xhlock_context_t c)
+{
+	struct task_struct *cur = current;
+
+	if (!cur->xhlocks)
+		return;
+
+	cur->xhlock_idx_hist[c] = cur->xhlock_idx;
+	cur->hist_id_save[c] = cur->hist_id;
+}
+
+void crossrelease_hist_end(enum xhlock_context_t c)
+{
+	struct task_struct *cur = current;
+
+	if (cur->xhlocks) {
+		unsigned int idx = cur->xhlock_idx_hist[c];
+		struct hist_lock *h = &xhlock(idx);
+
+		cur->xhlock_idx = idx;
+
+		/* Check if the ring was overwritten. */
+		if (h->hist_id != cur->hist_id_save[c])
+			invalidate_xhlock(h);
+	}
+}
+
+/*
+ * lockdep_invariant_state() is used to annotate independence inside a task, to
+ * make one task look like multiple independent 'tasks'.
  *
  * Take for instance workqueues; each work is independent of the last. The
  * completion of a future work does not depend on the completion of a past work
@@ -4758,40 +4781,14 @@ static inline void invalidate_xhlock(struct hist_lock *xhlock)
  * entry. Similarly, independence per-definition means it does not depend on
  * prior state.
  */
-void crossrelease_hist_start(enum xhlock_context_t c, bool force)
+void lockdep_invariant_state(bool force)
 {
-	struct task_struct *cur = current;
-
-	if (!cur->xhlocks)
-		return;
-
 	/*
 	 * We call this at an invariant point, no current state, no history.
+	 * Verify the former, enforce the latter.
 	 */
-	if (c == XHLOCK_PROC) {
-		/* verified the former, ensure the latter */
-		WARN_ON_ONCE(!force && cur->lockdep_depth);
-		invalidate_xhlock(&xhlock(cur->xhlock_idx));
-	}
-
-	cur->xhlock_idx_hist[c] = cur->xhlock_idx;
-	cur->hist_id_save[c] = cur->hist_id;
-}
-
-void crossrelease_hist_end(enum xhlock_context_t c)
-{
-	struct task_struct *cur = current;
-
-	if (cur->xhlocks) {
-		unsigned int idx = cur->xhlock_idx_hist[c];
-		struct hist_lock *h = &xhlock(idx);
-
-		cur->xhlock_idx = idx;
-
-		/* Check if the ring was overwritten. */
-		if (h->hist_id != cur->hist_id_save[c])
-			invalidate_xhlock(h);
-	}
-}
+	WARN_ON_ONCE(!force && current->lockdep_depth);
+	invalidate_xhlock(&xhlock(current->xhlock_idx));
 }
 
 static int cross_lock(struct lockdep_map *lock)
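
[Note: to see the rewind/overwrite logic in isolation, here is a minimal userspace model of the history ring. The ring size, the record() helper and the lock names are invented for the demo, and the kernel's per-task fields become globals; only the save/rewind arithmetic and the hist_id wrap check mirror the functions above:]

#include <stdio.h>

#define MAX_XHLOCKS_NR 8                        /* tiny ring, demo only */

enum xhlock_context_t { XHLOCK_HARD, XHLOCK_SOFT, XHLOCK_CTX_NR };

struct hist_lock {
        unsigned int hist_id;                   /* generation tag per entry */
        const char *name;                       /* stand-in for a real entry */
};

static struct hist_lock ring[MAX_XHLOCKS_NR];
static unsigned int xhlock_idx;                 /* grows monotonically */
static unsigned int hist_id;                    /* grows with every record */
static unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
static unsigned int hist_id_save[XHLOCK_CTX_NR];

#define xhlock(i) (ring[(i) % MAX_XHLOCKS_NR])

static void invalidate_xhlock(struct hist_lock *h)
{
        h->name = NULL;
}

/* Models add_xhlock(): push an entry, tagging it with a fresh hist_id. */
static void record(const char *name)
{
        struct hist_lock *h = &xhlock(++xhlock_idx);

        h->name = name;
        h->hist_id = ++hist_id;
}

static void hist_start(enum xhlock_context_t c)
{
        xhlock_idx_hist[c] = xhlock_idx;
        hist_id_save[c] = hist_id;
}

static void hist_end(enum xhlock_context_t c)
{
        struct hist_lock *h = &xhlock(xhlock_idx_hist[c]);

        xhlock_idx = xhlock_idx_hist[c];        /* rewind: forget the IRQ's locks */
        if (h->hist_id != hist_id_save[c])      /* ring wrapped meanwhile */
                invalidate_xhlock(h);
}

int main(void)
{
        const char *top;

        record("task lock A");
        hist_start(XHLOCK_HARD);                /* irq entry */
        record("irq lock B");                   /* observed inside the irq */
        hist_end(XHLOCK_HARD);                  /* irq exit */

        top = xhlock(xhlock_idx).name;          /* back to "task lock A" */
        printf("top of history: %s\n", top ? top : "(invalidated)");
        return 0;
}

[If the irq had recorded more than MAX_XHLOCKS_NR entries, the wrapped ring would have overwritten the saved slot, the hist_id check would fail, and the entry would be invalidated rather than trusted.]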
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c0331891dec1..ab3c0dc8c7ed 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2094,8 +2094,8 @@ __acquires(&pool->lock)
 	lock_map_acquire(&pwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	/*
-	 * Strictly speaking we should do start(PROC) without holding any
-	 * locks, that is, before these two lock_map_acquire()'s.
+	 * Strictly speaking we should mark the invariant state without holding
+	 * any locks, that is, before these two lock_map_acquire()'s.
 	 *
 	 * However, that would result in:
 	 *
@@ -2107,14 +2107,14 @@ __acquires(&pool->lock)
 	 * Which would create W1->C->W1 dependencies, even though there is no
 	 * actual deadlock possible. There are two solutions, using a
 	 * read-recursive acquire on the work(queue) 'locks', but this will then
-	 * hit the lockdep limitation on recursive locks, or simly discard
+	 * hit the lockdep limitation on recursive locks, or simply discard
 	 * these locks.
 	 *
 	 * AFAICT there is no possible deadlock scenario between the
 	 * flush_work() and complete() primitives (except for single-threaded
 	 * workqueues), so hiding them isn't a problem.
 	 */
-	crossrelease_hist_start(XHLOCK_PROC, true);
+	lockdep_invariant_state(true);
 	trace_workqueue_execute_start(work);
 	worker->current_func(work);
 	/*
@@ -2122,7 +2122,6 @@ __acquires(&pool->lock)
 	 * point will only record its address.
 	 */
 	trace_workqueue_execute_end(work);
-	crossrelease_hist_end(XHLOCK_PROC);
 	lock_map_release(&lockdep_map);
 	lock_map_release(&pwq->wq->lockdep_map);
 
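
[Note: for the ordering the comment above argues about, a runnable stub mock of process_one_work()'s annotation sequence. Every function here is a hypothetical printf stub and the map names are just strings; only the call order is the point:]

#include <stdbool.h>
#include <stdio.h>

static void lock_map_acquire(const char *map)   { printf("acquire %s\n", map); }
static void lock_map_release(const char *map)   { printf("release %s\n", map); }

static void lockdep_invariant_state(bool force)
{
        printf("wipe lock history (force=%d)\n", force);
}

static void current_func(void)                  /* the work item's callback */
{
        printf("run work\n");
}

int main(void)
{
        lock_map_acquire("pwq->wq->lockdep_map");
        lock_map_acquire("lockdep_map");
        /*
         * Wiping *after* the two acquires hides them from the crossrelease
         * machinery, so a complete() inside the work cannot pick up the
         * spurious W1 -> C -> W1 dependency described above. force=true
         * because we deliberately hold the two annotation 'locks' here.
         */
        lockdep_invariant_state(true);
        current_func();
        lock_map_release("lockdep_map");
        lock_map_release("pwq->wq->lockdep_map");
        return 0;
}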