author     Bart Van Assche <bvanassche@acm.org>   2019-07-22 14:24:42 -0400
committer  Ingo Molnar <mingo@kernel.org>         2019-07-25 09:43:27 -0400
commit     12593b7467f9130b64a6d4b6a26ed4ec217b6784
tree       6b91a989017a0b47d630294679264366628a6931  /include/linux/lockdep.h
parent     a2970421640bd9b6a78f2685d7750a791abdfd4e
locking/lockdep: Reduce space occupied by stack traces
Although commit 669de8bda87b ("kernel/workqueue: Use dynamic lockdep keys
for workqueues") unregisters dynamic lockdep keys when a workqueue is
destroyed, a side effect of that commit is that all stack traces
associated with the lockdep key are leaked when that workqueue is destroyed.
Fix this by storing each unique stack trace once; a minimal sketch of that
deduplication idea follows the list below. Other changes in this patch are:
- Use NULL instead of { .nr_entries = 0 } to represent 'no trace'.
- Store a pointer to a stack trace in struct lock_class and struct
lock_list instead of storing 'nr_entries' and 'offset'.
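For context, here is a minimal userspace sketch of the "store each unique
stack trace once" idea. It is only an illustration under simplifying
assumptions: the struct layout, the names trace_pool, save_trace_dedup and
MAX_TRACES, and the linear lookup are invented for this sketch (the actual
patch keeps traces in lockdep's internal storage and finds duplicates
through a hash), but it shows why a const struct lock_trace * in
struct lock_class and struct lock_list is sufficient once identical traces
are shared.

    /*
     * Sketch only: not the kernel implementation. Shows "store each unique
     * trace once and hand out a pointer to the stored copy".
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_TRACES      128
    #define MAX_ENTRIES     16

    struct lock_trace {                     /* simplified, not the kernel's */
            unsigned int    nr_entries;
            unsigned long   entries[MAX_ENTRIES];
    };

    static struct lock_trace trace_pool[MAX_TRACES];
    static unsigned int nr_traces;

    static bool traces_identical(const struct lock_trace *a,
                                 const struct lock_trace *b)
    {
            return a->nr_entries == b->nr_entries &&
                   !memcmp(a->entries, b->entries,
                           a->nr_entries * sizeof(a->entries[0]));
    }

    /* Return a pointer to the stored copy, reusing an identical earlier trace. */
    static const struct lock_trace *save_trace_dedup(const struct lock_trace *t)
    {
            unsigned int i;

            /* Linear scan keeps the sketch short; the real code hashes. */
            for (i = 0; i < nr_traces; i++)
                    if (traces_identical(&trace_pool[i], t))
                            return &trace_pool[i];

            if (nr_traces == MAX_TRACES)
                    return NULL;            /* pool exhausted: "no trace" */

            trace_pool[nr_traces] = *t;
            return &trace_pool[nr_traces++];
    }

    int main(void)
    {
            struct lock_trace t = { .nr_entries = 2,
                                    .entries = { 0x1234, 0x5678 } };
            const struct lock_trace *a = save_trace_dedup(&t);
            const struct lock_trace *b = save_trace_dedup(&t);

            /* Saving the same trace twice yields the same pointer. */
            printf("deduplicated: %s\n", a == b ? "yes" : "no");
            return 0;
    }

Because identical traces are shared this way, releasing a lockdep key no
longer needs to discard per-key trace storage, and NULL can stand in for
"no trace".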
This patch prevents the following program from triggering the "BUG:
MAX_STACK_TRACE_ENTRIES too low!" complaint:
    #include <fcntl.h>
    #include <unistd.h>

    int main()
    {
            for (;;) {
                    int fd = open("/dev/infiniband/rdma_cm", O_RDWR);
                    close(fd);
            }
    }
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Reported-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yuyang Du <duyuyang@gmail.com>
Link: https://lkml.kernel.org/r/20190722182443.216015-4-bvanassche@acm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/lockdep.h')
-rw-r--r--   include/linux/lockdep.h | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index cdb3c2f06092..b8a835fd611b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -66,10 +66,7 @@ struct lock_class_key {
 
 extern struct lock_class_key __lockdep_no_validate__;
 
-struct lock_trace {
-        unsigned int            nr_entries;
-        unsigned int            offset;
-};
+struct lock_trace;
 
 #define LOCKSTAT_POINTS         4
 
@@ -105,7 +102,7 @@ struct lock_class {
          * IRQ/softirq usage tracking bits:
          */
         unsigned long                   usage_mask;
-        struct lock_trace               usage_traces[XXX_LOCK_USAGE_STATES];
+        const struct lock_trace         *usage_traces[XXX_LOCK_USAGE_STATES];
 
         /*
          * Generation counter, when doing certain classes of graph walking,
@@ -193,7 +190,7 @@ struct lock_list {
         struct list_head                entry;
         struct lock_class               *class;
         struct lock_class               *links_to;
-        struct lock_trace               trace;
+        const struct lock_trace         *trace;
         int                             distance;
 
         /*