author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2010-02-22 20:04:45 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-02-25 03:40:59 -0500
commit	632ee200130899252508c478ad0e808222573fbc (patch)
tree	e3f44ca9daf2a2da2186419f703bc9cec8faa058
parent	996de8c6fe95c5a9fc524241cc8f142ef0605d3d (diff)
rcu: Introduce lockdep-based checking to RCU read-side primitives
Inspection is proving insufficient to catch all RCU misuses, which is
understandable given that rcu_dereference() might be protected by any of
four different flavors of RCU (RCU, RCU-bh, RCU-sched, and SRCU), and
might also/instead be protected by any of a number of locking primitives.
It is therefore time to enlist the aid of lockdep.

This set of patches is inspired by earlier work by Peter Zijlstra and
Thomas Gleixner, and takes the following approach:

o	Set up separate lockdep classes for RCU, RCU-bh, and RCU-sched.

o	Set up separate lockdep classes for each instance of SRCU.

o	Create primitives that check for being in an RCU read-side
	critical section.  These return exact answers if lockdep is fully
	enabled, but if unsure, report being in an RCU read-side critical
	section.  (We want to avoid false positives!)  The primitives are:

	For RCU: rcu_read_lock_held(void)
	For RCU-bh: rcu_read_lock_bh_held(void)
	For RCU-sched: rcu_read_lock_sched_held(void)
	For SRCU: srcu_read_lock_held(struct srcu_struct *sp)

o	Add rcu_dereference_check(), which takes a second argument in
	which one places a boolean expression based on the above
	primitives and/or lockdep_is_held().

o	A new kernel configuration parameter, CONFIG_PROVE_RCU, enables
	rcu_dereference_check().  This depends on CONFIG_PROVE_LOCKING,
	and should be quite helpful during the transition period while
	CONFIG_PROVE_RCU-unaware patches are in flight.

The existing rcu_dereference() primitive does no checking, but upcoming
patches will change that.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-1-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
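As an illustration of the intended usage (this sketch is not part of the
patch; gp, gp_lock, struct foo, and read_gp_a() are hypothetical names
chosen for the example), a subsystem whose pointer is protected either by
RCU or by an update-side spinlock could combine rcu_dereference_check()
with the new primitives and lockdep_is_held() like this:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Hypothetical example, not part of this patch. */
struct foo {
	int a;
};

static struct foo *gp;			/* RCU-protected, updated under gp_lock */
static DEFINE_SPINLOCK(gp_lock);

static int read_gp_a(void)
{
	struct foo *p;
	int a = -1;

	rcu_read_lock();
	/*
	 * With CONFIG_PROVE_RCU=y, this warns (once) if called outside
	 * an RCU read-side critical section and without gp_lock held.
	 */
	p = rcu_dereference_check(gp,
				  rcu_read_lock_held() ||
				  lockdep_is_held(&gp_lock));
	if (p)
		a = p->a;
	rcu_read_unlock();
	return a;
}

The second argument is an ordinary boolean expression, so any combination
of the per-flavor *_held() helpers and lockdep_is_held() may be supplied,
as the rcutorture conversion below demonstrates.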
-rw-r--r--	include/linux/rcupdate.h	126
-rw-r--r--	include/linux/srcu.h	87
-rw-r--r--	kernel/rcupdate.c	10
-rw-r--r--	kernel/rcutorture.c	12
-rw-r--r--	kernel/srcu.c	50
-rw-r--r--	lib/Kconfig.debug	12
-rw-r--r--	lib/debug_locks.c	1
7 files changed, 267 insertions, 31 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 24440f4bf476..e3d37efe2703 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -78,14 +78,120 @@ extern void rcu_init(void);
 } while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
 extern struct lockdep_map rcu_lock_map;
 # define rcu_read_acquire() \
 		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
 # define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire()	do { } while (0)
-# define rcu_read_release()	do { } while (0)
-#endif
+
+extern struct lockdep_map rcu_bh_lock_map;
+# define rcu_read_acquire_bh() \
+		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_bh()	lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)
+
+extern struct lockdep_map rcu_sched_lock_map;
+# define rcu_read_acquire_sched() \
+		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_sched() \
+		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
+
+/**
+ * rcu_read_lock_held - might we be in RCU read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int rcu_read_lock_held(void)
+{
+	if (debug_locks)
+		return lock_is_held(&rcu_lock_map);
+	return 1;
+}
+
+/**
+ * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an RCU-bh read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU-bh read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int rcu_read_lock_bh_held(void)
+{
+	if (debug_locks)
+		return lock_is_held(&rcu_bh_lock_map);
+	return 1;
+}
+
+/**
+ * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU-sched read-side critical section unless it
+ * can prove otherwise.  Note that disabling of preemption (including
+ * disabling irqs) counts as an RCU-sched read-side critical section.
+ */
+static inline int rcu_read_lock_sched_held(void)
+{
+	int lockdep_opinion = 0;
+
+	if (debug_locks)
+		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
+	return lockdep_opinion || preempt_count() != 0;
+}
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+# define rcu_read_acquire()		do { } while (0)
+# define rcu_read_release()		do { } while (0)
+# define rcu_read_acquire_bh()		do { } while (0)
+# define rcu_read_release_bh()		do { } while (0)
+# define rcu_read_acquire_sched()	do { } while (0)
+# define rcu_read_release_sched()	do { } while (0)
+
+static inline int rcu_read_lock_held(void)
+{
+	return 1;
+}
+
+static inline int rcu_read_lock_bh_held(void)
+{
+	return 1;
+}
+
+static inline int rcu_read_lock_sched_held(void)
+{
+	return preempt_count() != 0;
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+#ifdef CONFIG_PROVE_RCU
+
+/**
+ * rcu_dereference_check - rcu_dereference with debug checking
+ *
+ * Do an rcu_dereference(), but check that the context is correct.
+ * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
+ * ensure that the rcu_dereference_check() executes within an RCU
+ * read-side critical section.  It is also possible to check for
+ * locks being held, for example, by using lockdep_is_held().
+ */
+#define rcu_dereference_check(p, c) \
+	({ \
+		if (debug_locks) \
+			WARN_ON_ONCE(!(c)); \
+		rcu_dereference(p); \
+	})
+
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_dereference_check(p, c)	rcu_dereference(p)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
@@ -160,7 +266,7 @@ static inline void rcu_read_lock_bh(void)
 {
 	__rcu_read_lock_bh();
 	__acquire(RCU_BH);
-	rcu_read_acquire();
+	rcu_read_acquire_bh();
 }
 
 /*
@@ -170,7 +276,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	rcu_read_release();
+	rcu_read_release_bh();
 	__release(RCU_BH);
 	__rcu_read_unlock_bh();
 }
@@ -188,7 +294,7 @@ static inline void rcu_read_lock_sched(void)
 {
 	preempt_disable();
 	__acquire(RCU_SCHED);
-	rcu_read_acquire();
+	rcu_read_acquire_sched();
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -205,7 +311,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-	rcu_read_release();
+	rcu_read_release_sched();
 	__release(RCU_SCHED);
 	preempt_enable();
 }
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4765d97dcafb..adbe1670b366 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -35,6 +35,9 @@ struct srcu_struct {
 	int completed;
 	struct srcu_struct_array *per_cpu_ref;
 	struct mutex mutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 };
 
 #ifndef CONFIG_PREEMPT
@@ -43,12 +46,92 @@ struct srcu_struct {
 #define srcu_barrier()
 #endif /* #else #ifndef CONFIG_PREEMPT */
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+		       struct lock_class_key *key);
+
+#define init_srcu_struct(sp) \
+({ \
+	static struct lock_class_key __srcu_key; \
+	\
+	__init_srcu_struct((sp), #sp, &__srcu_key); \
+})
+
+# define srcu_read_acquire(sp) \
+		lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define srcu_read_release(sp) \
+		lock_release(&(sp)->dep_map, 1, _THIS_IP_)
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 int init_srcu_struct(struct srcu_struct *sp);
+
+# define srcu_read_acquire(sp)	do { } while (0)
+# define srcu_read_release(sp)	do { } while (0)
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 void cleanup_srcu_struct(struct srcu_struct *sp);
-int srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
-void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
+int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
+void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
 void synchronize_srcu(struct srcu_struct *sp);
 void synchronize_srcu_expedited(struct srcu_struct *sp);
 long srcu_batches_completed(struct srcu_struct *sp);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+/**
+ * srcu_read_lock_held - might we be in SRCU read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an SRCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an SRCU read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int srcu_read_lock_held(struct srcu_struct *sp)
+{
+	if (debug_locks)
+		return lock_is_held(&sp->dep_map);
+	return 1;
+}
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline int srcu_read_lock_held(struct srcu_struct *sp)
+{
+	return 1;
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+/**
+ * srcu_read_lock - register a new reader for an SRCU-protected structure.
+ * @sp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section.  Note that SRCU read-side
+ * critical sections may be nested.
+ */
+static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
+{
+	int retval = __srcu_read_lock(sp);
+
+	srcu_read_acquire(sp);
+	return retval;
+}
+
+/**
+ * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
+ * @sp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock().
+ *
+ * Exit an SRCU read-side critical section.
+ */
+static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
+	__releases(sp)
+{
+	srcu_read_release(sp);
+	__srcu_read_unlock(sp, idx);
+}
+
 #endif
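Because init_srcu_struct() is now a macro that hands a per-call-site static
lock_class_key to __init_srcu_struct(), each srcu_struct gets its own lockdep
class, and srcu_read_lock_held() checks that specific dep_map. A sketch of
the resulting usage, again with made-up names (my_srcu, my_ptr, struct foo)
that are not part of the patch:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/srcu.h>

/* Hypothetical example, not part of this patch. */
struct foo {
	int a;
};

static struct srcu_struct my_srcu;
static struct foo *my_ptr;		/* protected by my_srcu */

static int my_subsys_init(void)
{
	/*
	 * Under CONFIG_DEBUG_LOCK_ALLOC this expands to
	 * __init_srcu_struct(&my_srcu, "my_srcu", &__srcu_key),
	 * giving my_srcu its own lockdep class.
	 */
	return init_srcu_struct(&my_srcu);
}

static void my_reader(void)
{
	struct foo *p;
	int idx;

	idx = srcu_read_lock(&my_srcu);		/* records srcu_read_acquire() */
	p = rcu_dereference_check(my_ptr,
				  srcu_read_lock_held(&my_srcu));
	if (p)
		pr_info("foo a=%d\n", p->a);
	srcu_read_unlock(&my_srcu, idx);	/* records srcu_read_release() */
}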
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 9b7fd4723878..033cb55c26df 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -50,6 +50,16 @@ static struct lock_class_key rcu_lock_key;
 struct lockdep_map rcu_lock_map =
 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
 EXPORT_SYMBOL_GPL(rcu_lock_map);
+
+static struct lock_class_key rcu_bh_lock_key;
+struct lockdep_map rcu_bh_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
+EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
+
+static struct lock_class_key rcu_sched_lock_key;
+struct lockdep_map rcu_sched_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
+EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 #endif
 
 /*
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index adda92bfafac..5f43f30fcd1d 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -796,7 +796,11 @@ static void rcu_torture_timer(unsigned long unused)
 
 	idx = cur_ops->readlock();
 	completed = cur_ops->completed();
-	p = rcu_dereference(rcu_torture_current);
+	p = rcu_dereference_check(rcu_torture_current,
+				  rcu_read_lock_held() ||
+				  rcu_read_lock_bh_held() ||
+				  rcu_read_lock_sched_held() ||
+				  srcu_read_lock_held(&srcu_ctl));
 	if (p == NULL) {
 		/* Leave because rcu_torture_writer is not yet underway */
 		cur_ops->readunlock(idx);
@@ -853,7 +857,11 @@ rcu_torture_reader(void *arg)
 		}
 		idx = cur_ops->readlock();
 		completed = cur_ops->completed();
-		p = rcu_dereference(rcu_torture_current);
+		p = rcu_dereference_check(rcu_torture_current,
+					  rcu_read_lock_held() ||
+					  rcu_read_lock_bh_held() ||
+					  rcu_read_lock_sched_held() ||
+					  srcu_read_lock_held(&srcu_ctl));
 		if (p == NULL) {
 			/* Wait for rcu_torture_writer to get underway */
 			cur_ops->readunlock(idx);
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 31b275b9c112..bde4295774c8 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -34,6 +34,30 @@
 #include <linux/smp.h>
 #include <linux/srcu.h>
 
+static int init_srcu_struct_fields(struct srcu_struct *sp)
+{
+	sp->completed = 0;
+	mutex_init(&sp->mutex);
+	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
+	return sp->per_cpu_ref ? 0 : -ENOMEM;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+		       struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/* Don't re-initialize a lock while it is held. */
+	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
+	lockdep_init_map(&sp->dep_map, name, key, 0);
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+	return init_srcu_struct_fields(sp);
+}
+EXPORT_SYMBOL_GPL(__init_srcu_struct);
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 /**
  * init_srcu_struct - initialize a sleep-RCU structure
  * @sp: structure to initialize.
@@ -44,13 +68,12 @@
  */
 int init_srcu_struct(struct srcu_struct *sp)
 {
-	sp->completed = 0;
-	mutex_init(&sp->mutex);
-	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
-	return (sp->per_cpu_ref ? 0 : -ENOMEM);
+	return init_srcu_struct_fields(sp);
 }
 EXPORT_SYMBOL_GPL(init_srcu_struct);
 
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 /*
  * srcu_readers_active_idx -- returns approximate number of readers
  * active on the specified rank of per-CPU counters.
@@ -100,15 +123,12 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
 }
 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
-/**
- * srcu_read_lock - register a new reader for an SRCU-protected structure.
- * @sp: srcu_struct in which to register the new reader.
- *
+/*
  * Counts the new reader in the appropriate per-CPU element of the
  * srcu_struct.  Must be called from process context.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
-int srcu_read_lock(struct srcu_struct *sp)
+int __srcu_read_lock(struct srcu_struct *sp)
 {
 	int idx;
 
@@ -120,26 +140,22 @@ int srcu_read_lock(struct srcu_struct *sp)
 	preempt_enable();
 	return idx;
 }
-EXPORT_SYMBOL_GPL(srcu_read_lock);
+EXPORT_SYMBOL_GPL(__srcu_read_lock);
 
-/**
- * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
- * @sp: srcu_struct in which to unregister the old reader.
- * @idx: return value from corresponding srcu_read_lock().
- *
+/*
  * Removes the count for the old reader from the appropriate per-CPU
  * element of the srcu_struct.  Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
  * Must be called from process context.
  */
-void srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
 	preempt_disable();
 	srcu_barrier();  /* ensure compiler won't misorder critical section. */
 	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
 	preempt_enable();
 }
-EXPORT_SYMBOL_GPL(srcu_read_unlock);
+EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
 /*
  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6bf97d176326..6af20a8a0a54 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -499,6 +499,18 @@ config PROVE_LOCKING
 
 	  For more details, see Documentation/lockdep-design.txt.
 
+config PROVE_RCU
+	bool "RCU debugging: prove RCU correctness"
+	depends on PROVE_LOCKING
+	default n
+	help
+	  This feature enables lockdep extensions that check for correct
+	  use of RCU APIs.  This is currently under development.  Say Y
+	  if you want to debug RCU usage or help work on the PROVE_RCU
+	  feature.
+
+	  Say N if you are unsure.
+
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index bc3b11731b9c..5bf0020b9248 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -23,6 +23,7 @@
  * shut up after that.
  */
 int debug_locks = 1;
+EXPORT_SYMBOL_GPL(debug_locks);
 
 /*
  * The locking-testsuite uses <debug_locks_silent> to get a