author     Ingo Molnar <mingo@kernel.org>    2017-12-12 06:31:16 -0500
committer  Ingo Molnar <mingo@kernel.org>    2017-12-12 06:38:51 -0500
commit     e966eaeeb623f09975ef362c2866fae6f86844f9
tree       8dfcb814c86d24c32ae3338d4dc1dc3aa1017764 /include/linux/lockdep.h
parent     d89c70356acf11b7cf47ca5cfcafae5062a85451
locking/lockdep: Remove the cross-release locking checks
This code (CONFIG_LOCKDEP_CROSSRELEASE=y and CONFIG_LOCKDEP_COMPLETIONS=y),
while it found a number of old bugs initially, was also causing too many
false positives that caused people to disable lockdep - which is arguably
a worse overall outcome.

If we disable cross-release by default but keep the code upstream then
in practice the most likely outcome is that we'll allow the situation
to degrade gradually, by allowing entropy to introduce more and more
false positives, until it overwhelms maintenance capacity.

Another bad side effect was that people were trying to work around the
false positives by uglifying/complicating unrelated code. There's a marked
difference between annotating locking operations and uglifying good code
just due to bad lock debugging code ...

This gradual decrease in quality happened to a number of debugging
facilities in the kernel, and lockdep is pretty complex already, so we
cannot risk this outcome.

Either cross-release checking can be done right with no false positives,
or it should not be included in the upstream kernel.

( Note that it might make sense to maintain it out of tree and go through
  the false positives every now and then and see whether new bugs were
  introduced. )

Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
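[Editor's note: for context, a minimal sketch of the class of bug the cross-release
machinery was designed to catch. This is illustrative only and not part of the patch;
context_a()/context_b() are hypothetical names. A completion is not a classic lock, so
vanilla lockdep never connects the waiter with the context that must run to release it:

/* Illustrative sketch: a completion-based deadlock invisible to classic lockdep. */
#include <linux/mutex.h>
#include <linux/completion.h>

static DEFINE_MUTEX(m);
static DECLARE_COMPLETION(done);

static void context_a(void)
{
	mutex_lock(&m);
	wait_for_completion(&done);	/* never returns: context_b() cannot proceed */
	mutex_unlock(&m);
}

static void context_b(void)
{
	mutex_lock(&m);			/* blocks forever: context_a() holds 'm' */
	complete(&done);
	mutex_unlock(&m);
}

Cross-release recorded the wait_for_completion()/complete() pair as a lock-like
dependency, letting lockdep report the m -> done -> m cycle above - at the cost of
the false positives the commit message describes.]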
Diffstat (limited to 'include/linux/lockdep.h')
 -rw-r--r--  include/linux/lockdep.h | 125
 1 file changed, 0 insertions(+), 125 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index a842551fe044..2e75dc34bff5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -158,12 +158,6 @@ struct lockdep_map {
 	int				cpu;
 	unsigned long			ip;
 #endif
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-	/*
-	 * Whether it's a crosslock.
-	 */
-	int				cross;
-#endif
 };
 
 static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -267,96 +261,9 @@ struct held_lock {
 	unsigned int hardirqs_off:1;
 	unsigned int references:12;					/* 32 bits */
 	unsigned int pin_count;
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-	/*
-	 * Generation id.
-	 *
-	 * A value of cross_gen_id will be stored when holding this,
-	 * which is globally increased whenever each crosslock is held.
-	 */
-	unsigned int gen_id;
-#endif
-};
-
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCK_TRACE_ENTRIES 5
-
-/*
- * This is for keeping locks waiting for commit so that true dependencies
- * can be added at commit step.
- */
-struct hist_lock {
-	/*
-	 * Id for each entry in the ring buffer. This is used to
-	 * decide whether the ring buffer was overwritten or not.
-	 *
-	 * For example,
-	 *
-	 *           |<----------- hist_lock ring buffer size ------->|
-	 *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
-	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
-	 *
-	 *           where 'p' represents an acquisition in process
-	 *           context, 'i' represents an acquisition in irq
-	 *           context.
-	 *
-	 * In this example, the ring buffer was overwritten by
-	 * acquisitions in irq context, that should be detected on
-	 * rollback or commit.
-	 */
-	unsigned int hist_id;
-
-	/*
-	 * Seperate stack_trace data. This will be used at commit step.
-	 */
-	struct stack_trace	trace;
-	unsigned long		trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
-
-	/*
-	 * Seperate hlock instance. This will be used at commit step.
-	 *
-	 * TODO: Use a smaller data structure containing only necessary
-	 * data. However, we should make lockdep code able to handle the
-	 * smaller one first.
-	 */
-	struct held_lock	hlock;
 };
 
 /*
- * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
- * be called instead of lockdep_init_map().
- */
-struct cross_lock {
-	/*
-	 * When more than one acquisition of crosslocks are overlapped,
-	 * we have to perform commit for them based on cross_gen_id of
-	 * the first acquisition, which allows us to add more true
-	 * dependencies.
-	 *
-	 * Moreover, when no acquisition of a crosslock is in progress,
-	 * we should not perform commit because the lock might not exist
-	 * any more, which might cause incorrect memory access. So we
-	 * have to track the number of acquisitions of a crosslock.
-	 */
-	int nr_acquire;
-
-	/*
-	 * Seperate hlock instance. This will be used at commit step.
-	 *
-	 * TODO: Use a smaller data structure containing only necessary
-	 * data. However, we should make lockdep code able to handle the
-	 * smaller one first.
-	 */
-	struct held_lock	hlock;
-};
-
-struct lockdep_map_cross {
-	struct lockdep_map map;
-	struct cross_lock xlock;
-};
-#endif
-
-/*
  * Initialization, self-test and debugging-output methods:
  */
 extern void lockdep_info(void);
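[Editor's note: the hist_id comment removed in the hunk above relies on a single
global, monotonically increasing counter that both indexes the ring buffer and
stamps each entry, so overwrite detection reduces to a subtraction. A stand-alone
sketch of that idea; all names and sizes here are hypothetical, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct entry {
	unsigned int id;	/* value of next_id when this slot was written */
};

static struct entry ring[RING_SIZE];
static unsigned int next_id;	/* total number of writes so far */

static unsigned int ring_add(void)
{
	unsigned int id = next_id++;

	ring[id % RING_SIZE].id = id;
	return id;		/* caller saves this, like hist_id */
}

/*
 * The slot that held 'saved' is rewritten by write number saved + RING_SIZE,
 * so the entry is intact iff at most RING_SIZE writes happened since 'saved'.
 */
static bool entry_still_valid(unsigned int saved)
{
	return next_id - saved <= RING_SIZE;
}

int main(void)
{
	unsigned int saved = ring_add();
	int i;

	for (i = 0; i < RING_SIZE; i++)
		ring_add();

	/* One full lap has passed: the saved entry was overwritten. */
	printf("valid: %d\n", entry_still_valid(saved));	/* prints 0 */
	return 0;
}]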
@@ -560,37 +467,6 @@ enum xhlock_context_t {
 	XHLOCK_CTX_NR,
 };
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
-				       const char *name,
-				       struct lock_class_key *key,
-				       int subclass);
-extern void lock_commit_crosslock(struct lockdep_map *lock);
-
-/*
- * What we essencially have to initialize is 'nr_acquire'. Other members
- * will be initialized in add_xlock().
- */
-#define STATIC_CROSS_LOCK_INIT() \
-	{ .nr_acquire = 0,}
-
-#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
-	{ .map.name = (_name), .map.key = (void *)(_key), \
-	  .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
-
-/*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
- */
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
-	{ .name = (_name), .key = (void *)(_key), .cross = 0, }
-
-extern void crossrelease_hist_start(enum xhlock_context_t c);
-extern void crossrelease_hist_end(enum xhlock_context_t c);
-extern void lockdep_invariant_state(bool force);
-extern void lockdep_init_task(struct task_struct *task);
-extern void lockdep_free_task(struct task_struct *task);
-#else /* !CROSSRELEASE */
 #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
 /*
  * To initialize a lockdep_map statically use this macro.
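[Editor's note: only the CONFIG_LOCKDEP_CROSSRELEASE branch is removed here; the
plain STATIC_LOCKDEP_MAP_INIT() in the surviving branch keeps working for ordinary
users, now without the .cross member. A hypothetical caller, using C99 designated
initialization through the macro; 'my_key'/'my_map' are illustrative names, not
from this patch:

/* Hypothetical user of the surviving initializer macro. */
static struct lock_class_key my_key;

static struct lockdep_map my_map =
	STATIC_LOCKDEP_MAP_INIT("my_map", &my_key);	/* _name must not be NULL */]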
@@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
 static inline void lockdep_invariant_state(bool force) {}
 static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
-#endif /* CROSSRELEASE */
 
 #ifdef CONFIG_LOCK_STAT
 