path: root/include/linux/lockdep.h
author	Ingo Molnar <mingo@elte.hu>	2006-07-03 03:24:50 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-03 18:27:03 -0400
commit	fbb9ce9530fd9b66096d5187fa6a115d16d9746c (patch)
tree	1151a55e5d56045bac17b9766e6a4696cff0a26f /include/linux/lockdep.h
parent	cae2ed9aa573415c6e5de9a09b7ff0d74af793bc (diff)
[PATCH] lockdep: core
Do 'make oldconfig' and accept all the defaults for the new config options, then reboot into the kernel. If everything goes well it should boot up fine, and you should have /proc/lockdep and /proc/lockdep_stats files.

Typically, if the lock validator finds some problem it will print out voluminous debug output that begins with "BUG: ...", and this syslog output can be used by kernel developers to figure out the precise locking scenario.

What does the lock validator do? It "observes" and maps all locking rules as they occur dynamically (as triggered by the kernel's natural use of spinlocks, rwlocks, mutexes and rwsems). Whenever the lock validator subsystem detects a new locking scenario, it validates this new rule against the existing set of rules. If this new rule is consistent with the existing set of rules then the new rule is added transparently and the kernel continues as normal.

If the new rule could create a deadlock scenario then this condition is printed out.

When determining the validity of locking, all possible "deadlock scenarios" are considered: assuming an arbitrary number of CPUs, arbitrary irq-context and task-context constellations, running arbitrary combinations of all the existing locking scenarios. In a typical system this means millions of separate scenarios. This is why we call it a "locking correctness" validator - for all rules that are observed the lock validator proves, with mathematical certainty, that a deadlock could not occur (assuming that the lock validator implementation itself is correct and its internal data structures are not corrupted by some other kernel subsystem). [see more details and conditionals of this statement in include/linux/lockdep.h and Documentation/lockdep-design.txt]

Furthermore, this "all possible scenarios" property of the validator also enables the finding of complex, highly unlikely multi-CPU multi-context races via individual single-context rules, increasing the likelihood of finding bugs drastically. In practical terms: the lock validator already found a bug in the upstream kernel that could only occur on systems with 3 or more CPUs, and which needed 3 very unlikely code sequences to occur at once on the 3 CPUs. That bug was found and reported on a single-CPU system (!). So in essence a race will be found "piecemeal-wise", triggering all the necessary components for the race, without having to reproduce the race scenario itself! In its short existence the lock validator found and reported many bugs before they actually caused a real deadlock.

To further increase the efficiency of the validator, the mapping is not per "lock instance", but per "lock-class". For example, all struct inode objects in the kernel have inode->inotify_mutex. If there are 10,000 inodes cached, then there are 10,000 lock objects. But ->inotify_mutex is a single "lock type", and all locking activities that occur against ->inotify_mutex are "unified" into this single lock-class. The advantage of the lock-class approach is that all historical ->inotify_mutex uses are mapped into a single (and as narrow as possible) set of locking rules - regardless of how many different tasks or inode structures it took to build this set of rules. The set of rules persists during the lifetime of the kernel.
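As an illustrative sketch (assumed names, not part of this patch): a subsystem that wants one particular lock instance to live in its own class can give it a dedicated static key, since the key's address - not the lock instance - identifies the class:

    static struct lock_class_key hypothetical_key;

    spin_lock_init(&obj->lock);
    lockdep_set_class(&obj->lock, &hypothetical_key);

Here hypothetical_key and obj are made-up names; lockdep_set_class() is the interface added by this patch (see include/linux/lockdep.h below).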
To see the rough magnitude of checking that the lock validator does, here's a portion of /proc/lockdep_stats, fresh after bootup:

  lock-classes:                  694 [max: 2048]
  direct dependencies:          1598 [max: 8192]
  indirect dependencies:       17896
  all direct dependencies:     16206
  dependency chains:            1910 [max: 8192]
  in-hardirq chains:              17
  in-softirq chains:             105
  in-process chains:            1065
  stack-trace entries:         38761 [max: 131072]
  combined max dependencies: 2033928
  hardirq-safe locks:             24
  hardirq-unsafe locks:          176
  softirq-safe locks:             53
  softirq-unsafe locks:          137
  irq-safe locks:                 59
  irq-unsafe locks:              176

The lock validator has observed 1598 actual single-thread locking patterns, and has validated all possible 2033928 distinct locking scenarios.

More details about the design of the lock validator can be found in Documentation/lockdep-design.txt, which can also be found at:

  http://redhat.com/~mingo/lockdep-patches/lockdep-design.txt

[bunk@stusta.de: cleanups]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/lockdep.h')
-rw-r--r--	include/linux/lockdep.h	347
1 files changed, 347 insertions, 0 deletions
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
new file mode 100644
index 000000000000..80ec7a4dbc98
--- /dev/null
+++ b/include/linux/lockdep.h
@@ -0,0 +1,347 @@
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

#ifdef CONFIG_LOCKDEP

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

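/*
 * (Editorial note, an assumption added for illustration and not part of the
 * original header: the classic report built from these masks is a
 * hardirq-unsafe lock - one ever taken with hardirqs enabled,
 * LOCKF_ENABLED_HARDIRQS - being acquired while a hardirq-safe lock - one
 * ever taken from hardirq context, LOCKF_USED_IN_HARDIRQ - is held: one CPU
 * can hold the unsafe lock when an interrupt comes in and needs the safe
 * lock, while another CPU already holds the safe lock and is waiting for
 * the unsafe one.)
 */
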
#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;
};

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class[MAX_LOCKDEP_SUBCLASSES];
	const char			*name;
};

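/*
 * Illustrative sketch (assumed layout, not part of this header): a lock
 * type embeds its lockdep_map next to the actual lock word, roughly:
 *
 *	typedef struct {
 *		raw_spinlock_t		raw_lock;
 *		struct lockdep_map	dep_map;
 *	} spinlock_t;
 *
 * so every lock instance carries a pointer to its class key and the
 * cached per-subclass class mappings.
 */
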
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	struct list_head		entry;
	u64				chain_key;
};

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);

extern void lockdep_off(void);
extern void lockdep_on(void);
extern int lockdep_internal(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key)

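/*
 * Illustrative use (assumed names, not part of this header): a driver
 * whose locks are all initialized by one common helper, but which needs
 * one special instance in a class of its own, could do:
 *
 *	static struct lock_class_key hypothetical_key;
 *	...
 *	lockdep_set_class(&dev->queue_lock, &hypothetical_key);
 *
 * The key's address is the class identity, so one static key per
 * intended class is sufficient.
 */
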
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

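/*
 * Illustrative sketch (assumed call site, not part of this header): a
 * fully-checked, exclusive (write) acquisition and its matching release
 * would reach lockdep as:
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 2,
 *		     (unsigned long)__builtin_return_address(0));
 *	...critical section...
 *	lock_release(&lock->dep_map, 0,
 *		     (unsigned long)__builtin_return_address(0));
 *
 * i.e. subclass 0, trylock 0, read 0 and check 2 per the table above; a
 * recursive reader would pass read == 2 instead.
 */
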
# define INIT_LOCKDEP				.lockdep_recursion = 0,

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline int lockdep_internal(void)
{
	return 0;
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key)	do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };
#endif /* !LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
#else
# define early_boot_irqs_off()	do { } while (0)
# define early_boot_irqs_on()	do { } while (0)
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

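/*
 * Illustrative sketch (assumed names, not part of this header): when two
 * instances of the same class are legitimately nested - say a parent
 * object's lock and then its child's - the inner acquisition can be
 * annotated with subclass SINGLE_DEPTH_NESTING so the validator does not
 * treat it as recursion on a single class:
 *
 *	lock_acquire(&parent->lock.dep_map, 0, 0, 0, 2, ip);
 *	lock_acquire(&child->lock.dep_map, SINGLE_DEPTH_NESTING, 0, 0, 2, ip);
 *
 * where ip is the caller's instruction pointer and parent/child are
 * made-up names.
 */
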
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif
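
/*
 * Sketch of a call site (assumed wrapper, not part of this header): a
 * spinlock implementation hooks lockdep around its low-level operation,
 * roughly:
 *
 *	void my_spin_lock(spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0,
 *			     (unsigned long)__builtin_return_address(0));
 *		_raw_spin_lock(lock);
 *	}
 *
 * With CONFIG_PROVE_LOCKING this expands to a full-validation (check == 2)
 * lock_acquire(); with only CONFIG_DEBUG_LOCK_ALLOC it falls back to the
 * simpler check == 1 form; without either it compiles away entirely.
 */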

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#endif /* __LINUX_LOCKDEP_H */