Diffstat (limited to 'include/linux/lockdep.h')
-rw-r--r--  include/linux/lockdep.h  | 82
1 file changed, 44 insertions(+), 38 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 23bf02fb124f..b25d1b53df0d 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -20,43 +20,10 @@ struct lockdep_map;
 #include <linux/stacktrace.h>
 
 /*
- * Lock-class usage-state bits:
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
  */
-enum lock_usage_bit
-{
-	LOCK_USED = 0,
-	LOCK_USED_IN_HARDIRQ,
-	LOCK_USED_IN_SOFTIRQ,
-	LOCK_ENABLED_SOFTIRQS,
-	LOCK_ENABLED_HARDIRQS,
-	LOCK_USED_IN_HARDIRQ_READ,
-	LOCK_USED_IN_SOFTIRQ_READ,
-	LOCK_ENABLED_SOFTIRQS_READ,
-	LOCK_ENABLED_HARDIRQS_READ,
-	LOCK_USAGE_STATES
-};
-
-/*
- * Usage-state bitmasks:
- */
-#define LOCKF_USED			(1 << LOCK_USED)
-#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
-#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
-#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
-#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)
-
-#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
-
-#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
-#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
-#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
-#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)
-
-#define LOCKF_ENABLED_IRQS_READ \
-		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
-#define LOCKF_USED_IN_IRQ_READ \
-		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+#define XXX_LOCK_USAGE_STATES		(1+3*4)
 
 #define MAX_LOCKDEP_SUBCLASSES		8UL
 
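A note on the magic number: the count is one LOCK_USED bit plus four bits (used-in, used-in-read, enabled, enabled-read) per tracked state. A rough sketch of the arithmetic, assuming kernel/lockdep_states.h enumerates the HARDIRQ, SOFTIRQ and RECLAIM_FS states introduced by this series (the names here are illustrative, not part of this hunk):

/* Hypothetical illustration of where (1+3*4) comes from; not in the patch. */
enum { HARDIRQ, SOFTIRQ, RECLAIM_FS, NR_STATES };	/* assumed lockdep_states.h */

#define BITS_PER_STATE	4	/* USED_IN, USED_IN_READ, ENABLED, ENABLED_READ */
#define NR_USAGE_BITS	(1 /* LOCK_USED */ + NR_STATES * BITS_PER_STATE)	/* == 13 */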
@@ -97,7 +64,7 @@ struct lock_class {
 	 * IRQ/softirq usage tracking bits:
 	 */
 	unsigned long			usage_mask;
-	struct stack_trace		usage_traces[LOCK_USAGE_STATES];
+	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];
 
 	/*
 	 * These fields represent a directed graph of lock dependencies,
@@ -291,6 +258,16 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
 #define lockdep_set_subclass(lock, sub)		\
 		lockdep_init_map(&(lock)->dep_map, #lock, \
 				 (lock)->dep_map.key, sub)
+/*
+ * Compare locking classes
+ */
+#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
+
+static inline int lockdep_match_key(struct lockdep_map *lock,
+				    struct lock_class_key *key)
+{
+	return lock->key == key;
+}
 
 /*
  * Acquire a lock.
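The new helper lets code that re-keys a lock with lockdep_set_class() later check that the lock really carries the expected class key. A minimal sketch of a caller, assuming CONFIG_LOCKDEP is enabled (the struct, function and key names are made up for illustration):

/* Hypothetical caller of lockdep_match_class(); names are invented. */
#include <linux/lockdep.h>
#include <linux/spinlock.h>

static struct lock_class_key my_dev_lock_key;

static void my_dev_lock_init(spinlock_t *lock)
{
	spin_lock_init(lock);
	lockdep_set_class(lock, &my_dev_lock_key);

	/* true only if the lock's dep_map key is exactly &my_dev_lock_key */
	WARN_ON(!lockdep_match_class(lock, &my_dev_lock_key));
}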
@@ -324,7 +301,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
 	lock_set_class(lock, lock->name, lock->key, subclass, ip);
 }
 
-# define INIT_LOCKDEP				.lockdep_recursion = 0,
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+
+# define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
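These declarations expose the reclaim-context tracking hooks: the reclaim path brackets itself with set/clear, and allocation entry points report the gfp mask so lockdep can correlate held locks with allocations that may recurse into reclaim. The real call sites live in the mm/ allocator and reclaim code, not in this header; the functions below are only a sketch of the intended call pattern:

/* Sketch of intended usage; reclaim_example()/alloc_example() are invented. */
static void reclaim_example(gfp_t gfp_mask)
{
	lockdep_set_current_reclaim_state(gfp_mask);	/* entering reclaim */
	/* ... shrink caches, possibly taking fs locks ... */
	lockdep_clear_current_reclaim_state();		/* leaving reclaim */
}

static void alloc_example(gfp_t gfp_mask)
{
	/* tell lockdep about an allocation that may enter reclaim */
	lockdep_trace_alloc(gfp_mask);
	/* ... perform the actual allocation ... */
}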
@@ -342,6 +323,9 @@ static inline void lockdep_on(void)
 # define lock_release(l, n, i)			do { } while (0)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
+# define lockdep_set_current_reclaim_state(g)	do { } while (0)
+# define lockdep_clear_current_reclaim_state()	do { } while (0)
+# define lockdep_trace_alloc(g)			do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
@@ -352,6 +336,11 @@ static inline void lockdep_on(void)
 #define lockdep_set_class_and_subclass(lock, key, sub) \
 		do { (void)(key); } while (0)
 #define lockdep_set_subclass(lock, sub)		do { } while (0)
+/*
+ * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
+ * case since the result is not well defined and the caller should rather
+ * #ifdef the call himself.
+ */
 
 # define INIT_LOCKDEP
 # define lockdep_reset()		do { debug_locks = 1; } while (0)
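In practice the comment above means code that must build with and without lockdep guards the check itself. A minimal sketch of such a caller (struct and key names are hypothetical):

/* Hypothetical caller honouring the comment above. */
struct my_obj {
	spinlock_t lock;
};
static struct lock_class_key obj_lock_key;

static void obj_check_class(struct my_obj *obj)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(!lockdep_match_class(&obj->lock, &obj_lock_key));
#endif
}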
@@ -390,6 +379,23 @@ do { \
 
 #endif /* CONFIG_LOCK_STAT */
 
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * On lockdep we dont want the hand-coded irq-enable of
+ * _raw_*_lock_flags() code, because lockdep assumes
+ * that interrupts are not re-enabled during lock-acquire:
+ */
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+	LOCK_CONTENDED((_lock), (try), (lock))
+
+#else /* CONFIG_LOCKDEP */
+
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+	lockfl((_lock), (flags))
+
+#endif /* CONFIG_LOCKDEP */
+
 #ifdef CONFIG_GENERIC_HARDIRQS
 extern void early_init_irq_lock_class(void);
 #else
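LOCK_CONTENDED_FLAGS is aimed at the *_irqsave acquisition paths, where the non-lockdep slow path may hand the saved flags to an arch helper that briefly re-enables interrupts while spinning. A sketch of the kind of caller this targets, modelled on the kernel/spinlock.c __spin_lock_irqsave() pattern of that era (the function name is invented and the exact wiring may differ):

/* Sketch of a caller; my_spin_lock_irqsave() is hypothetical. */
unsigned long __lockfunc my_spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * With CONFIG_LOCKDEP this expands to LOCK_CONTENDED(), keeping IRQs
	 * off while spinning; without lockdep it calls _raw_spin_lock_flags(),
	 * which may re-enable IRQs from the saved flags while it waits.
	 */
	LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
			     _raw_spin_lock_flags, &flags);
	return flags;
}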