Diffstat (limited to 'kernel/lockdep.c')

 -rw-r--r--  kernel/lockdep.c | 560
 1 file changed, 295 insertions(+), 265 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06b0c3568f0b..b0f011866969 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -41,6 +41,8 @@
 #include <linux/utsname.h>
 #include <linux/hash.h>
 #include <linux/ftrace.h>
+#include <linux/stringify.h>
+#include <trace/lockdep.h>
 
 #include <asm/sections.h>
 
@@ -310,12 +312,14 @@ EXPORT_SYMBOL(lockdep_on);
 #if VERBOSE
 # define HARDIRQ_VERBOSE	1
 # define SOFTIRQ_VERBOSE	1
+# define RECLAIM_VERBOSE	1
 #else
 # define HARDIRQ_VERBOSE	0
 # define SOFTIRQ_VERBOSE	0
+# define RECLAIM_VERBOSE	0
 #endif
 
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
+#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 /*
  * Quick filtering for interesting events:
  */
@@ -430,30 +434,24 @@ atomic_t nr_find_usage_forwards_checks;
 atomic_t nr_find_usage_forwards_recursions;
 atomic_t nr_find_usage_backwards_checks;
 atomic_t nr_find_usage_backwards_recursions;
-# define debug_atomic_inc(ptr)		atomic_inc(ptr)
-# define debug_atomic_dec(ptr)		atomic_dec(ptr)
-# define debug_atomic_read(ptr)		atomic_read(ptr)
-#else
-# define debug_atomic_inc(ptr)		do { } while (0)
-# define debug_atomic_dec(ptr)		do { } while (0)
-# define debug_atomic_read(ptr)		0
 #endif
 
 /*
  * Locking printouts:
  */
 
+#define __USAGE(__STATE)						\
+	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
+	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
+	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
+	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
+
 static const char *usage_str[] =
 {
-	[LOCK_USED] = "initial-use ",
-	[LOCK_USED_IN_HARDIRQ] = "in-hardirq-W",
-	[LOCK_USED_IN_SOFTIRQ] = "in-softirq-W",
-	[LOCK_ENABLED_SOFTIRQS] = "softirq-on-W",
-	[LOCK_ENABLED_HARDIRQS] = "hardirq-on-W",
-	[LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R",
-	[LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R",
-	[LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R",
-	[LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R",
+#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+	[LOCK_USED] = "INITIAL USE",
 };
 
 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
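Note: the LOCKDEP_STATE() multi-include above is an X-macro pattern. The new lockdep_states.h header is expected to carry one LOCKDEP_STATE() entry per lock state, and each include site decides how an entry expands. A minimal sketch of how usage_str[] comes out, assuming the header lists HARDIRQ, SOFTIRQ and RECLAIM_FS:

	/* lockdep_states.h (sketch) */
	LOCKDEP_STATE(HARDIRQ)
	LOCKDEP_STATE(SOFTIRQ)
	LOCKDEP_STATE(RECLAIM_FS)

	/* usage_str[] then expands to entries such as: */
	[LOCK_USED_IN_HARDIRQ]      = "IN-HARDIRQ-W",
	[LOCK_ENABLED_HARDIRQ]      = "HARDIRQ-ON-W",
	[LOCK_USED_IN_HARDIRQ_READ] = "IN-HARDIRQ-R",
	[LOCK_ENABLED_HARDIRQ_READ] = "HARDIRQ-ON-R",
	/* ...and likewise for SOFTIRQ and RECLAIM_FS */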
@@ -461,46 +459,45 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 }
 
-void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
+static inline unsigned long lock_flag(enum lock_usage_bit bit)
 {
-	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
-
-	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
-		*c1 = '+';
-	else
-	if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
-		*c1 = '-';
+	return 1UL << bit;
+}
 
-	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
-		*c2 = '+';
-	else
-	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
-		*c2 = '-';
+static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
+{
+	char c = '.';
 
-	if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-		*c3 = '-';
-	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
-		*c3 = '+';
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-			*c3 = '?';
-	}
+	if (class->usage_mask & lock_flag(bit + 2))
+		c = '+';
+	if (class->usage_mask & lock_flag(bit)) {
+		c = '-';
+		if (class->usage_mask & lock_flag(bit + 2))
+			c = '?';
+	}
 
-	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-		*c4 = '-';
-	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
-		*c4 = '+';
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-			*c4 = '?';
-	}
+	return c;
+}
+
+void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
+{
+	int i = 0;
+
+#define LOCKDEP_STATE(__STATE) 						\
+	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
+	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+
+	usage[i] = '\0';
 }
 
 static void print_lock_name(struct lock_class *class)
 {
-	char str[KSYM_NAME_LEN], c1, c2, c3, c4;
+	char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
 	const char *name;
 
-	get_usage_chars(class, &c1, &c2, &c3, &c4);
+	get_usage_chars(class, usage);
 
 	name = class->name;
 	if (!name) {
@@ -513,7 +510,7 @@ static void print_lock_name(struct lock_class *class)
 		if (class->subclass)
			printk("/%d", class->subclass);
 	}
-	printk("){%c%c%c%c}", c1, c2, c3, c4);
+	printk("){%s}", usage);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
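With three states and a write/read character each, the usage field printed by print_lock_name() grows from four characters to six. Reading the new get_usage_char() above, the per-character encoding comes out as follows (a sketch of the semantics as implied by the code, not authoritative documentation):

	/*
	 * '.'  class never used in this state's context, nor held with it enabled
	 * '+'  class was held while the state was enabled (e.g. with hardirqs on)
	 * '-'  class was used in the state's context (e.g. in hardirq)
	 * '?'  both of the above -- an inversion candidate
	 *
	 * so a report header now looks like, e.g.:  (&inode->i_mutex){+.+.+.}
	 */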
@@ -796,6 +793,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
 		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return NULL;
 	}
 	class = lock_classes + nr_lock_classes++;
@@ -859,6 +857,7 @@ static struct lock_list *alloc_list_entry(void)
 
 		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return NULL;
 	}
 	return list_entries + nr_list_entries++;
@@ -1263,9 +1262,49 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 			bit_backwards, bit_forwards, irqclass);
 }
 
-static int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-		struct held_lock *next)
+static const char *state_names[] = {
+#define LOCKDEP_STATE(__STATE) \
+	__stringify(__STATE),
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static const char *state_rnames[] = {
+#define LOCKDEP_STATE(__STATE) \
+	__stringify(__STATE)"-READ",
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline const char *state_name(enum lock_usage_bit bit)
+{
+	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+}
+
+static int exclusive_bit(int new_bit)
+{
+	/*
+	 * USED_IN
+	 * USED_IN_READ
+	 * ENABLED
+	 * ENABLED_READ
+	 *
+	 * bit 0 - write/read
+	 * bit 1 - used_in/enabled
+	 * bit 2+  state
+	 */
+
+	int state = new_bit & ~3;
+	int dir = new_bit & 2;
+
+	/*
+	 * keep state, bit flip the direction and strip read.
+	 */
+	return state | (dir ^ 2);
+}
+
+static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
+			   struct held_lock *next, enum lock_usage_bit bit)
 {
 	/*
 	 * Prove that the new dependency does not connect a hardirq-safe
@@ -1273,38 +1312,34 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
 	 * the backwards-subgraph starting at <prev>, and the
 	 * forwards-subgraph starting at <next>:
 	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
-					LOCK_ENABLED_HARDIRQS, "hard"))
+	if (!check_usage(curr, prev, next, bit,
+			   exclusive_bit(bit), state_name(bit)))
 		return 0;
 
+	bit++; /* _READ */
+
 	/*
 	 * Prove that the new dependency does not connect a hardirq-safe-read
 	 * lock with a hardirq-unsafe lock - to achieve this we search
 	 * the backwards-subgraph starting at <prev>, and the
 	 * forwards-subgraph starting at <next>:
 	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
-					LOCK_ENABLED_HARDIRQS, "hard-read"))
+	if (!check_usage(curr, prev, next, bit,
+			   exclusive_bit(bit), state_name(bit)))
 		return 0;
 
-	/*
-	 * Prove that the new dependency does not connect a softirq-safe
-	 * lock with a softirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
-	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
-				LOCK_ENABLED_SOFTIRQS, "soft"))
-		return 0;
-	/*
-	 * Prove that the new dependency does not connect a softirq-safe-read
-	 * lock with a softirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
-	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
-				LOCK_ENABLED_SOFTIRQS, "soft"))
+	return 1;
+}
+
+static int
+check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
+		struct held_lock *next)
+{
+#define LOCKDEP_STATE(__STATE)						\
+	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
 		return 0;
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
 
 	return 1;
 }
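The arithmetic in exclusive_bit() follows directly from the enum layout its comment documents: each state owns a block of four consecutive bits. A worked example, assuming HARDIRQ is the first LOCKDEP_STATE() entry so its block starts at 0:

	/*
	 * LOCK_USED_IN_HARDIRQ      = 0   (used_in, write)
	 * LOCK_USED_IN_HARDIRQ_READ = 1   (used_in, read)
	 * LOCK_ENABLED_HARDIRQ      = 2   (enabled, write)
	 * LOCK_ENABLED_HARDIRQ_READ = 3   (enabled, read)
	 *
	 * exclusive_bit(LOCK_USED_IN_HARDIRQ):
	 *     state = 0 & ~3 = 0;  dir = 0 & 2 = 0;
	 *     0 | (0 ^ 2) = 2  ->  LOCK_ENABLED_HARDIRQ
	 *
	 * exclusive_bit(LOCK_ENABLED_HARDIRQ_READ):
	 *     state = 3 & ~3 = 0;  dir = 3 & 2 = 2;
	 *     0 | (2 ^ 2) = 0  ->  LOCK_USED_IN_HARDIRQ  (read bit stripped)
	 */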
@@ -1649,6 +1684,7 @@ cache_hit:
 
 		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return 0;
 	}
 	chain = lock_chains + nr_lock_chains++;
@@ -1861,9 +1897,9 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 		curr->comm, task_pid_nr(curr));
 	print_lock(this);
 	if (forwards)
-		printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
+		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
-		printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
+		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
 	print_lock_name(other);
 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
@@ -1933,7 +1969,7 @@ void print_irqtrace_events(struct task_struct *curr)
 		print_ip_sym(curr->softirq_disable_ip);
 }
 
-static int hardirq_verbose(struct lock_class *class)
+static int HARDIRQ_verbose(struct lock_class *class)
 {
 #if HARDIRQ_VERBOSE
 	return class_filter(class);
@@ -1941,7 +1977,7 @@ static int hardirq_verbose(struct lock_class *class)
 	return 0;
 }
 
-static int softirq_verbose(struct lock_class *class)
+static int SOFTIRQ_verbose(struct lock_class *class)
 {
 #if SOFTIRQ_VERBOSE
 	return class_filter(class);
@@ -1949,185 +1985,95 @@ static int softirq_verbose(struct lock_class *class)
 	return 0;
 }
 
+static int RECLAIM_FS_verbose(struct lock_class *class)
+{
+#if RECLAIM_VERBOSE
+	return class_filter(class);
+#endif
+	return 0;
+}
+
 #define STRICT_READ_CHECKS	1
 
-static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+static int (*state_verbose_f[])(struct lock_class *class) = {
+#define LOCKDEP_STATE(__STATE) \
+	__STATE##_verbose,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline int state_verbose(enum lock_usage_bit bit,
+				struct lock_class *class)
+{
+	return state_verbose_f[bit >> 2](class);
+}
+
+typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
+			     enum lock_usage_bit bit, const char *name);
+
+static int
+mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		enum lock_usage_bit new_bit)
 {
-	int ret = 1;
-
-	switch(new_bit) {
-	case LOCK_USED_IN_HARDIRQ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_ENABLED_HARDIRQS_READ))
-			return 0;
-		/*
-		 * just marked it hardirq-safe, check that this lock
-		 * took no hardirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_HARDIRQS, "hard"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it hardirq-safe, check that this lock
-		 * took no hardirq-unsafe-read lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-				LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
-			return 0;
-#endif
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_USED_IN_SOFTIRQ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_ENABLED_SOFTIRQS_READ))
-			return 0;
-		/*
-		 * just marked it softirq-safe, check that this lock
-		 * took no softirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_SOFTIRQS, "soft"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it softirq-safe, check that this lock
-		 * took no softirq-unsafe-read lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-				LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
-			return 0;
-#endif
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_USED_IN_HARDIRQ_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-			return 0;
-		/*
-		 * just marked it hardirq-read-safe, check that this lock
-		 * took no hardirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_HARDIRQS, "hard"))
-			return 0;
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_USED_IN_SOFTIRQ_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-			return 0;
-		/*
-		 * just marked it softirq-read-safe, check that this lock
-		 * took no softirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_SOFTIRQS, "soft"))
-			return 0;
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_HARDIRQS:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_USED_IN_HARDIRQ_READ))
-			return 0;
-		/*
-		 * just marked it hardirq-unsafe, check that no hardirq-safe
-		 * lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_HARDIRQ, "hard"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it hardirq-unsafe, check that no
-		 * hardirq-safe-read lock in the system ever took
-		 * it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-				   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
-			return 0;
-#endif
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_SOFTIRQS:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_USED_IN_SOFTIRQ_READ))
-			return 0;
-		/*
-		 * just marked it softirq-unsafe, check that no softirq-safe
-		 * lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_SOFTIRQ, "soft"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it softirq-unsafe, check that no
-		 * softirq-safe-read lock in the system ever took
-		 * it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-				   LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
-			return 0;
-#endif
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_HARDIRQS_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it hardirq-read-unsafe, check that no
-		 * hardirq-safe lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_HARDIRQ, "hard"))
-			return 0;
-#endif
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_SOFTIRQS_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
+	int excl_bit = exclusive_bit(new_bit);
+	int read = new_bit & 1;
+	int dir = new_bit & 2;
+
+	/*
+	 * mark USED_IN has to look forwards -- to ensure no dependency
+	 * has ENABLED state, which would allow recursion deadlocks.
+	 *
+	 * mark ENABLED has to look backwards -- to ensure no dependee
+	 * has USED_IN state, which, again, would allow recursion deadlocks.
+	 */
+	check_usage_f usage = dir ?
+		check_usage_backwards : check_usage_forwards;
+
+	/*
+	 * Validate that this particular lock does not have conflicting
+	 * usage states.
+	 */
+	if (!valid_state(curr, this, new_bit, excl_bit))
+		return 0;
+
+	/*
+	 * Validate that the lock dependencies don't have conflicting usage
+	 * states.
+	 */
+	if ((!read || !dir || STRICT_READ_CHECKS) &&
+			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
+		return 0;
+
+	/*
+	 * Check for read in write conflicts
+	 */
+	if (!read) {
+		if (!valid_state(curr, this, new_bit, excl_bit + 1))
 			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it softirq-read-unsafe, check that no
-		 * softirq-safe lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_SOFTIRQ, "soft"))
+
+		if (STRICT_READ_CHECKS &&
+			!usage(curr, this, excl_bit + 1,
+				state_name(new_bit + 1)))
 			return 0;
-#endif
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	default:
-		WARN_ON(1);
-		break;
 	}
 
-	return ret;
+	if (state_verbose(new_bit, hlock_class(this)))
+		return 2;
+
+	return 1;
 }
 
+enum mark_type {
+#define LOCKDEP_STATE(__STATE)	__STATE,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
 /*
  * Mark all held locks with a usage bit:
  */
 static int
-mark_held_locks(struct task_struct *curr, int hardirq)
+mark_held_locks(struct task_struct *curr, enum mark_type mark)
 {
 	enum lock_usage_bit usage_bit;
 	struct held_lock *hlock;
@@ -2136,17 +2082,12 @@ mark_held_locks(struct task_struct *curr, int hardirq)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
-		if (hardirq) {
-			if (hlock->read)
-				usage_bit = LOCK_ENABLED_HARDIRQS_READ;
-			else
-				usage_bit = LOCK_ENABLED_HARDIRQS;
-		} else {
-			if (hlock->read)
-				usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
-			else
-				usage_bit = LOCK_ENABLED_SOFTIRQS;
-		}
+		usage_bit = 2 + (mark << 2); /* ENABLED */
+		if (hlock->read)
+			usage_bit += 1; /* READ */
+
+		BUG_ON(usage_bit >= LOCK_USAGE_STATES);
+
 		if (!mark_lock(curr, hlock, usage_bit))
 			return 0;
 	}
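The open-coded usage_bit computation in the new mark_held_locks() leans on the same four-bits-per-state layout: 2 + (mark << 2) lands on the ENABLED slot of the marked state's block, and the +1 selects its _READ variant. Concretely, under the same HARDIRQ/SOFTIRQ/RECLAIM_FS ordering assumed above:

	/*
	 * mark = HARDIRQ    (0):  2 + (0 << 2) =  2  ->  LOCK_ENABLED_HARDIRQ
	 * mark = SOFTIRQ    (1):  2 + (1 << 2) =  6  ->  LOCK_ENABLED_SOFTIRQ
	 * mark = RECLAIM_FS (2):  2 + (2 << 2) = 10  ->  LOCK_ENABLED_RECLAIM_FS
	 * ...plus 1 when hlock->read             ->  the matching _READ bit
	 */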
@@ -2200,7 +2141,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	 * We are going to turn hardirqs on, so set the
 	 * usage bit for all held locks:
 	 */
-	if (!mark_held_locks(curr, 1))
+	if (!mark_held_locks(curr, HARDIRQ))
 		return;
 	/*
 	 * If we have softirqs enabled, then set the usage
@@ -2208,7 +2149,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	 * this bit from being set before)
 	 */
 	if (curr->softirqs_enabled)
-		if (!mark_held_locks(curr, 0))
+		if (!mark_held_locks(curr, SOFTIRQ))
 			return;
 
 	curr->hardirq_enable_ip = ip;
@@ -2288,7 +2229,7 @@ void trace_softirqs_on(unsigned long ip)
 	 * enabled too:
 	 */
 	if (curr->hardirqs_enabled)
-		mark_held_locks(curr, 0);
+		mark_held_locks(curr, SOFTIRQ);
 }
 
 /*
@@ -2317,6 +2258,48 @@ void trace_softirqs_off(unsigned long ip)
 		debug_atomic_inc(&redundant_softirqs_off);
 }
 
+static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
+{
+	struct task_struct *curr = current;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	/* no reclaim without waiting on it */
+	if (!(gfp_mask & __GFP_WAIT))
+		return;
+
+	/* this guy won't enter reclaim */
+	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+		return;
+
+	/* We're only interested __GFP_FS allocations for now */
+	if (!(gfp_mask & __GFP_FS))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
+		return;
+
+	mark_held_locks(curr, RECLAIM_FS);
+}
+
+static void check_flags(unsigned long flags);
+
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+	current->lockdep_recursion = 1;
+	__lockdep_trace_alloc(gfp_mask, flags);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+
 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 {
 	/*
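lockdep_trace_alloc() is the allocator-side hook: allocation entry points are expected to call it with the gfp mask before allocating, so that every currently-held lock gets marked RECLAIM_FS-enabled whenever the allocation may recurse into __GFP_FS reclaim. A hedged sketch of what such an entry point does (the real hook points land in the allocator patches of this series, not in this file):

	/* sketch of an allocator entry point -- illustrative, not this patch */
	void *kmem_alloc(size_t size, gfp_t gfp_mask)
	{
		lockdep_trace_alloc(gfp_mask);	/* held locks become RECLAIM_FS-enabled */
		/* ... actual allocation, which may enter __GFP_FS reclaim ... */
	}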
@@ -2345,19 +2328,35 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 	if (!hlock->hardirqs_off) {
 		if (hlock->read) {
 			if (!mark_lock(curr, hlock,
-					LOCK_ENABLED_HARDIRQS_READ))
+					LOCK_ENABLED_HARDIRQ_READ))
 				return 0;
 			if (curr->softirqs_enabled)
 				if (!mark_lock(curr, hlock,
-						LOCK_ENABLED_SOFTIRQS_READ))
+						LOCK_ENABLED_SOFTIRQ_READ))
 					return 0;
 		} else {
 			if (!mark_lock(curr, hlock,
-					LOCK_ENABLED_HARDIRQS))
+					LOCK_ENABLED_HARDIRQ))
 				return 0;
 			if (curr->softirqs_enabled)
 				if (!mark_lock(curr, hlock,
-						LOCK_ENABLED_SOFTIRQS))
+						LOCK_ENABLED_SOFTIRQ))
+					return 0;
+		}
+	}
+
+	/*
+	 * We reuse the irq context infrastructure more broadly as a general
+	 * context checking code. This tests GFP_FS recursion (a lock taken
+	 * during reclaim for a GFP_FS allocation is held over a GFP_FS
+	 * allocation).
+	 */
+	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
+		if (hlock->read) {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
+				return 0;
+		} else {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
 				return 0;
 		}
 	}
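The GFP_FS recursion test added here catches the classic reclaim inversion: a lock that is both held across a __GFP_FS allocation and taken from the reclaim path can deadlock when that allocation recurses into reclaim. A sketch of the pattern (hypothetical filesystem code):

	/* process context: fs->lock becomes RECLAIM_FS-enabled */
	mutex_lock(&fs->lock);
	buf = kmalloc(size, GFP_KERNEL);	/* GFP_KERNEL includes __GFP_FS */

	/* reclaim/writeback path: the same lock is USED_IN_RECLAIM_FS */
	mutex_lock(&fs->lock);			/* possibly reached from the allocation above */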
@@ -2412,6 +2411,10 @@ static inline int separate_irq_context(struct task_struct *curr,
 	return 0;
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+}
+
 #endif
 
 /*
@@ -2445,14 +2448,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		return 0;
 
 	switch (new_bit) {
-	case LOCK_USED_IN_HARDIRQ:
-	case LOCK_USED_IN_SOFTIRQ:
-	case LOCK_USED_IN_HARDIRQ_READ:
-	case LOCK_USED_IN_SOFTIRQ_READ:
-	case LOCK_ENABLED_HARDIRQS:
-	case LOCK_ENABLED_SOFTIRQS:
-	case LOCK_ENABLED_HARDIRQS_READ:
-	case LOCK_ENABLED_SOFTIRQS_READ:
+#define LOCKDEP_STATE(__STATE)			\
+	case LOCK_USED_IN_##__STATE:		\
+	case LOCK_USED_IN_##__STATE##_READ:	\
+	case LOCK_ENABLED_##__STATE:		\
+	case LOCK_ENABLED_##__STATE##_READ:
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
 		ret = mark_lock_irq(curr, this, new_bit);
 		if (!ret)
 			return 0;
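After preprocessing, the LOCKDEP_STATE() block above is simply the old case list generalized to all states; with the three states assumed earlier it is equivalent to:

	switch (new_bit) {
	case LOCK_USED_IN_HARDIRQ:
	case LOCK_USED_IN_HARDIRQ_READ:
	case LOCK_ENABLED_HARDIRQ:
	case LOCK_ENABLED_HARDIRQ_READ:
	/* ...the same four cases for SOFTIRQ and RECLAIM_FS... */
		ret = mark_lock_irq(curr, this, new_bit);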
@@ -2542,6 +2544,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		debug_locks_off();
 		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return 0;
 	}
 
@@ -2638,6 +2641,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		debug_locks_off();
 		printk("BUG: MAX_LOCK_DEPTH too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return 0;
 	}
 
@@ -2925,6 +2929,8 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
 }
 EXPORT_SYMBOL_GPL(lock_set_class);
 
+DEFINE_TRACE(lock_acquire);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
@@ -2935,6 +2941,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
 	unsigned long flags;
 
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -2949,11 +2957,15 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 }
 EXPORT_SYMBOL_GPL(lock_acquire);
 
+DEFINE_TRACE(lock_release);
+
 void lock_release(struct lockdep_map *lock, int nested,
 			  unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_release(lock, nested, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
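Each DEFINE_TRACE() here pairs with a DECLARE_TRACE() in the new <trace/lockdep.h> header included at the top of the file. With the tracepoint API of this kernel generation, that declaration would look roughly like the following (a sketch; the authoritative prototypes live in trace/lockdep.h):

	/* include/trace/lockdep.h (sketch) */
	DECLARE_TRACE(lock_release,
		TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
		TP_ARGS(lock, nested, ip));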
@@ -2966,6 +2978,16 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
+void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+{
+	current->lockdep_reclaim_gfp = gfp_mask;
+}
+
+void lockdep_clear_current_reclaim_state(void)
+{
+	current->lockdep_reclaim_gfp = 0;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static int
 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
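The setter/clearer pair is meant to bracket direct reclaim, so that locks taken inside reclaim are checked against the reclaiming allocation's gfp mask. A sketch of the intended use in the page allocator's reclaim path (illustrative; the actual caller is added elsewhere in this series):

	lockdep_set_current_reclaim_state(gfp_mask);
	progress = try_to_free_pages(zonelist, order, gfp_mask);
	lockdep_clear_current_reclaim_state();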
@@ -3092,10 +3114,14 @@ found_it:
 	lock->ip = ip;
 }
 
+DEFINE_TRACE(lock_contended);
+
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_contended(lock, ip);
+
 	if (unlikely(!lock_stat))
 		return;
 
@@ -3111,10 +3137,14 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
+DEFINE_TRACE(lock_acquired);
+
 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_acquired(lock, ip);
+
 	if (unlikely(!lock_stat))
 		return;
 