author		Linus Torvalds <torvalds@linux-foundation.org>	2009-03-30 20:17:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-03-30 20:17:35 -0400
commit		c4e1aa67ed9e4e542a064bc271ddbf152b677e91 (patch)
tree		2a2ca00bed0fc22b4eb83db092c9178868d8f76b /kernel
parent		cf2f7d7c90279cdbc12429de278f3d27ac2050ae (diff)
parent		2f8501815256af8498904e68bd0984b1afffd6f8 (diff)
Merge branch 'locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (33 commits)
lockdep: fix deadlock in lockdep_trace_alloc
lockdep: annotate reclaim context (__GFP_NOFS), fix SLOB
lockdep: annotate reclaim context (__GFP_NOFS), fix
lockdep: build fix for !PROVE_LOCKING
lockstat: warn about disabled lock debugging
lockdep: use stringify.h
lockdep: simplify check_prev_add_irq()
lockdep: get_user_chars() redo
lockdep: simplify get_user_chars()
lockdep: add comments to mark_lock_irq()
lockdep: remove macro usage from mark_held_locks()
lockdep: fully reduce mark_lock_irq()
lockdep: merge the !_READ mark_lock_irq() helpers
lockdep: merge the _READ mark_lock_irq() helpers
lockdep: simplify mark_lock_irq() helpers #3
lockdep: further simplify mark_lock_irq() helpers
lockdep: simplify the mark_lock_irq() helpers
lockdep: split up mark_lock_irq()
lockdep: generate usage strings
lockdep: generate the state bit definitions
...
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/lockdep.c		528
-rw-r--r--	kernel/lockdep_internals.h	45
-rw-r--r--	kernel/lockdep_proc.c		22
-rw-r--r--	kernel/lockdep_states.h		9
-rw-r--r--	kernel/mutex-debug.c		9
-rw-r--r--	kernel/mutex-debug.h		18
-rw-r--r--	kernel/mutex.c			121
-rw-r--r--	kernel/mutex.h			22
-rw-r--r--	kernel/sched.c			71
-rw-r--r--	kernel/sched_features.h		1
-rw-r--r--	kernel/timer.c			68
11 files changed, 597 insertions, 317 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06b0c3568f0b..3673a3f44d9d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -41,6 +41,7 @@
 #include <linux/utsname.h>
 #include <linux/hash.h>
 #include <linux/ftrace.h>
+#include <linux/stringify.h>
 
 #include <asm/sections.h>
 
@@ -310,12 +311,14 @@ EXPORT_SYMBOL(lockdep_on);
 #if VERBOSE
 # define HARDIRQ_VERBOSE 1
 # define SOFTIRQ_VERBOSE 1
+# define RECLAIM_VERBOSE 1
 #else
 # define HARDIRQ_VERBOSE 0
 # define SOFTIRQ_VERBOSE 0
+# define RECLAIM_VERBOSE 0
 #endif
 
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
+#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 /*
  * Quick filtering for interesting events:
  */
@@ -443,17 +446,18 @@ atomic_t nr_find_usage_backwards_recursions;
  * Locking printouts:
  */
 
+#define __USAGE(__STATE)						\
+	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
+	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
+	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
+	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
+
 static const char *usage_str[] =
 {
-	[LOCK_USED] = "initial-use ",
-	[LOCK_USED_IN_HARDIRQ] = "in-hardirq-W",
-	[LOCK_USED_IN_SOFTIRQ] = "in-softirq-W",
-	[LOCK_ENABLED_SOFTIRQS] = "softirq-on-W",
-	[LOCK_ENABLED_HARDIRQS] = "hardirq-on-W",
-	[LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R",
-	[LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R",
-	[LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R",
-	[LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R",
+#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+	[LOCK_USED] = "INITIAL USE",
 };
 
 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
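With the three states in kernel/lockdep_states.h, the __USAGE() macro above regenerates exactly the table the removed initializers spelled out by hand, under the new singular names. A sketch of the preprocessed result (illustrative, not part of the patch):

        static const char *usage_str[] =
        {
                [LOCK_USED_IN_HARDIRQ]       = "IN-HARDIRQ-W",
                [LOCK_ENABLED_HARDIRQ]       = "HARDIRQ-ON-W",
                [LOCK_USED_IN_HARDIRQ_READ]  = "IN-HARDIRQ-R",
                [LOCK_ENABLED_HARDIRQ_READ]  = "HARDIRQ-ON-R",
                /* ...the same four entries for SOFTIRQ and RECLAIM_FS... */
                [LOCK_USED]                  = "INITIAL USE",
        };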
@@ -461,46 +465,45 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 }
 
-void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
+static inline unsigned long lock_flag(enum lock_usage_bit bit)
 {
-	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
-
-	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
-		*c1 = '+';
-	else
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
-			*c1 = '-';
+	return 1UL << bit;
+}
 
-	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
-		*c2 = '+';
-	else
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
-			*c2 = '-';
+static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
+{
+	char c = '.';
 
-	if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-		*c3 = '-';
-	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
-		*c3 = '+';
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-			*c3 = '?';
-	}
+	if (class->usage_mask & lock_flag(bit + 2))
+		c = '+';
+	if (class->usage_mask & lock_flag(bit)) {
+		c = '-';
+		if (class->usage_mask & lock_flag(bit + 2))
+			c = '?';
+	}
 
-	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-		*c4 = '-';
-	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
-		*c4 = '+';
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-			*c4 = '?';
-	}
+	return c;
+}
+
+void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
+{
+	int i = 0;
+
+#define LOCKDEP_STATE(__STATE)						\
+	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
+	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+
+	usage[i] = '\0';
 }
 
 static void print_lock_name(struct lock_class *class)
 {
-	char str[KSYM_NAME_LEN], c1, c2, c3, c4;
+	char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
 	const char *name;
 
-	get_usage_chars(class, &c1, &c2, &c3, &c4);
+	get_usage_chars(class, usage);
 
 	name = class->name;
 	if (!name) {
@@ -513,7 +516,7 @@ static void print_lock_name(struct lock_class *class)
 		if (class->subclass)
 			printk("/%d", class->subclass);
 	}
-	printk("){%c%c%c%c}", c1, c2, c3, c4);
+	printk("){%s}", usage);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
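The single characters emitted by get_usage_char() encode, per state, how a lock class has interacted with that context; bit is the LOCK_USED_IN_* bit and bit + 2 the matching LOCK_ENABLED_* bit. Summarizing the function above (derived from it, not new semantics):

        '.'  neither bit set: never used in this state, never enabled it
        '+'  LOCK_ENABLED_* set: the class was held while enabling the state
        '-'  LOCK_USED_IN_* set: the class was acquired in that context
        '?'  both set: conflicting usage observed for this class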
@@ -1263,9 +1266,49 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 		bit_backwards, bit_forwards, irqclass);
 }
 
-static int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-		struct held_lock *next)
+static const char *state_names[] = {
+#define LOCKDEP_STATE(__STATE) \
+	__stringify(__STATE),
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static const char *state_rnames[] = {
+#define LOCKDEP_STATE(__STATE) \
+	__stringify(__STATE)"-READ",
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline const char *state_name(enum lock_usage_bit bit)
+{
+	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+}
+
+static int exclusive_bit(int new_bit)
+{
+	/*
+	 * USED_IN
+	 * USED_IN_READ
+	 * ENABLED
+	 * ENABLED_READ
+	 *
+	 * bit 0 - write/read
+	 * bit 1 - used_in/enabled
+	 * bit 2+  state
+	 */
+
+	int state = new_bit & ~3;
+	int dir = new_bit & 2;
+
+	/*
+	 * keep state, bit flip the direction and strip read.
+	 */
+	return state | (dir ^ 2);
+}
+
+static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
+			   struct held_lock *next, enum lock_usage_bit bit)
 {
 	/*
 	 * Prove that the new dependency does not connect a hardirq-safe
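Given the comment's bit layout (bit 0 = read, bit 1 = used_in/enabled, bits 2+ = state index), exclusive_bit() flips the direction bit and strips the read bit, mapping each usage onto the usage it must never be paired with. Worked values, assuming HARDIRQ is state 0 as in kernel/lockdep_internals.h:

        exclusive_bit(LOCK_USED_IN_HARDIRQ      = 0) == 2 == LOCK_ENABLED_HARDIRQ
        exclusive_bit(LOCK_USED_IN_HARDIRQ_READ = 1) == 2 == LOCK_ENABLED_HARDIRQ
        exclusive_bit(LOCK_ENABLED_HARDIRQ      = 2) == 0 == LOCK_USED_IN_HARDIRQ
        exclusive_bit(LOCK_ENABLED_HARDIRQ_READ = 3) == 0 == LOCK_USED_IN_HARDIRQ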
@@ -1273,38 +1316,34 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
 	 * the backwards-subgraph starting at <prev>, and the
 	 * forwards-subgraph starting at <next>:
 	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
-					LOCK_ENABLED_HARDIRQS, "hard"))
+	if (!check_usage(curr, prev, next, bit,
+			   exclusive_bit(bit), state_name(bit)))
 		return 0;
 
+	bit++; /* _READ */
+
 	/*
 	 * Prove that the new dependency does not connect a hardirq-safe-read
 	 * lock with a hardirq-unsafe lock - to achieve this we search
 	 * the backwards-subgraph starting at <prev>, and the
 	 * forwards-subgraph starting at <next>:
 	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
-					LOCK_ENABLED_HARDIRQS, "hard-read"))
+	if (!check_usage(curr, prev, next, bit,
+			   exclusive_bit(bit), state_name(bit)))
 		return 0;
 
-	/*
-	 * Prove that the new dependency does not connect a softirq-safe
-	 * lock with a softirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
-	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
-				LOCK_ENABLED_SOFTIRQS, "soft"))
-		return 0;
-	/*
-	 * Prove that the new dependency does not connect a softirq-safe-read
-	 * lock with a softirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
-	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
-				LOCK_ENABLED_SOFTIRQS, "soft"))
-		return 0;
+	return 1;
+}
+
+static int
+check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
+		struct held_lock *next)
+{
+#define LOCKDEP_STATE(__STATE)						\
+	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
+		return 0;
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
 
 	return 1;
 }
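After preprocessing, the LOCKDEP_STATE() block in check_prev_add_irq() expands to one check_irq_usage() call per state listed in kernel/lockdep_states.h. A sketch of the result (illustrative only):

        static int
        check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
                        struct held_lock *next)
        {
                if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ))
                        return 0;
                if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ))
                        return 0;
                if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS))
                        return 0;

                return 1;
        }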
@@ -1933,7 +1972,7 @@ void print_irqtrace_events(struct task_struct *curr)
 		print_ip_sym(curr->softirq_disable_ip);
 }
 
-static int hardirq_verbose(struct lock_class *class)
+static int HARDIRQ_verbose(struct lock_class *class)
 {
 #if HARDIRQ_VERBOSE
 	return class_filter(class);
@@ -1941,7 +1980,7 @@ static int hardirq_verbose(struct lock_class *class)
 	return 0;
 }
 
-static int softirq_verbose(struct lock_class *class)
+static int SOFTIRQ_verbose(struct lock_class *class)
 {
 #if SOFTIRQ_VERBOSE
 	return class_filter(class);
@@ -1949,185 +1988,94 @@ static int softirq_verbose(struct lock_class *class)
 	return 0;
 }
 
+static int RECLAIM_FS_verbose(struct lock_class *class)
+{
+#if RECLAIM_VERBOSE
+	return class_filter(class);
+#endif
+	return 0;
+}
+
 #define STRICT_READ_CHECKS	1
 
-static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
-		enum lock_usage_bit new_bit)
-{
-	int ret = 1;
-
-	switch(new_bit) {
-	case LOCK_USED_IN_HARDIRQ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_ENABLED_HARDIRQS_READ))
-			return 0;
-		/*
-		 * just marked it hardirq-safe, check that this lock
-		 * took no hardirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_HARDIRQS, "hard"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it hardirq-safe, check that this lock
-		 * took no hardirq-unsafe-read lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-				LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
-			return 0;
-#endif
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_USED_IN_SOFTIRQ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_ENABLED_SOFTIRQS_READ))
-			return 0;
-		/*
-		 * just marked it softirq-safe, check that this lock
-		 * took no softirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_SOFTIRQS, "soft"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it softirq-safe, check that this lock
-		 * took no softirq-unsafe-read lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-				LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
-			return 0;
-#endif
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_USED_IN_HARDIRQ_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-			return 0;
-		/*
-		 * just marked it hardirq-read-safe, check that this lock
-		 * took no hardirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_HARDIRQS, "hard"))
-			return 0;
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_USED_IN_SOFTIRQ_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-			return 0;
-		/*
-		 * just marked it softirq-read-safe, check that this lock
-		 * took no softirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_SOFTIRQS, "soft"))
-			return 0;
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_HARDIRQS:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_USED_IN_HARDIRQ_READ))
-			return 0;
-		/*
-		 * just marked it hardirq-unsafe, check that no hardirq-safe
-		 * lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_HARDIRQ, "hard"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it hardirq-unsafe, check that no
-		 * hardirq-safe-read lock in the system ever took
-		 * it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-				   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
-			return 0;
-#endif
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_SOFTIRQS:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_USED_IN_SOFTIRQ_READ))
-			return 0;
-		/*
-		 * just marked it softirq-unsafe, check that no softirq-safe
-		 * lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_SOFTIRQ, "soft"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it softirq-unsafe, check that no
-		 * softirq-safe-read lock in the system ever took
-		 * it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-				   LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
-			return 0;
-#endif
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_HARDIRQS_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it hardirq-read-unsafe, check that no
-		 * hardirq-safe lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_HARDIRQ, "hard"))
-			return 0;
-#endif
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_SOFTIRQS_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it softirq-read-unsafe, check that no
-		 * softirq-safe lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_SOFTIRQ, "soft"))
-			return 0;
-#endif
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	default:
-		WARN_ON(1);
-		break;
-	}
-
-	return ret;
-}
+static int (*state_verbose_f[])(struct lock_class *class) = {
+#define LOCKDEP_STATE(__STATE) \
+	__STATE##_verbose,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline int state_verbose(enum lock_usage_bit bit,
+				struct lock_class *class)
+{
+	return state_verbose_f[bit >> 2](class);
+}
+
+typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
+			     enum lock_usage_bit bit, const char *name);
+
+static int
+mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit)
+{
+	int excl_bit = exclusive_bit(new_bit);
+	int read = new_bit & 1;
+	int dir = new_bit & 2;
+
+	/*
+	 * mark USED_IN has to look forwards -- to ensure no dependency
+	 * has ENABLED state, which would allow recursion deadlocks.
+	 *
+	 * mark ENABLED has to look backwards -- to ensure no dependee
+	 * has USED_IN state, which, again, would allow recursion deadlocks.
+	 */
+	check_usage_f usage = dir ?
+		check_usage_backwards : check_usage_forwards;
+
+	/*
+	 * Validate that this particular lock does not have conflicting
+	 * usage states.
+	 */
+	if (!valid_state(curr, this, new_bit, excl_bit))
+		return 0;
+
+	/*
+	 * Validate that the lock dependencies don't have conflicting usage
+	 * states.
+	 */
+	if ((!read || !dir || STRICT_READ_CHECKS) &&
+			!usage(curr, this, excl_bit, state_name(new_bit)))
+		return 0;
+
+	/*
+	 * Check for read in write conflicts
+	 */
+	if (!read) {
+		if (!valid_state(curr, this, new_bit, excl_bit + 1))
+			return 0;
+
+		if (STRICT_READ_CHECKS &&
+			!usage(curr, this, excl_bit + 1,
+				state_name(new_bit + 1)))
+			return 0;
+	}
+
+	if (state_verbose(new_bit, hlock_class(this)))
+		return 2;
+
+	return 1;
+}
 
+enum mark_type {
+#define LOCKDEP_STATE(__STATE)	__STATE,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
 /*
  * Mark all held locks with a usage bit:
  */
 static int
-mark_held_locks(struct task_struct *curr, int hardirq)
+mark_held_locks(struct task_struct *curr, enum mark_type mark)
 {
 	enum lock_usage_bit usage_bit;
 	struct held_lock *hlock;
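To see the table-driven mark_lock_irq() reproduce one arm of the deleted switch, trace new_bit = LOCK_ENABLED_SOFTIRQ (value 6 under the enum in kernel/lockdep_internals.h):

        read     = 6 & 1 = 0
        dir      = 6 & 2 = 2                    /* an ENABLED bit */
        excl_bit = exclusive_bit(6) = 4         /* LOCK_USED_IN_SOFTIRQ */
        usage    = check_usage_backwards        /* dir != 0 */

That is a valid_state() check against LOCK_USED_IN_SOFTIRQ (and, because read == 0, against LOCK_USED_IN_SOFTIRQ_READ as well) plus a backwards walk of the dependency graph: exactly the old case LOCK_ENABLED_SOFTIRQS, minus the copy-paste.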
@@ -2136,17 +2084,12 @@ mark_held_locks(struct task_struct *curr, int hardirq)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
-		if (hardirq) {
-			if (hlock->read)
-				usage_bit = LOCK_ENABLED_HARDIRQS_READ;
-			else
-				usage_bit = LOCK_ENABLED_HARDIRQS;
-		} else {
-			if (hlock->read)
-				usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
-			else
-				usage_bit = LOCK_ENABLED_SOFTIRQS;
-		}
+		usage_bit = 2 + (mark << 2); /* ENABLED */
+		if (hlock->read)
+			usage_bit += 1; /* READ */
+
+		BUG_ON(usage_bit >= LOCK_USAGE_STATES);
+
 		if (!mark_lock(curr, hlock, usage_bit))
 			return 0;
 	}
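With four bits per state and ENABLED at offset 2 within each group, the arithmetic in mark_held_locks() lands on the right usage bit for any mark_type (values assume HARDIRQ = 0, SOFTIRQ = 1, RECLAIM_FS = 2 from the x-macro expansion):

        HARDIRQ    (0): usage_bit = 2 + (0 << 2) =  2 = LOCK_ENABLED_HARDIRQ
        SOFTIRQ    (1): usage_bit = 2 + (1 << 2) =  6 = LOCK_ENABLED_SOFTIRQ
        RECLAIM_FS (2): usage_bit = 2 + (2 << 2) = 10 = LOCK_ENABLED_RECLAIM_FS
        /* +1 selects the corresponding _READ bit when hlock->read is set */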
@@ -2200,7 +2143,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	 * We are going to turn hardirqs on, so set the
 	 * usage bit for all held locks:
 	 */
-	if (!mark_held_locks(curr, 1))
+	if (!mark_held_locks(curr, HARDIRQ))
 		return;
 	/*
 	 * If we have softirqs enabled, then set the usage
@@ -2208,7 +2151,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	 * this bit from being set before)
 	 */
 	if (curr->softirqs_enabled)
-		if (!mark_held_locks(curr, 0))
+		if (!mark_held_locks(curr, SOFTIRQ))
 			return;
 
 	curr->hardirq_enable_ip = ip;
@@ -2288,7 +2231,7 @@ void trace_softirqs_on(unsigned long ip)
 	 * enabled too:
 	 */
 	if (curr->hardirqs_enabled)
-		mark_held_locks(curr, 0);
+		mark_held_locks(curr, SOFTIRQ);
 }
 
 /*
@@ -2317,6 +2260,48 @@ void trace_softirqs_off(unsigned long ip)
 		debug_atomic_inc(&redundant_softirqs_off);
 }
 
+static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
+{
+	struct task_struct *curr = current;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	/* no reclaim without waiting on it */
+	if (!(gfp_mask & __GFP_WAIT))
+		return;
+
+	/* this guy won't enter reclaim */
+	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+		return;
+
+	/* We're only interested __GFP_FS allocations for now */
+	if (!(gfp_mask & __GFP_FS))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
+		return;
+
+	mark_held_locks(curr, RECLAIM_FS);
+}
+
+static void check_flags(unsigned long flags);
+
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+	current->lockdep_recursion = 1;
+	__lockdep_trace_alloc(gfp_mask, flags);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+
 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 {
 	/*
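The hook pair above is driven from the allocator side: lockdep_trace_alloc() runs on each allocation, while the reclaim-state setters added further down in this patch bracket direct reclaim. A hedged sketch of the intended call pattern (placement in the mm/ callers is illustrative, not quoted from this diff):

        /* in the slab/page allocator entry points: */
        lockdep_trace_alloc(gfp_mask);

        /* around direct reclaim for a __GFP_FS allocation: */
        lockdep_set_current_reclaim_state(gfp_mask);
        try_to_free_pages(zonelist, order, gfp_mask);
        lockdep_clear_current_reclaim_state();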
@@ -2345,19 +2330,35 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 	if (!hlock->hardirqs_off) {
 		if (hlock->read) {
 			if (!mark_lock(curr, hlock,
-					LOCK_ENABLED_HARDIRQS_READ))
+					LOCK_ENABLED_HARDIRQ_READ))
 				return 0;
 			if (curr->softirqs_enabled)
 				if (!mark_lock(curr, hlock,
-						LOCK_ENABLED_SOFTIRQS_READ))
+						LOCK_ENABLED_SOFTIRQ_READ))
 					return 0;
 		} else {
 			if (!mark_lock(curr, hlock,
-					LOCK_ENABLED_HARDIRQS))
+					LOCK_ENABLED_HARDIRQ))
 				return 0;
 			if (curr->softirqs_enabled)
 				if (!mark_lock(curr, hlock,
-						LOCK_ENABLED_SOFTIRQS))
+						LOCK_ENABLED_SOFTIRQ))
+					return 0;
+		}
+	}
+
+	/*
+	 * We reuse the irq context infrastructure more broadly as a general
+	 * context checking code. This tests GFP_FS recursion (a lock taken
+	 * during reclaim for a GFP_FS allocation is held over a GFP_FS
+	 * allocation).
+	 */
+	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
+		if (hlock->read) {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
+				return 0;
+		} else {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
 				return 0;
 		}
 	}
@@ -2412,6 +2413,10 @@ static inline int separate_irq_context(struct task_struct *curr,
 	return 0;
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+}
+
 #endif
 
 /*
@@ -2445,14 +2450,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		return 0;
 
 	switch (new_bit) {
-	case LOCK_USED_IN_HARDIRQ:
-	case LOCK_USED_IN_SOFTIRQ:
-	case LOCK_USED_IN_HARDIRQ_READ:
-	case LOCK_USED_IN_SOFTIRQ_READ:
-	case LOCK_ENABLED_HARDIRQS:
-	case LOCK_ENABLED_SOFTIRQS:
-	case LOCK_ENABLED_HARDIRQS_READ:
-	case LOCK_ENABLED_SOFTIRQS_READ:
+#define LOCKDEP_STATE(__STATE)			\
+	case LOCK_USED_IN_##__STATE:		\
+	case LOCK_USED_IN_##__STATE##_READ:	\
+	case LOCK_ENABLED_##__STATE:		\
+	case LOCK_ENABLED_##__STATE##_READ:
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
 		ret = mark_lock_irq(curr, this, new_bit);
 		if (!ret)
 			return 0;
@@ -2966,6 +2970,16 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
+void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+{
+	current->lockdep_reclaim_gfp = gfp_mask;
+}
+
+void lockdep_clear_current_reclaim_state(void)
+{
+	current->lockdep_reclaim_gfp = 0;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static int
 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 56b196932c08..a2cc7e9a6e84 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -7,6 +7,45 @@
  */
 
 /*
+ * Lock-class usage-state bits:
+ */
+enum lock_usage_bit {
+#define LOCKDEP_STATE(__STATE)		\
+	LOCK_USED_IN_##__STATE,		\
+	LOCK_USED_IN_##__STATE##_READ,	\
+	LOCK_ENABLED_##__STATE,		\
+	LOCK_ENABLED_##__STATE##_READ,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+	LOCK_USED,
+	LOCK_USAGE_STATES
+};
+
+/*
+ * Usage-state bitmasks:
+ */
+#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),
+
+enum {
+#define LOCKDEP_STATE(__STATE)						\
+	__LOCKF(USED_IN_##__STATE)					\
+	__LOCKF(USED_IN_##__STATE##_READ)				\
+	__LOCKF(ENABLED_##__STATE)					\
+	__LOCKF(ENABLED_##__STATE##_READ)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+	__LOCKF(USED)
+};
+
+#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
+#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+
+#define LOCKF_ENABLED_IRQ_READ \
+		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
+#define LOCKF_USED_IN_IRQ_READ \
+		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+
+/*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
  *
@@ -31,8 +70,10 @@
 extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
 
-extern void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
+#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
+
+extern void get_usage_chars(struct lock_class *class,
+			    char usage[LOCK_USAGE_CHARS]);
 
 extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
 
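Expanded against kernel/lockdep_states.h, the enum numbers four bits per state, so LOCK_USED = 12 and LOCK_USAGE_STATES = 13; LOCK_USAGE_CHARS then evaluates to 1 + 13/2 = 7, which is exactly what get_usage_chars() fills in: two characters per state (write and read) plus the terminating NUL. Sketch of the expansion (illustrative):

        enum lock_usage_bit {
                LOCK_USED_IN_HARDIRQ         = 0,
                LOCK_USED_IN_HARDIRQ_READ    = 1,
                LOCK_ENABLED_HARDIRQ         = 2,
                LOCK_ENABLED_HARDIRQ_READ    = 3,
                LOCK_USED_IN_SOFTIRQ         = 4,
                /* ... SOFTIRQ and RECLAIM_FS groups continue ... */
                LOCK_ENABLED_RECLAIM_FS_READ = 11,
                LOCK_USED                    = 12,
                LOCK_USAGE_STATES            = 13
        };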
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 13716b813896..d7135aa2d2c4 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -84,7 +84,7 @@ static int l_show(struct seq_file *m, void *v)
 {
 	struct lock_class *class = v;
 	struct lock_list *entry;
-	char c1, c2, c3, c4;
+	char usage[LOCK_USAGE_CHARS];
 
 	if (v == SEQ_START_TOKEN) {
 		seq_printf(m, "all lock classes:\n");
@@ -100,8 +100,8 @@ static int l_show(struct seq_file *m, void *v)
 	seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
 #endif
 
-	get_usage_chars(class, &c1, &c2, &c3, &c4);
-	seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
+	get_usage_chars(class, usage);
+	seq_printf(m, " %s", usage);
 
 	seq_printf(m, ": ");
 	print_name(m, class);
@@ -300,27 +300,27 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 			nr_uncategorized++;
 		if (class->usage_mask & LOCKF_USED_IN_IRQ)
 			nr_irq_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_IRQS)
+		if (class->usage_mask & LOCKF_ENABLED_IRQ)
 			nr_irq_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
 			nr_softirq_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
+		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
 			nr_softirq_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
 			nr_hardirq_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
+		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
 			nr_hardirq_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
 			nr_irq_read_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
+		if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
 			nr_irq_read_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
 			nr_softirq_read_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
+		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
 			nr_softirq_read_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
 			nr_hardirq_read_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
+		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
 			nr_hardirq_read_unsafe++;
 
 #ifdef CONFIG_PROVE_LOCKING
@@ -601,6 +601,10 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 static void seq_header(struct seq_file *m)
 {
 	seq_printf(m, "lock_stat version 0.3\n");
+
+	if (unlikely(!debug_locks))
+		seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
+
 	seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
 	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
 			"%14s %14s\n",
diff --git a/kernel/lockdep_states.h b/kernel/lockdep_states.h
new file mode 100644
index 000000000000..995b0cc2b84c
--- /dev/null
+++ b/kernel/lockdep_states.h
@@ -0,0 +1,9 @@
+/*
+ * Lockdep states,
+ *
+ * please update XXX_LOCK_USAGE_STATES in include/linux/lockdep.h whenever
+ * you add one, or come up with a nice dynamic solution.
+ */
+LOCKDEP_STATE(HARDIRQ)
+LOCKDEP_STATE(SOFTIRQ)
+LOCKDEP_STATE(RECLAIM_FS)
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 1d94160eb532..50d022e5a560 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -26,11 +26,6 @@
 /*
  * Must be called with lock->wait_lock held.
  */
-void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
-{
-	lock->owner = new_owner;
-}
-
 void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
 {
 	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
@@ -59,7 +54,6 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 	/* Mark the current thread as blocked on the lock: */
 	ti->task->blocked_on = waiter;
-	waiter->lock = lock;
 }
 
 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
@@ -82,7 +76,7 @@ void debug_mutex_unlock(struct mutex *lock)
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
+	mutex_clear_owner(lock);
 }
 
 void debug_mutex_init(struct mutex *lock, const char *name,
@@ -95,7 +89,6 @@ void debug_mutex_init(struct mutex *lock, const char *name,
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	lock->owner = NULL;
 	lock->magic = lock;
 }
 
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index babfbdfc534b..6b2d735846a5 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -13,14 +13,6 @@
 /*
  * This must be called with lock->wait_lock held.
  */
-extern void
-debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
-
-static inline void debug_mutex_clear_owner(struct mutex *lock)
-{
-	lock->owner = NULL;
-}
-
 extern void debug_mutex_lock_common(struct mutex *lock,
 				    struct mutex_waiter *waiter);
 extern void debug_mutex_wake_waiter(struct mutex *lock,
@@ -35,6 +27,16 @@ extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
 			     struct lock_class_key *key);
 
+static inline void mutex_set_owner(struct mutex *lock)
+{
+	lock->owner = current_thread_info();
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+	lock->owner = NULL;
+}
+
 #define spin_lock_mutex(lock, flags)			\
 	do {						\
 		struct mutex *l = container_of(lock, struct mutex, wait_lock); \
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4f45d4b658ef..5d79781394a3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -10,6 +10,11 @@
  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  * David Howells for suggestions and improvements.
  *
+ *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
+ *    from the -rt tree, where it was originally implemented for rtmutexes
+ *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
+ *    and Sven Dietrich.
+ *
  * Also see Documentation/mutex-design.txt.
  */
 #include <linux/mutex.h>
@@ -46,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
+	mutex_clear_owner(lock);
 
 	debug_mutex_init(lock, name, key);
 }
@@ -91,6 +97,7 @@ void inline __sched mutex_lock(struct mutex *lock)
 	 * 'unlocked' into 'locked' state.
 	 */
 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+	mutex_set_owner(lock);
 }
 
 EXPORT_SYMBOL(mutex_lock);
@@ -115,6 +122,14 @@ void __sched mutex_unlock(struct mutex *lock)
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
 	 */
+#ifndef CONFIG_DEBUG_MUTEXES
+	/*
+	 * When debugging is enabled we must not clear the owner before time,
+	 * the slow path will always be taken, and that clears the owner field
+	 * after verifying that it was indeed current.
+	 */
+	mutex_clear_owner(lock);
+#endif
 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
 
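For readers following the fastpath comments, the counter encodes three states (this summary follows Documentation/mutex-design.txt, not anything new in this patch):

         1	unlocked
         0	locked, no waiters
        -1	locked, possible waiters (unlock must take the slowpath and wake one)

which is why the lock slowpaths below use atomic_xchg(&lock->count, -1) and compare the old value against 1.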
@@ -129,21 +144,75 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
-	unsigned int old_val;
 	unsigned long flags;
 
+	preempt_disable();
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+	/*
+	 * Optimistic spinning.
+	 *
+	 * We try to spin for acquisition when we find that there are no
+	 * pending waiters and the lock owner is currently running on a
+	 * (different) CPU.
+	 *
+	 * The rationale is that if the lock owner is running, it is likely to
+	 * release the lock soon.
+	 *
+	 * Since this needs the lock owner, and this mutex implementation
+	 * doesn't track the owner atomically in the lock field, we need to
+	 * track it non-atomically.
+	 *
+	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
+	 * to serialize everything.
+	 */
+
+	for (;;) {
+		struct thread_info *owner;
+
+		/*
+		 * If there's an owner, wait for it to either
+		 * release the lock or go to sleep.
+		 */
+		owner = ACCESS_ONCE(lock->owner);
+		if (owner && !mutex_spin_on_owner(lock, owner))
+			break;
+
+		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+			lock_acquired(&lock->dep_map, ip);
+			mutex_set_owner(lock);
+			preempt_enable();
+			return 0;
+		}
+
+		/*
+		 * When there's no owner, we might have preempted between the
+		 * owner acquiring the lock and setting the owner field. If
+		 * we're an RT task that will live-lock because we won't let
+		 * the owner complete.
+		 */
+		if (!owner && (need_resched() || rt_task(task)))
+			break;
+
+		/*
+		 * The cpu_relax() call is a compiler barrier which forces
+		 * everything in this loop to be re-loaded. We don't need
+		 * memory barriers as we'll eventually observe the right
+		 * values at the cost of a few extra spins.
+		 */
+		cpu_relax();
+	}
+#endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	old_val = atomic_xchg(&lock->count, -1);
-	if (old_val == 1)
+	if (atomic_xchg(&lock->count, -1) == 1)
 		goto done;
 
 	lock_contended(&lock->dep_map, ip);
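The atomic_cmpxchg(&lock->count, 1, 0) in the spin loop above can only succeed on the 1 -> 0 transition, so a spinner behaves as follows (a reading of the loop, not additional semantics):

        count ==  1	cmpxchg succeeds: acquired without ever sleeping
        count ==  0	held, no waiters recorded: keep spinning on the owner
        count == -1	held with waiters queued: keep spinning until the owner
        		releases or blocks, then fall through to the slowpath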
@@ -158,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * that when we release the lock, we properly wake up the
 		 * other waiters:
 		 */
-		old_val = atomic_xchg(&lock->count, -1);
-		if (old_val == 1)
+		if (atomic_xchg(&lock->count, -1) == 1)
 			break;
 
 		/*
@@ -173,21 +241,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
+			preempt_enable();
 			return -EINTR;
 		}
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		schedule();
+		__schedule();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
 done:
 	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-	debug_mutex_set_owner(lock, task_thread_info(task));
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -196,6 +265,7 @@ done:
 	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
 
 	return 0;
 }
@@ -222,7 +292,8 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
+				   subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -260,8 +331,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 		wake_up_process(waiter->task);
 	}
 
-	debug_mutex_clear_owner(lock);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
@@ -298,18 +367,30 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  */
 int __sched mutex_lock_interruptible(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_interruptible_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 int __sched mutex_lock_killable(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_killable_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
@@ -352,9 +433,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1)) {
-		debug_mutex_set_owner(lock, current_thread_info());
+		mutex_set_owner(lock);
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 	}
+
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -380,8 +462,13 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-	return __mutex_fastpath_trylock(&lock->count,
-					__mutex_trylock_slowpath);
+	int ret;
+
+	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
+	if (ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 
 EXPORT_SYMBOL(mutex_trylock);
diff --git a/kernel/mutex.h b/kernel/mutex.h
index a075dafbb290..67578ca48f94 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -16,8 +16,26 @@
 #define mutex_remove_waiter(lock, waiter, ti) \
 		__list_del((waiter)->list.prev, (waiter)->list.next)
 
-#define debug_mutex_set_owner(lock, new_owner)		do { } while (0)
-#define debug_mutex_clear_owner(lock)			do { } while (0)
+#ifdef CONFIG_SMP
+static inline void mutex_set_owner(struct mutex *lock)
+{
+	lock->owner = current_thread_info();
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+	lock->owner = NULL;
+}
+#else
+static inline void mutex_set_owner(struct mutex *lock)
+{
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+}
+#endif
+
 #define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
 #define debug_mutex_free_waiter(waiter)			do { } while (0)
 #define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5757e03cfac0..196d48babbef 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4942,15 +4942,13 @@ pick_next_task(struct rq *rq)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+asmlinkage void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
-need_resched:
-	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_qsctr_inc(cpu);
@@ -5007,13 +5005,80 @@ need_resched_nonpreemptible: | |||
5007 | 5005 | ||
5008 | if (unlikely(reacquire_kernel_lock(current) < 0)) | 5006 | if (unlikely(reacquire_kernel_lock(current) < 0)) |
5009 | goto need_resched_nonpreemptible; | 5007 | goto need_resched_nonpreemptible; |
5008 | } | ||
5010 | 5009 | ||
5010 | asmlinkage void __sched schedule(void) | ||
5011 | { | ||
5012 | need_resched: | ||
5013 | preempt_disable(); | ||
5014 | __schedule(); | ||
5011 | preempt_enable_no_resched(); | 5015 | preempt_enable_no_resched(); |
5012 | if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) | 5016 | if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) |
5013 | goto need_resched; | 5017 | goto need_resched; |
5014 | } | 5018 | } |
5015 | EXPORT_SYMBOL(schedule); | 5019 | EXPORT_SYMBOL(schedule); |
5016 | 5020 | ||
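After the split, schedule() is only a preemption-safe retry wrapper around the core __schedule(): preempt_enable_no_resched() avoids recursing into the scheduler on the way out, and the loop re-enters when TIF_NEED_RESCHED was set again during the switch. The same control flow, restated without the goto (a sketch, not the committed code):

    asmlinkage void __sched schedule(void)
    {
            do {
                    preempt_disable();
                    __schedule();                   /* core pick-next/context-switch */
                    preempt_enable_no_resched();    /* don't reschedule from here */
            } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
    }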
5021 | #ifdef CONFIG_SMP | ||
5022 | /* | ||
5023 | * Look out! "owner" is an entirely speculative pointer | ||
5024 | * access and not reliable. | ||
5025 | */ | ||
5026 | int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | ||
5027 | { | ||
5028 | unsigned int cpu; | ||
5029 | struct rq *rq; | ||
5030 | |||
5031 | if (!sched_feat(OWNER_SPIN)) | ||
5032 | return 0; | ||
5033 | |||
5034 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
5035 | /* | ||
5036 | * Need to access the cpu field knowing that | ||
5037 | * DEBUG_PAGEALLOC could have unmapped it if | ||
5038 | * the mutex owner just released it and exited. | ||
5039 | */ | ||
5040 | if (probe_kernel_address(&owner->cpu, cpu)) | ||
5041 | goto out; | ||
5042 | #else | ||
5043 | cpu = owner->cpu; | ||
5044 | #endif | ||
5045 | |||
5046 | /* | ||
5047 | * Even if the access succeeded (likely case), | ||
5048 | * the cpu field may no longer be valid. | ||
5049 | */ | ||
5050 | if (cpu >= nr_cpumask_bits) | ||
5051 | goto out; | ||
5052 | |||
5053 | /* | ||
5054 | * We need to validate that we can do a | ||
5055 | * get_cpu() and that we have the percpu area. | ||
5056 | */ | ||
5057 | if (!cpu_online(cpu)) | ||
5058 | goto out; | ||
5059 | |||
5060 | rq = cpu_rq(cpu); | ||
5061 | |||
5062 | for (;;) { | ||
5063 | /* | ||
5064 | * Owner changed, break to re-assess state. | ||
5065 | */ | ||
5066 | if (lock->owner != owner) | ||
5067 | break; | ||
5068 | |||
5069 | /* | ||
5070 | * Is that owner really running on that cpu? | ||
5071 | */ | ||
5072 | if (task_thread_info(rq->curr) != owner || need_resched()) | ||
5073 | return 0; | ||
5074 | |||
5075 | cpu_relax(); | ||
5076 | } | ||
5077 | out: | ||
5078 | return 1; | ||
5079 | } | ||
5080 | #endif | ||
5081 | |||
5017 | #ifdef CONFIG_PREEMPT | 5082 | #ifdef CONFIG_PREEMPT |
5018 | /* | 5083 | /* |
5019 | * this is the entry point to schedule() from in-kernel preemption | 5084 | * this is the entry point to schedule() from in-kernel preemption |
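mutex_spin_on_owner() is the scheduler half of adaptive spinning: it returns 0 the moment the owner is no longer running on its CPU (or we ourselves need to reschedule), and 1 when the owner changed or could not be examined, telling the mutex slowpath to retry the acquisition instead of sleeping. Roughly how the mutex side consumes it, as a trimmed sketch of the __mutex_lock_common() spin loop from this series (not a verbatim copy):

    for (;;) {
            struct thread_info *owner = ACCESS_ONCE(lock->owner);

            /* Owner on-CPU? Keep spinning. Owner preempted? Go sleep. */
            if (owner && !mutex_spin_on_owner(lock, owner))
                    break;

            if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
                    mutex_set_owner(lock);  /* acquired without sleeping */
                    return 0;
            }

            if (need_resched())
                    break;                  /* stop burning cycles */

            cpu_relax();                    /* friendly to SMT siblings */
    }
    /* fall through to the traditional wait-list slowpath */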
diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 76f61756e677..4569bfa7df9b 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h | |||
@@ -14,3 +14,4 @@ SCHED_FEAT(LB_WAKEUP_UPDATE, 1) | |||
14 | SCHED_FEAT(ASYM_EFF_LOAD, 1) | 14 | SCHED_FEAT(ASYM_EFF_LOAD, 1) |
15 | SCHED_FEAT(WAKEUP_OVERLAP, 0) | 15 | SCHED_FEAT(WAKEUP_OVERLAP, 0) |
16 | SCHED_FEAT(LAST_BUDDY, 1) | 16 | SCHED_FEAT(LAST_BUDDY, 1) |
17 | SCHED_FEAT(OWNER_SPIN, 1) | ||
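OWNER_SPIN defaults to on; like every SCHED_FEAT it can be flipped at runtime through /sys/kernel/debug/sched_features when SCHED_DEBUG is enabled (writing NO_OWNER_SPIN disables it). The file works via the X-macro idiom: kernel/sched.c includes sched_features.h several times with different SCHED_FEAT() definitions, roughly:

    /* Sketch of the generator in kernel/sched.c: */
    #define SCHED_FEAT(name, enabled)       \
            __SCHED_FEAT_##name ,
    enum {
    #include "sched_features.h"             /* one enum bit per feature */
    };
    #undef SCHED_FEAT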
diff --git a/kernel/timer.c b/kernel/timer.c index 9b77fc9a9ac8..b4555568b4e4 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -491,14 +491,18 @@ static inline void debug_timer_free(struct timer_list *timer) | |||
491 | debug_object_free(timer, &timer_debug_descr); | 491 | debug_object_free(timer, &timer_debug_descr); |
492 | } | 492 | } |
493 | 493 | ||
494 | static void __init_timer(struct timer_list *timer); | 494 | static void __init_timer(struct timer_list *timer, |
495 | const char *name, | ||
496 | struct lock_class_key *key); | ||
495 | 497 | ||
496 | void init_timer_on_stack(struct timer_list *timer) | 498 | void init_timer_on_stack_key(struct timer_list *timer, |
499 | const char *name, | ||
500 | struct lock_class_key *key) | ||
497 | { | 501 | { |
498 | debug_object_init_on_stack(timer, &timer_debug_descr); | 502 | debug_object_init_on_stack(timer, &timer_debug_descr); |
499 | __init_timer(timer); | 503 | __init_timer(timer, name, key); |
500 | } | 504 | } |
501 | EXPORT_SYMBOL_GPL(init_timer_on_stack); | 505 | EXPORT_SYMBOL_GPL(init_timer_on_stack_key); |
502 | 506 | ||
503 | void destroy_timer_on_stack(struct timer_list *timer) | 507 | void destroy_timer_on_stack(struct timer_list *timer) |
504 | { | 508 | { |
@@ -512,7 +516,9 @@ static inline void debug_timer_activate(struct timer_list *timer) { } | |||
512 | static inline void debug_timer_deactivate(struct timer_list *timer) { } | 516 | static inline void debug_timer_deactivate(struct timer_list *timer) { } |
513 | #endif | 517 | #endif |
514 | 518 | ||
515 | static void __init_timer(struct timer_list *timer) | 519 | static void __init_timer(struct timer_list *timer, |
520 | const char *name, | ||
521 | struct lock_class_key *key) | ||
516 | { | 522 | { |
517 | timer->entry.next = NULL; | 523 | timer->entry.next = NULL; |
518 | timer->base = __raw_get_cpu_var(tvec_bases); | 524 | timer->base = __raw_get_cpu_var(tvec_bases); |
@@ -521,6 +527,7 @@ static void __init_timer(struct timer_list *timer) | |||
521 | timer->start_pid = -1; | 527 | timer->start_pid = -1; |
522 | memset(timer->start_comm, 0, TASK_COMM_LEN); | 528 | memset(timer->start_comm, 0, TASK_COMM_LEN); |
523 | #endif | 529 | #endif |
530 | lockdep_init_map(&timer->lockdep_map, name, key, 0); | ||
524 | } | 531 | } |
525 | 532 | ||
526 | /** | 533 | /** |
@@ -530,19 +537,23 @@ static void __init_timer(struct timer_list *timer) | |||
530 | * init_timer() must be done to a timer prior to calling *any* of the | 537 | * init_timer() must be done to a timer prior to calling *any* of the |
531 | * other timer functions. | 538 | * other timer functions. |
532 | */ | 539 | */ |
533 | void init_timer(struct timer_list *timer) | 540 | void init_timer_key(struct timer_list *timer, |
541 | const char *name, | ||
542 | struct lock_class_key *key) | ||
534 | { | 543 | { |
535 | debug_timer_init(timer); | 544 | debug_timer_init(timer); |
536 | __init_timer(timer); | 545 | __init_timer(timer, name, key); |
537 | } | 546 | } |
538 | EXPORT_SYMBOL(init_timer); | 547 | EXPORT_SYMBOL(init_timer_key); |
539 | 548 | ||
540 | void init_timer_deferrable(struct timer_list *timer) | 549 | void init_timer_deferrable_key(struct timer_list *timer, |
550 | const char *name, | ||
551 | struct lock_class_key *key) | ||
541 | { | 552 | { |
542 | init_timer(timer); | 553 | init_timer_key(timer, name, key); |
543 | timer_set_deferrable(timer); | 554 | timer_set_deferrable(timer); |
544 | } | 555 | } |
545 | EXPORT_SYMBOL(init_timer_deferrable); | 556 | EXPORT_SYMBOL(init_timer_deferrable_key); |
546 | 557 | ||
547 | static inline void detach_timer(struct timer_list *timer, | 558 | static inline void detach_timer(struct timer_list *timer, |
548 | int clear_pending) | 559 | int clear_pending) |
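Callers never pass the name and key by hand: in this series init_timer() and friends become wrapper macros in include/linux/timer.h that mint one static lock_class_key per call site, so lockdep gives every initialization site its own class. Roughly:

    #define init_timer(timer)                                       \
            do {                                                    \
                    static struct lock_class_key __key;             \
                    init_timer_key((timer), #timer, &__key);        \
            } while (0)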
@@ -826,6 +837,15 @@ EXPORT_SYMBOL(try_to_del_timer_sync); | |||
826 | */ | 837 | */ |
827 | int del_timer_sync(struct timer_list *timer) | 838 | int del_timer_sync(struct timer_list *timer) |
828 | { | 839 | { |
840 | #ifdef CONFIG_LOCKDEP | ||
841 | unsigned long flags; | ||
842 | |||
843 | local_irq_save(flags); | ||
844 | lock_map_acquire(&timer->lockdep_map); | ||
845 | lock_map_release(&timer->lockdep_map); | ||
846 | local_irq_restore(flags); | ||
847 | #endif | ||
848 | |||
829 | for (;;) { | 849 | for (;;) { |
830 | int ret = try_to_del_timer_sync(timer); | 850 | int ret = try_to_del_timer_sync(timer); |
831 | if (ret >= 0) | 851 | if (ret >= 0) |
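The acquire/release pair takes no real lock; it records in the caller's lock chain that del_timer_sync() waits for this timer, while the matching acquire around fn() in __run_timers() (below) records what the handler takes. Together they let lockdep report the classic deadlock, sketched here with a hypothetical driver lock:

    static DEFINE_SPINLOCK(dev_lock);       /* hypothetical */
    static struct timer_list my_timer;

    static void my_timer_fn(unsigned long data)
    {
            spin_lock(&dev_lock);           /* handler needs dev_lock */
            spin_unlock(&dev_lock);
    }

    static void my_teardown(void)
    {
            spin_lock(&dev_lock);
            del_timer_sync(&my_timer);      /* waits for my_timer_fn(),
                                             * which is spinning on
                                             * dev_lock: deadlock */
            spin_unlock(&dev_lock);
    }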
@@ -897,10 +917,36 @@ static inline void __run_timers(struct tvec_base *base) | |||
897 | 917 | ||
898 | set_running_timer(base, timer); | 918 | set_running_timer(base, timer); |
899 | detach_timer(timer, 1); | 919 | detach_timer(timer, 1); |
920 | |||
900 | spin_unlock_irq(&base->lock); | 921 | spin_unlock_irq(&base->lock); |
901 | { | 922 | { |
902 | int preempt_count = preempt_count(); | 923 | int preempt_count = preempt_count(); |
924 | |||
925 | #ifdef CONFIG_LOCKDEP | ||
926 | /* | ||
927 | * It is permissible to free the timer from | ||
928 | * inside the function that is called from | ||
929 | * it; lockdep needs to take this into | ||
930 | * account too. To avoid bogus "held lock | ||
931 | * freed" warnings as well as problems when | ||
932 | * looking into timer->lockdep_map, make a | ||
933 | * copy and use that here. | ||
934 | */ | ||
935 | struct lockdep_map lockdep_map = | ||
936 | timer->lockdep_map; | ||
937 | #endif | ||
938 | /* | ||
939 | * Couple the lock chain with the lock chain at | ||
940 | * del_timer_sync() by acquiring the lock_map | ||
941 | * around the fn() call here and in | ||
942 | * del_timer_sync(). | ||
943 | */ | ||
944 | lock_map_acquire(&lockdep_map); | ||
945 | |||
903 | fn(data); | 946 | fn(data); |
947 | |||
948 | lock_map_release(&lockdep_map); | ||
949 | |||
904 | if (preempt_count != preempt_count()) { | 950 | if (preempt_count != preempt_count()) { |
905 | printk(KERN_ERR "huh, entered %p " | 951 | printk(KERN_ERR "huh, entered %p " |
906 | "with preempt_count %08x, exited" | 952 | "with preempt_count %08x, exited" |