 include/linux/lockdep.h    |  17
 include/linux/sched.h      |   1
 kernel/lockdep.c           | 229
 kernel/lockdep_internals.h |   3
 kernel/lockdep_proc.c      |   6
 mm/page_alloc.c            |   5
 mm/slab.c                  |   4
 mm/slob.c                  |   2
 mm/slub.c                  |   1
 mm/vmscan.c                |   3
 10 files changed, 254 insertions(+), 17 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 23bf02fb124f..cc97bdbc7969 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -27,12 +27,16 @@ enum lock_usage_bit
 	LOCK_USED = 0,
 	LOCK_USED_IN_HARDIRQ,
 	LOCK_USED_IN_SOFTIRQ,
+	LOCK_USED_IN_RECLAIM_FS,
 	LOCK_ENABLED_SOFTIRQS,
 	LOCK_ENABLED_HARDIRQS,
+	LOCK_HELD_OVER_RECLAIM_FS,
 	LOCK_USED_IN_HARDIRQ_READ,
 	LOCK_USED_IN_SOFTIRQ_READ,
+	LOCK_USED_IN_RECLAIM_FS_READ,
 	LOCK_ENABLED_SOFTIRQS_READ,
 	LOCK_ENABLED_HARDIRQS_READ,
+	LOCK_HELD_OVER_RECLAIM_FS_READ,
 	LOCK_USAGE_STATES
 };
 
@@ -42,16 +46,20 @@ enum lock_usage_bit
 #define LOCKF_USED			(1 << LOCK_USED)
 #define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
 #define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
+#define LOCKF_USED_IN_RECLAIM_FS	(1 << LOCK_USED_IN_RECLAIM_FS)
 #define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
 #define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)
+#define LOCKF_HELD_OVER_RECLAIM_FS	(1 << LOCK_HELD_OVER_RECLAIM_FS)
 
 #define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
 #define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
 
 #define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
 #define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
+#define LOCKF_USED_IN_RECLAIM_FS_READ	(1 << LOCK_USED_IN_RECLAIM_FS_READ)
 #define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
 #define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)
+#define LOCKF_HELD_OVER_RECLAIM_FS_READ	(1 << LOCK_HELD_OVER_RECLAIM_FS_READ)
 
 #define LOCKF_ENABLED_IRQS_READ \
 		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
@@ -324,7 +332,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
 	lock_set_class(lock, lock->name, lock->key, subclass, ip);
 }
 
-# define INIT_LOCKDEP				.lockdep_recursion = 0,
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+
+# define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
@@ -342,6 +354,9 @@ static inline void lockdep_on(void)
 # define lock_release(l, n, i)			do { } while (0)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
+# define lockdep_set_current_reclaim_state(g)	do { } while (0)
+# define lockdep_clear_current_reclaim_state()	do { } while (0)
+# define lockdep_trace_alloc(g)			do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
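The three extern declarations added above are the entire interface the rest of the patch builds on. A minimal sketch of the intended calling convention follows; the two functions below are invented for illustration and are not part of the patch (the real call sites are in the mm/ hunks further down).

/* Illustrative sketch only -- not part of the patch. */
#include <linux/lockdep.h>
#include <linux/gfp.h>

static void example_reclaim(gfp_t gfp_mask)
{
	/* Locks acquired from here on are marked "used in reclaim". */
	lockdep_set_current_reclaim_state(gfp_mask);

	/* ... shrink caches, write back dirty pages ... */

	lockdep_clear_current_reclaim_state();
}

static void *example_allocate(gfp_t gfp_mask)
{
	/*
	 * Allocation sites call this before possibly entering reclaim,
	 * so every lock currently held gets marked "held over reclaim".
	 */
	lockdep_trace_alloc(gfp_mask);

	/* ... the actual allocation ... */
	return NULL;
}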
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4efb552aca47..b00a77f4999e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1313,6 +1313,7 @@ struct task_struct {
 	int lockdep_depth;
 	unsigned int lockdep_recursion;
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	gfp_t lockdep_reclaim_gfp;
 #endif
 
 /* journalling filesystem info */
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06b0c3568f0b..977f940fd562 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -310,12 +310,14 @@ EXPORT_SYMBOL(lockdep_on);
 #if VERBOSE
 # define HARDIRQ_VERBOSE	1
 # define SOFTIRQ_VERBOSE	1
+# define RECLAIM_VERBOSE	1
 #else
 # define HARDIRQ_VERBOSE	0
 # define SOFTIRQ_VERBOSE	0
+# define RECLAIM_VERBOSE	0
 #endif
 
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
+#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 /*
  * Quick filtering for interesting events:
  */
@@ -454,6 +456,10 @@ static const char *usage_str[] =
 	[LOCK_USED_IN_SOFTIRQ_READ] =	"in-softirq-R",
 	[LOCK_ENABLED_SOFTIRQS_READ] =	"softirq-on-R",
 	[LOCK_ENABLED_HARDIRQS_READ] =	"hardirq-on-R",
+	[LOCK_USED_IN_RECLAIM_FS] =	"in-reclaim-W",
+	[LOCK_USED_IN_RECLAIM_FS_READ] = "in-reclaim-R",
+	[LOCK_HELD_OVER_RECLAIM_FS] =	"ov-reclaim-W",
+	[LOCK_HELD_OVER_RECLAIM_FS_READ] = "ov-reclaim-R",
 };
 
 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
@@ -462,9 +468,10 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 }
 
 void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
+get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3,
+		char *c4, char *c5, char *c6)
 {
-	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
+	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.', *c5 = '.', *c6 = '.';
 
 	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
 		*c1 = '+';
@@ -493,14 +500,29 @@ get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4
 		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
 			*c4 = '?';
 	}
+
+	if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS)
+		*c5 = '+';
+	else
+		if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS)
+			*c5 = '-';
+
+	if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS_READ)
+		*c6 = '-';
+	if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS_READ) {
+		*c6 = '+';
+		if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS_READ)
+			*c6 = '?';
+	}
+
 }
 
 static void print_lock_name(struct lock_class *class)
 {
-	char str[KSYM_NAME_LEN], c1, c2, c3, c4;
+	char str[KSYM_NAME_LEN], c1, c2, c3, c4, c5, c6;
 	const char *name;
 
-	get_usage_chars(class, &c1, &c2, &c3, &c4);
+	get_usage_chars(class, &c1, &c2, &c3, &c4, &c5, &c6);
 
 	name = class->name;
 	if (!name) {
@@ -513,7 +535,7 @@ static void print_lock_name(struct lock_class *class)
 		if (class->subclass)
 			printk("/%d", class->subclass);
 	}
-	printk("){%c%c%c%c}", c1, c2, c3, c4);
+	printk("){%c%c%c%c%c%c}", c1, c2, c3, c4, c5, c6);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
@@ -1306,6 +1328,26 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
 					LOCK_ENABLED_SOFTIRQS, "soft"))
 		return 0;
 
+	/*
+	 * Prove that the new dependency does not connect a reclaim-fs-safe
+	 * lock with a reclaim-fs-unsafe lock - to achieve this we search
+	 * the backwards-subgraph starting at <prev>, and the
+	 * forwards-subgraph starting at <next>:
+	 */
+	if (!check_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS,
+					LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+		return 0;
+
+	/*
+	 * Prove that the new dependency does not connect a reclaim-fs-safe-read
+	 * lock with a reclaim-fs-unsafe lock - to achieve this we search
+	 * the backwards-subgraph starting at <prev>, and the
+	 * forwards-subgraph starting at <next>:
+	 */
+	if (!check_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS_READ,
+					LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs-read"))
+		return 0;
+
 	return 1;
 }
 
@@ -1949,6 +1991,14 @@ static int softirq_verbose(struct lock_class *class)
 	return 0;
 }
 
+static int reclaim_verbose(struct lock_class *class)
+{
+#if RECLAIM_VERBOSE
+	return class_filter(class);
+#endif
+	return 0;
+}
+
 #define STRICT_READ_CHECKS	1
 
 static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
@@ -2007,6 +2057,31 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_USED_IN_RECLAIM_FS:
+		if (!valid_state(curr, this, new_bit, LOCK_HELD_OVER_RECLAIM_FS))
+			return 0;
+		if (!valid_state(curr, this, new_bit,
+				 LOCK_HELD_OVER_RECLAIM_FS_READ))
+			return 0;
+		/*
+		 * just marked it reclaim-fs-safe, check that this lock
+		 * took no reclaim-fs-unsafe lock in the past:
+		 */
+		if (!check_usage_forwards(curr, this,
+					  LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+#if STRICT_READ_CHECKS
+		/*
+		 * just marked it reclaim-fs-safe, check that this lock
+		 * took no reclaim-fs-unsafe-read lock in the past:
+		 */
+		if (!check_usage_forwards(curr, this,
+				LOCK_HELD_OVER_RECLAIM_FS_READ, "reclaim-fs-read"))
+			return 0;
+#endif
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	case LOCK_USED_IN_HARDIRQ_READ:
 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
 			return 0;
@@ -2033,6 +2108,19 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_USED_IN_RECLAIM_FS_READ:
+		if (!valid_state(curr, this, new_bit, LOCK_HELD_OVER_RECLAIM_FS))
+			return 0;
+		/*
+		 * just marked it reclaim-fs-read-safe, check that this lock
+		 * took no reclaim-fs-unsafe lock in the past:
+		 */
+		if (!check_usage_forwards(curr, this,
+					  LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	case LOCK_ENABLED_HARDIRQS:
 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
 			return 0;
@@ -2085,6 +2173,32 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_HELD_OVER_RECLAIM_FS:
+		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS))
+			return 0;
+		if (!valid_state(curr, this, new_bit,
+				 LOCK_USED_IN_RECLAIM_FS_READ))
+			return 0;
+		/*
+		 * just marked it reclaim-fs-unsafe, check that no reclaim-fs-safe
+		 * lock in the system ever took it in the past:
+		 */
+		if (!check_usage_backwards(curr, this,
+					   LOCK_USED_IN_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+#if STRICT_READ_CHECKS
+		/*
+		 * just marked it softirq-unsafe, check that no
+		 * softirq-safe-read lock in the system ever took
+		 * it in the past:
+		 */
+		if (!check_usage_backwards(curr, this,
+				LOCK_USED_IN_RECLAIM_FS_READ, "reclaim-fs-read"))
+			return 0;
+#endif
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	case LOCK_ENABLED_HARDIRQS_READ:
 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
 			return 0;
@@ -2115,6 +2229,21 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_HELD_OVER_RECLAIM_FS_READ:
+		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS))
+			return 0;
+#if STRICT_READ_CHECKS
+		/*
+		 * just marked it reclaim-fs-read-unsafe, check that no
+		 * reclaim-fs-safe lock in the system ever took it in the past:
+		 */
+		if (!check_usage_backwards(curr, this,
+					   LOCK_USED_IN_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+#endif
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	default:
 		WARN_ON(1);
 		break;
@@ -2123,11 +2252,17 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 	return ret;
 }
 
+enum mark_type {
+	HARDIRQ,
+	SOFTIRQ,
+	RECLAIM_FS,
+};
+
 /*
  * Mark all held locks with a usage bit:
  */
 static int
-mark_held_locks(struct task_struct *curr, int hardirq)
+mark_held_locks(struct task_struct *curr, enum mark_type mark)
 {
 	enum lock_usage_bit usage_bit;
 	struct held_lock *hlock;
@@ -2136,17 +2271,32 @@ mark_held_locks(struct task_struct *curr, int hardirq)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
-		if (hardirq) {
+		switch (mark) {
+		case HARDIRQ:
 			if (hlock->read)
 				usage_bit = LOCK_ENABLED_HARDIRQS_READ;
 			else
 				usage_bit = LOCK_ENABLED_HARDIRQS;
-		} else {
+			break;
+
+		case SOFTIRQ:
 			if (hlock->read)
 				usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
 			else
 				usage_bit = LOCK_ENABLED_SOFTIRQS;
+			break;
+
+		case RECLAIM_FS:
+			if (hlock->read)
+				usage_bit = LOCK_HELD_OVER_RECLAIM_FS_READ;
+			else
+				usage_bit = LOCK_HELD_OVER_RECLAIM_FS;
+			break;
+
+		default:
+			BUG();
 		}
+
 		if (!mark_lock(curr, hlock, usage_bit))
 			return 0;
 	}
@@ -2200,7 +2350,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	 * We are going to turn hardirqs on, so set the
 	 * usage bit for all held locks:
 	 */
-	if (!mark_held_locks(curr, 1))
+	if (!mark_held_locks(curr, HARDIRQ))
 		return;
 	/*
 	 * If we have softirqs enabled, then set the usage
@@ -2208,7 +2358,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	 * this bit from being set before)
 	 */
 	if (curr->softirqs_enabled)
-		if (!mark_held_locks(curr, 0))
+		if (!mark_held_locks(curr, SOFTIRQ))
 			return;
 
 	curr->hardirq_enable_ip = ip;
@@ -2288,7 +2438,7 @@ void trace_softirqs_on(unsigned long ip)
 	 * enabled too:
 	 */
 	if (curr->hardirqs_enabled)
-		mark_held_locks(curr, 0);
+		mark_held_locks(curr, SOFTIRQ);
 }
 
 /*
@@ -2317,6 +2467,31 @@ void trace_softirqs_off(unsigned long ip)
 		debug_atomic_inc(&redundant_softirqs_off);
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+	struct task_struct *curr = current;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	/* no reclaim without waiting on it */
+	if (!(gfp_mask & __GFP_WAIT))
+		return;
+
+	/* this guy won't enter reclaim */
+	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+		return;
+
+	/* We're only interested __GFP_FS allocations for now */
+	if (!(gfp_mask & __GFP_FS))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
+		return;
+
+	mark_held_locks(curr, RECLAIM_FS);
+}
+
 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 {
 	/*
@@ -2362,6 +2537,22 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 		}
 	}
 
+	/*
+	 * We reuse the irq context infrastructure more broadly as a general
+	 * context checking code. This tests GFP_FS recursion (a lock taken
+	 * during reclaim for a GFP_FS allocation is held over a GFP_FS
+	 * allocation).
+	 */
+	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
+		if (hlock->read) {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
+					return 0;
+		} else {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
+					return 0;
+		}
+	}
+
 	return 1;
 }
 
@@ -2453,6 +2644,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	case LOCK_ENABLED_SOFTIRQS:
 	case LOCK_ENABLED_HARDIRQS_READ:
 	case LOCK_ENABLED_SOFTIRQS_READ:
+	case LOCK_USED_IN_RECLAIM_FS:
+	case LOCK_USED_IN_RECLAIM_FS_READ:
+	case LOCK_HELD_OVER_RECLAIM_FS:
+	case LOCK_HELD_OVER_RECLAIM_FS_READ:
 		ret = mark_lock_irq(curr, this, new_bit);
 		if (!ret)
 			return 0;
@@ -2966,6 +3161,16 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
+void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+{
+	current->lockdep_reclaim_gfp = gfp_mask;
+}
+
+void lockdep_clear_current_reclaim_state(void)
+{
+	current->lockdep_reclaim_gfp = 0;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static int
 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
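Taken together, the lockdep.c changes above let lockdep report the classic __GFP_FS reclaim inversion from usage history alone: a lock that is both acquired from the reclaim path (in-reclaim) and held across a __GFP_FS allocation (over-reclaim). A minimal sketch of the pattern being flagged; fs_lock and the two functions are invented for illustration and are not part of the patch.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/gfp.h>

static DEFINE_MUTEX(fs_lock);		/* hypothetical filesystem lock */

/* Runs from the reclaim path (e.g. writeback): fs_lock becomes "in-reclaim". */
static void reclaim_side(void)
{
	mutex_lock(&fs_lock);
	/* ... write back a dirty page ... */
	mutex_unlock(&fs_lock);
}

/* Ordinary path: fs_lock is held over a __GFP_FS allocation. */
static void alloc_side(void)
{
	void *p;

	mutex_lock(&fs_lock);
	/*
	 * GFP_KERNEL includes __GFP_FS, so lockdep_trace_alloc() marks
	 * fs_lock "held over reclaim-fs" here; combined with the usage
	 * recorded by reclaim_side(), this is reported as a potential
	 * deadlock without the allocation ever having to recurse into
	 * reclaim at runtime.
	 */
	p = kmalloc(64, GFP_KERNEL);
	kfree(p);
	mutex_unlock(&fs_lock);
}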
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 56b196932c08..e887b783244f 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -32,7 +32,8 @@ extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
 
 extern void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
+get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3,
+		char *c4, char *c5, char *c6);
 
 extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
 
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 13716b813896..b84a1dfa9077 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -84,7 +84,7 @@ static int l_show(struct seq_file *m, void *v)
 {
 	struct lock_class *class = v;
 	struct lock_list *entry;
-	char c1, c2, c3, c4;
+	char c1, c2, c3, c4, c5, c6;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_printf(m, "all lock classes:\n");
@@ -100,8 +100,8 @@ static int l_show(struct seq_file *m, void *v)
 	seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
 #endif
 
-	get_usage_chars(class, &c1, &c2, &c3, &c4);
-	seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
+	get_usage_chars(class, &c1, &c2, &c3, &c4, &c5, &c6);
+	seq_printf(m, " %c%c%c%c%c%c", c1, c2, c3, c4, c5, c6);
 
 	seq_printf(m, ": ");
 	print_name(m, class);
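With this change, each class in /proc/lockdep (and in the curly-brace usage string of lockdep reports) carries six state characters instead of four. Reading them off get_usage_chars() above: the first four keep their hardirq/softirq meaning, the fifth shows reclaim-fs write usage ('+' if ever acquired in reclaim, '-' if ever held over a __GFP_FS allocation), and the sixth the same for read acquisitions ('?' when both apply). As a worked example, a class printed as {....-.} has only ever been held across GFP_FS allocations, while {....+.} has only ever been taken from the reclaim path; a dependency chain that connects the two kinds is what produces a report.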
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5675b3073854..22b15a4cde8a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1479,6 +1479,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	unsigned long pages_reclaimed = 0;
 
+	lockdep_trace_alloc(gfp_mask);
+
 	might_sleep_if(wait);
 
 	if (should_fail_alloc_page(gfp_mask, order))
@@ -1578,12 +1580,15 @@ nofail_alloc:
 	 */
 	cpuset_update_task_memory_state();
 	p->flags |= PF_MEMALLOC;
+
+	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
 	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
 	p->reclaim_state = NULL;
+	lockdep_clear_current_reclaim_state();
 	p->flags &= ~PF_MEMALLOC;
 
 	cond_resched();
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3318,6 +3318,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
@@ -3394,6 +3396,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -464,6 +464,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
+	lockdep_trace_alloc(flags);
+
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1596,6 +1596,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
 	if (should_failslab(s->objsize, gfpflags))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9a27c44aa327..303eb658b50b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1963,6 +1963,9 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
+
+	lockdep_set_current_reclaim_state(GFP_KERNEL);
+
 	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
 	if (!cpumask_empty(cpumask))
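In sum, the hooks land in three places: every allocator entry point (the page allocator, slab, slob and slub) calls lockdep_trace_alloc() with the allocation's gfp mask so that currently held locks pick up the held-over-reclaim bits; direct reclaim in __alloc_pages_internal() brackets try_to_free_pages() with lockdep_set_current_reclaim_state()/lockdep_clear_current_reclaim_state(); and kswapd() sets the reclaim state for its whole lifetime, so locks taken inside reclaim pick up the used-in-reclaim bits.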