-rw-r--r--  Documentation/lockdep-design.txt  |  30
-rw-r--r--  include/linux/lockdep.h           |  50
-rw-r--r--  include/linux/mutex.h             |   5
-rw-r--r--  include/linux/sched.h             |   3
-rw-r--r--  include/linux/timer.h             |  93
-rw-r--r--  kernel/lockdep.c                  | 528
-rw-r--r--  kernel/lockdep_internals.h        |  45
-rw-r--r--  kernel/lockdep_proc.c             |  22
-rw-r--r--  kernel/lockdep_states.h           |   9
-rw-r--r--  kernel/mutex-debug.c              |   9
-rw-r--r--  kernel/mutex-debug.h              |  18
-rw-r--r--  kernel/mutex.c                    | 121
-rw-r--r--  kernel/mutex.h                    |  22
-rw-r--r--  kernel/sched.c                    |  71
-rw-r--r--  kernel/sched_features.h           |   1
-rw-r--r--  kernel/timer.c                    |  68
-rw-r--r--  mm/page_alloc.c                   |   5
-rw-r--r--  mm/slab.c                         |   4
-rw-r--r--  mm/slob.c                         |   2
-rw-r--r--  mm/slub.c                         |   1
-rw-r--r--  mm/vmscan.c                       |   2
21 files changed, 730 insertions, 379 deletions
diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt
index 488773018152..938ea22f2cc0 100644
--- a/Documentation/lockdep-design.txt
+++ b/Documentation/lockdep-design.txt
@@ -27,33 +27,37 @@ lock-class.
27State 27State
28----- 28-----
29 29
30The validator tracks lock-class usage history into 5 separate state bits: 30The validator tracks lock-class usage history into 4n + 1 separate state bits:
31 31
32- 'ever held in hardirq context' [ == hardirq-safe ] 32- 'ever held in STATE context'
33- 'ever held in softirq context' [ == softirq-safe ] 33- 'ever held as readlock in STATE context'
34- 'ever held with hardirqs enabled' [ == hardirq-unsafe ] 34- 'ever held with STATE enabled'
35- 'ever held with softirqs and hardirqs enabled' [ == softirq-unsafe ] 35- 'ever held as readlock with STATE enabled'
36
37Where STATE can be either one of (kernel/lockdep_states.h)
38 - hardirq
39 - softirq
40 - reclaim_fs
36 41
37- 'ever used' [ == !unused ] 42- 'ever used' [ == !unused ]
38 43
39When locking rules are violated, these 4 state bits are presented in the 44When locking rules are violated, these state bits are presented in the
40locking error messages, inside curlies. A contrived example: 45locking error messages, inside curlies. A contrived example:
41 46
42 modprobe/2287 is trying to acquire lock: 47 modprobe/2287 is trying to acquire lock:
43 (&sio_locks[i].lock){--..}, at: [<c02867fd>] mutex_lock+0x21/0x24 48 (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24
44 49
45 but task is already holding lock: 50 but task is already holding lock:
46 (&sio_locks[i].lock){--..}, at: [<c02867fd>] mutex_lock+0x21/0x24 51 (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24
47 52
48 53
49The bit position indicates hardirq, softirq, hardirq-read, 54The bit position indicates STATE, STATE-read, for each of the states listed
50softirq-read respectively, and the character displayed in each 55above, and the character displayed in each indicates:
51indicates:
52 56
53 '.' acquired while irqs disabled 57 '.' acquired while irqs disabled
54 '+' acquired in irq context 58 '+' acquired in irq context
55 '-' acquired with irqs enabled 59 '-' acquired with irqs enabled
56 '?' read acquired in irq context with irqs enabled. 60 '?' acquired in irq context with irqs enabled.
57 61
58Unused mutexes cannot be part of the cause of an error. 62Unused mutexes cannot be part of the cause of an error.
59 63
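
With three states the curly-brace annotation grows from four characters to six: two per state, write then read, in the order the states appear in kernel/lockdep_states.h. Reading the contrived example above position by position:

	{-.-...}
	 ||||||
	 |||||+--  RECLAIM_FS, read
	 ||||+---  RECLAIM_FS, write
	 |||+----  SOFTIRQ, read
	 ||+-----  SOFTIRQ, write
	 |+------  HARDIRQ, read
	 +-------  HARDIRQ, write

Each character is then decoded with the '.', '+', '-', '?' table above.
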
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 23bf02fb124f..5a58ea3e91e9 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -20,43 +20,10 @@ struct lockdep_map;
20#include <linux/stacktrace.h> 20#include <linux/stacktrace.h>
21 21
22/* 22/*
23 * Lock-class usage-state bits: 23 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
24 * the total number of states... :-(
24 */ 25 */
25enum lock_usage_bit 26#define XXX_LOCK_USAGE_STATES (1+3*4)
26{
27 LOCK_USED = 0,
28 LOCK_USED_IN_HARDIRQ,
29 LOCK_USED_IN_SOFTIRQ,
30 LOCK_ENABLED_SOFTIRQS,
31 LOCK_ENABLED_HARDIRQS,
32 LOCK_USED_IN_HARDIRQ_READ,
33 LOCK_USED_IN_SOFTIRQ_READ,
34 LOCK_ENABLED_SOFTIRQS_READ,
35 LOCK_ENABLED_HARDIRQS_READ,
36 LOCK_USAGE_STATES
37};
38
39/*
40 * Usage-state bitmasks:
41 */
42#define LOCKF_USED (1 << LOCK_USED)
43#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
44#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
45#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
46#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)
47
48#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
49#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
50
51#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
52#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
53#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
54#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)
55
56#define LOCKF_ENABLED_IRQS_READ \
57 (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
58#define LOCKF_USED_IN_IRQ_READ \
59 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
60 27
61#define MAX_LOCKDEP_SUBCLASSES 8UL 28#define MAX_LOCKDEP_SUBCLASSES 8UL
62 29
@@ -97,7 +64,7 @@ struct lock_class {
97 * IRQ/softirq usage tracking bits: 64 * IRQ/softirq usage tracking bits:
98 */ 65 */
99 unsigned long usage_mask; 66 unsigned long usage_mask;
100 struct stack_trace usage_traces[LOCK_USAGE_STATES]; 67 struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
101 68
102 /* 69 /*
103 * These fields represent a directed graph of lock dependencies, 70 * These fields represent a directed graph of lock dependencies,
@@ -324,7 +291,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
324 lock_set_class(lock, lock->name, lock->key, subclass, ip); 291 lock_set_class(lock, lock->name, lock->key, subclass, ip);
325} 292}
326 293
327# define INIT_LOCKDEP .lockdep_recursion = 0, 294extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
295extern void lockdep_clear_current_reclaim_state(void);
296extern void lockdep_trace_alloc(gfp_t mask);
297
298# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
328 299
329#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) 300#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
330 301
@@ -342,6 +313,9 @@ static inline void lockdep_on(void)
342# define lock_release(l, n, i) do { } while (0) 313# define lock_release(l, n, i) do { } while (0)
343# define lock_set_class(l, n, k, s, i) do { } while (0) 314# define lock_set_class(l, n, k, s, i) do { } while (0)
344# define lock_set_subclass(l, s, i) do { } while (0) 315# define lock_set_subclass(l, s, i) do { } while (0)
316# define lockdep_set_current_reclaim_state(g) do { } while (0)
317# define lockdep_clear_current_reclaim_state() do { } while (0)
318# define lockdep_trace_alloc(g) do { } while (0)
345# define lockdep_init() do { } while (0) 319# define lockdep_init() do { } while (0)
346# define lockdep_info() do { } while (0) 320# define lockdep_info() do { } while (0)
347# define lockdep_init_map(lock, name, key, sub) \ 321# define lockdep_init_map(lock, name, key, sub) \
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 7a0e5c4f8072..3069ec7e0ab8 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -50,8 +50,10 @@ struct mutex {
50 atomic_t count; 50 atomic_t count;
51 spinlock_t wait_lock; 51 spinlock_t wait_lock;
52 struct list_head wait_list; 52 struct list_head wait_list;
53#ifdef CONFIG_DEBUG_MUTEXES 53#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
54 struct thread_info *owner; 54 struct thread_info *owner;
55#endif
56#ifdef CONFIG_DEBUG_MUTEXES
55 const char *name; 57 const char *name;
56 void *magic; 58 void *magic;
57#endif 59#endif
@@ -68,7 +70,6 @@ struct mutex_waiter {
68 struct list_head list; 70 struct list_head list;
69 struct task_struct *task; 71 struct task_struct *task;
70#ifdef CONFIG_DEBUG_MUTEXES 72#ifdef CONFIG_DEBUG_MUTEXES
71 struct mutex *lock;
72 void *magic; 73 void *magic;
73#endif 74#endif
74}; 75};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1d19c025f9d2..29df6374d2de 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -331,7 +331,9 @@ extern signed long schedule_timeout(signed long timeout);
331extern signed long schedule_timeout_interruptible(signed long timeout); 331extern signed long schedule_timeout_interruptible(signed long timeout);
332extern signed long schedule_timeout_killable(signed long timeout); 332extern signed long schedule_timeout_killable(signed long timeout);
333extern signed long schedule_timeout_uninterruptible(signed long timeout); 333extern signed long schedule_timeout_uninterruptible(signed long timeout);
334asmlinkage void __schedule(void);
334asmlinkage void schedule(void); 335asmlinkage void schedule(void);
336extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
335 337
336struct nsproxy; 338struct nsproxy;
337struct user_namespace; 339struct user_namespace;
@@ -1334,6 +1336,7 @@ struct task_struct {
1334 int lockdep_depth; 1336 int lockdep_depth;
1335 unsigned int lockdep_recursion; 1337 unsigned int lockdep_recursion;
1336 struct held_lock held_locks[MAX_LOCK_DEPTH]; 1338 struct held_lock held_locks[MAX_LOCK_DEPTH];
1339 gfp_t lockdep_reclaim_gfp;
1337#endif 1340#endif
1338 1341
1339/* journalling filesystem info */ 1342/* journalling filesystem info */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index e2d662e3416e..6cdb6f3331f1 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -5,6 +5,7 @@
5#include <linux/ktime.h> 5#include <linux/ktime.h>
6#include <linux/stddef.h> 6#include <linux/stddef.h>
7#include <linux/debugobjects.h> 7#include <linux/debugobjects.h>
8#include <linux/stringify.h>
8 9
9struct tvec_base; 10struct tvec_base;
10 11
@@ -21,52 +22,126 @@ struct timer_list {
21 char start_comm[16]; 22 char start_comm[16];
22 int start_pid; 23 int start_pid;
23#endif 24#endif
25#ifdef CONFIG_LOCKDEP
26 struct lockdep_map lockdep_map;
27#endif
24}; 28};
25 29
26extern struct tvec_base boot_tvec_bases; 30extern struct tvec_base boot_tvec_bases;
27 31
32#ifdef CONFIG_LOCKDEP
33/*
34 * NB: because we have to copy the lockdep_map, setting the lockdep_map key
35 * (second argument) here is required, otherwise it could be initialised to
36 * the copy of the lockdep_map later! We use the pointer to and the string
37 * "<file>:<line>" as the key resp. the name of the lockdep_map.
38 */
39#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \
40 .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
41#else
42#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
43#endif
44
28#define TIMER_INITIALIZER(_function, _expires, _data) { \ 45#define TIMER_INITIALIZER(_function, _expires, _data) { \
29 .entry = { .prev = TIMER_ENTRY_STATIC }, \ 46 .entry = { .prev = TIMER_ENTRY_STATIC }, \
30 .function = (_function), \ 47 .function = (_function), \
31 .expires = (_expires), \ 48 .expires = (_expires), \
32 .data = (_data), \ 49 .data = (_data), \
33 .base = &boot_tvec_bases, \ 50 .base = &boot_tvec_bases, \
51 __TIMER_LOCKDEP_MAP_INITIALIZER( \
52 __FILE__ ":" __stringify(__LINE__)) \
34 } 53 }
35 54
36#define DEFINE_TIMER(_name, _function, _expires, _data) \ 55#define DEFINE_TIMER(_name, _function, _expires, _data) \
37 struct timer_list _name = \ 56 struct timer_list _name = \
38 TIMER_INITIALIZER(_function, _expires, _data) 57 TIMER_INITIALIZER(_function, _expires, _data)
39 58
40void init_timer(struct timer_list *timer); 59void init_timer_key(struct timer_list *timer,
41void init_timer_deferrable(struct timer_list *timer); 60 const char *name,
61 struct lock_class_key *key);
62void init_timer_deferrable_key(struct timer_list *timer,
63 const char *name,
64 struct lock_class_key *key);
65
66#ifdef CONFIG_LOCKDEP
67#define init_timer(timer) \
68 do { \
69 static struct lock_class_key __key; \
70 init_timer_key((timer), #timer, &__key); \
71 } while (0)
72
73#define init_timer_deferrable(timer) \
74 do { \
75 static struct lock_class_key __key; \
76 init_timer_deferrable_key((timer), #timer, &__key); \
77 } while (0)
78
79#define init_timer_on_stack(timer) \
80 do { \
81 static struct lock_class_key __key; \
82 init_timer_on_stack_key((timer), #timer, &__key); \
83 } while (0)
84
85#define setup_timer(timer, fn, data) \
86 do { \
87 static struct lock_class_key __key; \
88 setup_timer_key((timer), #timer, &__key, (fn), (data));\
89 } while (0)
90
91#define setup_timer_on_stack(timer, fn, data) \
92 do { \
93 static struct lock_class_key __key; \
94 setup_timer_on_stack_key((timer), #timer, &__key, \
95 (fn), (data)); \
96 } while (0)
97#else
98#define init_timer(timer)\
99 init_timer_key((timer), NULL, NULL)
100#define init_timer_deferrable(timer)\
101 init_timer_deferrable_key((timer), NULL, NULL)
102#define init_timer_on_stack(timer)\
103 init_timer_on_stack_key((timer), NULL, NULL)
104#define setup_timer(timer, fn, data)\
105 setup_timer_key((timer), NULL, NULL, (fn), (data))
106#define setup_timer_on_stack(timer, fn, data)\
107 setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data))
108#endif
42 109
43#ifdef CONFIG_DEBUG_OBJECTS_TIMERS 110#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
44extern void init_timer_on_stack(struct timer_list *timer); 111extern void init_timer_on_stack_key(struct timer_list *timer,
112 const char *name,
113 struct lock_class_key *key);
45extern void destroy_timer_on_stack(struct timer_list *timer); 114extern void destroy_timer_on_stack(struct timer_list *timer);
46#else 115#else
47static inline void destroy_timer_on_stack(struct timer_list *timer) { } 116static inline void destroy_timer_on_stack(struct timer_list *timer) { }
48static inline void init_timer_on_stack(struct timer_list *timer) 117static inline void init_timer_on_stack_key(struct timer_list *timer,
118 const char *name,
119 struct lock_class_key *key)
49{ 120{
50 init_timer(timer); 121 init_timer_key(timer, name, key);
51} 122}
52#endif 123#endif
53 124
54static inline void setup_timer(struct timer_list * timer, 125static inline void setup_timer_key(struct timer_list * timer,
126 const char *name,
127 struct lock_class_key *key,
55 void (*function)(unsigned long), 128 void (*function)(unsigned long),
56 unsigned long data) 129 unsigned long data)
57{ 130{
58 timer->function = function; 131 timer->function = function;
59 timer->data = data; 132 timer->data = data;
60 init_timer(timer); 133 init_timer_key(timer, name, key);
61} 134}
62 135
63static inline void setup_timer_on_stack(struct timer_list *timer, 136static inline void setup_timer_on_stack_key(struct timer_list *timer,
137 const char *name,
138 struct lock_class_key *key,
64 void (*function)(unsigned long), 139 void (*function)(unsigned long),
65 unsigned long data) 140 unsigned long data)
66{ 141{
67 timer->function = function; 142 timer->function = function;
68 timer->data = data; 143 timer->data = data;
69 init_timer_on_stack(timer); 144 init_timer_on_stack_key(timer, name, key);
70} 145}
71 146
72/** 147/**
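
The point of the CONFIG_LOCKDEP wrappers above is that every init_timer()/setup_timer() call site gets its own static struct lock_class_key, so timers initialised from different places land in different lock classes even though they share one init routine. A sketch of what the preprocessor produces for a hypothetical driver timer (the timer, callback and function names below are illustrative, not taken from the patch):

	#include <linux/timer.h>

	static struct timer_list my_poll_timer;			/* hypothetical */
	static void my_poll_fn(unsigned long data);		/* hypothetical */

	static void my_driver_init(void)
	{
		/* setup_timer(&my_poll_timer, my_poll_fn, 0) expands to roughly: */
		do {
			static struct lock_class_key __key;	/* unique to this call site */

			setup_timer_key(&my_poll_timer, "&my_poll_timer", &__key,
					my_poll_fn, 0);
		} while (0);
	}

Statically defined timers get the same treatment via TIMER_INITIALIZER, which uses the "<file>:<line>" string literal as the name and its address as the key, per the NB comment above.
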
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06b0c3568f0b..3673a3f44d9d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -41,6 +41,7 @@
41#include <linux/utsname.h> 41#include <linux/utsname.h>
42#include <linux/hash.h> 42#include <linux/hash.h>
43#include <linux/ftrace.h> 43#include <linux/ftrace.h>
44#include <linux/stringify.h>
44 45
45#include <asm/sections.h> 46#include <asm/sections.h>
46 47
@@ -310,12 +311,14 @@ EXPORT_SYMBOL(lockdep_on);
310#if VERBOSE 311#if VERBOSE
311# define HARDIRQ_VERBOSE 1 312# define HARDIRQ_VERBOSE 1
312# define SOFTIRQ_VERBOSE 1 313# define SOFTIRQ_VERBOSE 1
314# define RECLAIM_VERBOSE 1
313#else 315#else
314# define HARDIRQ_VERBOSE 0 316# define HARDIRQ_VERBOSE 0
315# define SOFTIRQ_VERBOSE 0 317# define SOFTIRQ_VERBOSE 0
318# define RECLAIM_VERBOSE 0
316#endif 319#endif
317 320
318#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE 321#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
319/* 322/*
320 * Quick filtering for interesting events: 323 * Quick filtering for interesting events:
321 */ 324 */
@@ -443,17 +446,18 @@ atomic_t nr_find_usage_backwards_recursions;
443 * Locking printouts: 446 * Locking printouts:
444 */ 447 */
445 448
449#define __USAGE(__STATE) \
450 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
451 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
452 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
453 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
454
446static const char *usage_str[] = 455static const char *usage_str[] =
447{ 456{
448 [LOCK_USED] = "initial-use ", 457#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
449 [LOCK_USED_IN_HARDIRQ] = "in-hardirq-W", 458#include "lockdep_states.h"
450 [LOCK_USED_IN_SOFTIRQ] = "in-softirq-W", 459#undef LOCKDEP_STATE
451 [LOCK_ENABLED_SOFTIRQS] = "softirq-on-W", 460 [LOCK_USED] = "INITIAL USE",
452 [LOCK_ENABLED_HARDIRQS] = "hardirq-on-W",
453 [LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R",
454 [LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R",
455 [LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R",
456 [LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R",
457}; 461};
458 462
459const char * __get_key_name(struct lockdep_subclass_key *key, char *str) 463const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
@@ -461,46 +465,45 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
461 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str); 465 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
462} 466}
463 467
464void 468static inline unsigned long lock_flag(enum lock_usage_bit bit)
465get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
466{ 469{
467 *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.'; 470 return 1UL << bit;
468 471}
469 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
470 *c1 = '+';
471 else
472 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
473 *c1 = '-';
474 472
475 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ) 473static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
476 *c2 = '+'; 474{
477 else 475 char c = '.';
478 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
479 *c2 = '-';
480 476
481 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) 477 if (class->usage_mask & lock_flag(bit + 2))
482 *c3 = '-'; 478 c = '+';
483 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) { 479 if (class->usage_mask & lock_flag(bit)) {
484 *c3 = '+'; 480 c = '-';
485 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) 481 if (class->usage_mask & lock_flag(bit + 2))
486 *c3 = '?'; 482 c = '?';
487 } 483 }
488 484
489 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) 485 return c;
490 *c4 = '-'; 486}
491 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) { 487
492 *c4 = '+'; 488void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
493 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) 489{
494 *c4 = '?'; 490 int i = 0;
495 } 491
492#define LOCKDEP_STATE(__STATE) \
493 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
494 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
495#include "lockdep_states.h"
496#undef LOCKDEP_STATE
497
498 usage[i] = '\0';
496} 499}
497 500
498static void print_lock_name(struct lock_class *class) 501static void print_lock_name(struct lock_class *class)
499{ 502{
500 char str[KSYM_NAME_LEN], c1, c2, c3, c4; 503 char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
501 const char *name; 504 const char *name;
502 505
503 get_usage_chars(class, &c1, &c2, &c3, &c4); 506 get_usage_chars(class, usage);
504 507
505 name = class->name; 508 name = class->name;
506 if (!name) { 509 if (!name) {
@@ -513,7 +516,7 @@ static void print_lock_name(struct lock_class *class)
513 if (class->subclass) 516 if (class->subclass)
514 printk("/%d", class->subclass); 517 printk("/%d", class->subclass);
515 } 518 }
516 printk("){%c%c%c%c}", c1, c2, c3, c4); 519 printk("){%s}", usage);
517} 520}
518 521
519static void print_lockdep_cache(struct lockdep_map *lock) 522static void print_lockdep_cache(struct lockdep_map *lock)
@@ -1263,9 +1266,49 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
1263 bit_backwards, bit_forwards, irqclass); 1266 bit_backwards, bit_forwards, irqclass);
1264} 1267}
1265 1268
1266static int 1269static const char *state_names[] = {
1267check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, 1270#define LOCKDEP_STATE(__STATE) \
1268 struct held_lock *next) 1271 __stringify(__STATE),
1272#include "lockdep_states.h"
1273#undef LOCKDEP_STATE
1274};
1275
1276static const char *state_rnames[] = {
1277#define LOCKDEP_STATE(__STATE) \
1278 __stringify(__STATE)"-READ",
1279#include "lockdep_states.h"
1280#undef LOCKDEP_STATE
1281};
1282
1283static inline const char *state_name(enum lock_usage_bit bit)
1284{
1285 return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1286}
1287
1288static int exclusive_bit(int new_bit)
1289{
1290 /*
1291 * USED_IN
1292 * USED_IN_READ
1293 * ENABLED
1294 * ENABLED_READ
1295 *
1296 * bit 0 - write/read
1297 * bit 1 - used_in/enabled
1298 * bit 2+ state
1299 */
1300
1301 int state = new_bit & ~3;
1302 int dir = new_bit & 2;
1303
1304 /*
1305 * keep state, bit flip the direction and strip read.
1306 */
1307 return state | (dir ^ 2);
1308}
1309
1310static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1311 struct held_lock *next, enum lock_usage_bit bit)
1269{ 1312{
1270 /* 1313 /*
1271 * Prove that the new dependency does not connect a hardirq-safe 1314 * Prove that the new dependency does not connect a hardirq-safe
@@ -1273,38 +1316,34 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1273 * the backwards-subgraph starting at <prev>, and the 1316 * the backwards-subgraph starting at <prev>, and the
1274 * forwards-subgraph starting at <next>: 1317 * forwards-subgraph starting at <next>:
1275 */ 1318 */
1276 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ, 1319 if (!check_usage(curr, prev, next, bit,
1277 LOCK_ENABLED_HARDIRQS, "hard")) 1320 exclusive_bit(bit), state_name(bit)))
1278 return 0; 1321 return 0;
1279 1322
1323 bit++; /* _READ */
1324
1280 /* 1325 /*
1281 * Prove that the new dependency does not connect a hardirq-safe-read 1326 * Prove that the new dependency does not connect a hardirq-safe-read
1282 * lock with a hardirq-unsafe lock - to achieve this we search 1327 * lock with a hardirq-unsafe lock - to achieve this we search
1283 * the backwards-subgraph starting at <prev>, and the 1328 * the backwards-subgraph starting at <prev>, and the
1284 * forwards-subgraph starting at <next>: 1329 * forwards-subgraph starting at <next>:
1285 */ 1330 */
1286 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ, 1331 if (!check_usage(curr, prev, next, bit,
1287 LOCK_ENABLED_HARDIRQS, "hard-read")) 1332 exclusive_bit(bit), state_name(bit)))
1288 return 0; 1333 return 0;
1289 1334
1290 /* 1335 return 1;
1291 * Prove that the new dependency does not connect a softirq-safe 1336}
1292 * lock with a softirq-unsafe lock - to achieve this we search 1337
1293 * the backwards-subgraph starting at <prev>, and the 1338static int
1294 * forwards-subgraph starting at <next>: 1339check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1295 */ 1340 struct held_lock *next)
1296 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ, 1341{
1297 LOCK_ENABLED_SOFTIRQS, "soft")) 1342#define LOCKDEP_STATE(__STATE) \
1298 return 0; 1343 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1299 /*
1300 * Prove that the new dependency does not connect a softirq-safe-read
1301 * lock with a softirq-unsafe lock - to achieve this we search
1302 * the backwards-subgraph starting at <prev>, and the
1303 * forwards-subgraph starting at <next>:
1304 */
1305 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
1306 LOCK_ENABLED_SOFTIRQS, "soft"))
1307 return 0; 1344 return 0;
1345#include "lockdep_states.h"
1346#undef LOCKDEP_STATE
1308 1347
1309 return 1; 1348 return 1;
1310} 1349}
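
Given the bit layout spelled out in exclusive_bit() (bit 0 = read, bit 1 = used_in/enabled, bits 2+ = state index), the exclusive counterpart of a usage bit is obtained by flipping the direction bit and dropping the read bit. A few worked values, using the enum generated in kernel/lockdep_internals.h:

	exclusive_bit(LOCK_USED_IN_HARDIRQ)		/* 0 */  == LOCK_ENABLED_HARDIRQ	/* 2 */
	exclusive_bit(LOCK_USED_IN_HARDIRQ_READ)	/* 1 */  == LOCK_ENABLED_HARDIRQ	/* 2 */
	exclusive_bit(LOCK_ENABLED_SOFTIRQ)		/* 6 */  == LOCK_USED_IN_SOFTIRQ	/* 4 */
	exclusive_bit(LOCK_ENABLED_SOFTIRQ_READ)	/* 7 */  == LOCK_USED_IN_SOFTIRQ	/* 4 */

state_name() uses the same layout: bit & 1 selects the "-READ" spelling and bit >> 2 indexes the per-state name table, so check_usage() now reports e.g. "SOFTIRQ-READ" instead of the old hand-written "soft-read" strings.
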
@@ -1933,7 +1972,7 @@ void print_irqtrace_events(struct task_struct *curr)
1933 print_ip_sym(curr->softirq_disable_ip); 1972 print_ip_sym(curr->softirq_disable_ip);
1934} 1973}
1935 1974
1936static int hardirq_verbose(struct lock_class *class) 1975static int HARDIRQ_verbose(struct lock_class *class)
1937{ 1976{
1938#if HARDIRQ_VERBOSE 1977#if HARDIRQ_VERBOSE
1939 return class_filter(class); 1978 return class_filter(class);
@@ -1941,7 +1980,7 @@ static int hardirq_verbose(struct lock_class *class)
1941 return 0; 1980 return 0;
1942} 1981}
1943 1982
1944static int softirq_verbose(struct lock_class *class) 1983static int SOFTIRQ_verbose(struct lock_class *class)
1945{ 1984{
1946#if SOFTIRQ_VERBOSE 1985#if SOFTIRQ_VERBOSE
1947 return class_filter(class); 1986 return class_filter(class);
@@ -1949,185 +1988,94 @@ static int softirq_verbose(struct lock_class *class)
1949 return 0; 1988 return 0;
1950} 1989}
1951 1990
1991static int RECLAIM_FS_verbose(struct lock_class *class)
1992{
1993#if RECLAIM_VERBOSE
1994 return class_filter(class);
1995#endif
1996 return 0;
1997}
1998
1952#define STRICT_READ_CHECKS 1 1999#define STRICT_READ_CHECKS 1
1953 2000
1954static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, 2001static int (*state_verbose_f[])(struct lock_class *class) = {
1955 enum lock_usage_bit new_bit) 2002#define LOCKDEP_STATE(__STATE) \
2003 __STATE##_verbose,
2004#include "lockdep_states.h"
2005#undef LOCKDEP_STATE
2006};
2007
2008static inline int state_verbose(enum lock_usage_bit bit,
2009 struct lock_class *class)
1956{ 2010{
1957 int ret = 1; 2011 return state_verbose_f[bit >> 2](class);
2012}
1958 2013
1959 switch(new_bit) { 2014typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
1960 case LOCK_USED_IN_HARDIRQ: 2015 enum lock_usage_bit bit, const char *name);
1961 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS)) 2016
1962 return 0; 2017static int
1963 if (!valid_state(curr, this, new_bit, 2018mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit)
1964 LOCK_ENABLED_HARDIRQS_READ)) 2019{
1965 return 0; 2020 int excl_bit = exclusive_bit(new_bit);
1966 /* 2021 int read = new_bit & 1;
1967 * just marked it hardirq-safe, check that this lock 2022 int dir = new_bit & 2;
1968 * took no hardirq-unsafe lock in the past: 2023
1969 */ 2024 /*
1970 if (!check_usage_forwards(curr, this, 2025 * mark USED_IN has to look forwards -- to ensure no dependency
1971 LOCK_ENABLED_HARDIRQS, "hard")) 2026 * has ENABLED state, which would allow recursion deadlocks.
1972 return 0; 2027 *
1973#if STRICT_READ_CHECKS 2028 * mark ENABLED has to look backwards -- to ensure no dependee
1974 /* 2029 * has USED_IN state, which, again, would allow recursion deadlocks.
1975 * just marked it hardirq-safe, check that this lock 2030 */
1976 * took no hardirq-unsafe-read lock in the past: 2031 check_usage_f usage = dir ?
1977 */ 2032 check_usage_backwards : check_usage_forwards;
1978 if (!check_usage_forwards(curr, this, 2033
1979 LOCK_ENABLED_HARDIRQS_READ, "hard-read")) 2034 /*
1980 return 0; 2035 * Validate that this particular lock does not have conflicting
1981#endif 2036 * usage states.
1982 if (hardirq_verbose(hlock_class(this))) 2037 */
1983 ret = 2; 2038 if (!valid_state(curr, this, new_bit, excl_bit))
1984 break; 2039 return 0;
1985 case LOCK_USED_IN_SOFTIRQ: 2040
1986 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS)) 2041 /*
1987 return 0; 2042 * Validate that the lock dependencies don't have conflicting usage
1988 if (!valid_state(curr, this, new_bit, 2043 * states.
1989 LOCK_ENABLED_SOFTIRQS_READ)) 2044 */
1990 return 0; 2045 if ((!read || !dir || STRICT_READ_CHECKS) &&
1991 /* 2046 !usage(curr, this, excl_bit, state_name(new_bit)))
1992 * just marked it softirq-safe, check that this lock 2047 return 0;
1993 * took no softirq-unsafe lock in the past: 2048
1994 */ 2049 /*
1995 if (!check_usage_forwards(curr, this, 2050 * Check for read in write conflicts
1996 LOCK_ENABLED_SOFTIRQS, "soft")) 2051 */
1997 return 0; 2052 if (!read) {
1998#if STRICT_READ_CHECKS 2053 if (!valid_state(curr, this, new_bit, excl_bit + 1))
1999 /*
2000 * just marked it softirq-safe, check that this lock
2001 * took no softirq-unsafe-read lock in the past:
2002 */
2003 if (!check_usage_forwards(curr, this,
2004 LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
2005 return 0;
2006#endif
2007 if (softirq_verbose(hlock_class(this)))
2008 ret = 2;
2009 break;
2010 case LOCK_USED_IN_HARDIRQ_READ:
2011 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
2012 return 0;
2013 /*
2014 * just marked it hardirq-read-safe, check that this lock
2015 * took no hardirq-unsafe lock in the past:
2016 */
2017 if (!check_usage_forwards(curr, this,
2018 LOCK_ENABLED_HARDIRQS, "hard"))
2019 return 0;
2020 if (hardirq_verbose(hlock_class(this)))
2021 ret = 2;
2022 break;
2023 case LOCK_USED_IN_SOFTIRQ_READ:
2024 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
2025 return 0;
2026 /*
2027 * just marked it softirq-read-safe, check that this lock
2028 * took no softirq-unsafe lock in the past:
2029 */
2030 if (!check_usage_forwards(curr, this,
2031 LOCK_ENABLED_SOFTIRQS, "soft"))
2032 return 0;
2033 if (softirq_verbose(hlock_class(this)))
2034 ret = 2;
2035 break;
2036 case LOCK_ENABLED_HARDIRQS:
2037 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2038 return 0;
2039 if (!valid_state(curr, this, new_bit,
2040 LOCK_USED_IN_HARDIRQ_READ))
2041 return 0;
2042 /*
2043 * just marked it hardirq-unsafe, check that no hardirq-safe
2044 * lock in the system ever took it in the past:
2045 */
2046 if (!check_usage_backwards(curr, this,
2047 LOCK_USED_IN_HARDIRQ, "hard"))
2048 return 0;
2049#if STRICT_READ_CHECKS
2050 /*
2051 * just marked it hardirq-unsafe, check that no
2052 * hardirq-safe-read lock in the system ever took
2053 * it in the past:
2054 */
2055 if (!check_usage_backwards(curr, this,
2056 LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
2057 return 0;
2058#endif
2059 if (hardirq_verbose(hlock_class(this)))
2060 ret = 2;
2061 break;
2062 case LOCK_ENABLED_SOFTIRQS:
2063 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2064 return 0;
2065 if (!valid_state(curr, this, new_bit,
2066 LOCK_USED_IN_SOFTIRQ_READ))
2067 return 0;
2068 /*
2069 * just marked it softirq-unsafe, check that no softirq-safe
2070 * lock in the system ever took it in the past:
2071 */
2072 if (!check_usage_backwards(curr, this,
2073 LOCK_USED_IN_SOFTIRQ, "soft"))
2074 return 0;
2075#if STRICT_READ_CHECKS
2076 /*
2077 * just marked it softirq-unsafe, check that no
2078 * softirq-safe-read lock in the system ever took
2079 * it in the past:
2080 */
2081 if (!check_usage_backwards(curr, this,
2082 LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
2083 return 0;
2084#endif
2085 if (softirq_verbose(hlock_class(this)))
2086 ret = 2;
2087 break;
2088 case LOCK_ENABLED_HARDIRQS_READ:
2089 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2090 return 0;
2091#if STRICT_READ_CHECKS
2092 /*
2093 * just marked it hardirq-read-unsafe, check that no
2094 * hardirq-safe lock in the system ever took it in the past:
2095 */
2096 if (!check_usage_backwards(curr, this,
2097 LOCK_USED_IN_HARDIRQ, "hard"))
2098 return 0;
2099#endif
2100 if (hardirq_verbose(hlock_class(this)))
2101 ret = 2;
2102 break;
2103 case LOCK_ENABLED_SOFTIRQS_READ:
2104 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2105 return 0; 2054 return 0;
2106#if STRICT_READ_CHECKS 2055
2107 /* 2056 if (STRICT_READ_CHECKS &&
2108 * just marked it softirq-read-unsafe, check that no 2057 !usage(curr, this, excl_bit + 1,
2109 * softirq-safe lock in the system ever took it in the past: 2058 state_name(new_bit + 1)))
2110 */
2111 if (!check_usage_backwards(curr, this,
2112 LOCK_USED_IN_SOFTIRQ, "soft"))
2113 return 0; 2059 return 0;
2114#endif
2115 if (softirq_verbose(hlock_class(this)))
2116 ret = 2;
2117 break;
2118 default:
2119 WARN_ON(1);
2120 break;
2121 } 2060 }
2122 2061
2123 return ret; 2062 if (state_verbose(new_bit, hlock_class(this)))
2063 return 2;
2064
2065 return 1;
2124} 2066}
2125 2067
2068enum mark_type {
2069#define LOCKDEP_STATE(__STATE) __STATE,
2070#include "lockdep_states.h"
2071#undef LOCKDEP_STATE
2072};
2073
2126/* 2074/*
2127 * Mark all held locks with a usage bit: 2075 * Mark all held locks with a usage bit:
2128 */ 2076 */
2129static int 2077static int
2130mark_held_locks(struct task_struct *curr, int hardirq) 2078mark_held_locks(struct task_struct *curr, enum mark_type mark)
2131{ 2079{
2132 enum lock_usage_bit usage_bit; 2080 enum lock_usage_bit usage_bit;
2133 struct held_lock *hlock; 2081 struct held_lock *hlock;
@@ -2136,17 +2084,12 @@ mark_held_locks(struct task_struct *curr, int hardirq)
2136 for (i = 0; i < curr->lockdep_depth; i++) { 2084 for (i = 0; i < curr->lockdep_depth; i++) {
2137 hlock = curr->held_locks + i; 2085 hlock = curr->held_locks + i;
2138 2086
2139 if (hardirq) { 2087 usage_bit = 2 + (mark << 2); /* ENABLED */
2140 if (hlock->read) 2088 if (hlock->read)
2141 usage_bit = LOCK_ENABLED_HARDIRQS_READ; 2089 usage_bit += 1; /* READ */
2142 else 2090
2143 usage_bit = LOCK_ENABLED_HARDIRQS; 2091 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2144 } else { 2092
2145 if (hlock->read)
2146 usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
2147 else
2148 usage_bit = LOCK_ENABLED_SOFTIRQS;
2149 }
2150 if (!mark_lock(curr, hlock, usage_bit)) 2093 if (!mark_lock(curr, hlock, usage_bit))
2151 return 0; 2094 return 0;
2152 } 2095 }
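
The arithmetic in mark_held_locks() relies on each state occupying four consecutive bits in the order USED_IN, USED_IN_READ, ENABLED, ENABLED_READ, so 2 + (mark << 2) is always that state's ENABLED bit and +1 turns it into the READ variant:

	usage_bit = 2 + (HARDIRQ    << 2)	/* =  2 == LOCK_ENABLED_HARDIRQ    */
	usage_bit = 2 + (SOFTIRQ    << 2)	/* =  6 == LOCK_ENABLED_SOFTIRQ    */
	usage_bit = 2 + (RECLAIM_FS << 2)	/* = 10 == LOCK_ENABLED_RECLAIM_FS */

	/* hlock->read adds one: 7 == LOCK_ENABLED_SOFTIRQ_READ, and so on */
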
@@ -2200,7 +2143,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
2200 * We are going to turn hardirqs on, so set the 2143 * We are going to turn hardirqs on, so set the
2201 * usage bit for all held locks: 2144 * usage bit for all held locks:
2202 */ 2145 */
2203 if (!mark_held_locks(curr, 1)) 2146 if (!mark_held_locks(curr, HARDIRQ))
2204 return; 2147 return;
2205 /* 2148 /*
2206 * If we have softirqs enabled, then set the usage 2149 * If we have softirqs enabled, then set the usage
@@ -2208,7 +2151,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
2208 * this bit from being set before) 2151 * this bit from being set before)
2209 */ 2152 */
2210 if (curr->softirqs_enabled) 2153 if (curr->softirqs_enabled)
2211 if (!mark_held_locks(curr, 0)) 2154 if (!mark_held_locks(curr, SOFTIRQ))
2212 return; 2155 return;
2213 2156
2214 curr->hardirq_enable_ip = ip; 2157 curr->hardirq_enable_ip = ip;
@@ -2288,7 +2231,7 @@ void trace_softirqs_on(unsigned long ip)
2288 * enabled too: 2231 * enabled too:
2289 */ 2232 */
2290 if (curr->hardirqs_enabled) 2233 if (curr->hardirqs_enabled)
2291 mark_held_locks(curr, 0); 2234 mark_held_locks(curr, SOFTIRQ);
2292} 2235}
2293 2236
2294/* 2237/*
@@ -2317,6 +2260,48 @@ void trace_softirqs_off(unsigned long ip)
2317 debug_atomic_inc(&redundant_softirqs_off); 2260 debug_atomic_inc(&redundant_softirqs_off);
2318} 2261}
2319 2262
2263static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2264{
2265 struct task_struct *curr = current;
2266
2267 if (unlikely(!debug_locks))
2268 return;
2269
2270 /* no reclaim without waiting on it */
2271 if (!(gfp_mask & __GFP_WAIT))
2272 return;
2273
2274 /* this guy won't enter reclaim */
2275 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2276 return;
2277
2278 /* We're only interested __GFP_FS allocations for now */
2279 if (!(gfp_mask & __GFP_FS))
2280 return;
2281
2282 if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2283 return;
2284
2285 mark_held_locks(curr, RECLAIM_FS);
2286}
2287
2288static void check_flags(unsigned long flags);
2289
2290void lockdep_trace_alloc(gfp_t gfp_mask)
2291{
2292 unsigned long flags;
2293
2294 if (unlikely(current->lockdep_recursion))
2295 return;
2296
2297 raw_local_irq_save(flags);
2298 check_flags(flags);
2299 current->lockdep_recursion = 1;
2300 __lockdep_trace_alloc(gfp_mask, flags);
2301 current->lockdep_recursion = 0;
2302 raw_local_irq_restore(flags);
2303}
2304
2320static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) 2305static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2321{ 2306{
2322 /* 2307 /*
@@ -2345,19 +2330,35 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2345 if (!hlock->hardirqs_off) { 2330 if (!hlock->hardirqs_off) {
2346 if (hlock->read) { 2331 if (hlock->read) {
2347 if (!mark_lock(curr, hlock, 2332 if (!mark_lock(curr, hlock,
2348 LOCK_ENABLED_HARDIRQS_READ)) 2333 LOCK_ENABLED_HARDIRQ_READ))
2349 return 0; 2334 return 0;
2350 if (curr->softirqs_enabled) 2335 if (curr->softirqs_enabled)
2351 if (!mark_lock(curr, hlock, 2336 if (!mark_lock(curr, hlock,
2352 LOCK_ENABLED_SOFTIRQS_READ)) 2337 LOCK_ENABLED_SOFTIRQ_READ))
2353 return 0; 2338 return 0;
2354 } else { 2339 } else {
2355 if (!mark_lock(curr, hlock, 2340 if (!mark_lock(curr, hlock,
2356 LOCK_ENABLED_HARDIRQS)) 2341 LOCK_ENABLED_HARDIRQ))
2357 return 0; 2342 return 0;
2358 if (curr->softirqs_enabled) 2343 if (curr->softirqs_enabled)
2359 if (!mark_lock(curr, hlock, 2344 if (!mark_lock(curr, hlock,
2360 LOCK_ENABLED_SOFTIRQS)) 2345 LOCK_ENABLED_SOFTIRQ))
2346 return 0;
2347 }
2348 }
2349
2350 /*
2351 * We reuse the irq context infrastructure more broadly as a general
2352 * context checking code. This tests GFP_FS recursion (a lock taken
2353 * during reclaim for a GFP_FS allocation is held over a GFP_FS
2354 * allocation).
2355 */
2356 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2357 if (hlock->read) {
2358 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2359 return 0;
2360 } else {
2361 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2361 return 0; 2362 return 0;
2362 } 2363 }
2363 } 2364 }
@@ -2412,6 +2413,10 @@ static inline int separate_irq_context(struct task_struct *curr,
2412 return 0; 2413 return 0;
2413} 2414}
2414 2415
2416void lockdep_trace_alloc(gfp_t gfp_mask)
2417{
2418}
2419
2415#endif 2420#endif
2416 2421
2417/* 2422/*
@@ -2445,14 +2450,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
2445 return 0; 2450 return 0;
2446 2451
2447 switch (new_bit) { 2452 switch (new_bit) {
2448 case LOCK_USED_IN_HARDIRQ: 2453#define LOCKDEP_STATE(__STATE) \
2449 case LOCK_USED_IN_SOFTIRQ: 2454 case LOCK_USED_IN_##__STATE: \
2450 case LOCK_USED_IN_HARDIRQ_READ: 2455 case LOCK_USED_IN_##__STATE##_READ: \
2451 case LOCK_USED_IN_SOFTIRQ_READ: 2456 case LOCK_ENABLED_##__STATE: \
2452 case LOCK_ENABLED_HARDIRQS: 2457 case LOCK_ENABLED_##__STATE##_READ:
2453 case LOCK_ENABLED_SOFTIRQS: 2458#include "lockdep_states.h"
2454 case LOCK_ENABLED_HARDIRQS_READ: 2459#undef LOCKDEP_STATE
2455 case LOCK_ENABLED_SOFTIRQS_READ:
2456 ret = mark_lock_irq(curr, this, new_bit); 2460 ret = mark_lock_irq(curr, this, new_bit);
2457 if (!ret) 2461 if (!ret)
2458 return 0; 2462 return 0;
@@ -2966,6 +2970,16 @@ void lock_release(struct lockdep_map *lock, int nested,
2966} 2970}
2967EXPORT_SYMBOL_GPL(lock_release); 2971EXPORT_SYMBOL_GPL(lock_release);
2968 2972
2973void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
2974{
2975 current->lockdep_reclaim_gfp = gfp_mask;
2976}
2977
2978void lockdep_clear_current_reclaim_state(void)
2979{
2980 current->lockdep_reclaim_gfp = 0;
2981}
2982
2969#ifdef CONFIG_LOCK_STAT 2983#ifdef CONFIG_LOCK_STAT
2970static int 2984static int
2971print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, 2985print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
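
The mm/ hunks that consume these hooks are in the diffstat (mm/page_alloc.c, mm/vmscan.c and the slab allocators) but are not reproduced above. The intended wiring, shown here only as a sketch with hypothetical function names rather than the real call sites: the reclaim path brackets itself with the set/clear calls so locks taken during reclaim get marked USED_IN_RECLAIM_FS, and the allocator entry points call lockdep_trace_alloc() so locks held across a __GFP_FS allocation get marked ENABLED_RECLAIM_FS.

	#include <linux/gfp.h>
	#include <linux/lockdep.h>

	extern unsigned long do_reclaim(gfp_t gfp_mask);	/* hypothetical worker */
	extern void *do_alloc(gfp_t gfp_mask);			/* hypothetical worker */

	/* sketch of a reclaim entry point (try_to_free_pages()-style) */
	static unsigned long reclaim_sketch(gfp_t gfp_mask)
	{
		unsigned long progress;

		lockdep_set_current_reclaim_state(gfp_mask);
		progress = do_reclaim(gfp_mask);
		lockdep_clear_current_reclaim_state();

		return progress;
	}

	/* sketch of an allocator entry point (page/slab allocator fast path) */
	static void *alloc_sketch(gfp_t gfp_mask)
	{
		lockdep_trace_alloc(gfp_mask);	/* flags held locks as ENABLED_RECLAIM_FS */
		return do_alloc(gfp_mask);
	}
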
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 56b196932c08..a2cc7e9a6e84 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -7,6 +7,45 @@
7 */ 7 */
8 8
9/* 9/*
10 * Lock-class usage-state bits:
11 */
12enum lock_usage_bit {
13#define LOCKDEP_STATE(__STATE) \
14 LOCK_USED_IN_##__STATE, \
15 LOCK_USED_IN_##__STATE##_READ, \
16 LOCK_ENABLED_##__STATE, \
17 LOCK_ENABLED_##__STATE##_READ,
18#include "lockdep_states.h"
19#undef LOCKDEP_STATE
20 LOCK_USED,
21 LOCK_USAGE_STATES
22};
23
24/*
25 * Usage-state bitmasks:
26 */
27#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),
28
29enum {
30#define LOCKDEP_STATE(__STATE) \
31 __LOCKF(USED_IN_##__STATE) \
32 __LOCKF(USED_IN_##__STATE##_READ) \
33 __LOCKF(ENABLED_##__STATE) \
34 __LOCKF(ENABLED_##__STATE##_READ)
35#include "lockdep_states.h"
36#undef LOCKDEP_STATE
37 __LOCKF(USED)
38};
39
40#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
41#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
42
43#define LOCKF_ENABLED_IRQ_READ \
44 (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
45#define LOCKF_USED_IN_IRQ_READ \
46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
47
48/*
10 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies 49 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
11 * we track. 50 * we track.
12 * 51 *
@@ -31,8 +70,10 @@
31extern struct list_head all_lock_classes; 70extern struct list_head all_lock_classes;
32extern struct lock_chain lock_chains[]; 71extern struct lock_chain lock_chains[];
33 72
34extern void 73#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
35get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4); 74
75extern void get_usage_chars(struct lock_class *class,
76 char usage[LOCK_USAGE_CHARS]);
36 77
37extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str); 78extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
38 79
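
Expanding the LOCKDEP_STATE() template over the three entries in kernel/lockdep_states.h gives the concrete numbering below; it is also where XXX_LOCK_USAGE_STATES == 1 + 3*4 == 13 in <linux/lockdep.h> and LOCK_USAGE_CHARS == 1 + 13/2 == 7 (six usage characters plus the terminating NUL) come from:

	enum lock_usage_bit {
		LOCK_USED_IN_HARDIRQ		=  0,
		LOCK_USED_IN_HARDIRQ_READ	=  1,
		LOCK_ENABLED_HARDIRQ		=  2,
		LOCK_ENABLED_HARDIRQ_READ	=  3,
		LOCK_USED_IN_SOFTIRQ		=  4,
		LOCK_USED_IN_SOFTIRQ_READ	=  5,
		LOCK_ENABLED_SOFTIRQ		=  6,
		LOCK_ENABLED_SOFTIRQ_READ	=  7,
		LOCK_USED_IN_RECLAIM_FS		=  8,
		LOCK_USED_IN_RECLAIM_FS_READ	=  9,
		LOCK_ENABLED_RECLAIM_FS		= 10,
		LOCK_ENABLED_RECLAIM_FS_READ	= 11,
		LOCK_USED			= 12,
		LOCK_USAGE_STATES		= 13
	};
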
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 13716b813896..d7135aa2d2c4 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -84,7 +84,7 @@ static int l_show(struct seq_file *m, void *v)
84{ 84{
85 struct lock_class *class = v; 85 struct lock_class *class = v;
86 struct lock_list *entry; 86 struct lock_list *entry;
87 char c1, c2, c3, c4; 87 char usage[LOCK_USAGE_CHARS];
88 88
89 if (v == SEQ_START_TOKEN) { 89 if (v == SEQ_START_TOKEN) {
90 seq_printf(m, "all lock classes:\n"); 90 seq_printf(m, "all lock classes:\n");
@@ -100,8 +100,8 @@ static int l_show(struct seq_file *m, void *v)
100 seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class)); 100 seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
101#endif 101#endif
102 102
103 get_usage_chars(class, &c1, &c2, &c3, &c4); 103 get_usage_chars(class, usage);
104 seq_printf(m, " %c%c%c%c", c1, c2, c3, c4); 104 seq_printf(m, " %s", usage);
105 105
106 seq_printf(m, ": "); 106 seq_printf(m, ": ");
107 print_name(m, class); 107 print_name(m, class);
@@ -300,27 +300,27 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
300 nr_uncategorized++; 300 nr_uncategorized++;
301 if (class->usage_mask & LOCKF_USED_IN_IRQ) 301 if (class->usage_mask & LOCKF_USED_IN_IRQ)
302 nr_irq_safe++; 302 nr_irq_safe++;
303 if (class->usage_mask & LOCKF_ENABLED_IRQS) 303 if (class->usage_mask & LOCKF_ENABLED_IRQ)
304 nr_irq_unsafe++; 304 nr_irq_unsafe++;
305 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ) 305 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
306 nr_softirq_safe++; 306 nr_softirq_safe++;
307 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS) 307 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
308 nr_softirq_unsafe++; 308 nr_softirq_unsafe++;
309 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ) 309 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
310 nr_hardirq_safe++; 310 nr_hardirq_safe++;
311 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS) 311 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
312 nr_hardirq_unsafe++; 312 nr_hardirq_unsafe++;
313 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ) 313 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
314 nr_irq_read_safe++; 314 nr_irq_read_safe++;
315 if (class->usage_mask & LOCKF_ENABLED_IRQS_READ) 315 if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
316 nr_irq_read_unsafe++; 316 nr_irq_read_unsafe++;
317 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) 317 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
318 nr_softirq_read_safe++; 318 nr_softirq_read_safe++;
319 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) 319 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
320 nr_softirq_read_unsafe++; 320 nr_softirq_read_unsafe++;
321 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) 321 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
322 nr_hardirq_read_safe++; 322 nr_hardirq_read_safe++;
323 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) 323 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
324 nr_hardirq_read_unsafe++; 324 nr_hardirq_read_unsafe++;
325 325
326#ifdef CONFIG_PROVE_LOCKING 326#ifdef CONFIG_PROVE_LOCKING
@@ -601,6 +601,10 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
601static void seq_header(struct seq_file *m) 601static void seq_header(struct seq_file *m)
602{ 602{
603 seq_printf(m, "lock_stat version 0.3\n"); 603 seq_printf(m, "lock_stat version 0.3\n");
604
605 if (unlikely(!debug_locks))
606 seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
607
604 seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1)); 608 seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
605 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s " 609 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
606 "%14s %14s\n", 610 "%14s %14s\n",
diff --git a/kernel/lockdep_states.h b/kernel/lockdep_states.h
new file mode 100644
index 000000000000..995b0cc2b84c
--- /dev/null
+++ b/kernel/lockdep_states.h
@@ -0,0 +1,9 @@
1/*
2 * Lockdep states,
3 *
4 * please update XXX_LOCK_USAGE_STATES in include/linux/lockdep.h whenever
5 * you add one, or come up with a nice dynamic solution.
6 */
7LOCKDEP_STATE(HARDIRQ)
8LOCKDEP_STATE(SOFTIRQ)
9LOCKDEP_STATE(RECLAIM_FS)
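
Per the comment above, adding a state is meant to stay a small change: one LOCKDEP_STATE() line here, a bump of XXX_LOCK_USAGE_STATES in include/linux/lockdep.h, plus a matching <STATE>_verbose() filter in kernel/lockdep.c (the templates there key off the state name). For a hypothetical fourth state the edits would look like:

	/* kernel/lockdep_states.h */
	LOCKDEP_STATE(HARDIRQ)
	LOCKDEP_STATE(SOFTIRQ)
	LOCKDEP_STATE(RECLAIM_FS)
	LOCKDEP_STATE(MYSTATE)				/* hypothetical new state */

	/* include/linux/lockdep.h */
	#define XXX_LOCK_USAGE_STATES	(1+4*4)		/* was (1+3*4) */

	/* kernel/lockdep.c */
	static int MYSTATE_verbose(struct lock_class *class)
	{
		return 0;
	}
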
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 1d94160eb532..50d022e5a560 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -26,11 +26,6 @@
26/* 26/*
27 * Must be called with lock->wait_lock held. 27 * Must be called with lock->wait_lock held.
28 */ 28 */
29void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
30{
31 lock->owner = new_owner;
32}
33
34void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) 29void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
35{ 30{
36 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter)); 31 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
@@ -59,7 +54,6 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
59 54
60 /* Mark the current thread as blocked on the lock: */ 55 /* Mark the current thread as blocked on the lock: */
61 ti->task->blocked_on = waiter; 56 ti->task->blocked_on = waiter;
62 waiter->lock = lock;
63} 57}
64 58
65void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, 59void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
@@ -82,7 +76,7 @@ void debug_mutex_unlock(struct mutex *lock)
82 DEBUG_LOCKS_WARN_ON(lock->magic != lock); 76 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
83 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); 77 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
84 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); 78 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
85 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); 79 mutex_clear_owner(lock);
86} 80}
87 81
88void debug_mutex_init(struct mutex *lock, const char *name, 82void debug_mutex_init(struct mutex *lock, const char *name,
@@ -95,7 +89,6 @@ void debug_mutex_init(struct mutex *lock, const char *name,
95 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 89 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
96 lockdep_init_map(&lock->dep_map, name, key, 0); 90 lockdep_init_map(&lock->dep_map, name, key, 0);
97#endif 91#endif
98 lock->owner = NULL;
99 lock->magic = lock; 92 lock->magic = lock;
100} 93}
101 94
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index babfbdfc534b..6b2d735846a5 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -13,14 +13,6 @@
13/* 13/*
14 * This must be called with lock->wait_lock held. 14 * This must be called with lock->wait_lock held.
15 */ 15 */
16extern void
17debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
18
19static inline void debug_mutex_clear_owner(struct mutex *lock)
20{
21 lock->owner = NULL;
22}
23
24extern void debug_mutex_lock_common(struct mutex *lock, 16extern void debug_mutex_lock_common(struct mutex *lock,
25 struct mutex_waiter *waiter); 17 struct mutex_waiter *waiter);
26extern void debug_mutex_wake_waiter(struct mutex *lock, 18extern void debug_mutex_wake_waiter(struct mutex *lock,
@@ -35,6 +27,16 @@ extern void debug_mutex_unlock(struct mutex *lock);
35extern void debug_mutex_init(struct mutex *lock, const char *name, 27extern void debug_mutex_init(struct mutex *lock, const char *name,
36 struct lock_class_key *key); 28 struct lock_class_key *key);
37 29
30static inline void mutex_set_owner(struct mutex *lock)
31{
32 lock->owner = current_thread_info();
33}
34
35static inline void mutex_clear_owner(struct mutex *lock)
36{
37 lock->owner = NULL;
38}
39
38#define spin_lock_mutex(lock, flags) \ 40#define spin_lock_mutex(lock, flags) \
39 do { \ 41 do { \
40 struct mutex *l = container_of(lock, struct mutex, wait_lock); \ 42 struct mutex *l = container_of(lock, struct mutex, wait_lock); \
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4f45d4b658ef..5d79781394a3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -10,6 +10,11 @@
10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and 10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
11 * David Howells for suggestions and improvements. 11 * David Howells for suggestions and improvements.
12 * 12 *
13 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
14 * from the -rt tree, where it was originally implemented for rtmutexes
15 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
16 * and Sven Dietrich.
17 *
13 * Also see Documentation/mutex-design.txt. 18 * Also see Documentation/mutex-design.txt.
14 */ 19 */
15#include <linux/mutex.h> 20#include <linux/mutex.h>
@@ -46,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
46 atomic_set(&lock->count, 1); 51 atomic_set(&lock->count, 1);
47 spin_lock_init(&lock->wait_lock); 52 spin_lock_init(&lock->wait_lock);
48 INIT_LIST_HEAD(&lock->wait_list); 53 INIT_LIST_HEAD(&lock->wait_list);
54 mutex_clear_owner(lock);
49 55
50 debug_mutex_init(lock, name, key); 56 debug_mutex_init(lock, name, key);
51} 57}
@@ -91,6 +97,7 @@ void inline __sched mutex_lock(struct mutex *lock)
91 * 'unlocked' into 'locked' state. 97 * 'unlocked' into 'locked' state.
92 */ 98 */
93 __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath); 99 __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
100 mutex_set_owner(lock);
94} 101}
95 102
96EXPORT_SYMBOL(mutex_lock); 103EXPORT_SYMBOL(mutex_lock);
@@ -115,6 +122,14 @@ void __sched mutex_unlock(struct mutex *lock)
115 * The unlocking fastpath is the 0->1 transition from 'locked' 122 * The unlocking fastpath is the 0->1 transition from 'locked'
116 * into 'unlocked' state: 123 * into 'unlocked' state:
117 */ 124 */
125#ifndef CONFIG_DEBUG_MUTEXES
126 /*
127 * When debugging is enabled we must not clear the owner before time,
128 * the slow path will always be taken, and that clears the owner field
129 * after verifying that it was indeed current.
130 */
131 mutex_clear_owner(lock);
132#endif
118 __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath); 133 __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
119} 134}
120 135
@@ -129,21 +144,75 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
129{ 144{
130 struct task_struct *task = current; 145 struct task_struct *task = current;
131 struct mutex_waiter waiter; 146 struct mutex_waiter waiter;
132 unsigned int old_val;
133 unsigned long flags; 147 unsigned long flags;
134 148
149 preempt_disable();
150 mutex_acquire(&lock->dep_map, subclass, 0, ip);
151#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
152 /*
153 * Optimistic spinning.
154 *
155 * We try to spin for acquisition when we find that there are no
156 * pending waiters and the lock owner is currently running on a
157 * (different) CPU.
158 *
159 * The rationale is that if the lock owner is running, it is likely to
160 * release the lock soon.
161 *
162 * Since this needs the lock owner, and this mutex implementation
163 * doesn't track the owner atomically in the lock field, we need to
164 * track it non-atomically.
165 *
166 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
167 * to serialize everything.
168 */
169
170 for (;;) {
171 struct thread_info *owner;
172
173 /*
174 * If there's an owner, wait for it to either
175 * release the lock or go to sleep.
176 */
177 owner = ACCESS_ONCE(lock->owner);
178 if (owner && !mutex_spin_on_owner(lock, owner))
179 break;
180
181 if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
182 lock_acquired(&lock->dep_map, ip);
183 mutex_set_owner(lock);
184 preempt_enable();
185 return 0;
186 }
187
188 /*
189 * When there's no owner, we might have preempted between the
190 * owner acquiring the lock and setting the owner field. If
191 * we're an RT task that will live-lock because we won't let
192 * the owner complete.
193 */
194 if (!owner && (need_resched() || rt_task(task)))
195 break;
196
197 /*
198 * The cpu_relax() call is a compiler barrier which forces
199 * everything in this loop to be re-loaded. We don't need
200 * memory barriers as we'll eventually observe the right
201 * values at the cost of a few extra spins.
202 */
203 cpu_relax();
204 }
205#endif
135 spin_lock_mutex(&lock->wait_lock, flags); 206 spin_lock_mutex(&lock->wait_lock, flags);
136 207
137 debug_mutex_lock_common(lock, &waiter); 208 debug_mutex_lock_common(lock, &waiter);
138 mutex_acquire(&lock->dep_map, subclass, 0, ip);
139 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); 209 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
140 210
141 /* add waiting tasks to the end of the waitqueue (FIFO): */ 211 /* add waiting tasks to the end of the waitqueue (FIFO): */
142 list_add_tail(&waiter.list, &lock->wait_list); 212 list_add_tail(&waiter.list, &lock->wait_list);
143 waiter.task = task; 213 waiter.task = task;
144 214
145 old_val = atomic_xchg(&lock->count, -1); 215 if (atomic_xchg(&lock->count, -1) == 1)
146 if (old_val == 1)
147 goto done; 216 goto done;
148 217
149 lock_contended(&lock->dep_map, ip); 218 lock_contended(&lock->dep_map, ip);
@@ -158,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
158 * that when we release the lock, we properly wake up the 227 * that when we release the lock, we properly wake up the
159 * other waiters: 228 * other waiters:
160 */ 229 */
161 old_val = atomic_xchg(&lock->count, -1); 230 if (atomic_xchg(&lock->count, -1) == 1)
162 if (old_val == 1)
163 break; 231 break;
164 232
165 /* 233 /*
@@ -173,21 +241,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
173 spin_unlock_mutex(&lock->wait_lock, flags); 241 spin_unlock_mutex(&lock->wait_lock, flags);
174 242
175 debug_mutex_free_waiter(&waiter); 243 debug_mutex_free_waiter(&waiter);
244 preempt_enable();
176 return -EINTR; 245 return -EINTR;
177 } 246 }
178 __set_task_state(task, state); 247 __set_task_state(task, state);
179 248
180 /* didnt get the lock, go to sleep: */ 249 /* didnt get the lock, go to sleep: */
181 spin_unlock_mutex(&lock->wait_lock, flags); 250 spin_unlock_mutex(&lock->wait_lock, flags);
182 schedule(); 251 __schedule();
183 spin_lock_mutex(&lock->wait_lock, flags); 252 spin_lock_mutex(&lock->wait_lock, flags);
184 } 253 }
185 254
186done: 255done:
187 lock_acquired(&lock->dep_map, ip); 256 lock_acquired(&lock->dep_map, ip);
188 /* got the lock - rejoice! */ 257 /* got the lock - rejoice! */
189 mutex_remove_waiter(lock, &waiter, task_thread_info(task)); 258 mutex_remove_waiter(lock, &waiter, current_thread_info());
190 debug_mutex_set_owner(lock, task_thread_info(task)); 259 mutex_set_owner(lock);
191 260
192 /* set it to 0 if there are no waiters left: */ 261 /* set it to 0 if there are no waiters left: */
193 if (likely(list_empty(&lock->wait_list))) 262 if (likely(list_empty(&lock->wait_list)))
@@ -196,6 +265,7 @@ done:
196 spin_unlock_mutex(&lock->wait_lock, flags); 265 spin_unlock_mutex(&lock->wait_lock, flags);
197 266
198 debug_mutex_free_waiter(&waiter); 267 debug_mutex_free_waiter(&waiter);
268 preempt_enable();
199 269
200 return 0; 270 return 0;
201} 271}
@@ -222,7 +292,8 @@ int __sched
222mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) 292mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
223{ 293{
224 might_sleep(); 294 might_sleep();
225 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_); 295 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
296 subclass, _RET_IP_);
226} 297}
227 298
228EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); 299EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -260,8 +331,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
260 wake_up_process(waiter->task); 331 wake_up_process(waiter->task);
261 } 332 }
262 333
263 debug_mutex_clear_owner(lock);
264
265 spin_unlock_mutex(&lock->wait_lock, flags); 334 spin_unlock_mutex(&lock->wait_lock, flags);
266} 335}
267 336
@@ -298,18 +367,30 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
298 */ 367 */
299int __sched mutex_lock_interruptible(struct mutex *lock) 368int __sched mutex_lock_interruptible(struct mutex *lock)
300{ 369{
370 int ret;
371
301 might_sleep(); 372 might_sleep();
302 return __mutex_fastpath_lock_retval 373 ret = __mutex_fastpath_lock_retval
303 (&lock->count, __mutex_lock_interruptible_slowpath); 374 (&lock->count, __mutex_lock_interruptible_slowpath);
375 if (!ret)
376 mutex_set_owner(lock);
377
378 return ret;
304} 379}
305 380
306EXPORT_SYMBOL(mutex_lock_interruptible); 381EXPORT_SYMBOL(mutex_lock_interruptible);
307 382
308int __sched mutex_lock_killable(struct mutex *lock) 383int __sched mutex_lock_killable(struct mutex *lock)
309{ 384{
385 int ret;
386
310 might_sleep(); 387 might_sleep();
311 return __mutex_fastpath_lock_retval 388 ret = __mutex_fastpath_lock_retval
312 (&lock->count, __mutex_lock_killable_slowpath); 389 (&lock->count, __mutex_lock_killable_slowpath);
390 if (!ret)
391 mutex_set_owner(lock);
392
393 return ret;
313} 394}
314EXPORT_SYMBOL(mutex_lock_killable); 395EXPORT_SYMBOL(mutex_lock_killable);
315 396
@@ -352,9 +433,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
352 433
353 prev = atomic_xchg(&lock->count, -1); 434 prev = atomic_xchg(&lock->count, -1);
354 if (likely(prev == 1)) { 435 if (likely(prev == 1)) {
355 debug_mutex_set_owner(lock, current_thread_info()); 436 mutex_set_owner(lock);
356 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); 437 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
357 } 438 }
439
358 /* Set it back to 0 if there are no waiters: */ 440 /* Set it back to 0 if there are no waiters: */
359 if (likely(list_empty(&lock->wait_list))) 441 if (likely(list_empty(&lock->wait_list)))
360 atomic_set(&lock->count, 0); 442 atomic_set(&lock->count, 0);
@@ -380,8 +462,13 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
380 */ 462 */
381int __sched mutex_trylock(struct mutex *lock) 463int __sched mutex_trylock(struct mutex *lock)
382{ 464{
383 return __mutex_fastpath_trylock(&lock->count, 465 int ret;
384 __mutex_trylock_slowpath); 466
467 ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
468 if (ret)
469 mutex_set_owner(lock);
470
471 return ret;
385} 472}
386 473
387EXPORT_SYMBOL(mutex_trylock); 474EXPORT_SYMBOL(mutex_trylock);
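The mutex.c hunks above keep the existing xchg-based slowpath but now set the owner after every successful acquisition (mutex_set_owner() in the fastpath wrappers and in the done: path), so a contending CPU has something to watch while it spins. The exchange-to-(-1) step is worth spelling out: a single atomic_xchg() both attempts the acquisition and marks the lock as contended. A minimal userspace sketch of that one step, using C11 atomics instead of the kernel's atomic_t -- the value encoding (1 unlocked, 0 locked, -1 locked with possible waiters) matches the mutex counter, everything else here is purely illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    /* 1: unlocked, 0: locked/no waiters, -1: locked, waiters possible */
    static atomic_int count = 1;

    /*
     * One waiter-side step from the slowpath above: exchange -1 into
     * the counter.  If the old value was 1 the lock was free and is
     * now ours; any other value means it was held, and the -1 left
     * behind tells the eventual unlocker to take its slowpath and
     * wake a waiter.
     */
    static int try_acquire_and_mark_contended(void)
    {
            return atomic_exchange(&count, -1) == 1;
    }

    int main(void)
    {
            printf("first attempt:  %s\n",
                   try_acquire_and_mark_contended() ? "acquired" : "contended");
            printf("second attempt: %s\n",
                   try_acquire_and_mark_contended() ? "acquired" : "contended");
            return 0;
    }

Either outcome leaves count at -1, which is why the slowpath resets it to 0 once the wait list is empty (the list_empty() check above).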
diff --git a/kernel/mutex.h b/kernel/mutex.h
index a075dafbb290..67578ca48f94 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -16,8 +16,26 @@
16#define mutex_remove_waiter(lock, waiter, ti) \ 16#define mutex_remove_waiter(lock, waiter, ti) \
17 __list_del((waiter)->list.prev, (waiter)->list.next) 17 __list_del((waiter)->list.prev, (waiter)->list.next)
18 18
19#define debug_mutex_set_owner(lock, new_owner) do { } while (0) 19#ifdef CONFIG_SMP
20#define debug_mutex_clear_owner(lock) do { } while (0) 20static inline void mutex_set_owner(struct mutex *lock)
21{
22 lock->owner = current_thread_info();
23}
24
25static inline void mutex_clear_owner(struct mutex *lock)
26{
27 lock->owner = NULL;
28}
29#else
30static inline void mutex_set_owner(struct mutex *lock)
31{
32}
33
34static inline void mutex_clear_owner(struct mutex *lock)
35{
36}
37#endif
38
21#define debug_mutex_wake_waiter(lock, waiter) do { } while (0) 39#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
22#define debug_mutex_free_waiter(waiter) do { } while (0) 40#define debug_mutex_free_waiter(waiter) do { } while (0)
23#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0) 41#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5757e03cfac0..196d48babbef 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4942,15 +4942,13 @@ pick_next_task(struct rq *rq)
4942/* 4942/*
4943 * schedule() is the main scheduler function. 4943 * schedule() is the main scheduler function.
4944 */ 4944 */
4945asmlinkage void __sched schedule(void) 4945asmlinkage void __sched __schedule(void)
4946{ 4946{
4947 struct task_struct *prev, *next; 4947 struct task_struct *prev, *next;
4948 unsigned long *switch_count; 4948 unsigned long *switch_count;
4949 struct rq *rq; 4949 struct rq *rq;
4950 int cpu; 4950 int cpu;
4951 4951
4952need_resched:
4953 preempt_disable();
4954 cpu = smp_processor_id(); 4952 cpu = smp_processor_id();
4955 rq = cpu_rq(cpu); 4953 rq = cpu_rq(cpu);
4956 rcu_qsctr_inc(cpu); 4954 rcu_qsctr_inc(cpu);
@@ -5007,13 +5005,80 @@ need_resched_nonpreemptible:
5007 5005
5008 if (unlikely(reacquire_kernel_lock(current) < 0)) 5006 if (unlikely(reacquire_kernel_lock(current) < 0))
5009 goto need_resched_nonpreemptible; 5007 goto need_resched_nonpreemptible;
5008}
5010 5009
5010asmlinkage void __sched schedule(void)
5011{
5012need_resched:
5013 preempt_disable();
5014 __schedule();
5011 preempt_enable_no_resched(); 5015 preempt_enable_no_resched();
5012 if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) 5016 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
5013 goto need_resched; 5017 goto need_resched;
5014} 5018}
5015EXPORT_SYMBOL(schedule); 5019EXPORT_SYMBOL(schedule);
5016 5020
5021#ifdef CONFIG_SMP
5022/*
5023 * Look out! "owner" is an entirely speculative pointer
5024 * access and not reliable.
5025 */
5026int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
5027{
5028 unsigned int cpu;
5029 struct rq *rq;
5030
5031 if (!sched_feat(OWNER_SPIN))
5032 return 0;
5033
5034#ifdef CONFIG_DEBUG_PAGEALLOC
5035 /*
5036 * Need to access the cpu field knowing that
5037 * DEBUG_PAGEALLOC could have unmapped it if
5038 * the mutex owner just released it and exited.
5039 */
5040 if (probe_kernel_address(&owner->cpu, cpu))
5041 goto out;
5042#else
5043 cpu = owner->cpu;
5044#endif
5045
5046 /*
5047 * Even if the access succeeded (likely case),
5048 * the cpu field may no longer be valid.
5049 */
5050 if (cpu >= nr_cpumask_bits)
5051 goto out;
5052
5053 /*
5054 * We need to validate that we can do a
5055 * get_cpu() and that we have the percpu area.
5056 */
5057 if (!cpu_online(cpu))
5058 goto out;
5059
5060 rq = cpu_rq(cpu);
5061
5062 for (;;) {
5063 /*
5064 * Owner changed, break to re-assess state.
5065 */
5066 if (lock->owner != owner)
5067 break;
5068
5069 /*
5070 * Is that owner really running on that cpu?
5071 */
5072 if (task_thread_info(rq->curr) != owner || need_resched())
5073 return 0;
5074
5075 cpu_relax();
5076 }
5077out:
5078 return 1;
5079}
5080#endif
5081
5017#ifdef CONFIG_PREEMPT 5082#ifdef CONFIG_PREEMPT
5018/* 5083/*
5019 * this is the entry point to schedule() from in-kernel preemption 5084 * this is the entry point to schedule() from in-kernel preemption
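mutex_spin_on_owner() makes all of its decisions from data it cannot fully trust: the owner pointer is speculative, the cpu field may be stale, and the runqueue is only consulted after the range and cpu_online() checks above. Stripped of that validation, the loop reduces to a small decision: retry the lock once the owner changes, keep spinning while the same owner is still on a CPU, and give up (return 0, i.e. block like a normal waiter) if the owner is off-CPU or we ourselves need to reschedule. A boolean model of that decision, runnable in userspace -- the predicate names are invented for illustration; the real code reads struct rq and thread_info instead:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Decision model of the for(;;) loop in mutex_spin_on_owner():
     * true  -> worth retrying the lock / keep spinning,
     * false -> stop spinning and block like a normal waiter.
     */
    static bool keep_spinning(bool owner_changed, bool owner_on_cpu,
                              bool we_need_resched)
    {
            if (owner_changed)                      /* lock released or handed over */
                    return true;
            if (!owner_on_cpu || we_need_resched)   /* owner sleeps, or we must yield */
                    return false;
            return true;                            /* owner still running: spin on */
    }

    int main(void)
    {
            printf("owner running:       %d\n", keep_spinning(false, true,  false));
            printf("owner changed:       %d\n", keep_spinning(true,  false, false));
            printf("owner went to sleep: %d\n", keep_spinning(false, false, false));
            return 0;
    }

The OWNER_SPIN scheduler feature added in sched_features.h gates the whole path and, with CONFIG_SCHED_DEBUG, can be toggled through debugfs if the spinning misbehaves.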
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 76f61756e677..4569bfa7df9b 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -14,3 +14,4 @@ SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
14SCHED_FEAT(ASYM_EFF_LOAD, 1) 14SCHED_FEAT(ASYM_EFF_LOAD, 1)
15SCHED_FEAT(WAKEUP_OVERLAP, 0) 15SCHED_FEAT(WAKEUP_OVERLAP, 0)
16SCHED_FEAT(LAST_BUDDY, 1) 16SCHED_FEAT(LAST_BUDDY, 1)
17SCHED_FEAT(OWNER_SPIN, 1)

diff --git a/kernel/timer.c b/kernel/timer.c
index 9b77fc9a9ac8..b4555568b4e4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -491,14 +491,18 @@ static inline void debug_timer_free(struct timer_list *timer)
491 debug_object_free(timer, &timer_debug_descr); 491 debug_object_free(timer, &timer_debug_descr);
492} 492}
493 493
494static void __init_timer(struct timer_list *timer); 494static void __init_timer(struct timer_list *timer,
495 const char *name,
496 struct lock_class_key *key);
495 497
496void init_timer_on_stack(struct timer_list *timer) 498void init_timer_on_stack_key(struct timer_list *timer,
499 const char *name,
500 struct lock_class_key *key)
497{ 501{
498 debug_object_init_on_stack(timer, &timer_debug_descr); 502 debug_object_init_on_stack(timer, &timer_debug_descr);
499 __init_timer(timer); 503 __init_timer(timer, name, key);
500} 504}
501EXPORT_SYMBOL_GPL(init_timer_on_stack); 505EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
502 506
503void destroy_timer_on_stack(struct timer_list *timer) 507void destroy_timer_on_stack(struct timer_list *timer)
504{ 508{
@@ -512,7 +516,9 @@ static inline void debug_timer_activate(struct timer_list *timer) { }
512static inline void debug_timer_deactivate(struct timer_list *timer) { } 516static inline void debug_timer_deactivate(struct timer_list *timer) { }
513#endif 517#endif
514 518
515static void __init_timer(struct timer_list *timer) 519static void __init_timer(struct timer_list *timer,
520 const char *name,
521 struct lock_class_key *key)
516{ 522{
517 timer->entry.next = NULL; 523 timer->entry.next = NULL;
518 timer->base = __raw_get_cpu_var(tvec_bases); 524 timer->base = __raw_get_cpu_var(tvec_bases);
@@ -521,6 +527,7 @@ static void __init_timer(struct timer_list *timer)
521 timer->start_pid = -1; 527 timer->start_pid = -1;
522 memset(timer->start_comm, 0, TASK_COMM_LEN); 528 memset(timer->start_comm, 0, TASK_COMM_LEN);
523#endif 529#endif
530 lockdep_init_map(&timer->lockdep_map, name, key, 0);
524} 531}
525 532
526/** 533/**
@@ -530,19 +537,23 @@ static void __init_timer(struct timer_list *timer)
530 * init_timer() must be done to a timer prior calling *any* of the 537 * init_timer() must be done to a timer prior calling *any* of the
531 * other timer functions. 538 * other timer functions.
532 */ 539 */
533void init_timer(struct timer_list *timer) 540void init_timer_key(struct timer_list *timer,
541 const char *name,
542 struct lock_class_key *key)
534{ 543{
535 debug_timer_init(timer); 544 debug_timer_init(timer);
536 __init_timer(timer); 545 __init_timer(timer, name, key);
537} 546}
538EXPORT_SYMBOL(init_timer); 547EXPORT_SYMBOL(init_timer_key);
539 548
540void init_timer_deferrable(struct timer_list *timer) 549void init_timer_deferrable_key(struct timer_list *timer,
550 const char *name,
551 struct lock_class_key *key)
541{ 552{
542 init_timer(timer); 553 init_timer_key(timer, name, key);
543 timer_set_deferrable(timer); 554 timer_set_deferrable(timer);
544} 555}
545EXPORT_SYMBOL(init_timer_deferrable); 556EXPORT_SYMBOL(init_timer_deferrable_key);
546 557
547static inline void detach_timer(struct timer_list *timer, 558static inline void detach_timer(struct timer_list *timer,
548 int clear_pending) 559 int clear_pending)
@@ -826,6 +837,15 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
826 */ 837 */
827int del_timer_sync(struct timer_list *timer) 838int del_timer_sync(struct timer_list *timer)
828{ 839{
840#ifdef CONFIG_LOCKDEP
841 unsigned long flags;
842
843 local_irq_save(flags);
844 lock_map_acquire(&timer->lockdep_map);
845 lock_map_release(&timer->lockdep_map);
846 local_irq_restore(flags);
847#endif
848
829 for (;;) { 849 for (;;) {
830 int ret = try_to_del_timer_sync(timer); 850 int ret = try_to_del_timer_sync(timer);
831 if (ret >= 0) 851 if (ret >= 0)
@@ -897,10 +917,36 @@ static inline void __run_timers(struct tvec_base *base)
897 917
898 set_running_timer(base, timer); 918 set_running_timer(base, timer);
899 detach_timer(timer, 1); 919 detach_timer(timer, 1);
920
900 spin_unlock_irq(&base->lock); 921 spin_unlock_irq(&base->lock);
901 { 922 {
902 int preempt_count = preempt_count(); 923 int preempt_count = preempt_count();
924
925#ifdef CONFIG_LOCKDEP
926 /*
927 * It is permissible to free the timer from
928 * inside the function that is called from
 929 * it; lockdep needs to take this into
 930 * account too. To avoid bogus "held lock
931 * freed" warnings as well as problems when
932 * looking into timer->lockdep_map, make a
933 * copy and use that here.
934 */
935 struct lockdep_map lockdep_map =
936 timer->lockdep_map;
937#endif
938 /*
939 * Couple the lock chain with the lock chain at
940 * del_timer_sync() by acquiring the lock_map
941 * around the fn() call here and in
942 * del_timer_sync().
943 */
944 lock_map_acquire(&lockdep_map);
945
903 fn(data); 946 fn(data);
947
948 lock_map_release(&lockdep_map);
949
904 if (preempt_count != preempt_count()) { 950 if (preempt_count != preempt_count()) {
905 printk(KERN_ERR "huh, entered %p " 951 printk(KERN_ERR "huh, entered %p "
906 "with preempt_count %08x, exited" 952 "with preempt_count %08x, exited"
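The point of giving each timer a lockdep_map is that del_timer_sync() may have to wait for the handler, so from lockdep's perspective "waiting for this timer" behaves like taking a lock that the handler holds while it runs. The acquire/release pairs added above -- around fn() in __run_timers() and at the top of del_timer_sync() -- let lockdep connect the two lock chains. A hypothetical module-context sketch of the bug class this exposes (demo_lock, demo_timer and the function names are made up; the timer and spinlock APIs are the ordinary ones):

    #include <linux/timer.h>
    #include <linux/spinlock.h>
    #include <linux/jiffies.h>

    static DEFINE_SPINLOCK(demo_lock);
    static struct timer_list demo_timer;

    /* Runs in softirq context: establishes timer -> demo_lock. */
    static void demo_timer_fn(unsigned long data)
    {
            spin_lock(&demo_lock);
            /* ... touch the data the timer protects ... */
            spin_unlock(&demo_lock);
    }

    static void demo_start(void)
    {
            setup_timer(&demo_timer, demo_timer_fn, 0);
            mod_timer(&demo_timer, jiffies + HZ);
    }

    /*
     * Establishes demo_lock -> timer: deadlocks if the handler is
     * running on another CPU, spinning on demo_lock, while we wait
     * for it to finish.
     */
    static void demo_stop(void)
    {
            spin_lock_bh(&demo_lock);
            del_timer_sync(&demo_timer);
            spin_unlock_bh(&demo_lock);
    }

Without the annotation this only shows up when the race actually hits; with it, lockdep reports the circular dependency once both paths have run, whether or not they ever collided.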
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5c44ed49ca93..a3803ea8c27d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1479,6 +1479,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
1479 unsigned long did_some_progress; 1479 unsigned long did_some_progress;
1480 unsigned long pages_reclaimed = 0; 1480 unsigned long pages_reclaimed = 0;
1481 1481
1482 lockdep_trace_alloc(gfp_mask);
1483
1482 might_sleep_if(wait); 1484 might_sleep_if(wait);
1483 1485
1484 if (should_fail_alloc_page(gfp_mask, order)) 1486 if (should_fail_alloc_page(gfp_mask, order))
@@ -1578,12 +1580,15 @@ nofail_alloc:
1578 */ 1580 */
1579 cpuset_update_task_memory_state(); 1581 cpuset_update_task_memory_state();
1580 p->flags |= PF_MEMALLOC; 1582 p->flags |= PF_MEMALLOC;
1583
1584 lockdep_set_current_reclaim_state(gfp_mask);
1581 reclaim_state.reclaimed_slab = 0; 1585 reclaim_state.reclaimed_slab = 0;
1582 p->reclaim_state = &reclaim_state; 1586 p->reclaim_state = &reclaim_state;
1583 1587
1584 did_some_progress = try_to_free_pages(zonelist, order, gfp_mask); 1588 did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
1585 1589
1586 p->reclaim_state = NULL; 1590 p->reclaim_state = NULL;
1591 lockdep_clear_current_reclaim_state();
1587 p->flags &= ~PF_MEMALLOC; 1592 p->flags &= ~PF_MEMALLOC;
1588 1593
1589 cond_resched(); 1594 cond_resched();
diff --git a/mm/slab.c b/mm/slab.c
index 4d00855629c4..825c606f691d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3318,6 +3318,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3318 unsigned long save_flags; 3318 unsigned long save_flags;
3319 void *ptr; 3319 void *ptr;
3320 3320
3321 lockdep_trace_alloc(flags);
3322
3321 if (slab_should_failslab(cachep, flags)) 3323 if (slab_should_failslab(cachep, flags))
3322 return NULL; 3324 return NULL;
3323 3325
@@ -3394,6 +3396,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3394 unsigned long save_flags; 3396 unsigned long save_flags;
3395 void *objp; 3397 void *objp;
3396 3398
3399 lockdep_trace_alloc(flags);
3400
3397 if (slab_should_failslab(cachep, flags)) 3401 if (slab_should_failslab(cachep, flags))
3398 return NULL; 3402 return NULL;
3399 3403
diff --git a/mm/slob.c b/mm/slob.c
index 0bfa680a8981..7a3411524dac 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -475,6 +475,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
475 unsigned int *m; 475 unsigned int *m;
476 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); 476 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
477 477
478 lockdep_trace_alloc(gfp);
479
478 if (size < PAGE_SIZE - align) { 480 if (size < PAGE_SIZE - align) {
479 if (!size) 481 if (!size)
480 return ZERO_SIZE_PTR; 482 return ZERO_SIZE_PTR;
diff --git a/mm/slub.c b/mm/slub.c
index c65a4edafc33..c4ea9158c9fb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1590,6 +1590,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
1590 unsigned long flags; 1590 unsigned long flags;
1591 unsigned int objsize; 1591 unsigned int objsize;
1592 1592
1593 lockdep_trace_alloc(gfpflags);
1593 might_sleep_if(gfpflags & __GFP_WAIT); 1594 might_sleep_if(gfpflags & __GFP_WAIT);
1594 1595
1595 if (should_failslab(s->objsize, gfpflags)) 1596 if (should_failslab(s->objsize, gfpflags))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 56ddf41149eb..479e46719394 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1965,6 +1965,8 @@ static int kswapd(void *p)
1965 }; 1965 };
1966 node_to_cpumask_ptr(cpumask, pgdat->node_id); 1966 node_to_cpumask_ptr(cpumask, pgdat->node_id);
1967 1967
1968 lockdep_set_current_reclaim_state(GFP_KERNEL);
1969
1968 if (!cpumask_empty(cpumask)) 1970 if (!cpumask_empty(cpumask))
1969 set_cpus_allowed_ptr(tsk, cpumask); 1971 set_cpus_allowed_ptr(tsk, cpumask);
1970 current->reclaim_state = &reclaim_state; 1972 current->reclaim_state = &reclaim_state;
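The mm/ hunks are the other half of the new reclaim-state tracking: lockdep_trace_alloc() marks the allocation entry points in the page allocator and the slab allocators, while lockdep_set_current_reclaim_state()/lockdep_clear_current_reclaim_state() bracket direct reclaim in __alloc_pages_internal() and mark kswapd for its whole lifetime. Together they let lockdep treat "held across an allocation that may enter reclaim" and "taken from the reclaim path" as conflicting uses of the same lock class. A hypothetical sketch of the pattern that now gets reported -- fs_lock and both functions are invented, only the locking shape matters:

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/gfp.h>

    static DEFINE_MUTEX(fs_lock);

    /* Ordinary path: a reclaim-capable allocation under fs_lock. */
    static void *alloc_under_lock(void)
    {
            void *p;

            mutex_lock(&fs_lock);
            /*
             * GFP_KERNEL may enter direct reclaim, and direct reclaim
             * may end up in the path below, which wants fs_lock again.
             */
            p = kmalloc(128, GFP_KERNEL);
            mutex_unlock(&fs_lock);
            return p;
    }

    /* Reclaim-driven path, e.g. reached via ->writepage or a shrinker. */
    static void reclaim_side(void)
    {
            mutex_lock(&fs_lock);
            /* ... write back or drop cached objects ... */
            mutex_unlock(&fs_lock);
    }

The usual fix is to allocate with GFP_NOFS (or to move the allocation outside fs_lock); because the usage is tracked per lock class, the report fires even on runs where reclaim never actually recursed.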