author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-15 13:40:41 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-15 13:40:41 -0400
commit    419217cb1d0266f62cbea6cdc6b1d1324350bc34 (patch)
tree      01f80c026cc94dfc13b3a16d2fce3ba41c69d54f /kernel
parent    4937ce87959629d31e9b09cf5bdf1e12a305c805 (diff)
parent    14358e6ddaed27499d7d366b3e65c3e46b39e1c4 (diff)
Merge branch 'v2.6.24-lockdep' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep
* 'v2.6.24-lockdep' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep:
  lockdep: annotate dir vs file i_mutex
  lockdep: per filesystem inode lock class
  lockdep: annotate kprobes irq fiddling
  lockdep: annotate rcu_read_{,un}lock{,_bh}
  lockdep: annotate journal_start()
  lockdep: s390: connect the sysexit hook
  lockdep: x86_64: connect the sysexit hook
  lockdep: i386: connect the sysexit hook
  lockdep: syscall exit check
  lockdep: fixup mutex annotations
  lockdep: fix mismatched lockdep_depth/curr_chain_hash
  lockdep: Avoid /proc/lockdep & lock_stat infinite output
  lockdep: maintainers
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/lockdep.c       26
-rw-r--r--  kernel/lockdep_proc.c  61
-rw-r--r--  kernel/mutex.c         35
-rw-r--r--  kernel/rcupdate.c       8
4 files changed, 91 insertions(+), 39 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 734da579ad13..a6f1ee9c92d9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1521,7 +1521,7 @@ cache_hit:
 }
 
 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
-		struct held_lock *hlock, int chain_head)
+		struct held_lock *hlock, int chain_head, u64 chain_key)
 {
 	/*
 	 * Trylock needs to maintain the stack of held locks, but it
@@ -1534,7 +1534,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 	 * graph_lock for us)
 	 */
 	if (!hlock->trylock && (hlock->check == 2) &&
-	    lookup_chain_cache(curr->curr_chain_key, hlock->class)) {
+	    lookup_chain_cache(chain_key, hlock->class)) {
 		/*
 		 * Check whether last held lock:
 		 *
@@ -1576,7 +1576,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 #else
 static inline int validate_chain(struct task_struct *curr,
 		struct lockdep_map *lock, struct held_lock *hlock,
-		int chain_head)
+		int chain_head, u64 chain_key)
 {
 	return 1;
 }
@@ -2450,11 +2450,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		chain_head = 1;
 	}
 	chain_key = iterate_chain_key(chain_key, id);
-	curr->curr_chain_key = chain_key;
 
-	if (!validate_chain(curr, lock, hlock, chain_head))
+	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
 		return 0;
 
+	curr->curr_chain_key = chain_key;
 	curr->lockdep_depth++;
 	check_chain_key(curr);
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -3199,3 +3199,19 @@ void debug_show_held_locks(struct task_struct *task)
 }
 
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
+
+void lockdep_sys_exit(void)
+{
+	struct task_struct *curr = current;
+
+	if (unlikely(curr->lockdep_depth)) {
+		if (!debug_locks_off())
+			return;
+		printk("\n================================================\n");
+		printk(  "[ BUG: lock held when returning to user space! ]\n");
+		printk(  "------------------------------------------------\n");
+		printk("%s/%d is leaving the kernel with locks still held!\n",
+				curr->comm, curr->pid);
+		lockdep_print_held_locks(curr);
+	}
+}
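
The new lockdep_sys_exit() above implements the "syscall exit check" patch; the i386, x86_64, and s390 patches in this merge then call it from their syscall-return paths so a task cannot go back to user space while still holding locks. The real callers are wired into per-architecture assembly; as a hedged C-level illustration only (arch_syscall_return_to_user() is a hypothetical name):

/*
 * Hedged sketch: the actual hooks live in arch syscall-exit assembly
 * and are compiled in only under lock debugging configs.
 */
#include <linux/lockdep.h>

static inline void arch_syscall_return_to_user(void)
{
	/* Warns (and turns lock debugging off) if any lock is still held. */
	lockdep_sys_exit();
}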
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index c851b2dcc685..8a135bd163c2 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -25,28 +25,38 @@
 
 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct lock_class *class = v;
+	struct lock_class *class;
 
 	(*pos)++;
 
-	if (class->lock_entry.next != &all_lock_classes)
-		class = list_entry(class->lock_entry.next, struct lock_class,
-				   lock_entry);
-	else
-		class = NULL;
-	m->private = class;
+	if (v == SEQ_START_TOKEN)
+		class = m->private;
+	else {
+		class = v;
+
+		if (class->lock_entry.next != &all_lock_classes)
+			class = list_entry(class->lock_entry.next,
+					   struct lock_class, lock_entry);
+		else
+			class = NULL;
+	}
 
 	return class;
 }
 
 static void *l_start(struct seq_file *m, loff_t *pos)
 {
-	struct lock_class *class = m->private;
+	struct lock_class *class;
+	loff_t i = 0;
 
-	if (&class->lock_entry == all_lock_classes.next)
-		seq_printf(m, "all lock classes:\n");
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
 
-	return class;
+	list_for_each_entry(class, &all_lock_classes, lock_entry) {
+		if (++i == *pos)
+			return class;
+	}
+	return NULL;
 }
 
 static void l_stop(struct seq_file *m, void *v)
@@ -101,10 +111,15 @@ static void print_name(struct seq_file *m, struct lock_class *class)
 static int l_show(struct seq_file *m, void *v)
 {
 	unsigned long nr_forward_deps, nr_backward_deps;
-	struct lock_class *class = m->private;
+	struct lock_class *class = v;
 	struct lock_list *entry;
 	char c1, c2, c3, c4;
 
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(m, "all lock classes:\n");
+		return 0;
+	}
+
 	seq_printf(m, "%p", class->key);
 #ifdef CONFIG_DEBUG_LOCKDEP
 	seq_printf(m, " OPS:%8ld", class->ops);
@@ -523,10 +538,11 @@ static void *ls_start(struct seq_file *m, loff_t *pos)
 {
 	struct lock_stat_seq *data = m->private;
 
-	if (data->iter == data->stats)
-		seq_header(m);
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
 
-	if (data->iter == data->iter_end)
+	data->iter = data->stats + *pos;
+	if (data->iter >= data->iter_end)
 		data->iter = NULL;
 
 	return data->iter;
@@ -538,8 +554,13 @@ static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
 
 	(*pos)++;
 
-	data->iter = v;
-	data->iter++;
+	if (v == SEQ_START_TOKEN)
+		data->iter = data->stats;
+	else {
+		data->iter = v;
+		data->iter++;
+	}
+
 	if (data->iter == data->iter_end)
 		data->iter = NULL;
 
@@ -552,9 +573,11 @@ static void ls_stop(struct seq_file *m, void *v)
 
 static int ls_show(struct seq_file *m, void *v)
 {
-	struct lock_stat_seq *data = m->private;
+	if (v == SEQ_START_TOKEN)
+		seq_header(m);
+	else
+		seq_stats(m, v);
 
-	seq_stats(m, data->iter);
 	return 0;
 }
 
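
These lockdep_proc.c changes (the "Avoid /proc/lockdep & lock_stat infinite output" fix) stop caching the iterator position in m->private and derive it from *pos instead, using the standard seq_file SEQ_START_TOKEN idiom: ->show() prints the header for the token, and ->start() can restart at any offset. A minimal sketch of that idiom, with a hypothetical find_nth_item() standing in for the real list walk:

/*
 * Hedged sketch of the SEQ_START_TOKEN idiom adopted above;
 * find_nth_item() is a hypothetical stand-in for the real lookup.
 */
#include <linux/seq_file.h>

static void *find_nth_item(loff_t n)
{
	return NULL;	/* stub: walk the real list, return the n-th entry */
}

static void *my_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;	/* first ->show() emits the header */
	return find_nth_item(*pos);	/* restartable: index from *pos, not
					 * from state cached in m->private */
}

static void *my_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return find_nth_item(*pos);	/* token or item, *pos decides */
}

static int my_show(struct seq_file *m, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(m, "header line\n");	/* header, no item yet */
	else
		seq_printf(m, "item at %p\n", v);
	return 0;
}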
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 691b86564dd9..d7fe50cc556f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -51,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 
 EXPORT_SYMBOL(__mutex_init);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
@@ -92,6 +93,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 }
 
 EXPORT_SYMBOL(mutex_lock);
+#endif
 
 static void fastcall noinline __sched
 __mutex_unlock_slowpath(atomic_t *lock_count);
@@ -122,7 +124,8 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+		unsigned long ip)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
@@ -132,7 +135,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
@@ -143,7 +146,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	if (old_val == 1)
 		goto done;
 
-	lock_contended(&lock->dep_map, _RET_IP_);
+	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
 		/*
@@ -166,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 		if (unlikely(state == TASK_INTERRUPTIBLE &&
 					signal_pending(task))) {
 			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-			mutex_release(&lock->dep_map, 1, _RET_IP_);
+			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
@@ -197,20 +200,12 @@ done:
 	return 0;
 }
 
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
-{
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
-
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -219,7 +214,7 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -271,6 +266,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
 	__mutex_unlock_common_slowpath(lock_count, 1);
 }
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
@@ -298,13 +294,22 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+}
+
 static int fastcall noinline __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
 }
+#endif
 
 /*
  * Spinlock based trylock, we take the spinlock and check whether we
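
Together these mutex.c hunks carry the "fixup mutex annotations" patch: __mutex_lock_common() now receives the acquisition IP from its outermost API entry point instead of sampling _RET_IP_ itself, so lockdep contention and release events are attributed to the code that actually took the mutex rather than to a slowpath helper; the #ifndef CONFIG_DEBUG_LOCK_ALLOC guards drop the plain fastpath entry points when the _nested debug API replaces them. A hedged sketch of the caller-IP threading, with hypothetical names throughout:

/*
 * Hedged sketch only; my_mutex, my_lock(), my_lock_common() and
 * record_acquire_site() are illustration names, not kernel APIs.
 */
struct my_mutex { void *acquire_site; };

static void record_acquire_site(struct my_mutex *lock, unsigned long ip)
{
	lock->acquire_site = (void *)ip;	/* stand-in for lockdep bookkeeping */
}

static noinline void my_lock_common(struct my_mutex *lock, unsigned long ip)
{
	/*
	 * If this helper sampled _RET_IP_ itself, every report would blame
	 * the helper; instead it records the IP handed down from the API
	 * boundary where the user really called in.
	 */
	record_acquire_site(lock, ip);
}

void my_lock(struct my_mutex *lock)
{
	my_lock_common(lock, _RET_IP_);	/* capture the caller's return address */
}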
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2c2dd8410dc4..130214f3d229 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -49,6 +49,14 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
 /* Definition for rcupdate control block. */
 static struct rcu_ctrlblk rcu_ctrlblk = {
 	.cur = -300,
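
This rcupdate.c hunk provides the global map behind the "annotate rcu_read_{,un}lock{,_bh}" patch: all RCU read-side critical sections share a single lockdep class keyed by rcu_lock_key, which the rcu_read_lock()/rcu_read_unlock() wrappers mark acquired and released as a recursive read lock. A hedged sketch of how such a map is consumed; the wrapper names are hypothetical and the exact lock_acquire() argument values are an assumption for illustration:

/*
 * Hedged sketch: my_rcu_read_lock()/unlock() show one plausible way
 * wrappers can feed rcu_lock_map to lockdep.
 */
#include <linux/lockdep.h>

extern struct lockdep_map rcu_lock_map;		/* defined above */

static inline void my_rcu_read_lock(void)
{
	preempt_disable();	/* classic (non-preemptible) RCU entry */
	lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_);
				/* read=2: recursive read acquisition */
}

static inline void my_rcu_read_unlock(void)
{
	lock_release(&rcu_lock_map, 1, _THIS_IP_);
	preempt_enable();
}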