 include/asm-i386/spinlock.h       |  7
 include/linux/spinlock.h          | 63
 include/linux/spinlock_api_smp.h  |  2
 include/linux/spinlock_api_up.h   |  1
 include/linux/spinlock_types.h    | 32
 include/linux/spinlock_types_up.h |  9
 include/linux/spinlock_up.h       |  1
 kernel/Makefile                   |  1
 kernel/sched.c                    | 10
 kernel/spinlock.c                 | 79
 lib/kernel_lock.c                 |  7
 lib/spinlock_debug.c              | 36
 net/ipv4/route.c                  |  3
 13 files changed, 217 insertions(+), 34 deletions(-)
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 7e29b51bcaa..87c40f83065 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -68,6 +68,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
         "=m" (lock->slock) : : "memory");
 }
 
+/*
+ * It is easier for the lock validator if interrupts are not re-enabled
+ * in the middle of a lock-acquire. This is a performance feature anyway
+ * so we turn it off:
+ */
+#ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
     alternative_smp(
@@ -75,6 +81,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
         __raw_spin_lock_string_up,
         "=m" (lock->slock) : "r" (flags) : "memory");
 }
+#endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
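Note on the hunk above: the optimized __raw_spin_lock_flags() variant may briefly re-enable interrupts while spinning on a contended lock, which the validator cannot model, so under CONFIG_PROVE_LOCKING it is compiled out and callers fall back to the plain acquire path. A rough pseudo-C sketch of the behavior being disabled (the helper names here are illustrative, not the real asm):

    /* Pseudo-C of the contended path in __raw_spin_lock_string_flags: */
    while (!try_acquire(lock)) {
        if (irqs_were_enabled(flags))
            local_irq_enable();     /* window that lockdep cannot track */
        while (lock_is_held(lock))
            cpu_relax();
        local_irq_disable();        /* re-disable before retrying */
    }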
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index ae23beef9cc..31473db92d3 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -82,14 +82,40 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 /*
  * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
  */
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 # include <asm/spinlock.h>
 #else
 # include <linux/spinlock_up.h>
 #endif
 
-#define spin_lock_init(lock)   do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
-#define rwlock_init(lock)      do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#ifdef CONFIG_DEBUG_SPINLOCK
+extern void __spin_lock_init(spinlock_t *lock, const char *name,
+                             struct lock_class_key *key);
+# define spin_lock_init(lock)                          \
+do {                                                   \
+    static struct lock_class_key __key;                \
+                                                       \
+    __spin_lock_init((lock), #lock, &__key);           \
+} while (0)
+
+#else
+# define spin_lock_init(lock)                          \
+    do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+extern void __rwlock_init(rwlock_t *lock, const char *name,
+                          struct lock_class_key *key);
+# define rwlock_init(lock)                             \
+do {                                                   \
+    static struct lock_class_key __key;                \
+                                                       \
+    __rwlock_init((lock), #lock, &__key);              \
+} while (0)
+#else
+# define rwlock_init(lock)                             \
+    do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#endif
 
 #define spin_is_locked(lock)   __raw_spin_is_locked(&(lock)->raw_lock)
 
@@ -113,7 +139,6 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 extern int _raw_spin_trylock(spinlock_t *lock);
 extern void _raw_spin_unlock(spinlock_t *lock);
-
 extern void _raw_read_lock(rwlock_t *lock);
 extern int _raw_read_trylock(rwlock_t *lock);
 extern void _raw_read_unlock(rwlock_t *lock);
@@ -121,17 +146,17 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 extern int _raw_write_trylock(rwlock_t *lock);
 extern void _raw_write_unlock(rwlock_t *lock);
 #else
-# define _raw_spin_unlock(lock)        __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_spin_trylock(lock)       __raw_spin_trylock(&(lock)->raw_lock)
 # define _raw_spin_lock(lock)          __raw_spin_lock(&(lock)->raw_lock)
 # define _raw_spin_lock_flags(lock, flags) \
        __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_spin_trylock(lock)       __raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_unlock(lock)        __raw_spin_unlock(&(lock)->raw_lock)
 # define _raw_read_lock(rwlock)        __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock)       __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock)      __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock)     __raw_write_unlock(&(rwlock)->raw_lock)
 # define _raw_read_trylock(rwlock)     __raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock)      __raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock)       __raw_write_lock(&(rwlock)->raw_lock)
 # define _raw_write_trylock(rwlock)    __raw_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock)     __raw_write_unlock(&(rwlock)->raw_lock)
 #endif
 
 #define read_can_lock(rwlock)          __raw_read_can_lock(&(rwlock)->raw_lock)
@@ -147,6 +172,13 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 #define write_trylock(lock)            __cond_lock(_write_trylock(lock))
 
 #define spin_lock(lock)                _spin_lock(lock)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+#else
+# define spin_lock_nested(lock, subclass) _spin_lock(lock)
+#endif
+
 #define write_lock(lock)               _write_lock(lock)
 #define read_lock(lock)                _read_lock(lock)
 
@@ -172,21 +204,18 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 /*
  * We inline the unlock functions in the nondebug case:
  */
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
+#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
+       !defined(CONFIG_SMP)
 # define spin_unlock(lock)             _spin_unlock(lock)
 # define read_unlock(lock)             _read_unlock(lock)
 # define write_unlock(lock)            _write_unlock(lock)
-#else
-# define spin_unlock(lock)     __raw_spin_unlock(&(lock)->raw_lock)
-# define read_unlock(lock)     __raw_read_unlock(&(lock)->raw_lock)
-# define write_unlock(lock)    __raw_write_unlock(&(lock)->raw_lock)
-#endif
-
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
 # define spin_unlock_irq(lock)         _spin_unlock_irq(lock)
 # define read_unlock_irq(lock)         _read_unlock_irq(lock)
 # define write_unlock_irq(lock)        _write_unlock_irq(lock)
 #else
+# define spin_unlock(lock)             __raw_spin_unlock(&(lock)->raw_lock)
+# define read_unlock(lock)             __raw_read_unlock(&(lock)->raw_lock)
+# define write_unlock(lock)            __raw_write_unlock(&(lock)->raw_lock)
 # define spin_unlock_irq(lock) \
     do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
 # define read_unlock_irq(lock) \
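The spin_lock_nested() API introduced above lets code that legitimately nests two locks of the same lock class tell the validator which nesting level each acquisition belongs to, instead of being flagged as a recursive self-deadlock. A minimal usage sketch (double_lock() and its arguments are hypothetical, not part of this patch):

    /*
     * Hypothetical caller: both locks belong to the same lock class,
     * so the inner one is taken with a distinct subclass.
     */
    static void double_lock(spinlock_t *outer, spinlock_t *inner)
    {
        spin_lock(outer);               /* default subclass 0 */
        spin_lock_nested(inner, 1);     /* subclass 1: one level deeper */
        /* ... work on both objects ... */
        spin_unlock(inner);
        spin_unlock(outer);
    }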
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 78e6989ffb5..b2c4f829946 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -20,6 +20,8 @@ int in_lock_functions(unsigned long addr);
 #define assert_spin_locked(x)  BUG_ON(!spin_is_locked(x))
 
 void __lockfunc _spin_lock(spinlock_t *lock)           __acquires(spinlock_t);
+void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+                                                       __acquires(spinlock_t);
 void __lockfunc _read_lock(rwlock_t *lock)             __acquires(rwlock_t);
 void __lockfunc _write_lock(rwlock_t *lock)            __acquires(rwlock_t);
 void __lockfunc _spin_lock_bh(spinlock_t *lock)        __acquires(spinlock_t);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index cd81cee566f..67faa044c5f 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -49,6 +49,7 @@
     do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
 
 #define _spin_lock(lock)                       __LOCK(lock)
+#define _spin_lock_nested(lock, subclass)      __LOCK(lock)
 #define _read_lock(lock)                       __LOCK(lock)
 #define _write_lock(lock)                      __LOCK(lock)
 #define _spin_lock_bh(lock)                    __LOCK_BH(lock)
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index f5d4ed7bc78..dc5fb69e4de 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,6 +9,8 @@
  * Released under the General Public License (GPL).
  */
 
+#include <linux/lockdep.h>
+
 #if defined(CONFIG_SMP)
 # include <asm/spinlock_types.h>
 #else
@@ -24,6 +26,9 @@ typedef struct {
     unsigned int magic, owner_cpu;
     void *owner;
 #endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+    struct lockdep_map dep_map;
+#endif
 } spinlock_t;
 
 #define SPINLOCK_MAGIC         0xdead4ead
@@ -37,28 +42,47 @@ typedef struct {
     unsigned int magic, owner_cpu;
     void *owner;
 #endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+    struct lockdep_map dep_map;
+#endif
 } rwlock_t;
 
 #define RWLOCK_MAGIC           0xdeaf1eed
 
 #define SPINLOCK_OWNER_INIT    ((void *)-1L)
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname)   .dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname)     .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
 #ifdef CONFIG_DEBUG_SPINLOCK
 # define __SPIN_LOCK_UNLOCKED(lockname)                        \
    (spinlock_t) {  .raw_lock = __RAW_SPIN_LOCK_UNLOCKED,       \
                    .magic = SPINLOCK_MAGIC,                    \
                    .owner = SPINLOCK_OWNER_INIT,               \
-                   .owner_cpu = -1 }
+                   .owner_cpu = -1,                            \
+                   SPIN_DEP_MAP_INIT(lockname) }
 #define __RW_LOCK_UNLOCKED(lockname)                           \
    (rwlock_t) {    .raw_lock = __RAW_RW_LOCK_UNLOCKED,         \
                    .magic = RWLOCK_MAGIC,                      \
                    .owner = SPINLOCK_OWNER_INIT,               \
-                   .owner_cpu = -1 }
+                   .owner_cpu = -1,                            \
+                   RW_DEP_MAP_INIT(lockname) }
 #else
 # define __SPIN_LOCK_UNLOCKED(lockname) \
-   (spinlock_t) {  .raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
+   (spinlock_t) {  .raw_lock = __RAW_SPIN_LOCK_UNLOCKED,       \
+                   SPIN_DEP_MAP_INIT(lockname) }
 #define __RW_LOCK_UNLOCKED(lockname) \
-   (rwlock_t) {    .raw_lock = __RAW_RW_LOCK_UNLOCKED }
+   (rwlock_t) {    .raw_lock = __RAW_RW_LOCK_UNLOCKED,         \
+                   RW_DEP_MAP_INIT(lockname) }
 #endif
 
 #define SPIN_LOCK_UNLOCKED     __SPIN_LOCK_UNLOCKED(old_style_spin_init)
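With the dep_map field and the *_DEP_MAP_INIT() helpers in place, a statically initialized lock gets its lockdep class name from the lockname argument of the initializer macro. For example (my_driver_lock is an illustrative name, not from this patch):

    /* Static initialization: the embedded dep_map is named after the lock. */
    static spinlock_t my_driver_lock   = __SPIN_LOCK_UNLOCKED(my_driver_lock);
    static rwlock_t   my_driver_rwlock = __RW_LOCK_UNLOCKED(my_driver_rwlock);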
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198..27644af20b7 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -12,10 +12,14 @@
  * Released under the General Public License (GPL).
  */
 
-#ifdef CONFIG_DEBUG_SPINLOCK
+#if defined(CONFIG_DEBUG_SPINLOCK) || \
+       defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 typedef struct {
     volatile unsigned int slock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+    struct lockdep_map dep_map;
+#endif
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 1 }
@@ -30,6 +34,9 @@ typedef struct { } raw_spinlock_t;
 
 typedef struct {
     /* no debug version on UP */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+    struct lockdep_map dep_map;
+#endif
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED { }
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 31accf2f0b1..ea54c4c9a4e 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,7 +18,6 @@
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-
 #define __raw_spin_is_locked(x)        ((x)->slock == 0)
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
diff --git a/kernel/Makefile b/kernel/Makefile
index df6ef326369..47dbcd570cd 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KALLSYMS) += kallsyms.o
diff --git a/kernel/sched.c b/kernel/sched.c
index 91182996653..ae4db0185bb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -308,6 +308,13 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
     /* this is a valid case when another task releases the spinlock */
     rq->lock.owner = current;
 #endif
+    /*
+     * If we are tracking spinlock dependencies then we have to
+     * fix up the runqueue lock - which gets 'carried over' from
+     * prev into current:
+     */
+    spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
     spin_unlock_irq(&rq->lock);
 }
 
@@ -1778,6 +1785,7 @@ task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
         WARN_ON(rq->prev_mm);
         rq->prev_mm = oldmm;
     }
+    spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
     /* Here we just switch the register state and the stack. */
     switch_to(prev, next, prev);
@@ -4384,6 +4392,7 @@ asmlinkage long sys_sched_yield(void)
      * no need to preempt or enable interrupts:
      */
     __release(rq->lock);
+    spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
     _raw_spin_unlock(&rq->lock);
     preempt_enable_no_resched();
 
@@ -4447,6 +4456,7 @@ int cond_resched_lock(spinlock_t *lock)
         spin_lock(lock);
     }
     if (need_resched() && __resched_legal()) {
+        spin_release(&lock->dep_map, 1, _THIS_IP_);
         _raw_spin_unlock(lock);
         preempt_enable_no_resched();
         __cond_resched();
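Taken together, the sched.c hunks implement a lockdep ownership handoff: the runqueue lock is acquired by prev before the context switch but unlocked by current after it, so context_switch() releases the dep_map on behalf of prev and finish_lock_switch() re-acquires it on behalf of current before the real unlock. Condensed from the hunks above (heavily simplified, not compilable scheduler code):

    /* lockdep's view of the runqueue lock across a context switch: */
    context_switch(rq, prev, next)
    {
        /* ... */
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);  /* prev lets go */
        switch_to(prev, next, prev);                    /* stacks switch */
    }

    finish_lock_switch(rq, prev)    /* now running as 'current' */
    {
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); /* take over */
        spin_unlock_irq(&rq->lock);                       /* real unlock */
    }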
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index b31e54eadf5..bfd6ad9c033 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -13,6 +13,7 @@
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 #include <linux/module.h>
 
 /*
@@ -29,8 +30,10 @@ EXPORT_SYMBOL(generic__raw_read_trylock);
 int __lockfunc _spin_trylock(spinlock_t *lock)
 {
     preempt_disable();
-    if (_raw_spin_trylock(lock))
+    if (_raw_spin_trylock(lock)) {
+        spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
         return 1;
+    }
 
     preempt_enable();
     return 0;
@@ -40,8 +43,10 @@ EXPORT_SYMBOL(_spin_trylock);
 int __lockfunc _read_trylock(rwlock_t *lock)
 {
     preempt_disable();
-    if (_raw_read_trylock(lock))
+    if (_raw_read_trylock(lock)) {
+        rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
         return 1;
+    }
 
     preempt_enable();
     return 0;
@@ -51,19 +56,28 @@ EXPORT_SYMBOL(_read_trylock);
 int __lockfunc _write_trylock(rwlock_t *lock)
 {
     preempt_disable();
-    if (_raw_write_trylock(lock))
+    if (_raw_write_trylock(lock)) {
+        rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
         return 1;
+    }
 
     preempt_enable();
     return 0;
 }
 EXPORT_SYMBOL(_write_trylock);
 
-#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
+       defined(CONFIG_PROVE_LOCKING)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {
     preempt_disable();
+    rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
     _raw_read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock);
@@ -74,7 +88,17 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 
     local_irq_save(flags);
     preempt_disable();
+    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+    /*
+     * On lockdep we dont want the hand-coded irq-enable of
+     * _raw_spin_lock_flags() code, because lockdep assumes
+     * that interrupts are not re-enabled during lock-acquire:
+     */
+#ifdef CONFIG_PROVE_LOCKING
+    _raw_spin_lock(lock);
+#else
     _raw_spin_lock_flags(lock, &flags);
+#endif
     return flags;
 }
 EXPORT_SYMBOL(_spin_lock_irqsave);
@@ -83,6 +107,7 @@ void __lockfunc _spin_lock_irq(spinlock_t *lock)
 {
     local_irq_disable();
     preempt_disable();
+    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
     _raw_spin_lock(lock);
 }
 EXPORT_SYMBOL(_spin_lock_irq);
@@ -91,6 +116,7 @@ void __lockfunc _spin_lock_bh(spinlock_t *lock)
 {
     local_bh_disable();
     preempt_disable();
+    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
     _raw_spin_lock(lock);
 }
 EXPORT_SYMBOL(_spin_lock_bh);
@@ -101,6 +127,7 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
 
     local_irq_save(flags);
     preempt_disable();
+    rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
     _raw_read_lock(lock);
     return flags;
 }
@@ -110,6 +137,7 @@ void __lockfunc _read_lock_irq(rwlock_t *lock)
 {
     local_irq_disable();
     preempt_disable();
+    rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
     _raw_read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock_irq);
@@ -118,6 +146,7 @@ void __lockfunc _read_lock_bh(rwlock_t *lock)
 {
     local_bh_disable();
     preempt_disable();
+    rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
     _raw_read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock_bh);
@@ -128,6 +157,7 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
 
     local_irq_save(flags);
     preempt_disable();
+    rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
     _raw_write_lock(lock);
     return flags;
 }
@@ -137,6 +167,7 @@ void __lockfunc _write_lock_irq(rwlock_t *lock)
 {
     local_irq_disable();
     preempt_disable();
+    rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
     _raw_write_lock(lock);
 }
 EXPORT_SYMBOL(_write_lock_irq);
@@ -145,6 +176,7 @@ void __lockfunc _write_lock_bh(rwlock_t *lock)
 {
     local_bh_disable();
     preempt_disable();
+    rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
     _raw_write_lock(lock);
 }
 EXPORT_SYMBOL(_write_lock_bh);
@@ -152,6 +184,7 @@ EXPORT_SYMBOL(_write_lock_bh);
 void __lockfunc _spin_lock(spinlock_t *lock)
 {
     preempt_disable();
+    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
     _raw_spin_lock(lock);
 }
 
@@ -160,6 +193,7 @@ EXPORT_SYMBOL(_spin_lock);
 void __lockfunc _write_lock(rwlock_t *lock)
 {
     preempt_disable();
+    rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
     _raw_write_lock(lock);
 }
 
@@ -255,8 +289,22 @@ BUILD_LOCK_OPS(write, rwlock);
 
 #endif /* CONFIG_PREEMPT */
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+{
+    preempt_disable();
+    spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+    _raw_spin_lock(lock);
+}
+
+EXPORT_SYMBOL(_spin_lock_nested);
+
+#endif
+
 void __lockfunc _spin_unlock(spinlock_t *lock)
 {
+    spin_release(&lock->dep_map, 1, _RET_IP_);
     _raw_spin_unlock(lock);
     preempt_enable();
 }
@@ -264,6 +312,7 @@ EXPORT_SYMBOL(_spin_unlock);
 
 void __lockfunc _write_unlock(rwlock_t *lock)
 {
+    rwlock_release(&lock->dep_map, 1, _RET_IP_);
     _raw_write_unlock(lock);
     preempt_enable();
 }
@@ -271,6 +320,7 @@ EXPORT_SYMBOL(_write_unlock);
 
 void __lockfunc _read_unlock(rwlock_t *lock)
 {
+    rwlock_release(&lock->dep_map, 1, _RET_IP_);
     _raw_read_unlock(lock);
     preempt_enable();
 }
@@ -278,6 +328,7 @@ EXPORT_SYMBOL(_read_unlock);
 
 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
+    spin_release(&lock->dep_map, 1, _RET_IP_);
     _raw_spin_unlock(lock);
     local_irq_restore(flags);
     preempt_enable();
@@ -286,6 +337,7 @@ EXPORT_SYMBOL(_spin_unlock_irqrestore);
 
 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
 {
+    spin_release(&lock->dep_map, 1, _RET_IP_);
     _raw_spin_unlock(lock);
     local_irq_enable();
     preempt_enable();
@@ -294,14 +346,16 @@ EXPORT_SYMBOL(_spin_unlock_irq);
 
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
+    spin_release(&lock->dep_map, 1, _RET_IP_);
     _raw_spin_unlock(lock);
     preempt_enable_no_resched();
-    local_bh_enable();
+    local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_spin_unlock_bh);
 
 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
+    rwlock_release(&lock->dep_map, 1, _RET_IP_);
     _raw_read_unlock(lock);
     local_irq_restore(flags);
     preempt_enable();
@@ -310,6 +364,7 @@ EXPORT_SYMBOL(_read_unlock_irqrestore);
 
 void __lockfunc _read_unlock_irq(rwlock_t *lock)
 {
+    rwlock_release(&lock->dep_map, 1, _RET_IP_);
     _raw_read_unlock(lock);
     local_irq_enable();
     preempt_enable();
@@ -318,14 +373,16 @@ EXPORT_SYMBOL(_read_unlock_irq);
 
 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
+    rwlock_release(&lock->dep_map, 1, _RET_IP_);
     _raw_read_unlock(lock);
     preempt_enable_no_resched();
-    local_bh_enable();
+    local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_read_unlock_bh);
 
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
+    rwlock_release(&lock->dep_map, 1, _RET_IP_);
     _raw_write_unlock(lock);
     local_irq_restore(flags);
     preempt_enable();
@@ -334,6 +391,7 @@ EXPORT_SYMBOL(_write_unlock_irqrestore);
 
 void __lockfunc _write_unlock_irq(rwlock_t *lock)
 {
+    rwlock_release(&lock->dep_map, 1, _RET_IP_);
     _raw_write_unlock(lock);
     local_irq_enable();
     preempt_enable();
@@ -342,9 +400,10 @@ EXPORT_SYMBOL(_write_unlock_irq);
 
 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
+    rwlock_release(&lock->dep_map, 1, _RET_IP_);
     _raw_write_unlock(lock);
     preempt_enable_no_resched();
-    local_bh_enable();
+    local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_write_unlock_bh);
 
@@ -352,11 +411,13 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 {
     local_bh_disable();
     preempt_disable();
-    if (_raw_spin_trylock(lock))
+    if (_raw_spin_trylock(lock)) {
+        spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
         return 1;
+    }
 
     preempt_enable_no_resched();
-    local_bh_enable();
+    local_bh_enable_ip((unsigned long)__builtin_return_address(0));
     return 0;
 }
 EXPORT_SYMBOL(_spin_trylock_bh);
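The annotation discipline throughout kernel/spinlock.c is uniform: the acquire-side hook runs after preemption (and, where applicable, irqs or bhs) is disabled but before the raw lock is taken, the arguments being (dep_map, subclass, trylock, caller-ip) with trylock set to 1 only when a trylock actually succeeded, and the release-side hook runs just before the raw unlock. A wrapper following the same ordering might look like this (my_trylock_op() is illustrative, not part of the patch):

    /* Illustrative trylock wrapper using the same annotation order: */
    int my_trylock_op(spinlock_t *lock)
    {
        preempt_disable();
        if (_raw_spin_trylock(lock)) {
            /* args: dep_map, subclass, trylock, caller ip */
            spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
            return 1;
        }
        preempt_enable();
        return 0;
    }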
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index e713e86811a..e0fdfddb406 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -177,7 +177,12 @@ static inline void __lock_kernel(void)
 
 static inline void __unlock_kernel(void)
 {
-    spin_unlock(&kernel_flag);
+    /*
+     * the BKL is not covered by lockdep, so we open-code the
+     * unlocking sequence (and thus avoid the dep-chain ops):
+     */
+    _raw_spin_unlock(&kernel_flag);
+    preempt_enable();
 }
 
 /*
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 3de2ccf48ac..3d9c4dc965e 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -12,6 +12,42 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 
+void __spin_lock_init(spinlock_t *lock, const char *name,
+                      struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+    /*
+     * Make sure we are not reinitializing a held lock:
+     */
+    debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+    lockdep_init_map(&lock->dep_map, name, key);
+#endif
+    lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+    lock->magic = SPINLOCK_MAGIC;
+    lock->owner = SPINLOCK_OWNER_INIT;
+    lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__spin_lock_init);
+
+void __rwlock_init(rwlock_t *lock, const char *name,
+                   struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+    /*
+     * Make sure we are not reinitializing a held lock:
+     */
+    debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+    lockdep_init_map(&lock->dep_map, name, key);
+#endif
+    lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+    lock->magic = RWLOCK_MAGIC;
+    lock->owner = SPINLOCK_OWNER_INIT;
+    lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__rwlock_init);
+
 static void spin_bug(spinlock_t *lock, const char *msg)
 {
     struct task_struct *owner = NULL;
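Because the new spin_lock_init() expands to a block containing a static struct lock_class_key (see the spinlock.h hunk earlier), every source location that initializes a lock becomes its own lockdep class, and __spin_lock_init() registers that key and name at run time. A usage sketch (struct my_obj and my_obj_alloc() are illustrative only):

    struct my_obj {
        spinlock_t lock;
    };

    static struct my_obj *my_obj_alloc(gfp_t gfp)
    {
        struct my_obj *obj = kmalloc(sizeof(*obj), gfp);

        if (obj)
            spin_lock_init(&obj->lock); /* one lock class per init site */
        return obj;
    }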
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index da44fabf4dc..283a72247e5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -205,7 +205,8 @@ __u8 ip_tos2prio[16] = {
 struct rt_hash_bucket {
     struct rtable   *chain;
 };
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
+       defined(CONFIG_PROVE_LOCKING)
 /*
  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
  * The size of this table is a power of two and depends on the number of CPUS.