author	Waiman Long <longman@redhat.com>	2017-02-08 14:46:48 -0500
committer	Ingo Molnar <mingo@kernel.org>	2017-02-10 03:09:49 -0500
commit	bc88c10d7e6900916f5e1ba3829d66a9de92b633 (patch)
tree	1b9c1a97e1fa7daab8461f9660f78bd416b528c5 /kernel/locking/spinlock_debug.c
parent	f9af456a61ecfbef8233c5046a9e347c9b98ba05 (diff)
locking/spinlock/debug: Remove spinlock lockup detection code
The current spinlock lockup detection code can sometimes produce false
positives because of the unfairness of the locking algorithm itself. So
the lockup detection code is now removed. Instead, we are relying on
the NMI watchdog to detect potential lockup. We won't have lockup
detection if the watchdog isn't running.

The commented-out read-write lock lockup detection code is also
removed.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1486583208-11038-1-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
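[Editorial note] Since the message above makes lockup detection depend on the NMI watchdog being active, a kernel with this patch reports nothing on a stuck spinlock if the hardlockup detector is off. A minimal userspace sketch, not part of this commit, that checks this; it assumes a kernel built with CONFIG_LOCKUP_DETECTOR, which exposes the standard /proc/sys/kernel/nmi_watchdog sysctl (0 = off, 1 = on):

/*
 * Hypothetical helper, illustrative only: report whether the NMI
 * watchdog (hardlockup detector) is enabled, since the patch above
 * delegates spinlock lockup detection to it.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/nmi_watchdog", "r");
	int enabled;

	if (!f) {
		perror("nmi_watchdog sysctl not available");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%d", &enabled) != 1)
		enabled = -1;	/* unreadable: treat as unknown */
	fclose(f);

	printf("NMI watchdog: %s\n",
	       enabled == 1 ? "enabled" :
	       enabled == 0 ? "disabled (no spinlock lockup detection)" :
	       "unknown");
	return enabled == 1 ? EXIT_SUCCESS : EXIT_FAILURE;
}

Writing 1 to the same sysctl as root re-enables the watchdog if it has been turned off.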
Diffstat (limited to 'kernel/locking/spinlock_debug.c')
-rw-r--r--	kernel/locking/spinlock_debug.c	86
1 file changed, 5 insertions(+), 81 deletions(-)
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 0374a596cffa..9aa0fccd5d43 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -103,38 +103,14 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
 	lock->owner_cpu = -1;
 }
 
-static void __spin_lock_debug(raw_spinlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-
-	for (i = 0; i < loops; i++) {
-		if (arch_spin_trylock(&lock->raw_lock))
-			return;
-		__delay(1);
-	}
-	/* lockup suspected: */
-	spin_dump(lock, "lockup suspected");
-#ifdef CONFIG_SMP
-	trigger_all_cpu_backtrace();
-#endif
-
-	/*
-	 * The trylock above was causing a livelock. Give the lower level arch
-	 * specific lock code a chance to acquire the lock. We have already
-	 * printed a warning/backtrace at this point. The non-debug arch
-	 * specific code might actually succeed in acquiring the lock. If it is
-	 * not successful, the end-result is the same - there is no forward
-	 * progress.
-	 */
-	arch_spin_lock(&lock->raw_lock);
-}
-
+/*
+ * We are now relying on the NMI watchdog to detect lockup instead of doing
+ * the detection here with an unfair lock which can cause problem of its own.
+ */
 void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
-	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
-		__spin_lock_debug(lock);
+	arch_spin_lock(&lock->raw_lock);
 	debug_spin_lock_after(lock);
 }
 
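[Editorial note] After this hunk, do_raw_spin_lock() blocks unconditionally in arch_spin_lock() instead of spinning in the removed trylock/__delay() loop. A CPU stuck here with interrupts disabled stops servicing its timer interrupt, which is exactly the condition the NMI-based hardlockup detector checks for, so it reports the lockup after watchdog_thresh seconds (10 by default). A hypothetical self-deadlock test module, illustrative only and not from this commit, showing the kind of bug that is now caught by the watchdog rather than by spin_dump(); never load it on a machine you care about:

/*
 * Hypothetical test module: provokes the lockup that the removed
 * __spin_lock_debug() loop used to report.  With this patch applied,
 * the second acquisition below spins forever inside arch_spin_lock()
 * with IRQs off, and the hardlockup detector's NMI handler reports
 * "Watchdog detected hard LOCKUP" instead.
 */
#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(test_lock);

static int __init lockup_test_init(void)
{
	unsigned long flags;

	/* First acquisition succeeds and disables interrupts locally. */
	raw_spin_lock_irqsave(&test_lock, flags);

	/* Second acquisition self-deadlocks: the CPU spins with IRQs off. */
	raw_spin_lock(&test_lock);

	return 0;	/* never reached */
}
module_init(lockup_test_init);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hypothetical spinlock lockup test (illustrative)");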
@@ -172,32 +148,6 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
 
 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
 
-#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
-static void __read_lock_debug(rwlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
-
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_read_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
-					"%s/%d, %p\n",
-				raw_smp_processor_id(), current->comm,
-				current->pid, lock);
-			dump_stack();
-		}
-	}
-}
-#endif
-
 void do_raw_read_lock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
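[Editorial note] An aside on the bound used by the code removed here (and by __spin_lock_debug() above): loops_per_jiffy is the calibrated number of delay-loop iterations per jiffy and HZ is jiffies per second, so loops_per_jiffy * HZ iterations of __delay(1) amount to roughly one second of spinning before the one-time "read-lock lockup" printk fired. Since this block was already under #if 0, its removal is pure cleanup; the live spinlock path above is where behaviour actually changes.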
@@ -247,32 +197,6 @@ static inline void debug_write_unlock(rwlock_t *lock)
 	lock->owner_cpu = -1;
 }
 
-#if 0		/* This can cause lockups */
-static void __write_lock_debug(rwlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
-
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_write_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
-					"%s/%d, %p\n",
-				raw_smp_processor_id(), current->comm,
-				current->pid, lock);
-			dump_stack();
-		}
-	}
-}
-#endif
-
 void do_raw_write_lock(rwlock_t *lock)
 {
 	debug_write_lock_before(lock);