author	H. Peter Anvin <hpa@linux.intel.com>	2012-10-19 10:54:24 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-10-19 10:55:09 -0400
commit	4533d86270d7986e00594495dde9a109d6be27ae (patch)
tree	c2473cac653f7b98e5bd5e6475e63734be4b7644 /lib/spinlock_debug.c
parent	21c5e50e15b1abd797e62f18fd7f90b9cc004cbd (diff)
parent	5bc66170dc486556a1e36fd384463536573f4b82 (diff)
Merge commit '5bc66170dc486556a1e36fd384463536573f4b82' into x86/urgent
From Borislav Petkov <bp@amd64.org>:

Below is a RAS fix which reverts the addition of a sysfs attribute
which we agreed is not needed, post-factum. And this should go in now
because that sysfs attribute is going to end up in 3.7 otherwise and
thus exposed to userspace; removing it then would be a lot harder.

This is done as a merge rather than a simple patch/cherry-pick since
the baseline for this patch was not in the previous x86/urgent.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'lib/spinlock_debug.c')
-rw-r--r--	lib/spinlock_debug.c	32
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index eb10578ae055..0374a596cffa 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -107,23 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
 {
 	u64 i;
 	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
 
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_spin_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			spin_dump(lock, "lockup suspected");
+	for (i = 0; i < loops; i++) {
+		if (arch_spin_trylock(&lock->raw_lock))
+			return;
+		__delay(1);
+	}
+	/* lockup suspected: */
+	spin_dump(lock, "lockup suspected");
 #ifdef CONFIG_SMP
-			trigger_all_cpu_backtrace();
+	trigger_all_cpu_backtrace();
 #endif
-		}
-	}
+
+	/*
+	 * The trylock above was causing a livelock. Give the lower level arch
+	 * specific lock code a chance to acquire the lock. We have already
+	 * printed a warning/backtrace at this point. The non-debug arch
+	 * specific code might actually succeed in acquiring the lock. If it is
+	 * not successful, the end-result is the same - there is no forward
+	 * progress.
+	 */
+	arch_spin_lock(&lock->raw_lock);
 }
 
 void do_raw_spin_lock(raw_spinlock_t *lock)
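
For reference, below is a sketch of __spin_lock_debug() as it reads after
this merge, reconstructed from the hunk above. The symbols it relies on
(loops_per_jiffy, HZ, __delay(), spin_dump(), trigger_all_cpu_backtrace())
are declared elsewhere in the kernel tree and are assumed here; this diff
does not show them.

/*
 * Post-merge __spin_lock_debug(), reconstructed from the hunk above.
 * Assumes the usual lib/spinlock_debug.c context for loops_per_jiffy,
 * HZ, __delay(), spin_dump() and trigger_all_cpu_backtrace().
 */
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	/* Poll the lock for roughly one second's worth of delay loops. */
	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}

	/* Lockup suspected: report once, then keep trying below. */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * Fall back to the arch slow path: spinning on trylock forever can
	 * livelock, while arch_spin_lock() may still make forward progress
	 * (e.g. via fair ticket ordering on architectures that have it).
	 */
	arch_spin_lock(&lock->raw_lock);
}

The design point, per the comment carried in the patch itself: once the
debug warning has been printed, there is nothing more the debug wrapper can
usefully do, so handing the wait over to the non-debug arch_spin_lock()
path at worst reproduces the same lack of forward progress and at best
actually acquires the lock.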