author	Thomas Gleixner <tglx@linutronix.de>	2010-07-15 08:28:02 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-09-13 05:12:09 -0400
commit	59d958d2c7de20409a0dc202adc87d3973ada13d (patch)
tree	420e971a6d67e0f59f71fd3d5e61df5f079bfef6 /arch/x86/kernel
parent	e12f65f7a49905c013263ac522af224892aafc00 (diff)
locking, x86: mce: Annotate cmci_discover_lock as raw
The cmci_discover_lock can be taken in atomic context (CPU bring-up sequence) and therefore cannot be preempted on -rt.

In mainline this change documents the low-level nature of the lock - otherwise there's no functional difference. Lockdep and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
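For reference, a minimal sketch (not part of this patch; the names example_lock, example_state and example_update are made up) of the annotation pattern being applied: on PREEMPT_RT a plain spinlock_t becomes a sleeping lock, so a lock that is taken where sleeping is forbidden, such as the CPU bring-up path here, is declared as a raw_spinlock_t and taken with the raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair, which keeps spinning even on -rt:

#include <linux/spinlock.h>

/* Hypothetical example, mirroring the cmci_discover_lock conversion. */
static DEFINE_RAW_SPINLOCK(example_lock);
static int example_state;

/* May be called from contexts that must not sleep (IRQs off, CPU bring-up). */
static void example_update(int new_state)
{
	unsigned long flags;

	/* Stays a true spinning lock on -rt, unlike spin_lock_irqsave(). */
	raw_spin_lock_irqsave(&example_lock, flags);
	example_state = new_state;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}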
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_intel.c	| 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 8694ef56459d..38e49bc95ffc 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
  * cmci_discover_lock protects against parallel discovery attempts
  * which could race against each other.
  */
-static DEFINE_SPINLOCK(cmci_discover_lock);
+static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
 
 #define CMCI_THRESHOLD 1
 
@@ -85,7 +85,7 @@ static void cmci_discover(int banks, int boot)
 	int hdr = 0;
 	int i;
 
-	spin_lock_irqsave(&cmci_discover_lock, flags);
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		u64 val;
 
@@ -116,7 +116,7 @@ static void cmci_discover(int banks, int boot)
 			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
 		}
 	}
-	spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 	if (hdr)
 		printk(KERN_CONT "\n");
 }
@@ -150,7 +150,7 @@ void cmci_clear(void)
 
 	if (!cmci_supported(&banks))
 		return;
-	spin_lock_irqsave(&cmci_discover_lock, flags);
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
 			continue;
@@ -160,7 +160,7 @@ void cmci_clear(void)
 		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
 		__clear_bit(i, __get_cpu_var(mce_banks_owned));
 	}
-	spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 /*