path: root/kernel/watchdog.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2011-01-07 20:02:58 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-07 20:02:58 -0500
commit    72eb6a791459c87a0340318840bb3bd9252b627b (patch)
tree      3bfb8ad99f9c7e511f37f72d57b56a2cea06d753 /kernel/watchdog.c
parent    23d69b09b78c4876e134f104a3814c30747c53f1 (diff)
parent    55ee4ef30241a62b700f79517e6d5ef2ddbefa67 (diff)
Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
  gameport: use this_cpu_read instead of lookup
  x86: udelay: Use this_cpu_read to avoid address calculation
  x86: Use this_cpu_inc_return for nmi counter
  x86: Replace uses of current_cpu_data with this_cpu ops
  x86: Use this_cpu_ops to optimize code
  vmstat: User per cpu atomics to avoid interrupt disable / enable
  irq_work: Use per cpu atomics instead of regular atomics
  cpuops: Use cmpxchg for xchg to avoid lock semantics
  x86: this_cpu_cmpxchg and this_cpu_xchg operations
  percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
  percpu,x86: relocate this_cpu_add_return() and friends
  connector: Use this_cpu operations
  xen: Use this_cpu_inc_return
  taskstats: Use this_cpu_ops
  random: Use this_cpu_inc_return
  fs: Use this_cpu_inc_return in buffer.c
  highmem: Use this_cpu_xx_return() operations
  vmstat: Use this_cpu_inc_return for vm statistics
  x86: Support for this_cpu_add, sub, dec, inc_return
  percpu: Generic support for this_cpu_add, sub, dec, inc_return
  ...

Fixed up conflicts in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c} as per Tejun.
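Every kernel/watchdog.c change in this merge is the same mechanical conversion: an lvalue-style __get_cpu_var() access, which first computes the address of this CPU's copy of a per-cpu variable and then dereferences it, becomes a single __this_cpu_read() or __this_cpu_write() operation. A minimal sketch of the pattern, assuming a 2.6.38-era kernel (the demo_ts variable and the demo_* helpers are made up for illustration; only DEFINE_PER_CPU, __get_cpu_var and the __this_cpu ops are real interfaces of this era):

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, demo_ts);  /* hypothetical per-cpu variable */

    /* Before: __get_cpu_var() yields an lvalue for this CPU's copy. */
    static unsigned long demo_read_old(void)
    {
            return __get_cpu_var(demo_ts);
    }

    static void demo_write_old(unsigned long t)
    {
            __get_cpu_var(demo_ts) = t;
    }

    /* After: dedicated per-cpu operations, which an architecture such as
     * x86 can implement as one segment-prefixed memory instruction with
     * no separate address calculation. */
    static unsigned long demo_read_new(void)
    {
            return __this_cpu_read(demo_ts);
    }

    static void demo_write_new(unsigned long t)
    {
            __this_cpu_write(demo_ts, t);
    }

Like the code they replace, the double-underscore forms assume the caller already runs with preemption disabled, which holds in the hrtimer and NMI paths touched below.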
Diffstat (limited to 'kernel/watchdog.c')
-rw-r--r--  kernel/watchdog.c  36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 6e7b575ac33c..d7ebdf4cea98 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -118,12 +118,12 @@ static void __touch_watchdog(void)
 {
         int this_cpu = smp_processor_id();
 
-        __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
+        __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
 }
 
 void touch_softlockup_watchdog(void)
 {
-        __raw_get_cpu_var(watchdog_touch_ts) = 0;
+        __this_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -167,12 +167,12 @@ void touch_softlockup_watchdog_sync(void)
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
-        unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
+        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-        if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
+        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                 return 1;
 
-        __get_cpu_var(hrtimer_interrupts_saved) = hrint;
+        __this_cpu_write(hrtimer_interrupts_saved, hrint);
         return 0;
 }
 #endif
@@ -205,8 +205,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
         /* Ensure the watchdog never gets throttled */
         event->hw.interrupts = 0;
 
-        if (__get_cpu_var(watchdog_nmi_touch) == true) {
-                __get_cpu_var(watchdog_nmi_touch) = false;
+        if (__this_cpu_read(watchdog_nmi_touch) == true) {
+                __this_cpu_write(watchdog_nmi_touch, false);
                 return;
         }
 
@@ -220,7 +220,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 int this_cpu = smp_processor_id();
 
                 /* only print hardlockups once */
-                if (__get_cpu_var(hard_watchdog_warn) == true)
+                if (__this_cpu_read(hard_watchdog_warn) == true)
                         return;
 
                 if (hardlockup_panic)
@@ -228,16 +228,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 else
                         WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
-                __get_cpu_var(hard_watchdog_warn) = true;
+                __this_cpu_write(hard_watchdog_warn, true);
                 return;
         }
 
-        __get_cpu_var(hard_watchdog_warn) = false;
+        __this_cpu_write(hard_watchdog_warn, false);
         return;
 }
 static void watchdog_interrupt_count(void)
 {
-        __get_cpu_var(hrtimer_interrupts)++;
+        __this_cpu_inc(hrtimer_interrupts);
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
@@ -246,7 +246,7 @@ static inline void watchdog_interrupt_count(void) { return; }
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-        unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
+        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
         struct pt_regs *regs = get_irq_regs();
         int duration;
 
@@ -254,18 +254,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
         watchdog_interrupt_count();
 
         /* kick the softlockup detector */
-        wake_up_process(__get_cpu_var(softlockup_watchdog));
+        wake_up_process(__this_cpu_read(softlockup_watchdog));
 
         /* .. and repeat */
         hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
         if (touch_ts == 0) {
-                if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
+                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                         /*
                          * If the time stamp was touched atomically
                          * make sure the scheduler tick is up to date.
                          */
-                        __get_cpu_var(softlockup_touch_sync) = false;
+                        __this_cpu_write(softlockup_touch_sync, false);
                         sched_clock_tick();
                 }
                 __touch_watchdog();
@@ -281,7 +281,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
         duration = is_softlockup(touch_ts);
         if (unlikely(duration)) {
                 /* only warn once */
-                if (__get_cpu_var(soft_watchdog_warn) == true)
+                if (__this_cpu_read(soft_watchdog_warn) == true)
                         return HRTIMER_RESTART;
 
                 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -296,9 +296,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 
                 if (softlockup_panic)
                         panic("softlockup: hung tasks");
-                __get_cpu_var(soft_watchdog_warn) = true;
+                __this_cpu_write(soft_watchdog_warn, true);
         } else
-                __get_cpu_var(soft_watchdog_warn) = false;
+                __this_cpu_write(soft_watchdog_warn, false);
 
         return HRTIMER_RESTART;
 }
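Of the hunks above, only watchdog_interrupt_count() is more than a one-for-one read or write substitution: __get_cpu_var(hrtimer_interrupts)++ expanded into an address calculation followed by a separate load, add and store, whereas __this_cpu_inc() hands the whole increment to the architecture. A hedged sketch of the difference, using the patch's own variable (the codegen remark assumes x86; the generic fallback remains a plain non-atomic increment that relies on preemption being disabled):

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);

    static void watchdog_interrupt_count(void)
    {
            /*
             * Old form, for comparison:
             *         __get_cpu_var(hrtimer_interrupts)++;
             * i.e. compute this CPU's address, then load, add, store.
             *
             * New form: on x86 this can compile to a single
             * %gs-prefixed inc instruction, so the address
             * calculation disappears from the C code entirely.
             */
            __this_cpu_inc(hrtimer_interrupts);
    }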