about summary refs log tree commit diff stats
path: root/kernel/watchdog.c
diff options
context:
space:
mode:
author: Christoph Lameter <cl@linux.com> 2010-12-08 10:22:55 -0500
committer: Tejun Heo <tj@kernel.org> 2010-12-17 09:07:19 -0500
commit909ea96468096b07fbb41aaf69be060d92bd9271 (patch)
treea7e015edd96b5f674874fe78cdd889769e130a2a /kernel/watchdog.c
parent780f36d8b3fa9572f731d4fb85067b2e45e6f993 (diff)
core: Replace __get_cpu_var with __this_cpu_read if not used for an address.
__get_cpu_var() can be replaced with this_cpu_read and will then use a single read instruction with implied address calculation to access the correct per cpu instance. However, the address of a per cpu variable passed to __this_cpu_read() cannot be determined (since it's an implied address conversion through segment prefixes). Therefore apply this only to uses of __get_cpu_var where the address of the variable is not used. Cc: Pekka Enberg <penberg@cs.helsinki.fi> Cc: Hugh Dickins <hughd@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Acked-by: H. Peter Anvin <hpa@zytor.com> Signed-off-by: Christoph Lameter <cl@linux.com> Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/watchdog.c')
-rw-r--r--kernel/watchdog.c36
1 file changed, 18 insertions, 18 deletions
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 6e3c41a4024c..8037a86106ed 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -116,12 +116,12 @@ static void __touch_watchdog(void)
116{ 116{
117 int this_cpu = smp_processor_id(); 117 int this_cpu = smp_processor_id();
118 118
119 __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu); 119 __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
120} 120}
121 121
122void touch_softlockup_watchdog(void) 122void touch_softlockup_watchdog(void)
123{ 123{
124 __raw_get_cpu_var(watchdog_touch_ts) = 0; 124 __this_cpu_write(watchdog_touch_ts, 0);
125} 125}
126EXPORT_SYMBOL(touch_softlockup_watchdog); 126EXPORT_SYMBOL(touch_softlockup_watchdog);
127 127
@@ -165,12 +165,12 @@ void touch_softlockup_watchdog_sync(void)
165/* watchdog detector functions */ 165/* watchdog detector functions */
166static int is_hardlockup(void) 166static int is_hardlockup(void)
167{ 167{
168 unsigned long hrint = __get_cpu_var(hrtimer_interrupts); 168 unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
169 169
170 if (__get_cpu_var(hrtimer_interrupts_saved) == hrint) 170 if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
171 return 1; 171 return 1;
172 172
173 __get_cpu_var(hrtimer_interrupts_saved) = hrint; 173 __this_cpu_write(hrtimer_interrupts_saved, hrint);
174 return 0; 174 return 0;
175} 175}
176#endif 176#endif
@@ -203,8 +203,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
203 /* Ensure the watchdog never gets throttled */ 203 /* Ensure the watchdog never gets throttled */
204 event->hw.interrupts = 0; 204 event->hw.interrupts = 0;
205 205
206 if (__get_cpu_var(watchdog_nmi_touch) == true) { 206 if (__this_cpu_read(watchdog_nmi_touch) == true) {
207 __get_cpu_var(watchdog_nmi_touch) = false; 207 __this_cpu_write(watchdog_nmi_touch, false);
208 return; 208 return;
209 } 209 }
210 210
@@ -218,7 +218,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
218 int this_cpu = smp_processor_id(); 218 int this_cpu = smp_processor_id();
219 219
220 /* only print hardlockups once */ 220 /* only print hardlockups once */
221 if (__get_cpu_var(hard_watchdog_warn) == true) 221 if (__this_cpu_read(hard_watchdog_warn) == true)
222 return; 222 return;
223 223
224 if (hardlockup_panic) 224 if (hardlockup_panic)
@@ -226,16 +226,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
226 else 226 else
227 WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); 227 WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
228 228
229 __get_cpu_var(hard_watchdog_warn) = true; 229 __this_cpu_write(hard_watchdog_warn, true);
230 return; 230 return;
231 } 231 }
232 232
233 __get_cpu_var(hard_watchdog_warn) = false; 233 __this_cpu_write(hard_watchdog_warn, false);
234 return; 234 return;
235} 235}
236static void watchdog_interrupt_count(void) 236static void watchdog_interrupt_count(void)
237{ 237{
238 __get_cpu_var(hrtimer_interrupts)++; 238 __this_cpu_inc(hrtimer_interrupts);
239} 239}
240#else 240#else
241static inline void watchdog_interrupt_count(void) { return; } 241static inline void watchdog_interrupt_count(void) { return; }
@@ -244,7 +244,7 @@ static inline void watchdog_interrupt_count(void) { return; }
244/* watchdog kicker functions */ 244/* watchdog kicker functions */
245static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) 245static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
246{ 246{
247 unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts); 247 unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
248 struct pt_regs *regs = get_irq_regs(); 248 struct pt_regs *regs = get_irq_regs();
249 int duration; 249 int duration;
250 250
@@ -252,18 +252,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
252 watchdog_interrupt_count(); 252 watchdog_interrupt_count();
253 253
254 /* kick the softlockup detector */ 254 /* kick the softlockup detector */
255 wake_up_process(__get_cpu_var(softlockup_watchdog)); 255 wake_up_process(__this_cpu_read(softlockup_watchdog));
256 256
257 /* .. and repeat */ 257 /* .. and repeat */
258 hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period())); 258 hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
259 259
260 if (touch_ts == 0) { 260 if (touch_ts == 0) {
261 if (unlikely(__get_cpu_var(softlockup_touch_sync))) { 261 if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
262 /* 262 /*
263 * If the time stamp was touched atomically 263 * If the time stamp was touched atomically
264 * make sure the scheduler tick is up to date. 264 * make sure the scheduler tick is up to date.
265 */ 265 */
266 __get_cpu_var(softlockup_touch_sync) = false; 266 __this_cpu_write(softlockup_touch_sync, false);
267 sched_clock_tick(); 267 sched_clock_tick();
268 } 268 }
269 __touch_watchdog(); 269 __touch_watchdog();
@@ -279,7 +279,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
279 duration = is_softlockup(touch_ts); 279 duration = is_softlockup(touch_ts);
280 if (unlikely(duration)) { 280 if (unlikely(duration)) {
281 /* only warn once */ 281 /* only warn once */
282 if (__get_cpu_var(soft_watchdog_warn) == true) 282 if (__this_cpu_read(soft_watchdog_warn) == true)
283 return HRTIMER_RESTART; 283 return HRTIMER_RESTART;
284 284
285 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", 285 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -294,9 +294,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
294 294
295 if (softlockup_panic) 295 if (softlockup_panic)
296 panic("softlockup: hung tasks"); 296 panic("softlockup: hung tasks");
297 __get_cpu_var(soft_watchdog_warn) = true; 297 __this_cpu_write(soft_watchdog_warn, true);
298 } else 298 } else
299 __get_cpu_var(soft_watchdog_warn) = false; 299 __this_cpu_write(soft_watchdog_warn, false);
300 300
301 return HRTIMER_RESTART; 301 return HRTIMER_RESTART;
302} 302}