author	Jason Wessel <jason.wessel@windriver.com>	2010-01-27 17:25:22 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-02-01 02:22:32 -0500
commit	d6ad3e286d2c075a60b9f11075a2c55aeeeca2ad (patch)
tree	24f374a6cb1ef36c2aec41b9de6b8f3b1b09780d
parent	48d50674179981e41f432167b2441cec782d5484 (diff)
softlockup: Add sched_clock_tick() to avoid kernel warning on kgdb resume
When CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is set, sched_clock() gets the time
from hardware such as the TSC on x86. In this configuration kgdb will
report a softlockup warning message on resuming or detaching from a
debugger session.

Sequence of events in the problem case:

 1) "cpu sched clock" and "hardware time" are at 100 sec prior to a call
    to kgdb_handle_exception()

 2) Debugger waits in kgdb_handle_exception() for 80 sec and on exit the
    following is called ...  touch_softlockup_watchdog() -->
    __raw_get_cpu_var(touch_timestamp) = 0;

 3) "cpu sched clock" = 100s (it was not updated, because the interrupt
    was disabled in kgdb) but the "hardware time" = 180 sec

 4) The first timer interrupt after resuming from kgdb_handle_exception()
    updates the watchdog from the "cpu sched clock":

    update_process_times() {
    ...
        run_local_timers() --> softlockup_tick() -->
            check (touch_timestamp == 0)
            (it is "YES" here, we have set "touch_timestamp = 0" at kgdb) -->
            __touch_softlockup_watchdog() ***(A)--> reset "touch_timestamp"
            to "get_timestamp()" (Here, "touch_timestamp" will still be set
            to 100s.)
    ...
        scheduler_tick() ***(B)--> sched_clock_tick()
            (update "cpu sched clock" to "hardware time" = 180s)
    ...
    }

 5) The second timer interrupt handler appears to see a large jump and
    trips the softlockup warning:

    update_process_times() {
    ...
        run_local_timers() --> softlockup_tick() -->
            "cpu sched clock" - "touch_timestamp" = 180s - 100s > 60s -->
            printk "soft lockup error messages"
    ...
    }

 note: ***(A) reset "touch_timestamp" to "get_timestamp(this_cpu)"

Why is "touch_timestamp" 100 sec, instead of 180 sec?

When CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is set, the call trace of
get_timestamp() is:

    get_timestamp(this_cpu)
     --> cpu_clock(this_cpu)
          --> sched_clock_cpu(this_cpu)
               --> __update_sched_clock(sched_clock_data, now)

The __update_sched_clock() function uses the GTOD tick value to create a
window to normalize the "now" values. So if "now" is too far ahead of the
per-CPU sched_clock_data, it will be ignored.

The fix is to invoke sched_clock_tick() to update "cpu sched clock" in
order to recover from this state. This is done by introducing the function
touch_softlockup_watchdog_sync(). This allows kgdb to request that the
sched clock is updated when the watchdog thread runs the first time after
a resume from kgdb.

[yong.zhang0@gmail.com: Use per cpu instead of an array]
Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
Signed-off-by: Dongdong Deng <Dongdong.Deng@windriver.com>
Cc: kgdb-bugreport@lists.sourceforge.net
Cc: peterz@infradead.org
LKML-Reference: <1264631124-4837-2-git-send-email-jason.wessel@windriver.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
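The failing arithmetic in steps 4) and 5) can be reproduced outside the
kernel. Below is a minimal userspace C sketch of the broken sequence; the
*_model() helpers and all values are illustrative stand-ins, not the
kernel's code:

	#include <stdio.h>

	/* Illustrative stand-ins for the kernel's clocks, in seconds. */
	static unsigned long cpu_sched_clock = 100;	/* frozen while irqs were off in kgdb */
	static unsigned long hardware_time   = 180;	/* TSC kept running: 100s + 80s in kgdb */
	static unsigned long touch_ts;			/* models per-CPU softlockup_touch_ts */

	static void sched_clock_tick_model(void)	/* models sched_clock_tick() */
	{
		cpu_sched_clock = hardware_time;	/* resync "cpu sched clock" to hardware */
	}

	static void softlockup_tick_model(void)		/* models softlockup_tick() */
	{
		if (touch_ts == 0) {			/* watchdog was touched */
			touch_ts = cpu_sched_clock;	/* (A) re-arm with the stale 100s value */
			return;
		}
		if (cpu_sched_clock - touch_ts > 60)	/* softlockup_thresh */
			printf("BUG: soft lockup - CPU stuck for %lus!\n",
			       cpu_sched_clock - touch_ts);
	}

	int main(void)
	{
		touch_ts = 0;			/* touch_softlockup_watchdog() at kgdb resume */
		softlockup_tick_model();	/* first tick: touch_ts = 100 (stale) */
		sched_clock_tick_model();	/* (B) scheduler_tick() syncs clock to 180s */
		softlockup_tick_model();	/* second tick: 180 - 100 = 80 > 60 -> warning */
		return 0;
	}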
-rw-r--r--  include/linux/sched.h  |  4
-rw-r--r--  kernel/kgdb.c          |  6
-rw-r--r--  kernel/softlockup.c    | 15
3 files changed, 22 insertions, 3 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6f7bba93929b..89232151a9d0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -310,6 +310,7 @@ extern void sched_show_task(struct task_struct *p);
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 				     void __user *buffer,
@@ -323,6 +324,9 @@ static inline void softlockup_tick(void)
 static inline void touch_softlockup_watchdog(void)
 {
 }
+static inline void touch_softlockup_watchdog_sync(void)
+{
+}
 static inline void touch_all_softlockup_watchdogs(void)
 {
 }
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 2eb517e23514..87f2cc557553 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -596,7 +596,7 @@ static void kgdb_wait(struct pt_regs *regs)
 
 	/* Signal the primary CPU that we are done: */
 	atomic_set(&cpu_in_kgdb[cpu], 0);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 }
@@ -1450,7 +1450,7 @@ acquirelock:
 		    (kgdb_info[cpu].task &&
 		     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 			atomic_set(&kgdb_active, -1);
-			touch_softlockup_watchdog();
+			touch_softlockup_watchdog_sync();
 			clocksource_touch_watchdog();
 			local_irq_restore(flags);
 
@@ -1550,7 +1550,7 @@ kgdb_restore:
 	}
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d22579087e27..0d4c7898ab80 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -25,6 +25,7 @@ static DEFINE_SPINLOCK(print_lock);
 static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
 static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
+static DEFINE_PER_CPU(bool, softlock_touch_sync);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
@@ -79,6 +80,12 @@ void touch_softlockup_watchdog(void)
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
+void touch_softlockup_watchdog_sync(void)
+{
+	__raw_get_cpu_var(softlock_touch_sync) = true;
+	__raw_get_cpu_var(softlockup_touch_ts) = 0;
+}
+
 void touch_all_softlockup_watchdogs(void)
 {
 	int cpu;
@@ -118,6 +125,14 @@ void softlockup_tick(void)
 	}
 
 	if (touch_ts == 0) {
+		if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
+			/*
+			 * If the time stamp was touched atomically
+			 * make sure the scheduler tick is up to date.
+			 */
+			per_cpu(softlock_touch_sync, this_cpu) = false;
+			sched_clock_tick();
+		}
 		__touch_softlockup_watchdog();
 		return;
 	}
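Taken together, the hunks above form a small per-CPU handshake:
touch_softlockup_watchdog_sync() raises the softlock_touch_sync flag while
zeroing the timestamp, and the next softlockup_tick() consumes the flag
and calls sched_clock_tick() before re-arming the watchdog, so the new
timestamp is taken from the freshly synced clock. Extending the
illustrative userspace model from the commit message (again, stand-in
names, not the kernel's code):

	#include <stdio.h>
	#include <stdbool.h>

	static unsigned long cpu_sched_clock = 100;	/* stale: frozen while in kgdb */
	static unsigned long hardware_time   = 180;	/* real elapsed time */
	static unsigned long touch_ts;			/* models softlockup_touch_ts */
	static bool touch_sync;				/* models softlock_touch_sync */

	static void sched_clock_tick_model(void)	/* models sched_clock_tick() */
	{
		cpu_sched_clock = hardware_time;
	}

	static void touch_watchdog_sync_model(void)	/* models touch_softlockup_watchdog_sync() */
	{
		touch_sync = true;
		touch_ts = 0;
	}

	static void softlockup_tick_model(void)		/* models softlockup_tick() */
	{
		if (touch_ts == 0) {
			if (touch_sync) {		/* new path added by this patch */
				touch_sync = false;
				sched_clock_tick_model();	/* resync BEFORE re-arming */
			}
			touch_ts = cpu_sched_clock;	/* now 180s, not the stale 100s */
			return;
		}
		if (cpu_sched_clock - touch_ts > 60)	/* softlockup_thresh */
			printf("BUG: soft lockup - CPU stuck for %lus!\n",
			       cpu_sched_clock - touch_ts);
	}

	int main(void)
	{
		touch_watchdog_sync_model();	/* kgdb resume path */
		softlockup_tick_model();	/* re-arms with the synced 180s value */
		softlockup_tick_model();	/* 180 - 180 = 0: no false warning */
		return 0;
	}

With this ordering the watchdog re-arms at 180s instead of the stale 100s,
so the follow-up tick computes a delta of 0 rather than 80 and no false
warning is printed.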