author	Jeremy Fitzhardinge <jeremy@goop.org>	2007-05-08 03:28:02 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-08 14:15:06 -0400
commit	966812dc98e6a7fcdf759cbfa0efab77500a8868 (patch)
tree	47e38e3c866f1855962e212e6e11f2ab656df710 /kernel
parent	8524070b7982d76258942275908b7434cfcab4b4 (diff)
Ignore stolen time in the softlockup watchdog
The softlockup watchdog is currently a nuisance in a virtual machine, since the whole system could have the CPU stolen from it for a long period of time. While it would be unlikely for a guest domain to be denied timer interrupts for over 10s, it could happen, and any softlockup message would be completely spurious.

Earlier I proposed that sched_clock() return time in unstolen nanoseconds, which is how Xen and VMI currently implement it. If the softlockup watchdog uses sched_clock() to measure time, it would automatically ignore stolen time, and therefore only report when the guest itself locked up. When running native, sched_clock() returns real-time nanoseconds, so the behaviour would be unchanged.

Note that sched_clock() used this way is inherently per-cpu, so this patch makes sure that the per-processor watchdog thread initializes its own timestamp.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Zachary Amsden <zach@vmware.com>
Cc: James Morris <jmorris@namei.org>
Cc: Dan Hecht <dhecht@vmware.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Chris Lalancette <clalance@redhat.com>
Cc: Rick Lindsley <ricklind@us.ibm.com>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
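For illustration, this is roughly what an "unstolen nanoseconds" sched_clock() looks like in a paravirtualized guest. The helper names (raw_monotonic_ns(), stolen_ns()) are placeholders invented for this sketch, not the actual Xen or VMI interfaces; the point is only that stolen time is subtracted before the value ever reaches the watchdog:

/*
 * Schematic sketch only -- not the real Xen/VMI implementation.
 * Assumes two hypothetical per-cpu counters exposed by the
 * hypervisor interface:
 *   raw_monotonic_ns() - nanoseconds of wall-clock time since boot
 *   stolen_ns()        - nanoseconds this vcpu was runnable but not running
 */
unsigned long long sched_clock(void)
{
	/* Advances only while the guest is actually executing. */
	return raw_monotonic_ns() - stolen_ns();
}

On native hardware sched_clock() keeps returning plain monotonic nanoseconds, so the watchdog behaves exactly as before.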
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/softlockup.c	35
1 file changed, 29 insertions, 6 deletions
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 81d43caa2012..5ea631742dbc 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -34,9 +34,19 @@ static struct notifier_block panic_block = {
 	.notifier_call = softlock_panic,
 };
 
+/*
+ * Returns seconds, approximately.  We don't need nanosecond
+ * resolution, and we don't need to waste time with a big divide when
+ * 2^30ns == 1.074s.
+ */
+static unsigned long get_timestamp(void)
+{
+	return sched_clock() >> 30;  /* 2^30 ~= 10^9 */
+}
+
 void touch_softlockup_watchdog(void)
 {
-	__raw_get_cpu_var(touch_timestamp) = jiffies;
+	__raw_get_cpu_var(touch_timestamp) = get_timestamp();
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -48,9 +58,17 @@ void softlockup_tick(void)
 {
 	int this_cpu = smp_processor_id();
 	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
+	unsigned long print_timestamp;
+	unsigned long now;
 
-	/* prevent double reports: */
-	if (per_cpu(print_timestamp, this_cpu) == touch_timestamp ||
+	/* watchdog task hasn't updated timestamp yet */
+	if (touch_timestamp == 0)
+		return;
+
+	print_timestamp = per_cpu(print_timestamp, this_cpu);
+
+	/* report at most once a second */
+	if (print_timestamp < (touch_timestamp + 1) ||
 		did_panic ||
 			!per_cpu(watchdog_task, this_cpu))
 		return;
@@ -61,12 +79,14 @@ void softlockup_tick(void)
 		return;
 	}
 
+	now = get_timestamp();
+
 	/* Wake up the high-prio watchdog task every second: */
-	if (time_after(jiffies, touch_timestamp + HZ))
+	if (now > (touch_timestamp + 1))
 		wake_up_process(per_cpu(watchdog_task, this_cpu));
 
 	/* Warn about unreasonable 10+ seconds delays: */
-	if (time_after(jiffies, touch_timestamp + 10*HZ)) {
+	if (now > (touch_timestamp + 10)) {
 		per_cpu(print_timestamp, this_cpu) = touch_timestamp;
 
 		spin_lock(&print_lock);
@@ -87,6 +107,9 @@ static int watchdog(void * __bind_cpu)
 	sched_setscheduler(current, SCHED_FIFO, &param);
 	current->flags |= PF_NOFREEZE;
 
+	/* initialize timestamp */
+	touch_softlockup_watchdog();
+
 	/*
 	 * Run briefly once per second to reset the softlockup timestamp.
 	 * If this gets delayed for more than 10 seconds then the
@@ -118,7 +141,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 			printk("watchdog for %i failed\n", hotcpu);
 			return NOTIFY_BAD;
 		}
-		per_cpu(touch_timestamp, hotcpu) = jiffies;
+		per_cpu(touch_timestamp, hotcpu) = 0;
 		per_cpu(watchdog_task, hotcpu) = p;
 		kthread_bind(p, hotcpu);
 		break;
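One subtlety worth noting: get_timestamp() counts 2^30 ns units rather than true seconds, so each watchdog "second" is about 1.074s and the 10-unit warning threshold corresponds to roughly 10.7s of unstolen time. A standalone sketch of that arithmetic (plain userspace C, for illustration only):

#include <stdio.h>

int main(void)
{
	/* one get_timestamp() "second" is 2^30 nanoseconds */
	unsigned long long unit_ns = 1ULL << 30;

	printf("1 unit  = %llu ns (~%.3f s)\n", unit_ns, unit_ns / 1e9);
	printf("10 units (warning threshold) ~ %.1f s\n", 10 * unit_ns / 1e9);
	return 0;
}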