aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVenkatesh Pallipadi <venkatesh.pallipadi@intel.com>2005-06-23 03:08:13 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-06-23 12:45:08 -0400
commit8a9e1b0f564615bd92ba50162623e25c2904e564 (patch)
treeff8fe9b280cc539e4a57826e2dee072d247d284a
parent0f8e2d62fa04441cd12c08ce521e84e5bd3f8a46 (diff)
[PATCH] Platform SMIs and their interference with tsc based delay calibration
Issue: Current tsc based delay_calibration can result in significant errors in loops_per_jiffy count when the platform events like SMIs (System Management Interrupts that are non-maskable) are present. This could lead to potential kernel panic(). This issue is becoming more visible with 2.6 kernel (as default HZ is 1000) and on platforms with higher SMI handling latencies. During the boot time, SMIs are mostly used by BIOS (for things like legacy keyboard emulation). Description: The pseudocode for current delay calibration with tsc based delay looks like (0) Estimate a value for loops_per_jiffy (1) While (loops_per_jiffy estimate is accurate enough) (2) wait for jiffy transition (jiffy1) (3) Note down current tsc (tsc1) (4) loop until tsc becomes tsc1 + loops_per_jiffy (5) check whether jiffy changed since jiffy1 or not and refine loops_per_jiffy estimate Consider the following cases Case 1: If SMIs happen between (2) and (3) above, we can end up with a loops_per_jiffy value that is too low. This results in shortened delays and the kernel can panic() during boot (mostly at IOAPIC timer initialization timer_irq_works() as we don't have enough timer interrupts in a specified interval). Case 2: If SMIs happen between (3) and (4) above, then we can end up with a loops_per_jiffy value that is too high. And with current i386 code, a too-high lpj value (greater than 17M) can result in an overflow in delay.c:__const_udelay(), again resulting in a shorter delay and panic(). Solution: The patch below makes the calibration routine aware of asynchronous events like SMIs. We increase the delay calibration time and also identify any significant errors (greater than 12.5%) in the calibration and notify the user. Patch below changes both i386 and x86-64 architectures to use this new and improved calibrate_delay_direct() routine. 
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> Signed-off-by: Adrian Bunk <bunk@stusta.de> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--arch/i386/kernel/timers/common.c9
-rw-r--r--arch/i386/kernel/timers/timer.c9
-rw-r--r--arch/i386/kernel/timers/timer_hpet.c1
-rw-r--r--arch/i386/kernel/timers/timer_pm.c1
-rw-r--r--arch/i386/kernel/timers/timer_tsc.c1
-rw-r--r--arch/x86_64/lib/delay.c7
-rw-r--r--include/asm-i386/timer.h2
-rw-r--r--include/asm-i386/timex.h3
-rw-r--r--include/asm-x86_64/timex.h3
-rw-r--r--init/calibrate.c94
10 files changed, 130 insertions, 0 deletions
diff --git a/arch/i386/kernel/timers/common.c b/arch/i386/kernel/timers/common.c
index 8e201219f525..b38cc0d0c71a 100644
--- a/arch/i386/kernel/timers/common.c
+++ b/arch/i386/kernel/timers/common.c
@@ -139,6 +139,15 @@ bad_calibration:
139} 139}
140#endif 140#endif
141 141
142
143unsigned long read_timer_tsc(void)
144{
145 unsigned long retval;
146 rdtscl(retval);
147 return retval;
148}
149
150
142/* calculate cpu_khz */ 151/* calculate cpu_khz */
143void init_cpu_khz(void) 152void init_cpu_khz(void)
144{ 153{
diff --git a/arch/i386/kernel/timers/timer.c b/arch/i386/kernel/timers/timer.c
index a3d6a288088b..7e39ed8e33f8 100644
--- a/arch/i386/kernel/timers/timer.c
+++ b/arch/i386/kernel/timers/timer.c
@@ -64,3 +64,12 @@ struct timer_opts* __init select_timer(void)
64 panic("select_timer: Cannot find a suitable timer\n"); 64 panic("select_timer: Cannot find a suitable timer\n");
65 return NULL; 65 return NULL;
66} 66}
67
68int read_current_timer(unsigned long *timer_val)
69{
70 if (cur_timer->read_timer) {
71 *timer_val = cur_timer->read_timer();
72 return 0;
73 }
74 return -1;
75}
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index f778f471a09a..15a7d727bd6f 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -186,6 +186,7 @@ static struct timer_opts timer_hpet = {
186 .get_offset = get_offset_hpet, 186 .get_offset = get_offset_hpet,
187 .monotonic_clock = monotonic_clock_hpet, 187 .monotonic_clock = monotonic_clock_hpet,
188 .delay = delay_hpet, 188 .delay = delay_hpet,
189 .read_timer = read_timer_tsc,
189}; 190};
190 191
191struct init_timer_opts __initdata timer_hpet_init = { 192struct init_timer_opts __initdata timer_hpet_init = {
diff --git a/arch/i386/kernel/timers/timer_pm.c b/arch/i386/kernel/timers/timer_pm.c
index d77f22030fe6..4ef20e663498 100644
--- a/arch/i386/kernel/timers/timer_pm.c
+++ b/arch/i386/kernel/timers/timer_pm.c
@@ -246,6 +246,7 @@ static struct timer_opts timer_pmtmr = {
246 .get_offset = get_offset_pmtmr, 246 .get_offset = get_offset_pmtmr,
247 .monotonic_clock = monotonic_clock_pmtmr, 247 .monotonic_clock = monotonic_clock_pmtmr,
248 .delay = delay_pmtmr, 248 .delay = delay_pmtmr,
249 .read_timer = read_timer_tsc,
249}; 250};
250 251
251struct init_timer_opts __initdata timer_pmtmr_init = { 252struct init_timer_opts __initdata timer_pmtmr_init = {
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 180444d87824..27f08ae9120b 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -572,6 +572,7 @@ static struct timer_opts timer_tsc = {
572 .get_offset = get_offset_tsc, 572 .get_offset = get_offset_tsc,
573 .monotonic_clock = monotonic_clock_tsc, 573 .monotonic_clock = monotonic_clock_tsc,
574 .delay = delay_tsc, 574 .delay = delay_tsc,
575 .read_timer = read_timer_tsc,
575}; 576};
576 577
577struct init_timer_opts __initdata timer_tsc_init = { 578struct init_timer_opts __initdata timer_tsc_init = {
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index aed61a668a1b..33a873a3c223 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -12,6 +12,7 @@
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <asm/delay.h> 14#include <asm/delay.h>
15#include <asm/msr.h>
15 16
16#ifdef CONFIG_SMP 17#ifdef CONFIG_SMP
17#include <asm/smp.h> 18#include <asm/smp.h>
@@ -19,6 +20,12 @@
19 20
20int x86_udelay_tsc = 0; /* Delay via TSC */ 21int x86_udelay_tsc = 0; /* Delay via TSC */
21 22
23int read_current_timer(unsigned long *timer_value)
24{
25 rdtscll(*timer_value);
26 return 0;
27}
28
22void __delay(unsigned long loops) 29void __delay(unsigned long loops)
23{ 30{
24 unsigned bclock, now; 31 unsigned bclock, now;
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index c34709849839..dcf1e07db08a 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -22,6 +22,7 @@ struct timer_opts {
22 unsigned long (*get_offset)(void); 22 unsigned long (*get_offset)(void);
23 unsigned long long (*monotonic_clock)(void); 23 unsigned long long (*monotonic_clock)(void);
24 void (*delay)(unsigned long); 24 void (*delay)(unsigned long);
25 unsigned long (*read_timer)(void);
25}; 26};
26 27
27struct init_timer_opts { 28struct init_timer_opts {
@@ -52,6 +53,7 @@ extern struct init_timer_opts timer_cyclone_init;
52#endif 53#endif
53 54
54extern unsigned long calibrate_tsc(void); 55extern unsigned long calibrate_tsc(void);
56extern unsigned long read_timer_tsc(void);
55extern void init_cpu_khz(void); 57extern void init_cpu_khz(void);
56extern int recalibrate_cpu_khz(void); 58extern int recalibrate_cpu_khz(void);
57#ifdef CONFIG_HPET_TIMER 59#ifdef CONFIG_HPET_TIMER
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h
index b41e484c3445..e370f907bd39 100644
--- a/include/asm-i386/timex.h
+++ b/include/asm-i386/timex.h
@@ -49,4 +49,7 @@ static inline cycles_t get_cycles (void)
49 49
50extern unsigned long cpu_khz; 50extern unsigned long cpu_khz;
51 51
52extern int read_current_timer(unsigned long *timer_value);
53#define ARCH_HAS_READ_CURRENT_TIMER 1
54
52#endif 55#endif
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index 34f31a18f90b..24ecf6a637cb 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -26,6 +26,9 @@ static inline cycles_t get_cycles (void)
26 26
27extern unsigned int cpu_khz; 27extern unsigned int cpu_khz;
28 28
29extern int read_current_timer(unsigned long *timer_value);
30#define ARCH_HAS_READ_CURRENT_TIMER 1
31
29extern struct vxtime_data vxtime; 32extern struct vxtime_data vxtime;
30 33
31#endif 34#endif
diff --git a/init/calibrate.c b/init/calibrate.c
index c698e04a3dbe..d206c7548fe6 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -8,6 +8,8 @@
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/init.h> 9#include <linux/init.h>
10 10
11#include <asm/timex.h>
12
11static unsigned long preset_lpj; 13static unsigned long preset_lpj;
12static int __init lpj_setup(char *str) 14static int __init lpj_setup(char *str)
13{ 15{
@@ -17,6 +19,92 @@ static int __init lpj_setup(char *str)
17 19
18__setup("lpj=", lpj_setup); 20__setup("lpj=", lpj_setup);
19 21
22#ifdef ARCH_HAS_READ_CURRENT_TIMER
23
24/* This routine uses the read_current_timer() routine and gets the
25 * loops per jiffy directly, instead of guessing it using delay().
26 * Also, this code tries to handle non-maskable asynchronous events
27 * (like SMIs)
28 */
29#define DELAY_CALIBRATION_TICKS ((HZ < 100) ? 1 : (HZ/100))
30#define MAX_DIRECT_CALIBRATION_RETRIES 5
31
32static unsigned long __devinit calibrate_delay_direct(void)
33{
34 unsigned long pre_start, start, post_start;
35 unsigned long pre_end, end, post_end;
36 unsigned long start_jiffies;
37 unsigned long tsc_rate_min, tsc_rate_max;
38 unsigned long good_tsc_sum = 0;
39 unsigned long good_tsc_count = 0;
40 int i;
41
42 if (read_current_timer(&pre_start) < 0 )
43 return 0;
44
45 /*
46 * A simple loop like
47 * while ( jiffies < start_jiffies+1)
48 * start = read_current_timer();
49 * will not do. As we don't really know whether jiffy switch
50 * happened first or timer_value was read first. And some asynchronous
51 * event can happen between these two events introducing errors in lpj.
52 *
53 * So, we do
54 * 1. pre_start <- When we are sure that jiffy switch hasn't happened
55 * 2. check jiffy switch
56 * 3. start <- timer value before or after jiffy switch
57 * 4. post_start <- When we are sure that jiffy switch has happened
58 *
59 * Note, we don't know anything about order of 2 and 3.
60 * Now, by looking at post_start and pre_start difference, we can
61 * check whether any asynchronous event happened or not
62 */
63
64 for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
65 pre_start = 0;
66 read_current_timer(&start);
67 start_jiffies = jiffies;
68 while (jiffies <= (start_jiffies + 1)) {
69 pre_start = start;
70 read_current_timer(&start);
71 }
72 read_current_timer(&post_start);
73
74 pre_end = 0;
75 end = post_start;
76 while (jiffies <=
77 (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) {
78 pre_end = end;
79 read_current_timer(&end);
80 }
81 read_current_timer(&post_end);
82
83 tsc_rate_max = (post_end - pre_start) / DELAY_CALIBRATION_TICKS;
84 tsc_rate_min = (pre_end - post_start) / DELAY_CALIBRATION_TICKS;
85
86 /*
87 * If the upper limit and lower limit of the tsc_rate is
88 * >= 12.5% apart, redo calibration.
89 */
90 if (pre_start != 0 && pre_end != 0 &&
91 (tsc_rate_max - tsc_rate_min) < (tsc_rate_max >> 3)) {
92 good_tsc_count++;
93 good_tsc_sum += tsc_rate_max;
94 }
95 }
96
97 if (good_tsc_count)
98 return (good_tsc_sum/good_tsc_count);
99
100 printk(KERN_WARNING "calibrate_delay_direct() failed to get a good "
101 "estimate for loops_per_jiffy.\nProbably due to long platform interrupts. Consider using \"lpj=\" boot option.\n");
102 return 0;
103}
104#else
105static unsigned long __devinit calibrate_delay_direct(void) {return 0;}
106#endif
107
20/* 108/*
21 * This is the number of bits of precision for the loops_per_jiffy. Each 109 * This is the number of bits of precision for the loops_per_jiffy. Each
22 * bit takes on average 1.5/HZ seconds. This (like the original) is a little 110 * bit takes on average 1.5/HZ seconds. This (like the original) is a little
@@ -35,6 +123,12 @@ void __devinit calibrate_delay(void)
35 "%lu.%02lu BogoMIPS preset\n", 123 "%lu.%02lu BogoMIPS preset\n",
36 loops_per_jiffy/(500000/HZ), 124 loops_per_jiffy/(500000/HZ),
37 (loops_per_jiffy/(5000/HZ)) % 100); 125 (loops_per_jiffy/(5000/HZ)) % 100);
126 } else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) {
127 printk("Calibrating delay using timer specific routine.. ");
128 printk("%lu.%02lu BogoMIPS (lpj=%lu)\n",
129 loops_per_jiffy/(500000/HZ),
130 (loops_per_jiffy/(5000/HZ)) % 100,
131 loops_per_jiffy);
38 } else { 132 } else {
39 loops_per_jiffy = (1<<12); 133 loops_per_jiffy = (1<<12);
40 134