Diffstat (limited to 'arch/x86/kernel')

-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c      |    5
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c  |   18
-rw-r--r--  arch/x86/kernel/cpuid.c                  |   15
-rw-r--r--  arch/x86/kernel/io_delay.c               |    8
-rw-r--r--  arch/x86/kernel/msr.c                    |   38
-rw-r--r--  arch/x86/kernel/tsc.c                    |  240

6 files changed, 245 insertions, 79 deletions
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 65a339678ece..726a5fcdf341 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -759,6 +759,7 @@ static struct sysdev_class mce_sysclass = {
 };
 
 DEFINE_PER_CPU(struct sys_device, device_mce);
+void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata;
 
 /* Why are there no generic functions for this? */
 #define ACCESSOR(name, var, start) \
@@ -883,9 +884,13 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		mce_create_device(cpu);
+		if (threshold_cpu_callback)
+			threshold_cpu_callback(action, cpu);
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
+		if (threshold_cpu_callback)
+			threshold_cpu_callback(action, cpu);
 		mce_remove_device(cpu);
 		break;
 	}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 88736cadbaa6..5eb390a4b2e9 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -628,6 +628,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 	deallocate_threshold_block(cpu, bank);
 
 free_out:
+	kobject_del(b->kobj);
 	kobject_put(b->kobj);
 	kfree(b);
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
@@ -645,14 +646,11 @@ static void threshold_remove_device(unsigned int cpu)
 }
 
 /* get notified when a cpu comes on/off */
-static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
-					    unsigned long action, void *hcpu)
+static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action,
+						    unsigned int cpu)
 {
-	/* cpu was unsigned int to begin with */
-	unsigned int cpu = (unsigned long)hcpu;
-
 	if (cpu >= NR_CPUS)
-		goto out;
+		return;
 
 	switch (action) {
 	case CPU_ONLINE:
@@ -666,14 +664,8 @@ static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
 	default:
 		break;
 	}
-out:
-	return NOTIFY_OK;
 }
 
-static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
-	.notifier_call = threshold_cpu_callback,
-};
-
 static __init int threshold_init_device(void)
 {
 	unsigned lcpu = 0;
@@ -684,7 +676,7 @@ static __init int threshold_init_device(void)
 		if (err)
 			return err;
 	}
-	register_hotcpu_notifier(&threshold_cpu_notifier);
+	threshold_cpu_callback = amd_64_threshold_cpu_callback;
 	return 0;
 }
 
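The mce_64.c and mce_amd_64.c hunks above replace the AMD-specific CPU hotplug notifier with a function pointer, threshold_cpu_callback, that the generic MCE hotplug handler invokes only when a sub-driver has filled it in; as the mce_64.c hunk shows, the threshold handler now runs after mce_create_device() on CPU_ONLINE and before mce_remove_device() on CPU_DEAD, instead of through a second, independently ordered notifier. As a rough illustration of that pattern only (standalone C with hypothetical names, not kernel code):

/*
 * Illustrative only -- not kernel code. Hypothetical names, plain C.
 * The generic layer owns a function pointer that stays NULL until a
 * sub-driver registers its handler by plain assignment.
 */
#include <stdio.h>

static void (*threshold_hook)(unsigned long action, unsigned int cpu);

/* Generic hotplug path: invoke the hook only if one was registered. */
static void generic_cpu_callback(unsigned long action, unsigned int cpu)
{
	if (threshold_hook)
		threshold_hook(action, cpu);
}

/* Sub-driver handler, registered at init time. */
static void amd_threshold_hook(unsigned long action, unsigned int cpu)
{
	printf("threshold hook: action=%lu cpu=%u\n", action, cpu);
}

int main(void)
{
	generic_cpu_callback(1, 0);		/* no hook yet: nothing happens */
	threshold_hook = amd_threshold_hook;	/* what threshold_init_device() now does */
	generic_cpu_callback(1, 0);		/* hook runs */
	return 0;
}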
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 14b11b3be31c..8e9cd6a8ec12 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -89,6 +89,8 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	struct cpuid_regs cmd;
 	int cpu = iminor(file->f_path.dentry->d_inode);
 	u64 pos = *ppos;
+	ssize_t bytes = 0;
+	int err = 0;
 
 	if (count % 16)
 		return -EINVAL;	/* Invalid chunk size */
@@ -96,14 +98,19 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	for (; count; count -= 16) {
 		cmd.eax = pos;
 		cmd.ecx = pos >> 32;
-		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
-		if (copy_to_user(tmp, &cmd, 16))
-			return -EFAULT;
+		err = smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
+		if (err)
+			break;
+		if (copy_to_user(tmp, &cmd, 16)) {
+			err = -EFAULT;
+			break;
+		}
 		tmp += 16;
+		bytes += 16;
 		*ppos = ++pos;
 	}
 
-	return tmp - buf;
+	return bytes ? bytes : err;
 }
 
 static int cpuid_open(struct inode *inode, struct file *file)
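The cpuid.c hunk changes cpuid_read() to report partial progress: a smp_call_function_single() failure or a copy_to_user() fault now breaks out of the loop, and the function returns the bytes already copied, falling back to the error code only when nothing was copied. For illustration only, a minimal userspace sketch that consumes this interface might look as follows; the device path assumes CONFIG_X86_CPUID, and, as the hunk itself shows, the file offset supplies EAX in its low 32 bits and ECX in its high 32 bits:

/*
 * Illustrative only -- a userspace sketch, not part of this patch.
 * Reads CPUID leaf 0 of CPU 0 through /dev/cpu/0/cpuid; every record
 * is 16 bytes (eax, ebx, ecx, edx).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned int regs[4];
	ssize_t n;
	int fd;

	fd = open("/dev/cpu/0/cpuid", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = pread(fd, regs, sizeof(regs), 0);	/* offset 0: leaf 0, subleaf 0 */
	if (n < (ssize_t)sizeof(regs)) {
		/* With this patch a multi-record read can return a short byte
		 * count; a failure on the first record still shows up as -1
		 * with errno set. */
		perror("pread");
		close(fd);
		return 1;
	}
	printf("eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
	       regs[0], regs[1], regs[2], regs[3]);
	close(fd);
	return 0;
}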
diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
index 1c3a66a67f83..720d2607aacb 100644
--- a/arch/x86/kernel/io_delay.c
+++ b/arch/x86/kernel/io_delay.c
@@ -92,6 +92,14 @@ static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "30BF")
 		}
 	},
+	{
+		.callback = dmi_io_delay_0xed_port,
+		.ident = "Presario F700",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
+			DMI_MATCH(DMI_BOARD_NAME, "30D3")
+		}
+	},
 	{ }
 };
 
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index e43938086885..2e2af5d18191 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -72,21 +72,28 @@ static ssize_t msr_read(struct file *file, char __user *buf,
 	u32 data[2];
 	u32 reg = *ppos;
 	int cpu = iminor(file->f_path.dentry->d_inode);
-	int err;
+	int err = 0;
+	ssize_t bytes = 0;
 
 	if (count % 8)
 		return -EINVAL;	/* Invalid chunk size */
 
 	for (; count; count -= 8) {
 		err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
-		if (err)
-			return -EIO;
-		if (copy_to_user(tmp, &data, 8))
-			return -EFAULT;
+		if (err) {
+			if (err == -EFAULT) /* Fix idiotic error code */
+				err = -EIO;
+			break;
+		}
+		if (copy_to_user(tmp, &data, 8)) {
+			err = -EFAULT;
+			break;
+		}
 		tmp += 2;
+		bytes += 8;
 	}
 
-	return ((char __user *)tmp) - buf;
+	return bytes ? bytes : err;
 }
 
 static ssize_t msr_write(struct file *file, const char __user *buf,
@@ -96,21 +103,28 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
 	u32 data[2];
 	u32 reg = *ppos;
 	int cpu = iminor(file->f_path.dentry->d_inode);
-	int err;
+	int err = 0;
+	ssize_t bytes = 0;
 
 	if (count % 8)
 		return -EINVAL;	/* Invalid chunk size */
 
 	for (; count; count -= 8) {
-		if (copy_from_user(&data, tmp, 8))
-			return -EFAULT;
+		if (copy_from_user(&data, tmp, 8)) {
+			err = -EFAULT;
+			break;
+		}
 		err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
-		if (err)
-			return -EIO;
+		if (err) {
+			if (err == -EFAULT) /* Fix idiotic error code */
+				err = -EIO;
+			break;
+		}
 		tmp += 2;
+		bytes += 8;
 	}
 
-	return ((char __user *)tmp) - buf;
+	return bytes ? bytes : err;
 }
 
 static int msr_open(struct inode *inode, struct file *file)
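The msr.c hunks apply the same pattern to msr_read() and msr_write(): an error in the middle of the loop stops the transfer, and the byte count moved so far is returned, with the error code used only when no record was transferred. A hedged userspace sketch, assuming CONFIG_X86_MSR and root privileges, that reads a single 8-byte record (the register number is only an example):

/*
 * Illustrative only -- a userspace sketch, not part of this patch.
 * Reads one 8-byte MSR record from CPU 0 via /dev/cpu/0/msr.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define EXAMPLE_MSR 0x1b	/* IA32_APIC_BASE, for illustration only */

int main(void)
{
	uint64_t val;
	ssize_t n;
	int fd;

	fd = open("/dev/cpu/0/msr", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The file offset is the MSR register number (u32 reg = *ppos above). */
	n = pread(fd, &val, sizeof(val), EXAMPLE_MSR);
	if (n != (ssize_t)sizeof(val)) {
		/* Multi-record reads may now return a short byte count; a
		 * failed first record is still reported as -1 with errno set. */
		perror("pread");
		close(fd);
		return 1;
	}
	printf("MSR %#x = %#llx\n", (unsigned int)EXAMPLE_MSR,
	       (unsigned long long)val);
	close(fd);
	return 0;
}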
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 46af71676738..8f98e9de1b82 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -122,80 +122,216 @@ static u64 tsc_read_refs(u64 *pm, u64 *hpet)
 	return ULLONG_MAX;
 }
 
-/**
- * native_calibrate_tsc - calibrate the tsc on boot
+/*
+ * Try to calibrate the TSC against the Programmable
+ * Interrupt Timer and return the frequency of the TSC
+ * in kHz.
+ *
+ * Return ULONG_MAX on failure to calibrate.
  */
-unsigned long native_calibrate_tsc(void)
+static unsigned long pit_calibrate_tsc(void)
 {
-	unsigned long flags;
-	u64 tsc1, tsc2, tr1, tr2, delta, pm1, pm2, hpet1, hpet2;
-	int hpet = is_hpet_enabled();
-	unsigned int tsc_khz_val = 0;
-
-	local_irq_save(flags);
-
-	tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
+	u64 tsc, t1, t2, delta;
+	unsigned long tscmin, tscmax;
+	int pitcnt;
 
+	/* Set the Gate high, disable speaker */
 	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
 
+	/*
+	 * Setup CTC channel 2* for mode 0, (interrupt on terminal
+	 * count mode), binary count. Set the latch register to 50ms
+	 * (LSB then MSB) to begin countdown.
+	 */
 	outb(0xb0, 0x43);
 	outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
 	outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
-	tr1 = get_cycles();
-	while ((inb(0x61) & 0x20) == 0);
-	tr2 = get_cycles();
 
-	tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
+	tsc = t1 = t2 = get_cycles();
 
-	local_irq_restore(flags);
+	pitcnt = 0;
+	tscmax = 0;
+	tscmin = ULONG_MAX;
+	while ((inb(0x61) & 0x20) == 0) {
+		t2 = get_cycles();
+		delta = t2 - tsc;
+		tsc = t2;
+		if ((unsigned long) delta < tscmin)
+			tscmin = (unsigned int) delta;
+		if ((unsigned long) delta > tscmax)
+			tscmax = (unsigned int) delta;
+		pitcnt++;
+	}
 
 	/*
-	 * Preset the result with the raw and inaccurate PIT
-	 * calibration value
+	 * Sanity checks:
+	 *
+	 * If we were not able to read the PIT more than 5000
+	 * times, then we have been hit by a massive SMI
+	 *
+	 * If the maximum is 10 times larger than the minimum,
+	 * then we got hit by an SMI as well.
 	 */
-	delta = (tr2 - tr1);
+	if (pitcnt < 5000 || tscmax > 10 * tscmin)
+		return ULONG_MAX;
+
+	/* Calculate the PIT value */
+	delta = t2 - t1;
 	do_div(delta, 50);
-	tsc_khz_val = delta;
+	return delta;
+}
+
+
+/**
+ * native_calibrate_tsc - calibrate the tsc on boot
+ */
+unsigned long native_calibrate_tsc(void)
+{
+	u64 tsc1, tsc2, delta, pm1, pm2, hpet1, hpet2;
+	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
+	unsigned long flags;
+	int hpet = is_hpet_enabled(), i;
+
+	/*
+	 * Run 5 calibration loops to get the lowest frequency value
+	 * (the best estimate). We use two different calibration modes
+	 * here:
+	 *
+	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
+	 * load a timeout of 50ms. We read the time right after we
+	 * started the timer and wait until the PIT count down reaches
+	 * zero. In each wait loop iteration we read the TSC and check
+	 * the delta to the previous read. We keep track of the min
+	 * and max values of that delta. The delta is mostly defined
+	 * by the IO time of the PIT access, so we can detect when a
+	 * SMI/SMM disturbance happened between the two reads. If the
+	 * maximum time is significantly larger than the minimum time,
+	 * then we discard the result and have another try.
+	 *
+	 * 2) Reference counter. If available we use the HPET or the
+	 * PMTIMER as a reference to check the sanity of that value.
+	 * We use separate TSC readouts and check inside of the
+	 * reference read for a SMI/SMM disturbance. We discard
+	 * disturbed values here as well. We do that around the PIT
+	 * calibration delay loop as we have to wait for a certain
+	 * amount of time anyway.
+	 */
+	for (i = 0; i < 5; i++) {
+		unsigned long tsc_pit_khz;
+
+		/*
+		 * Read the start value and the reference count of
+		 * hpet/pmtimer when available. Then do the PIT
+		 * calibration, which will take at least 50ms, and
+		 * read the end value.
+		 */
+		local_irq_save(flags);
+		tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
+		tsc_pit_khz = pit_calibrate_tsc();
+		tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
+		local_irq_restore(flags);
+
+		/* Pick the lowest PIT TSC calibration so far */
+		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
+
+		/* hpet or pmtimer available ? */
+		if (!hpet && !pm1 && !pm2)
+			continue;
+
+		/* Check, whether the sampling was disturbed by an SMI */
+		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
+			continue;
+
+		tsc2 = (tsc2 - tsc1) * 1000000LL;
+
+		if (hpet) {
+			if (hpet2 < hpet1)
+				hpet2 += 0x100000000ULL;
+			hpet2 -= hpet1;
+			tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
+			do_div(tsc1, 1000000);
+		} else {
+			if (pm2 < pm1)
+				pm2 += (u64)ACPI_PM_OVRRUN;
+			pm2 -= pm1;
+			tsc1 = pm2 * 1000000000LL;
+			do_div(tsc1, PMTMR_TICKS_PER_SEC);
+		}
+
+		do_div(tsc2, tsc1);
+		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
+	}
+
+	/*
+	 * Now check the results.
+	 */
+	if (tsc_pit_min == ULONG_MAX) {
+		/* PIT gave no useful value */
+		printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
+
+		/* We don't have an alternative source, disable TSC */
+		if (!hpet && !pm1 && !pm2) {
+			printk("TSC: No reference (HPET/PMTIMER) available\n");
+			return 0;
+		}
+
+		/* The alternative source failed as well, disable TSC */
+		if (tsc_ref_min == ULONG_MAX) {
+			printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
+			       "failed due to SMI disturbance.\n");
+			return 0;
+		}
+
+		/* Use the alternative source */
+		printk(KERN_INFO "TSC: using %s reference calibration\n",
+		       hpet ? "HPET" : "PMTIMER");
+
+		return tsc_ref_min;
+	}
 
-	/* hpet or pmtimer available ? */
+	/* We don't have an alternative source, use the PIT calibration value */
 	if (!hpet && !pm1 && !pm2) {
-		printk(KERN_INFO "TSC calibrated against PIT\n");
-		goto out;
+		printk(KERN_INFO "TSC: Using PIT calibration value\n");
+		return tsc_pit_min;
 	}
 
-	/* Check, whether the sampling was disturbed by an SMI */
-	if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) {
-		printk(KERN_WARNING "TSC calibration disturbed by SMI, "
-				"using PIT calibration result\n");
-		goto out;
+	/* The alternative source failed, use the PIT calibration value */
+	if (tsc_ref_min == ULONG_MAX) {
+		printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed due "
+		       "to SMI disturbance. Using PIT calibration\n");
+		return tsc_pit_min;
 	}
 
-	tsc2 = (tsc2 - tsc1) * 1000000LL;
-
-	if (hpet) {
-		printk(KERN_INFO "TSC calibrated against HPET\n");
-		if (hpet2 < hpet1)
-			hpet2 += 0x100000000ULL;
-		hpet2 -= hpet1;
-		tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
-		do_div(tsc1, 1000000);
-	} else {
-		printk(KERN_INFO "TSC calibrated against PM_TIMER\n");
-		if (pm2 < pm1)
-			pm2 += (u64)ACPI_PM_OVRRUN;
-		pm2 -= pm1;
-		tsc1 = pm2 * 1000000000LL;
-		do_div(tsc1, PMTMR_TICKS_PER_SEC);
+	/* Check the reference deviation */
+	delta = ((u64) tsc_pit_min) * 100;
+	do_div(delta, tsc_ref_min);
+
+	/*
+	 * If both calibration results are inside a 5% window, then we
+	 * use the lower frequency of those as it is probably the
+	 * closest estimate.
+	 */
+	if (delta >= 95 && delta <= 105) {
+		printk(KERN_INFO "TSC: PIT calibration confirmed by %s.\n",
+		       hpet ? "HPET" : "PMTIMER");
+		printk(KERN_INFO "TSC: using %s calibration value\n",
+		       tsc_pit_min <= tsc_ref_min ? "PIT" :
+		       hpet ? "HPET" : "PMTIMER");
+		return tsc_pit_min <= tsc_ref_min ? tsc_pit_min : tsc_ref_min;
 	}
 
-	do_div(tsc2, tsc1);
-	tsc_khz_val = tsc2;
+	printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
+	       hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
 
-out:
-	return tsc_khz_val;
+	/*
+	 * The calibration values differ too much. In doubt, we use
+	 * the PIT value as we know that there are PMTIMERs around
+	 * running at double speed.
+	 */
+	printk(KERN_INFO "TSC: Using PIT calibration value\n");
+	return tsc_pit_min;
 }
 
-
 #ifdef CONFIG_X86_32
 /* Only called from the Powernow K7 cpu freq driver */
 int recalibrate_cpu_khz(void)
@@ -314,7 +450,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		mark_tsc_unstable("cpufreq changes");
 	}
 
-	set_cyc2ns_scale(tsc_khz_ref, freq->cpu);
+	set_cyc2ns_scale(tsc_khz, freq->cpu);
 
 	return 0;
 }
@@ -325,6 +461,10 @@ static struct notifier_block time_cpufreq_notifier_block = {
 
 static int __init cpufreq_tsc(void)
 {
+	if (!cpu_has_tsc)
+		return 0;
+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+		return 0;
 	cpufreq_register_notifier(&time_cpufreq_notifier_block,
 				  CPUFREQ_TRANSITION_NOTIFIER);
 	return 0;
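To make the selection logic at the end of native_calibrate_tsc() concrete, here is the same arithmetic pulled out into a small standalone sketch. It is illustrative only: it omits the kernel's "return 0 to disable the TSC" failure paths, and the sample frequencies are made up:

/*
 * Illustrative only -- mirrors the final cross-check of the patch above:
 * prefer the PIT result and use the HPET/PMTIMER reference to confirm it.
 */
#include <limits.h>
#include <stdio.h>

static unsigned long pick_tsc_khz(unsigned long pit_khz, unsigned long ref_khz)
{
	unsigned long delta;

	if (pit_khz == ULONG_MAX)	/* PIT calibration failed */
		return ref_khz;
	if (ref_khz == ULONG_MAX)	/* no usable HPET/PMTIMER reference */
		return pit_khz;

	/* PIT result as a percentage of the reference result */
	delta = pit_khz * 100 / ref_khz;

	/* Inside the 5% window: trust both, return the lower estimate */
	if (delta >= 95 && delta <= 105)
		return pit_khz <= ref_khz ? pit_khz : ref_khz;

	/* They disagree: prefer the PIT value, as the patch comment explains */
	return pit_khz;
}

int main(void)
{
	/* e.g. PIT says 2399820 kHz, the HPET reference says 2400115 kHz */
	printf("%lu kHz\n", pick_tsc_khz(2399820UL, 2400115UL));
	return 0;
}

With these sample values the two results agree to within 5% (delta computes to 99), so the lower of the two, the PIT value, is returned, matching the delta >= 95 && delta <= 105 window in the patch.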