Diffstat (limited to 'arch/x86/kernel')

-rw-r--r--  arch/x86/kernel/apic_64.c                  |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longrun.c      |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  | 15
-rw-r--r--  arch/x86/kernel/init_task.c                |  1
-rw-r--r--  arch/x86/kernel/kvmclock.c                 |  4
-rw-r--r--  arch/x86/kernel/process.c                  | 36
-rw-r--r--  arch/x86/kernel/tsc_32.c                   | 25
-rw-r--r--  arch/x86/kernel/tsc_64.c                   |  5

8 files changed, 62 insertions, 28 deletions
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 5910020c3f24..0633cfd0dc29 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -534,7 +534,7 @@ int setup_profiling_timer(unsigned int multiplier)
  */
 void clear_local_APIC(void)
 {
-        int maxlvt = lapic_get_maxlvt();
+        int maxlvt;
         u32 v;
 
         /* APIC hasn't been mapped yet */
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index af4a867a097c..777a7ff075de 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -245,7 +245,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
         if ((ecx > 95) || (ecx == 0) || (eax < ebx))
                 return -EIO;
 
-        edx = (eax - ebx) / (100 - ecx);
+        edx = ((eax - ebx) * 100) / (100 - ecx);
         *low_freq = edx * 1000; /* back to kHz */
 
         dprintk("low frequency is %u kHz\n", *low_freq);
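Note on the longrun.c hunk: this is a pure integer-arithmetic fix. Dividing the frequency delta by a percentage (100 - ecx) only yields a frequency if the dividend is first scaled by 100; without that, the result is two orders of magnitude too small and integer division rounds most of it away. A minimal userspace sketch, with made-up register values since the hunk does not show how eax/ebx/ecx are obtained:

#include <stdio.h>

int main(void)
{
        /* hypothetical register values; not taken from the driver */
        unsigned int eax = 933, ebx = 300, ecx = 33;

        unsigned int old_mhz = (eax - ebx) / (100 - ecx);         /* buggy: 633/67 = 9 */
        unsigned int new_mhz = ((eax - ebx) * 100) / (100 - ecx); /* fixed: 63300/67 = 944 */

        printf("old: %u kHz, new: %u kHz\n", old_mhz * 1000, new_mhz * 1000);
        return 0;
}

With these illustrative inputs the buggy expression reports a 9 MHz low frequency where the corrected one reports 944 MHz.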
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 46d4034d9f37..206791eb46e3 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1127,12 +1127,23 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
          * an UP version, and is deprecated by AMD.
          */
         if (num_online_cpus() != 1) {
-                printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
+#ifndef CONFIG_ACPI_PROCESSOR
+                printk(KERN_ERR PFX "ACPI Processor support is required "
+                       "for SMP systems but is absent. Please load the "
+                       "ACPI Processor module before starting this "
+                       "driver.\n");
+#else
+                printk(KERN_ERR PFX "Your BIOS does not provide ACPI "
+                       "_PSS objects in a way that Linux understands. "
+                       "Please report this to the Linux ACPI maintainers"
+                       " and complain to your BIOS vendor.\n");
+#endif
                 kfree(data);
                 return -ENODEV;
         }
         if (pol->cpu != 0) {
-                printk(KERN_ERR PFX "No _PSS objects for CPU other than CPU0\n");
+                printk(KERN_ERR PFX "No ACPI _PSS objects for CPU other than "
+                       "CPU0. Complain to your BIOS vendor.\n");
                 kfree(data);
                 return -ENODEV;
         }
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3d01e47777db..a4f93b4120c1 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -11,7 +11,6 @@
 #include <asm/desc.h>
 
 static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 4bc1be5d5472..08a30986d472 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -53,7 +53,7 @@ static cycle_t kvm_clock_read(void);
  * have elapsed since the hypervisor wrote the data. So we try to account for
  * that with system time
  */
-unsigned long kvm_get_wallclock(void)
+static unsigned long kvm_get_wallclock(void)
 {
         u32 wc_sec, wc_nsec;
         u64 delta;
@@ -86,7 +86,7 @@ unsigned long kvm_get_wallclock(void)
         return ts.tv_sec + 1;
 }
 
-int kvm_set_wallclock(unsigned long now)
+static int kvm_set_wallclock(unsigned long now)
 {
         return 0;
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 67e9b4a1e89d..ba370dc8685b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -99,15 +99,6 @@ static void mwait_idle(void)
         local_irq_enable();
 }
 
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-        if (force_mwait)
-                return 1;
-        /* Any C1 states supported? */
-        return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
 /*
  * On SMP it's slightly faster (but much more power-consuming!)
  * to poll the ->work.need_resched flag instead of waiting for the
@@ -119,6 +110,33 @@ static void poll_idle(void)
         cpu_relax();
 }
 
+/*
+ * mwait selection logic:
+ *
+ * It depends on the CPU. For AMD CPUs that support MWAIT this is
+ * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
+ * then depend on a clock divisor and current Pstate of the core. If
+ * all cores of a processor are in halt state (C1) the processor can
+ * enter the C1E (C1 enhanced) state. If mwait is used this will never
+ * happen.
+ *
+ * idle=mwait overrides this decision and forces the usage of mwait.
+ */
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+        if (force_mwait)
+                return 1;
+
+        if (c->x86_vendor == X86_VENDOR_AMD) {
+                switch(c->x86) {
+                case 0x10:
+                case 0x11:
+                        return 0;
+                }
+        }
+        return 1;
+}
+
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
         static int selected;
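Note on the process.c hunks: the removed mwait_usable() keyed off CPUID leaf 5, which describes MONITOR/MWAIT; EDX bits [7:4] of that leaf report how many C1 sub-states MWAIT supports. The replacement keys off vendor and family instead, because on AMD family 0x10/0x11 parts MWAIT-based idle keeps cores out of HLT, so the package can never reach C1E. A userspace sketch of the CPUID query the old check performed (GCC/Clang on x86 assumed; this is an illustration, not kernel code):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* leaf 0 returns the highest supported standard leaf in EAX */
        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx) || eax < 5) {
                puts("CPUID leaf 5 not available");
                return 1;
        }

        /* leaf 5 = MONITOR/MWAIT; EDX[7:4] = number of C1 sub-states */
        __cpuid_count(5, 0, eax, ebx, ecx, edx);
        printf("MWAIT C1 sub-states: %u\n", (edx >> 4) & 0xf);
        return 0;
}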
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index e4790728b224..068759db63dd 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -14,7 +14,7 @@
 
 #include "mach_timer.h"
 
-static int tsc_enabled;
+static int tsc_disabled;
 
 /*
  * On some systems the TSC frequency does not
@@ -28,8 +28,8 @@ EXPORT_SYMBOL_GPL(tsc_khz);
 static int __init tsc_setup(char *str)
 {
         printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-                        "cannot disable TSC completely.\n");
-        mark_tsc_unstable("user disabled TSC");
+                "cannot disable TSC completely.\n");
+        tsc_disabled = 1;
         return 1;
 }
 #else
@@ -120,7 +120,7 @@ unsigned long long native_sched_clock(void)
          *   very important for it to be as fast as the platform
          *   can achive it. )
          */
-        if (unlikely(!tsc_enabled && !tsc_unstable))
+        if (unlikely(tsc_disabled))
                 /* No locking but a rare wrong value is not a big deal: */
                 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
@@ -322,7 +322,6 @@ void mark_tsc_unstable(char *reason)
 {
         if (!tsc_unstable) {
                 tsc_unstable = 1;
-                tsc_enabled = 0;
                 printk("Marking TSC unstable due to: %s.\n", reason);
                 /* Can be called before registration */
                 if (clocksource_tsc.mult)
@@ -336,7 +335,7 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
 {
         printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
-                        d->ident);
+                d->ident);
         tsc_unstable = 1;
         return 0;
 }
@@ -403,14 +402,22 @@ void __init tsc_init(void)
 {
         int cpu;
 
-        if (!cpu_has_tsc)
+        if (!cpu_has_tsc || tsc_disabled) {
+                /* Disable the TSC in case of !cpu_has_tsc */
+                tsc_disabled = 1;
                 return;
+        }
 
         cpu_khz = calculate_cpu_khz();
         tsc_khz = cpu_khz;
 
         if (!cpu_khz) {
                 mark_tsc_unstable("could not calculate TSC khz");
+                /*
+                 * We need to disable the TSC completely in this case
+                 * to prevent sched_clock() from using it.
+                 */
+                tsc_disabled = 1;
                 return;
         }
 
@@ -441,8 +448,6 @@ void __init tsc_init(void)
         if (check_tsc_unstable()) {
                 clocksource_tsc.rating = 0;
                 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-        } else
-                tsc_enabled = 1;
-
+        }
         clocksource_register(&clocksource_tsc);
 }
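Note on the tsc_32.c hunks: once tsc_disabled is set, native_sched_clock() derives nanoseconds from the tick counter instead of the TSC, so scheduler time only advances at tick resolution. A sketch of that fallback arithmetic, straight from the changed line, with an assumed HZ value:

#include <stdio.h>

#define HZ 250  /* assumed tick rate; the real value is a kernel config choice */

int main(void)
{
        /* ticks elapsed since boot, i.e. jiffies_64 - INITIAL_JIFFIES */
        unsigned long long ticks = 1000;

        /* same arithmetic as the fallback: ns per tick = 1000000000 / HZ */
        unsigned long long ns = ticks * (1000000000ULL / HZ);

        printf("%llu ns\n", ns);  /* 1000 ticks at 250 Hz -> 4000000000 ns */
        return 0;
}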
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index fcc16e58609e..1784b8077a12 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -227,14 +227,14 @@ void __init tsc_calibrate(void)
         /* hpet or pmtimer available ? */
         if (!hpet && !pm1 && !pm2) {
                 printk(KERN_INFO "TSC calibrated against PIT\n");
-                return;
+                goto out;
         }
 
         /* Check, whether the sampling was disturbed by an SMI */
         if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) {
                 printk(KERN_WARNING "TSC calibration disturbed by SMI, "
-                        "using PIT calibration result\n");
-                return;
+                       "using PIT calibration result\n");
+                goto out;
         }
 
         tsc2 = (tsc2 - tsc1) * 1000000L;
@@ -255,6 +255,7 @@ void __init tsc_calibrate(void)
 
         tsc_khz = tsc2 / tsc1;
 
+out:
         for_each_possible_cpu(cpu)
                 set_cyc2ns_scale(tsc_khz, cpu);
 }
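Note on the tsc_64.c hunks: replacing the early returns with goto out guarantees that set_cyc2ns_scale() runs on every exit path, so the cycles-to-nanoseconds scaling used by sched_clock() is always initialized from whatever tsc_khz value calibration settled on. set_cyc2ns_scale() itself is not part of this diff; as background, it precomputes a fixed-point scale of roughly this shape (the shift width and frequency below are illustrative assumptions, not copied from the source):

#include <stdio.h>

#define SCALE_SHIFT   10          /* assumed fixed-point fraction bits */
#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
        unsigned long long tsc_khz = 2000000;   /* assumed 2 GHz TSC */

        /* precomputed per-CPU scale: ns = (cycles * scale) >> SCALE_SHIFT */
        unsigned long long scale = (NSEC_PER_MSEC << SCALE_SHIFT) / tsc_khz;
        unsigned long long cycles = 2000000000ULL;   /* one second at 2 GHz */
        unsigned long long ns = (cycles * scale) >> SCALE_SHIFT;

        printf("scale=%llu ns=%llu\n", scale, ns);   /* scale=512 ns=1000000000 */
        return 0;
}

The multiply-and-shift form avoids a division on every sched_clock() call, which is why the scale must be set up even when calibration bails out early.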