author     Ingo Molnar <mingo@elte.hu>    2009-02-13 03:44:22 -0500
committer  Ingo Molnar <mingo@elte.hu>    2009-02-13 03:44:22 -0500
commit     f8a6b2b9cee298a9663cbe38ce1eb5240987cb62 (patch)
tree       b356490269c9e77d164dcc1477792b882fbb8bdb /arch/x86/kernel
parent     ba1511bf7fbda452138e4096bf10d5a382710f4f (diff)
parent     071a0bc2ceace31266836801510879407a3701fa (diff)
Merge branch 'linus' into x86/apic
Conflicts:
arch/x86/kernel/acpi/boot.c
arch/x86/mm/fault.c
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/sleep.c               |  4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig        | 11
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  | 28
-rw-r--r--  arch/x86/kernel/cpu/intel.c                |  3
-rw-r--r--  arch/x86/kernel/ftrace.c                   | 17
-rw-r--r--  arch/x86/kernel/hpet.c                     | 12
-rw-r--r--  arch/x86/kernel/i8237.c                    | 17
-rw-r--r--  arch/x86/kernel/process.c                  |  6
-rw-r--r--  arch/x86/kernel/traps.c                    | 15
-rw-r--r--  arch/x86/kernel/vmi_32.c                   | 11
10 files changed, 80 insertions, 44 deletions
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 4abff454c55b..7c243a2c5115 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -157,11 +157,11 @@ static int __init acpi_sleep_setup(char *str)
 #ifdef CONFIG_HIBERNATION
 		if (strncmp(str, "s4_nohwsig", 10) == 0)
 			acpi_no_s4_hw_signature();
+		if (strncmp(str, "s4_nonvs", 8) == 0)
+			acpi_s4_no_nvs();
 #endif
 		if (strncmp(str, "old_ordering", 12) == 0)
 			acpi_old_suspend_ordering();
-		if (strncmp(str, "s4_nonvs", 8) == 0)
-			acpi_s4_no_nvs();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
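
For context, this hunk sits inside acpi_sleep_setup()'s option walker, which scans a comma-separated acpi_sleep= string with strncmp()/strchr()/strspn(). A small user-space sketch of that pattern (the loop condition and the option list here are filled in as assumptions, not copied from the kernel):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "s4_nonvs,old_ordering, s3_bios";	/* hypothetical acpi_sleep= value */
	char *str = buf;

	while (str && *str) {
		if (strncmp(str, "s4_nonvs", 8) == 0)
			printf("option: s4_nonvs\n");
		if (strncmp(str, "old_ordering", 12) == 0)
			printf("option: old_ordering\n");
		/* advance to the text after the next ',' and skip blanks */
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}
	return 0;
}

Running it prints the two recognized options; tokens it does not check for, such as "s3_bios", are simply skipped.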
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
index efae3b22a0ff..65792c2cc462 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
@@ -245,17 +245,6 @@ config X86_E_POWERSAVER
 
 comment "shared options"
 
-config X86_ACPI_CPUFREQ_PROC_INTF
-	bool "/proc/acpi/processor/../performance interface (deprecated)"
-	depends on PROC_FS
-	depends on X86_ACPI_CPUFREQ || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
-	help
-	  This enables the deprecated /proc/acpi/processor/../performance
-	  interface. While it is helpful for debugging, the generic,
-	  cross-architecture cpufreq interfaces should be used.
-
-	  If in doubt, say N.
-
 config X86_SPEEDSTEP_LIB
 	tristate
 	default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 5c28b37dea11..fb039cd345d8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -939,10 +939,25 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
+static int get_transition_latency(struct powernow_k8_data *data)
+{
+	int max_latency = 0;
+	int i;
+	for (i = 0; i < data->acpi_data.state_count; i++) {
+		int cur_latency = data->acpi_data.states[i].transition_latency
+			+ data->acpi_data.states[i].bus_master_latency;
+		if (cur_latency > max_latency)
+			max_latency = cur_latency;
+	}
+	/* value in usecs, needs to be in nanoseconds */
+	return 1000 * max_latency;
+}
+
 #else
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
+static int get_transition_latency(struct powernow_k8_data *data) { return 0; }
 #endif /* CONFIG_X86_POWERNOW_K8_ACPI */
 
 /* Take a frequency, and issue the fid/vid transition command */
@@ -1173,7 +1188,13 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		if (rc) {
 			goto err_out;
 		}
-	}
+		/* Take a crude guess here.
+		 * That guess was in microseconds, so multiply with 1000 */
+		pol->cpuinfo.transition_latency = (
+			((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
+			((1 << data->irt) * 30)) * 1000;
+	} else /* ACPI _PSS objects available */
+		pol->cpuinfo.transition_latency = get_transition_latency(data);
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
@@ -1204,11 +1225,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
 	data->available_cores = pol->cpus;
 
-	/* Take a crude guess here.
-	 * That guess was in microseconds, so multiply with 1000 */
-	pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
-	    + (3 * (1 << data->irt) * 10)) * 1000;
-
 	if (cpu_family == CPU_HW_PSTATE)
 		pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
 	else
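
A standalone sketch of the two latency estimates above: the crude heuristic kept only as a fallback when just the BIOS PSB table is available, and the ACPI path that takes the worst-case _PSS latency. Field names mirror the patch, but the numeric inputs and the value of VST_UNITS_20US are assumptions for illustration:

#include <stdio.h>

#define VST_UNITS_20US 20	/* assumed: voltage stabilization time given in 20 us units */

int main(void)
{
	/* hypothetical PowerNow! parameters as read from the PSB table */
	unsigned int rvo = 2, vstable = 5, irt = 3;

	/* crude guess, computed in microseconds and scaled to nanoseconds */
	unsigned long guess_ns =
		(((rvo + 8) * vstable * VST_UNITS_20US) + ((1 << irt) * 30)) * 1000UL;

	/* ACPI path: worst-case _PSS transition + bus-master latency, in usecs */
	unsigned int pss_latency_us[] = { 100, 120, 90 };	/* hypothetical P-states */
	unsigned int max_us = 0;
	for (int i = 0; i < 3; i++)
		if (pss_latency_us[i] > max_us)
			max_us = pss_latency_us[i];

	printf("fallback guess: %lu ns, ACPI worst case: %u ns\n",
	       guess_ns, 1000u * max_us);
	return 0;
}

With these made-up numbers the guess comes out to 1,240,000 ns versus 120,000 ns from _PSS, which is the kind of gap that motivates preferring the ACPI data when it is present.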
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1cef0aa5e5dc..1f137a87d4bd 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -303,6 +303,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		ds_init_intel(c);
 	}
 
+	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
+
 #ifdef CONFIG_X86_64
 	if (c->x86 == 15)
 		c->x86_cache_alignment = c->x86_clflush_size * 2;
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 1b43086b097a..231bdd3c5b1c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -488,20 +488,21 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	 * ignore such a protection.
 	 */
 	asm volatile(
-		"1: " _ASM_MOV " (%[parent_old]), %[old]\n"
-		"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
+		"1: " _ASM_MOV " (%[parent]), %[old]\n"
+		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
 		" movl $0, %[faulted]\n"
+		"3:\n"
 
 		".section .fixup, \"ax\"\n"
-		"3: movl $1, %[faulted]\n"
+		"4: movl $1, %[faulted]\n"
+		" jmp 3b\n"
 		".previous\n"
 
-		_ASM_EXTABLE(1b, 3b)
-		_ASM_EXTABLE(2b, 3b)
+		_ASM_EXTABLE(1b, 4b)
+		_ASM_EXTABLE(2b, 4b)
 
-		: [parent_replaced] "=r" (parent), [old] "=r" (old),
-		  [faulted] "=r" (faulted)
-		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: [old] "=r" (old), [faulted] "=r" (faulted)
+		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
 		: "memory"
 	);
 
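
The rewritten asm passes the parent pointer as a plain "r" input, names every operand, and gives the success path its own "3:" label so the fixup can jump back to it instead of falling off the end. A minimal user-space sketch of the named-operand read-through/write-through pattern (no exception table here, so the fault-handling part is left out; this is not ftrace code):

#include <stdio.h>

int main(void)
{
	unsigned long val = 42, copy, slot = 0;
	unsigned long *p = &slot;

	asm volatile("mov (%[src]), %[dst]\n\t"	/* like reading the old return address */
		     "mov %[newval], (%[src])\n\t"	/* like installing return_hooker */
		     : [dst] "=&r" (copy)
		     : [src] "r" (p), [newval] "r" (val)
		     : "memory");

	printf("copy=%lu slot=%lu\n", copy, slot);	/* prints copy=0 slot=42 */
	return 0;
}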
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 64d5ad0b8add..388254f69a2a 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -897,7 +897,7 @@ static unsigned long hpet_rtc_flags;
 static int hpet_prev_update_sec;
 static struct rtc_time hpet_alarm_time;
 static unsigned long hpet_pie_count;
-static unsigned long hpet_t1_cmp;
+static u32 hpet_t1_cmp;
 static unsigned long hpet_default_delta;
 static unsigned long hpet_pie_delta;
 static unsigned long hpet_pie_limit;
@@ -905,6 +905,14 @@ static unsigned long hpet_pie_limit;
 static rtc_irq_handler irq_handler;
 
 /*
+ * Check that the hpet counter c1 is ahead of the c2
+ */
+static inline int hpet_cnt_ahead(u32 c1, u32 c2)
+{
+	return (s32)(c2 - c1) < 0;
+}
+
+/*
  * Registers a IRQ handler.
  */
 int hpet_register_irq_handler(rtc_irq_handler handler)
@@ -1075,7 +1083,7 @@ static void hpet_rtc_timer_reinit(void)
 		hpet_t1_cmp += delta;
 		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
 		lost_ints++;
-	} while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);
+	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));
 
 	if (lost_ints) {
 		if (hpet_rtc_flags & RTC_PIE)
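
The new helper makes the comparator check wrap-safe: both values are kept in 32 bits (hence hpet_t1_cmp becoming u32) and the sign of the 32-bit difference decides which one is ahead. A stand-alone sketch of that test, relying on two's-complement wraparound just as the kernel does:

#include <stdio.h>
#include <stdint.h>

static inline int cnt_ahead(uint32_t c1, uint32_t c2)
{
	return (int32_t)(c2 - c1) < 0;	/* negative difference => c1 is ahead of c2 */
}

int main(void)
{
	printf("%d\n", cnt_ahead(2000u, 1000u));		/* 1: plainly ahead */
	printf("%d\n", cnt_ahead(0x00000010u, 0xFFFFFFF0u));	/* 1: ahead across the wrap */
	printf("%d\n", cnt_ahead(1000u, 2000u));		/* 0: behind */
	return 0;
}

The point of the change is that the old (long)(counter - cmp) > 0 test is not reliable once the 32-bit HPET counter wraps past the comparator; the second line above exercises exactly that case.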
diff --git a/arch/x86/kernel/i8237.c b/arch/x86/kernel/i8237.c
index dbd6c1d1b638..b42ca694dc68 100644
--- a/arch/x86/kernel/i8237.c
+++ b/arch/x86/kernel/i8237.c
@@ -28,10 +28,10 @@ static int i8237A_resume(struct sys_device *dev)
 
 	flags = claim_dma_lock();
 
-	dma_outb(DMA1_RESET_REG, 0);
-	dma_outb(DMA2_RESET_REG, 0);
+	dma_outb(0, DMA1_RESET_REG);
+	dma_outb(0, DMA2_RESET_REG);
 
-	for (i = 0;i < 8;i++) {
+	for (i = 0; i < 8; i++) {
 		set_dma_addr(i, 0x000000);
 		/* DMA count is a bit weird so this is not 0 */
 		set_dma_count(i, 1);
@@ -51,14 +51,14 @@ static int i8237A_suspend(struct sys_device *dev, pm_message_t state)
 }
 
 static struct sysdev_class i8237_sysdev_class = {
-	.name = "i8237",
-	.suspend = i8237A_suspend,
-	.resume = i8237A_resume,
+	.name		= "i8237",
+	.suspend	= i8237A_suspend,
+	.resume		= i8237A_resume,
 };
 
 static struct sys_device device_i8237A = {
-	.id = 0,
-	.cls = &i8237_sysdev_class,
+	.id	= 0,
+	.cls	= &i8237_sysdev_class,
 };
 
 static int __init i8237A_init_sysfs(void)
@@ -68,5 +68,4 @@ static int __init i8237A_init_sysfs(void)
 	error = sysdev_register(&device_i8237A);
 	return error;
 }
-
 device_initcall(i8237A_init_sysfs);
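
The first hunk is an argument-order fix: dma_outb() on x86 follows the outb(value, port) convention, so the old calls wrote the register number out to port 0 instead of writing 0 to the reset registers. A tiny mock to make the convention visible (out_byte() and the port value are illustrative, not the kernel API):

#include <stdio.h>

#define DMA1_RESET_REG 0x0d	/* illustrative port number */

static void out_byte(unsigned char value, unsigned short port)
{
	printf("write 0x%02x to port 0x%04x\n", value, port);
}

int main(void)
{
	out_byte(DMA1_RESET_REG, 0);	/* old, buggy order: 0x0d goes to port 0x0000 */
	out_byte(0, DMA1_RESET_REG);	/* fixed order: 0x00 goes to the reset register */
	return 0;
}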
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 89537f678b2d..87b69d4fac16 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -180,6 +180,9 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 
 		trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
 		if (!need_resched()) {
+			if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+				clflush((void *)&current_thread_info()->flags);
+
 			__monitor((void *)&current_thread_info()->flags, 0, 0);
 			smp_mb();
 			if (!need_resched())
@@ -194,6 +197,9 @@ static void mwait_idle(void)
 	struct power_trace it;
 	if (!need_resched()) {
 		trace_power_start(&it, POWER_CSTATE, 1);
+		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 214bc327a0c3..0d032d2d8a18 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -895,7 +895,7 @@ asmlinkage void math_state_restore(void)
 EXPORT_SYMBOL_GPL(math_state_restore);
 
 #ifndef CONFIG_MATH_EMULATION
-asmlinkage void math_emulate(long arg)
+void math_emulate(struct math_emu_info *info)
 {
 	printk(KERN_EMERG
 		"math-emulation not enabled and no coprocessor found.\n");
@@ -905,16 +905,19 @@ asmlinkage void math_emulate(long arg)
 }
 #endif /* CONFIG_MATH_EMULATION */
 
-dotraplinkage void __kprobes
-do_device_not_available(struct pt_regs *regs, long error)
+dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs)
 {
 #ifdef CONFIG_X86_32
 	if (read_cr0() & X86_CR0_EM) {
-		conditional_sti(regs);
-		math_emulate(0);
+		struct math_emu_info info = { };
+
+		conditional_sti(&regs);
+
+		info.regs = &regs;
+		math_emulate(&info);
 	} else {
 		math_state_restore(); /* interrupts still off */
-		conditional_sti(regs);
+		conditional_sti(&regs);
 	}
 #else
 	math_state_restore();
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index eb9e7347928e..f052c84ecbe4 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -321,6 +321,16 @@ static void vmi_release_pmd(unsigned long pfn)
 }
 
 /*
+ * We use the pgd_free hook for releasing the pgd page:
+ */
+static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	unsigned long pfn = __pa(pgd) >> PAGE_SHIFT;
+
+	vmi_ops.release_page(pfn, VMI_PAGE_L2);
+}
+
+/*
  * Helper macros for MMU update flags. We can defer updates until a flush
  * or page invalidation only if the update is to the current address space
  * (otherwise, there is no flush). We must check against init_mm, since
@@ -763,6 +773,7 @@ static inline int __init activate_vmi(void)
 	if (vmi_ops.release_page) {
 		pv_mmu_ops.release_pte = vmi_release_pte;
 		pv_mmu_ops.release_pmd = vmi_release_pmd;
+		pv_mmu_ops.pgd_free = vmi_pgd_free;
 	}
 
 	/* Set linear is needed in all cases */