Diffstat (limited to 'arch/x86/kernel')
38 files changed, 339 insertions, 201 deletions
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d37593c2f438..7678f10c4568 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -973,6 +973,29 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
 	nr_ioapics++;
 }
 
+int __init acpi_probe_gsi(void)
+{
+	int idx;
+	int gsi;
+	int max_gsi = 0;
+
+	if (acpi_disabled)
+		return 0;
+
+	if (!acpi_ioapic)
+		return 0;
+
+	max_gsi = 0;
+	for (idx = 0; idx < nr_ioapics; idx++) {
+		gsi = mp_ioapic_routing[idx].gsi_end;
+
+		if (gsi > max_gsi)
+			max_gsi = gsi;
+	}
+
+	return max_gsi + 1;
+}
+
 static void assign_to_mp_irq(struct mp_config_intsrc *m,
 			     struct mp_config_intsrc *mp_irq)
 {
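Aside (not part of the commit): acpi_probe_gsi() scans each I/O APIC's highest GSI number and returns the maximum plus one — GSIs are numbered from zero, so "highest + 1" is the total count. A standalone C sketch of that scan; the routing-table values below are invented:

#include <stdio.h>

/* stand-in for the kernel's mp_ioapic_routing[]; gsi_end values are made up */
static struct { int gsi_end; } mp_ioapic_routing[] = { { 23 }, { 47 }, { 39 } };

int main(void)
{
	int idx, gsi, max_gsi = 0;

	for (idx = 0; idx < 3; idx++) {
		gsi = mp_ioapic_routing[idx].gsi_end;
		if (gsi > max_gsi)
			max_gsi = gsi;
	}
	printf("GSI count: %d\n", max_gsi + 1);	/* highest is 47, so 48 GSIs (0..47) */
	return 0;
}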
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 707c1f6f95fa..a60c1f3bcb87 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -156,11 +156,11 @@ static int __init acpi_sleep_setup(char *str)
 #ifdef CONFIG_HIBERNATION
 		if (strncmp(str, "s4_nohwsig", 10) == 0)
 			acpi_no_s4_hw_signature();
+		if (strncmp(str, "s4_nonvs", 8) == 0)
+			acpi_s4_no_nvs();
 #endif
 		if (strncmp(str, "old_ordering", 12) == 0)
 			acpi_old_suspend_ordering();
-		if (strncmp(str, "s4_nonvs", 8) == 0)
-			acpi_s4_no_nvs();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index bcc293423a70..96258d9dc974 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -13,7 +13,6 @@
  * Hooray, we are in Long 64-bit mode (but still running in low memory)
  */
 ENTRY(wakeup_long64)
-wakeup_long64:
 	movq	saved_magic, %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
@@ -34,16 +33,12 @@ wakeup_long64:
 
 	movq	saved_rip, %rax
 	jmp	*%rax
+ENDPROC(wakeup_long64)
 
 bogus_64_magic:
 	jmp	bogus_64_magic
 
-.align 2
-.p2align 4,,15
-.globl do_suspend_lowlevel
-.type	do_suspend_lowlevel,@function
-do_suspend_lowlevel:
-.LFB5:
+ENTRY(do_suspend_lowlevel)
 	subq	$8, %rsp
 	xorl	%eax, %eax
 	call	save_processor_state
@@ -67,7 +62,7 @@ do_suspend_lowlevel:
 	pushfq
 	popq	pt_regs_flags(%rax)
 
-	movq	$.L97, saved_rip(%rip)
+	movq	$resume_point, saved_rip(%rip)
 
 	movq	%rsp, saved_rsp
 	movq	%rbp, saved_rbp
@@ -78,14 +73,12 @@ do_suspend_lowlevel:
 	addq	$8, %rsp
 	movl	$3, %edi
 	xorl	%eax, %eax
-	jmp	acpi_enter_sleep_state
-.L97:
-	.p2align 4,,7
-.L99:
-	.align 4
-	movl	$24, %eax
-	movw	%ax, %ds
+	call	acpi_enter_sleep_state
+	/* in case something went wrong, restore the machine status and go on */
+	jmp	resume_point
 
+	.align 4
+resume_point:
 	/* We don't restore %rax, it must be 0 anyway */
 	movq	$saved_context, %rax
 	movq	saved_context_cr4(%rax), %rbx
@@ -117,12 +110,9 @@ do_suspend_lowlevel:
 	xorl	%eax, %eax
 	addq	$8, %rsp
 	jmp	restore_processor_state
-.LFE5:
-.Lfe5:
-	.size	do_suspend_lowlevel, .Lfe5-do_suspend_lowlevel
-
+ENDPROC(do_suspend_lowlevel)
+
 	.data
-ALIGN
 ENTRY(saved_rbp)	.quad	0
 ENTRY(saved_rsi)	.quad	0
 ENTRY(saved_rdi)	.quad	0
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 4b6df2469fe3..570f36e44e59 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -862,7 +862,7 @@ void clear_local_APIC(void)
 	}
 
 	/* lets not touch this if we didn't frob it */
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(X86_MCE_INTEL)
+#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
 	if (maxlvt >= 5) {
 		v = apic_read(APIC_LVTTHMR);
 		apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
@@ -1436,7 +1436,7 @@ static int __init detect_init_APIC(void)
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
 		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
-		    (boot_cpu_data.x86 == 15))
+		    (boot_cpu_data.x86 >= 15))
 			break;
 		goto no_apic;
 	case X86_VENDOR_INTEL:
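Aside (not part of the commit): the first hunk fixes a silent no-op — the preprocessor treats an unknown identifier in #if defined(...) as simply undefined, so the misspelled X86_MCE_INTEL (missing its CONFIG_ prefix) could never match what Kconfig defines, and the thermal-LVT masking was skipped without any diagnostic. A minimal demonstration:

#include <stdio.h>

#define CONFIG_X86_MCE_INTEL 1	/* what Kconfig would define */

int main(void)
{
#if defined(X86_MCE_INTEL)		/* old, misspelled test: never true */
	puts("old test fires");
#endif
#if defined(CONFIG_X86_MCE_INTEL)	/* corrected test */
	puts("fixed test fires");
#endif
	return 0;
}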
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 98807bb095ad..266ec6c18b6c 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1192,6 +1192,7 @@ static int suspend(int vetoable)
 	device_suspend(PMSG_SUSPEND);
 	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
+	sysdev_suspend(PMSG_SUSPEND);
 
 	local_irq_enable();
 
@@ -1208,6 +1209,7 @@ static int suspend(int vetoable)
 	if (err != APM_SUCCESS)
 		apm_error("suspend", err);
 	err = (err == APM_SUCCESS) ? 0 : -EIO;
+	sysdev_resume();
 	device_power_up(PMSG_RESUME);
 	local_irq_enable();
 	device_resume(PMSG_RESUME);
@@ -1228,6 +1230,7 @@ static void standby(void)
 
 	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
+	sysdev_suspend(PMSG_SUSPEND);
 	local_irq_enable();
 
 	err = set_system_power_state(APM_STATE_STANDBY);
@@ -1235,6 +1238,7 @@ static void standby(void)
 		apm_error("standby", err);
 
 	local_irq_disable();
+	sysdev_resume();
 	device_power_up(PMSG_RESUME);
 	local_irq_enable();
 }
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
index efae3b22a0ff..65792c2cc462 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
@@ -245,17 +245,6 @@ config X86_E_POWERSAVER
 
 comment "shared options"
 
-config X86_ACPI_CPUFREQ_PROC_INTF
-	bool "/proc/acpi/processor/../performance interface (deprecated)"
-	depends on PROC_FS
-	depends on X86_ACPI_CPUFREQ || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
-	help
-	  This enables the deprecated /proc/acpi/processor/../performance
-	  interface. While it is helpful for debugging, the generic,
-	  cross-architecture cpufreq interfaces should be used.
-
-	  If in doubt, say N.
-
 config X86_SPEEDSTEP_LIB
 	tristate
 	default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index b585e04cbc9e..3178c3acd97e 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -277,7 +277,6 @@ static struct cpufreq_driver p4clockmod_driver = {
 	.name		= "p4-clockmod",
 	.owner		= THIS_MODULE,
 	.attr		= p4clockmod_attr,
-	.hide_interface	= 1,
 };
 
 
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 5c28b37dea11..6428aa17b40e 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -939,10 +939,25 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
+static int get_transition_latency(struct powernow_k8_data *data)
+{
+	int max_latency = 0;
+	int i;
+	for (i = 0; i < data->acpi_data.state_count; i++) {
+		int cur_latency = data->acpi_data.states[i].transition_latency
+			+ data->acpi_data.states[i].bus_master_latency;
+		if (cur_latency > max_latency)
+			max_latency = cur_latency;
+	}
+	/* value in usecs, needs to be in nanoseconds */
+	return 1000 * max_latency;
+}
+
 #else
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
+static int get_transition_latency(struct powernow_k8_data *data) { return 0; }
 #endif /* CONFIG_X86_POWERNOW_K8_ACPI */
 
 /* Take a frequency, and issue the fid/vid transition command */
@@ -1142,8 +1157,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	data->cpu = pol->cpu;
 	data->currpstate = HW_PSTATE_INVALID;
 
-	rc = powernow_k8_cpu_init_acpi(data);
-	if (rc) {
+	if (powernow_k8_cpu_init_acpi(data)) {
 		/*
 		 * Use the PSB BIOS structure. This is only availabe on
 		 * an UP version, and is deprecated by AMD.
@@ -1161,19 +1175,28 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 				"ACPI maintainers and complain to your BIOS "
 				"vendor.\n");
 #endif
-			goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
 		if (pol->cpu != 0) {
 			printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
 				"CPU other than CPU0. Complain to your BIOS "
 				"vendor.\n");
-			goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
 		rc = find_psb_table(data);
 		if (rc) {
-			goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
-	}
+		/* Take a crude guess here.
+		 * That guess was in microseconds, so multiply with 1000 */
+		pol->cpuinfo.transition_latency = (
+			 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
+			 ((1 << data->irt) * 30)) * 1000;
+	} else /* ACPI _PSS objects available */
+		pol->cpuinfo.transition_latency = get_transition_latency(data);
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
@@ -1204,11 +1227,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
 	data->available_cores = pol->cpus;
 
-	/* Take a crude guess here.
-	 * That guess was in microseconds, so multiply with 1000 */
-	pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
-		+ (3 * (1 << data->irt) * 10)) * 1000;
-
 	if (cpu_family == CPU_HW_PSTATE)
 		pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
 	else
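Aside (not part of the commit): get_transition_latency() takes the worst case of transition_latency + bus_master_latency over all ACPI _PSS states and converts microseconds to the nanoseconds that cpufreq's transition_latency field expects. A userspace sketch of the same computation, with invented per-state values:

#include <stdio.h>

/* invented _PSS-style data: latencies in microseconds */
struct pstate { int transition_latency; int bus_master_latency; };

int main(void)
{
	struct pstate states[] = { { 100, 30 }, { 200, 57 }, { 150, 10 } };
	int i, max_latency = 0;

	for (i = 0; i < 3; i++) {
		int cur = states[i].transition_latency +
			  states[i].bus_master_latency;
		if (cur > max_latency)
			max_latency = cur;
	}
	/* worst case is 257 us; cpufreq wants nanoseconds */
	printf("transition_latency = %d ns\n", 1000 * max_latency);
	return 0;
}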
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 549f2ada55f5..24ff26a38ade 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -30,7 +30,7 @@
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
 	/* Unmask CPUID levels if masked: */
-	if (c->x86 == 6 && c->x86_model >= 15) {
+	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
 		u64 misc_enable;
 
 		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
@@ -291,6 +291,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		ds_init_intel(c);
 	}
 
+	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
+
 #ifdef CONFIG_X86_64
 	if (c->x86 == 15)
 		c->x86_cache_alignment = c->x86_clflush_size * 2;
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 48533d77be78..da299eb85fc0 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -36,8 +36,11 @@ static struct _cache_table cache_table[] __cpuinitdata =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
 	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
@@ -85,6 +88,18 @@ static struct _cache_table cache_table[] __cpuinitdata =
 	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd1, LVL_3, 1024 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd2, LVL_3, 2048 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd6, LVL_3, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd7, LVL_3, 2038 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd8, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdc, LVL_3, 2048 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdd, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xde, LVL_3, 8192 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xe2, LVL_3, 2048 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe3, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe4, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
 	{ 0x00, 0, 0}
 };
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 1c838032fd37..fe79985ce0f2 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -295,11 +295,11 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 		 * If we know that the error was in user space, send a
 		 * SIGBUS. Otherwise, panic if tolerance is low.
 		 *
-		 * do_exit() takes an awful lot of locks and has a slight
+		 * force_sig() takes an awful lot of locks and has a slight
 		 * risk of deadlocking.
 		 */
 		if (user_space) {
-			do_exit(SIGBUS);
+			force_sig(SIGBUS, current);
 		} else if (panic_on_oops || tolerant < 2) {
 			mce_panic("Uncorrected machine check",
 				&panicm, mcestart);
@@ -490,7 +490,7 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
 
 }
 
-static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
+static void mce_cpu_features(struct cpuinfo_x86 *c)
 {
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
@@ -734,6 +734,7 @@ __setup("mce=", mcheck_enable);
 static int mce_resume(struct sys_device *dev)
 {
 	mce_init(NULL);
+	mce_cpu_features(&current_cpu_data);
 	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 8ae8c4ff094d..f2ee0ae29bd6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -121,7 +121,7 @@ static long threshold_restart_bank(void *_tr)
 }
 
 /* cpu init entry point, called from mce.c with preempt off */
-void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
+void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
 	unsigned int bank, block;
 	unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index 4b48f251fd39..f44c36624360 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -30,7 +30,7 @@ asmlinkage void smp_thermal_interrupt(void)
 	irq_exit();
 }
 
-static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
+static void intel_init_thermal(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int tm2 = 0;
@@ -84,7 +84,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
 	return;
 }
 
-void __cpuinit mce_intel_feature_init(struct cpuinfo_x86 *c)
+void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
 	intel_init_thermal(c);
 }
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index d259e5d2e054..236a401b8259 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -1594,8 +1594,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
 	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
 	if (!highest_pfn) {
-		WARN(!kvm_para_available(), KERN_WARNING
-				"WARNING: strange, CPU MTRRs all blank?\n");
+		printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
 		return 0;
 	}
 
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index da91701a2348..87b67e3a765a 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -15,8 +15,8 @@
  * - buffer allocation (memory accounting)
  *
  *
- * Copyright (C) 2007-2008 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
+ * Copyright (C) 2007-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
 */
 
 
@@ -729,7 +729,7 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
 
 	spin_unlock_irqrestore(&ds_lock, irq);
 
-	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
+	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
 	ds_resume_pebs(tracer);
 
 	return tracer;
@@ -890,7 +890,7 @@ int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
 }
 
 static const struct ds_configuration ds_cfg_netburst = {
-	.name = "netburst",
+	.name = "Netburst",
 	.ctl[dsf_bts]		= (1 << 2) | (1 << 3),
 	.ctl[dsf_bts_kernel]	= (1 << 5),
 	.ctl[dsf_bts_user]	= (1 << 6),
@@ -904,7 +904,7 @@ static const struct ds_configuration ds_cfg_netburst = {
 #endif
 };
 static const struct ds_configuration ds_cfg_pentium_m = {
-	.name = "pentium m",
+	.name = "Pentium M",
 	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
 
 	.sizeof_field		= sizeof(long),
@@ -915,8 +915,8 @@ static const struct ds_configuration ds_cfg_pentium_m = {
 	.sizeof_rec[ds_pebs]	= sizeof(long) * 18,
 #endif
 };
-static const struct ds_configuration ds_cfg_core2 = {
-	.name = "core 2",
+static const struct ds_configuration ds_cfg_core2_atom = {
+	.name = "Core 2/Atom",
 	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
 	.ctl[dsf_bts_kernel]	= (1 << 9),
 	.ctl[dsf_bts_user]	= (1 << 10),
@@ -949,19 +949,22 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
 	switch (c->x86) {
 	case 0x6:
 		switch (c->x86_model) {
-		case 0 ... 0xC:
-			/* sorry, don't know about them */
-			break;
-		case 0xD:
-		case 0xE: /* Pentium M */
+		case 0x9:
+		case 0xd: /* Pentium M */
 			ds_configure(&ds_cfg_pentium_m);
 			break;
-		default: /* Core2, Atom, ... */
-			ds_configure(&ds_cfg_core2);
+		case 0xf:
+		case 0x17: /* Core2 */
+		case 0x1c: /* Atom */
+			ds_configure(&ds_cfg_core2_atom);
+			break;
+		case 0x1a: /* i7 */
+		default:
+			/* sorry, don't know about them */
 			break;
 		}
 		break;
-	case 0xF:
+	case 0xf:
 		switch (c->x86_model) {
 		case 0x0:
 		case 0x1:
@@ -1026,5 +1029,4 @@ void ds_copy_thread(struct task_struct *tsk, struct task_struct *father)
 
 void ds_exit_thread(struct task_struct *tsk)
 {
-	WARN_ON(tsk->thread.ds_ctx);
 }
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 1119d247fe11..eb1ef3b67dd5 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -467,7 +467,7 @@ void __init efi_enter_virtual_mode(void)
 	efi_memory_desc_t *md;
 	efi_status_t status;
 	unsigned long size;
-	u64 end, systab, addr, npages;
+	u64 end, systab, addr, npages, end_pfn;
 	void *p, *va;
 
 	efi.systab = NULL;
@@ -479,7 +479,10 @@ void __init efi_enter_virtual_mode(void)
 		size = md->num_pages << EFI_PAGE_SHIFT;
 		end = md->phys_addr + size;
 
-		if (PFN_UP(end) <= max_low_pfn_mapped)
+		end_pfn = PFN_UP(end);
+		if (end_pfn <= max_low_pfn_mapped
+		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
+			&& end_pfn <= max_pfn_mapped))
 			va = __va(md->phys_addr);
 		else
 			va = efi_ioremap(md->phys_addr, size);
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index 652c5287215f..cb783b92c50c 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -99,24 +99,11 @@ void __init efi_call_phys_epilog(void)
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
-	static unsigned pages_mapped __initdata;
-	unsigned i, pages;
-	unsigned long offset;
+	unsigned long last_map_pfn;
 
-	pages = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr);
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-
-	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
+	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
 		return NULL;
 
-	for (i = 0; i < pages; i++) {
-		__set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-			     phys_addr, PAGE_KERNEL);
-		phys_addr += PAGE_SIZE;
-		pages_mapped++;
-	}
-
-	return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
-					     (pages_mapped - pages)) + offset;
+	return (void __iomem *)__va(phys_addr);
 }
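Aside (not part of the commit): the rewritten efi_ioremap() only has to check that init_memory_mapping() covered the whole requested range — the returned last-mapped PFN, shifted back to a byte address, must reach phys_addr + size. A sketch of that check; fake_init_memory_mapping() below is an invented stand-in, not the kernel function:

#include <stdio.h>

#define PAGE_SHIFT 12

/* invented stand-in: pretend the whole range up to `end` got mapped
 * and return the first pfn past the mapping */
static unsigned long fake_init_memory_mapping(unsigned long start, unsigned long end)
{
	(void)start;
	return (end + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long phys = 0x12345678, size = 0x2000;
	unsigned long last_pfn = fake_init_memory_mapping(phys, phys + size);

	if ((last_pfn << PAGE_SHIFT) < phys + size)
		puts("mapping fell short - would return NULL");
	else
		puts("range fully mapped - safe to hand out __va(phys)");
	return 0;
}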
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e28c7a987793..a1346217e43c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -346,6 +346,7 @@ ENTRY(save_args)
 	popq_cfi %rax			/* move return address... */
 	mov %gs:pda_irqstackptr,%rsp
 	EMPTY_FRAME 0
+	pushq_cfi %rbp			/* backlink for unwinder */
 	pushq_cfi %rax			/* ... to the new stack */
 	/*
 	 * We entered an interrupt context - irqs are off:
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 1b43086b097a..231bdd3c5b1c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -488,20 +488,21 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	 * ignore such a protection.
 	 */
 	asm volatile(
-		"1: " _ASM_MOV " (%[parent_old]), %[old]\n"
-		"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
+		"1: " _ASM_MOV " (%[parent]), %[old]\n"
+		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
 		"   movl $0, %[faulted]\n"
+		"3:\n"
 
 		".section .fixup, \"ax\"\n"
-		"3: movl $1, %[faulted]\n"
+		"4: movl $1, %[faulted]\n"
+		"   jmp 3b\n"
 		".previous\n"
 
-		_ASM_EXTABLE(1b, 3b)
-		_ASM_EXTABLE(2b, 3b)
+		_ASM_EXTABLE(1b, 4b)
+		_ASM_EXTABLE(2b, 4b)
 
-		: [parent_replaced] "=r" (parent), [old] "=r" (old),
-		  [faulted] "=r" (faulted)
-		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: [old] "=r" (old), [faulted] "=r" (faulted)
+		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
 		: "memory"
 	);
 
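Aside (not part of the commit): the hunk adds a normal-path landing label ("3:") and makes the fault fixup at "4:" jump back to it, so the faulting and non-faulting paths leave the asm through one exit with %[faulted] set either way. A rough userspace analogue of that probe-with-fixup idea, using sigsetjmp() where the kernel uses its exception table — longjmp-ing out of a SIGSEGV handler is not portable C, so this is strictly a demo:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf fixup;

static void on_segv(int sig)
{
	(void)sig;
	siglongjmp(fixup, 1);		/* like the "4:" fixup */
}

static int probe_write(unsigned long *p, unsigned long val)
{
	int faulted = 0;

	if (sigsetjmp(fixup, 1) == 0)
		*p = val;		/* like "1:"/"2:" - may fault */
	else
		faulted = 1;		/* rejoins the normal exit, like "3:" */
	return faulted;
}

int main(void)
{
	unsigned long ok = 0;

	signal(SIGSEGV, on_segv);
	printf("good pointer: faulted=%d\n", probe_write(&ok, 42));
	printf("bad pointer:  faulted=%d\n",
	       probe_write((unsigned long *)16, 42));
	return 0;
}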
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 64d5ad0b8add..a00545fe5cdd 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -269,6 +269,8 @@ static void hpet_set_mode(enum clock_event_mode mode,
 		now = hpet_readl(HPET_COUNTER);
 		cmp = now + (unsigned long) delta;
 		cfg = hpet_readl(HPET_Tn_CFG(timer));
+		/* Make sure we use edge triggered interrupts */
+		cfg &= ~HPET_TN_LEVEL;
 		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
 		       HPET_TN_SETVAL | HPET_TN_32BIT;
 		hpet_writel(cfg, HPET_Tn_CFG(timer));
@@ -897,7 +899,7 @@ static unsigned long hpet_rtc_flags;
 static int hpet_prev_update_sec;
 static struct rtc_time hpet_alarm_time;
 static unsigned long hpet_pie_count;
-static unsigned long hpet_t1_cmp;
+static u32 hpet_t1_cmp;
 static unsigned long hpet_default_delta;
 static unsigned long hpet_pie_delta;
 static unsigned long hpet_pie_limit;
@@ -905,6 +907,14 @@ static unsigned long hpet_pie_limit;
 static rtc_irq_handler irq_handler;
 
 /*
+ * Check that the hpet counter c1 is ahead of the c2
+ */
+static inline int hpet_cnt_ahead(u32 c1, u32 c2)
+{
+	return (s32)(c2 - c1) < 0;
+}
+
+/*
 * Registers a IRQ handler.
 */
 int hpet_register_irq_handler(rtc_irq_handler handler)
@@ -1075,7 +1085,7 @@ static void hpet_rtc_timer_reinit(void)
 		hpet_t1_cmp += delta;
 		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
 		lost_ints++;
-	} while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);
+	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));
 
 	if (lost_ints) {
 		if (hpet_rtc_flags & RTC_PIE)
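Aside (not part of the commit): hpet_cnt_ahead() exists because the HPET comparator and counter are 32-bit and wrap; a plain compare gives the wrong answer across a wrap, while the signed difference of the two 32-bit values stays correct as long as they are less than 2^31 apart. A small demonstration with made-up counter values:

#include <stdio.h>
#include <stdint.h>

/* the helper the patch adds: is c1 ahead of c2, modulo 2^32? */
static int hpet_cnt_ahead(uint32_t c1, uint32_t c2)
{
	return (int32_t)(c2 - c1) < 0;
}

int main(void)
{
	uint32_t cmp = 0xfffffff0;	/* comparator armed just before the wrap */
	uint32_t now = 0x00000010;	/* counter read just after the wrap */

	/* an unsigned compare claims the comparator is still ahead */
	printf("cmp > now:           %d  (wrong)\n", cmp > now);
	/* the signed difference sees that `now` already passed it */
	printf("hpet_cnt_ahead(...): %d  (right)\n", hpet_cnt_ahead(cmp, now));
	return 0;
}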
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index b0f61f0dcd0a..f2f8540a7f3d 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -136,7 +136,7 @@ int init_fpu(struct task_struct *tsk)
 #ifdef CONFIG_X86_32
 	if (!HAVE_HWFP) {
 		memset(tsk->thread.xstate, 0, xstate_size);
-		finit();
+		finit_task(tsk);
 		set_stopped_child_used_math(tsk);
 		return 0;
 	}
diff --git a/arch/x86/kernel/i8237.c b/arch/x86/kernel/i8237.c
index dbd6c1d1b638..b42ca694dc68 100644
--- a/arch/x86/kernel/i8237.c
+++ b/arch/x86/kernel/i8237.c
@@ -28,10 +28,10 @@ static int i8237A_resume(struct sys_device *dev)
 
 	flags = claim_dma_lock();
 
-	dma_outb(DMA1_RESET_REG, 0);
-	dma_outb(DMA2_RESET_REG, 0);
+	dma_outb(0, DMA1_RESET_REG);
+	dma_outb(0, DMA2_RESET_REG);
 
-	for (i = 0;i < 8;i++) {
+	for (i = 0; i < 8; i++) {
 		set_dma_addr(i, 0x000000);
 		/* DMA count is a bit weird so this is not 0 */
 		set_dma_count(i, 1);
@@ -51,14 +51,14 @@ static int i8237A_suspend(struct sys_device *dev, pm_message_t state)
 }
 
 static struct sysdev_class i8237_sysdev_class = {
-	.name = "i8237",
-	.suspend = i8237A_suspend,
-	.resume = i8237A_resume,
+	.name		= "i8237",
+	.suspend	= i8237A_suspend,
+	.resume		= i8237A_resume,
 };
 
 static struct sys_device device_i8237A = {
-	.id = 0,
-	.cls = &i8237_sysdev_class,
+	.id	= 0,
+	.cls	= &i8237_sysdev_class,
 };
 
 static int __init i8237A_init_sysfs(void)
@@ -68,5 +68,4 @@ static int __init i8237A_init_sysfs(void)
 	error = sysdev_register(&device_i8237A);
 	return error;
 }
-
 device_initcall(i8237A_init_sysfs);
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 1c4a1302536c..bc7ac4da90d7 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -2528,14 +2528,15 @@ static void irq_complete_move(struct irq_desc **descp)
 
 	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
+
+	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
 		*descp = desc = move_irq_desc(desc, me);
 		/* get the new one */
 		cfg = desc->chip_data;
 #endif
-
-	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 		send_cleanup_vector(cfg);
+	}
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
@@ -3840,14 +3841,24 @@ int __init io_apic_get_redir_entries (int ioapic)
 
 void __init probe_nr_irqs_gsi(void)
 {
-	int idx;
 	int nr = 0;
 
-	for (idx = 0; idx < nr_ioapics; idx++)
-		nr += io_apic_get_redir_entries(idx) + 1;
-
-	if (nr > nr_irqs_gsi)
+	nr = acpi_probe_gsi();
+	if (nr > nr_irqs_gsi) {
 		nr_irqs_gsi = nr;
+	} else {
+		/* for acpi=off or acpi is not compiled in */
+		int idx;
+
+		nr = 0;
+		for (idx = 0; idx < nr_ioapics; idx++)
+			nr += io_apic_get_redir_entries(idx) + 1;
+
+		if (nr > nr_irqs_gsi)
+			nr_irqs_gsi = nr;
+	}
+
+	printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
 }
 
 /* --------------------------------------------------------------------------
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 1507ad4e674d..10a09c2f1828 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
 	}
 }
 
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.mask = CPU_MASK_NONE,
-	.name = "cascade",
-};
-
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
 	[0 ... IRQ0_VECTOR - 1] = -1,
 	[IRQ0_VECTOR] = 0,
@@ -178,9 +169,6 @@ void __init native_init_IRQ(void)
 	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 
-	if (!acpi_ioapic)
-		setup_irq(2, &irq2);
-
 	/* setup after call gates are initialised (usually add in
 	 * the architecture specific gates)
 	 */
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index e948b28a5a9a..4558dd3918cf 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -193,6 +193,9 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
 	kprobe_opcode_t opcode;
 	kprobe_opcode_t *orig_opcodes = opcodes;
 
+	if (search_exception_tables(opcodes))
+		return 0;	/* Page fault may occur on this address. */
+
 retry:
 	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
 		return 0;
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 7a13fac63a1f..4006c522adc7 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -203,7 +203,7 @@ static void __init platform_detect(void)
 static void __init platform_detect(void)
 {
 	/* stopgap until OFW support is added to the kernel */
-	olpc_platform_info.boardrev = 0xc2;
+	olpc_platform_info.boardrev = olpc_board(0xc2);
 }
 #endif
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index e4c8fb608873..c6520a4e85d4 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -268,6 +268,32 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return __get_cpu_var(paravirt_lazy_mode);
 }
 
+void arch_flush_lazy_mmu_mode(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		WARN_ON(preempt_count() == 1);
+		arch_leave_lazy_mmu_mode();
+		arch_enter_lazy_mmu_mode();
+	}
+
+	preempt_enable();
+}
+
+void arch_flush_lazy_cpu_mode(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
+		WARN_ON(preempt_count() == 1);
+		arch_leave_lazy_cpu_mode();
+		arch_enter_lazy_cpu_mode();
+	}
+
+	preempt_enable();
+}
+
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e68bb9e30864..6d12f7e37f8c 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -180,6 +180,9 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 
 	trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
 	if (!need_resched()) {
+		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
@@ -194,6 +197,9 @@ static void mwait_idle(void)
 	struct power_trace it;
 	if (!need_resched()) {
 		trace_power_start(&it, POWER_CSTATE, 1);
+		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a546f55c77b4..bd4da2af08ae 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -104,9 +104,6 @@ void cpu_idle(void)
 			check_pgt_cache();
 			rmb();
 
-			if (rcu_pending(cpu))
-				rcu_check_callbacks(cpu, 0);
-
 			if (cpu_is_offline(cpu))
 				play_dead();
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 416fb9282f4f..85b4cb5c1980 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -40,6 +40,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/ftrace.h>
+#include <linux/dmi.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -151,14 +152,18 @@ void __show_regs(struct pt_regs *regs, int all)
 	unsigned long d0, d1, d2, d3, d6, d7;
 	unsigned int fsindex, gsindex;
 	unsigned int ds, cs, es;
+	const char *board;
 
 	printk("\n");
 	print_modules();
-	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
+	board = dmi_get_system_info(DMI_PRODUCT_NAME);
+	if (!board)
+		board = "";
+	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version);
+		init_utsname()->version, board);
 	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
 	printk_address(regs->ip, 1);
 	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 0a5df5f82fb9..06ca07f6ad86 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -810,12 +810,16 @@ static void ptrace_bts_untrace(struct task_struct *child)
 
 static void ptrace_bts_detach(struct task_struct *child)
 {
-	if (unlikely(child->bts)) {
-		ds_release_bts(child->bts);
-		child->bts = NULL;
-
-		ptrace_bts_free_buffer(child);
-	}
+	/*
+	 * Ptrace_detach() races with ptrace_untrace() in case
+	 * the child dies and is reaped by another thread.
+	 *
+	 * We only do the memory accounting at this point and
+	 * leave the buffer deallocation and the bts tracer
+	 * release to ptrace_bts_untrace() which will be called
+	 * later on with tasklist_lock held.
+	 */
+	release_locked_buffer(child->bts_buffer, child->bts_size);
 }
 #else
 static inline void ptrace_bts_fork(struct task_struct *tsk) {}
@@ -1384,7 +1388,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 #ifdef CONFIG_X86_32
 # define IS_IA32	1
 #elif defined CONFIG_IA32_EMULATION
-# define IS_IA32	test_thread_flag(TIF_IA32)
+# define IS_IA32	is_compat_task()
 #else
 # define IS_IA32	0
 #endif
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2b46eb41643b..4526b3a75ed2 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -217,6 +217,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
 		},
 	},
+	{	/* Handle problems with rebooting on Dell XPS710 */
+		.callback = set_bios_reboot,
+		.ident = "Dell XPS710",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
+		},
+	},
 	{ }
 };
 
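Aside (not part of the commit): the new entry follows the standard DMI quirk pattern — when every DMI_MATCH in .matches fires, dmi_check_system() invokes the callback, here forcing the BIOS reboot method. A toy version of that table walk, with simplified structures standing in for the kernel's dmi_system_id:

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *vendor, *product;	/* simplified .matches */
	const char *ident;
};

static const struct quirk reboot_quirks[] = {
	{ "Dell Inc.", "Dell XPS710", "Dell XPS710" },
	{ 0, 0, 0 }
};

int main(void)
{
	/* pretend these were read out of the DMI/SMBIOS tables */
	const char *sys_vendor = "Dell Inc.", *product = "Dell XPS710";
	const struct quirk *q;

	for (q = reboot_quirks; q->ident; q++)
		if (!strcmp(q->vendor, sys_vendor) &&
		    !strcmp(q->product, product)) {
			printf("%s: forcing BIOS reboot method\n", q->ident);
			break;
		}
	return 0;
}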
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ae0d8042cf69..6a8811a69324 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -607,7 +607,7 @@ struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
 static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE
-		"%s detected: BIOS may corrupt low RAM, working it around.\n",
+		"%s detected: BIOS may corrupt low RAM, working around it.\n",
 		d->ident);
 
 	e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
@@ -770,6 +770,9 @@ void __init setup_arch(char **cmdline_p)
 
 	finish_e820_parsing();
 
+	if (efi_enabled)
+		efi_init();
+
 	dmi_scan_machine();
 
 	dmi_check_system(bad_bios_dmi_table);
@@ -789,8 +792,6 @@ void __init setup_arch(char **cmdline_p)
 	insert_resource(&iomem_resource, &data_resource);
 	insert_resource(&iomem_resource, &bss_resource);
 
-	if (efi_enabled)
-		efi_init();
 
 #ifdef CONFIG_X86_32
 	if (ppro_with_ram_bug()) {
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index e6e695acd725..241ec3923f61 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -115,7 +115,7 @@ unsigned long __init calibrate_cpu(void)
 
 static struct irqaction irq0 = {
 	.handler	= timer_interrupt,
-	.flags		= IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
+	.flags		= IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER,
 	.mask		= CPU_MASK_NONE,
 	.name		= "timer"
 };
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 98c2d055284b..a9e7548e1790 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -99,6 +99,12 @@ static inline void preempt_conditional_sti(struct pt_regs *regs) | |||
99 | local_irq_enable(); | 99 | local_irq_enable(); |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline void conditional_cli(struct pt_regs *regs) | ||
103 | { | ||
104 | if (regs->flags & X86_EFLAGS_IF) | ||
105 | local_irq_disable(); | ||
106 | } | ||
107 | |||
102 | static inline void preempt_conditional_cli(struct pt_regs *regs) | 108 | static inline void preempt_conditional_cli(struct pt_regs *regs) |
103 | { | 109 | { |
104 | if (regs->flags & X86_EFLAGS_IF) | 110 | if (regs->flags & X86_EFLAGS_IF) |
@@ -626,8 +632,10 @@ clear_dr7: | |||
626 | 632 | ||
627 | #ifdef CONFIG_X86_32 | 633 | #ifdef CONFIG_X86_32 |
628 | debug_vm86: | 634 | debug_vm86: |
635 | /* reenable preemption: handle_vm86_trap() might sleep */ | ||
636 | dec_preempt_count(); | ||
629 | handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); | 637 | handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); |
630 | preempt_conditional_cli(regs); | 638 | conditional_cli(regs); |
631 | return; | 639 | return; |
632 | #endif | 640 | #endif |
633 | 641 | ||
@@ -896,7 +904,7 @@ asmlinkage void math_state_restore(void) | |||
896 | EXPORT_SYMBOL_GPL(math_state_restore); | 904 | EXPORT_SYMBOL_GPL(math_state_restore); |
897 | 905 | ||
898 | #ifndef CONFIG_MATH_EMULATION | 906 | #ifndef CONFIG_MATH_EMULATION |
899 | asmlinkage void math_emulate(long arg) | 907 | void math_emulate(struct math_emu_info *info) |
900 | { | 908 | { |
901 | printk(KERN_EMERG | 909 | printk(KERN_EMERG |
902 | "math-emulation not enabled and no coprocessor found.\n"); | 910 | "math-emulation not enabled and no coprocessor found.\n"); |
@@ -906,16 +914,19 @@ asmlinkage void math_emulate(long arg) | |||
906 | } | 914 | } |
907 | #endif /* CONFIG_MATH_EMULATION */ | 915 | #endif /* CONFIG_MATH_EMULATION */ |
908 | 916 | ||
909 | dotraplinkage void __kprobes | 917 | dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs) |
910 | do_device_not_available(struct pt_regs *regs, long error) | ||
911 | { | 918 | { |
912 | #ifdef CONFIG_X86_32 | 919 | #ifdef CONFIG_X86_32 |
913 | if (read_cr0() & X86_CR0_EM) { | 920 | if (read_cr0() & X86_CR0_EM) { |
914 | conditional_sti(regs); | 921 | struct math_emu_info info = { }; |
915 | math_emulate(0); | 922 | |
923 | conditional_sti(®s); | ||
924 | |||
925 | info.regs = ®s; | ||
926 | math_emulate(&info); | ||
916 | } else { | 927 | } else { |
917 | math_state_restore(); /* interrupts still off */ | 928 | math_state_restore(); /* interrupts still off */ |
918 | conditional_sti(regs); | 929 | conditional_sti(®s); |
919 | } | 930 | } |
920 | #else | 931 | #else |
921 | math_state_restore(); | 932 | math_state_restore(); |
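math_emulate() now receives the interrupted register frame through a struct math_emu_info rather than an unused long, and since do_device_not_available() takes its struct pt_regs by value in this tree, it passes the address of that on-stack copy. A sketch of the structure this call assumes (the real definition lives in asm/math_emu.h, outside this diff):

    struct math_emu_info {
    	long ___orig_eip;
    	union {
    		struct pt_regs *regs;		/* normal trap entry */
    		struct kernel_vm86_regs *vm86;	/* vm86-mode entry */
    	};
    };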
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 599e58168631..d5cebb52d45b 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -273,30 +273,43 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin) | |||
273 | * use the TSC value at the transitions to calculate a pretty | 273 | * use the TSC value at the transitions to calculate a pretty |
274 | * good value for the TSC frequency. | 274 | * good value for the TSC frequency. |
275 | */ | 275 | */ |
276 | static inline int pit_expect_msb(unsigned char val) | 276 | static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap) |
277 | { | 277 | { |
278 | int count = 0; | 278 | int count; |
279 | u64 tsc = 0; | ||
279 | 280 | ||
280 | for (count = 0; count < 50000; count++) { | 281 | for (count = 0; count < 50000; count++) { |
281 | /* Ignore LSB */ | 282 | /* Ignore LSB */ |
282 | inb(0x42); | 283 | inb(0x42); |
283 | if (inb(0x42) != val) | 284 | if (inb(0x42) != val) |
284 | break; | 285 | break; |
286 | tsc = get_cycles(); | ||
285 | } | 287 | } |
286 | return count > 50; | 288 | *deltap = get_cycles() - tsc; |
289 | *tscp = tsc; | ||
290 | |||
291 | /* | ||
292 | * We require _some_ success, but the quality control | ||
293 | * will be based on the error terms on the TSC values. | ||
294 | */ | ||
295 | return count > 5; | ||
287 | } | 296 | } |
288 | 297 | ||
289 | /* | 298 | /* |
290 | * How many MSB values do we want to see? We aim for a | 299 | * How many MSB values do we want to see? We aim for |
291 | * 15ms calibration, which assuming a 2us counter read | 300 | * a maximum error rate of 500ppm (in practice the |
292 | * error should give us roughly 150 ppm precision for | 301 | * real error is much smaller), but refuse to spend |
293 | * the calibration. | 302 | * more than 25ms on it. |
294 | */ | 303 | */ |
295 | #define QUICK_PIT_MS 15 | 304 | #define MAX_QUICK_PIT_MS 25 |
296 | #define QUICK_PIT_ITERATIONS (QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256) | 305 | #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256) |
297 | 306 | ||
298 | static unsigned long quick_pit_calibrate(void) | 307 | static unsigned long quick_pit_calibrate(void) |
299 | { | 308 | { |
309 | int i; | ||
310 | u64 tsc, delta; | ||
311 | unsigned long d1, d2; | ||
312 | |||
300 | /* Set the Gate high, disable speaker */ | 313 | /* Set the Gate high, disable speaker */ |
301 | outb((inb(0x61) & ~0x02) | 0x01, 0x61); | 314 | outb((inb(0x61) & ~0x02) | 0x01, 0x61); |
302 | 315 | ||
@@ -315,45 +328,52 @@ static unsigned long quick_pit_calibrate(void) | |||
315 | outb(0xff, 0x42); | 328 | outb(0xff, 0x42); |
316 | outb(0xff, 0x42); | 329 | outb(0xff, 0x42); |
317 | 330 | ||
318 | if (pit_expect_msb(0xff)) { | 331 | /* |
319 | int i; | 332 | * The PIT starts counting at the next edge, so we |
320 | u64 t1, t2, delta; | 333 | * need to delay for a microsecond. The easiest way |
321 | unsigned char expect = 0xfe; | 334 | * to do that is to just read back the 16-bit counter |
322 | 335 | * once from the PIT. | |
323 | t1 = get_cycles(); | 336 | */ |
324 | for (i = 0; i < QUICK_PIT_ITERATIONS; i++, expect--) { | 337 | inb(0x42); |
325 | if (!pit_expect_msb(expect)) | 338 | inb(0x42); |
326 | goto failed; | 339 | |
340 | if (pit_expect_msb(0xff, &tsc, &d1)) { | ||
341 | for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) { | ||
342 | if (!pit_expect_msb(0xff-i, &delta, &d2)) | ||
343 | break; | ||
344 | |||
345 | /* | ||
346 | * Iterate until the error is less than 500 ppm | ||
347 | */ | ||
348 | delta -= tsc; | ||
349 | if (d1+d2 < delta >> 11) | ||
350 | goto success; | ||
327 | } | 351 | } |
328 | t2 = get_cycles(); | ||
329 | |||
330 | /* | ||
331 | * Make sure we can rely on the second TSC timestamp: | ||
332 | */ | ||
333 | if (!pit_expect_msb(expect)) | ||
334 | goto failed; | ||
335 | |||
336 | /* | ||
337 | * Ok, if we get here, then we've seen the | ||
338 | * MSB of the PIT decrement QUICK_PIT_ITERATIONS | ||
339 | * times, and each MSB had many hits, so we never | ||
340 | * had any sudden jumps. | ||
341 | * | ||
342 | * As a result, we can depend on there not being | ||
343 | * any odd delays anywhere, and the TSC reads are | ||
344 | * reliable. | ||
345 | * | ||
346 | * kHz = ticks / time-in-seconds / 1000; | ||
347 | * kHz = (t2 - t1) / (QPI * 256 / PIT_TICK_RATE) / 1000 | ||
348 | * kHz = ((t2 - t1) * PIT_TICK_RATE) / (QPI * 256 * 1000) | ||
349 | */ | ||
350 | delta = (t2 - t1)*PIT_TICK_RATE; | ||
351 | do_div(delta, QUICK_PIT_ITERATIONS*256*1000); | ||
352 | printk("Fast TSC calibration using PIT\n"); | ||
353 | return delta; | ||
354 | } | 352 | } |
355 | failed: | 353 | printk("Fast TSC calibration failed\n"); |
356 | return 0; | 354 | return 0; |
355 | |||
356 | success: | ||
357 | /* | ||
358 | * Ok, if we get here, then we've seen the | ||
359 | * MSB of the PIT decrement 'i' times, and the | ||
360 | * error has shrunk to less than 500 ppm. | ||
361 | * | ||
362 | * As a result, we can depend on there not being | ||
363 | * any odd delays anywhere, and the TSC reads are | ||
364 | * reliable (within the error). We also adjust the | ||
365 | * delta to the middle of the error bars, just | ||
366 | * because it looks nicer. | ||
367 | * | ||
368 | * kHz = ticks / time-in-seconds / 1000; | ||
369 | * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000 | ||
370 | * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000) | ||
371 | */ | ||
372 | delta += (long)(d2 - d1)/2; | ||
373 | delta *= PIT_TICK_RATE; | ||
374 | do_div(delta, i*256*1000); | ||
375 | printk("Fast TSC calibration using PIT\n"); | ||
376 | return delta; | ||
357 | } | 377 | } |
358 | 378 | ||
359 | /** | 379 | /** |
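The rewritten calibration makes the quality check explicit: pit_expect_msb() now hands back the TSC value at its last matching read together with the cycle count one read loop spans (the error term d), and the caller accepts as soon as d1+d2 < delta>>11, i.e. a combined read error under 1/2048 ≈ 488 ppm, safely inside the 500 ppm target. The closing arithmetic then converts i MSB steps of 256 PIT ticks each into kHz. A stand-alone illustration of that arithmetic with assumed example numbers (plain user-space C, not kernel code):

    #include <stdio.h>

    #define PIT_TICK_RATE 1193182ull	/* PIT input clock, Hz */

    int main(void)
    {
    	unsigned long long delta = 7724000;	/* TSC cycles over i steps */
    	unsigned long d1 = 1500, d2 = 1800;	/* read-error terms */
    	int i = 18;				/* MSB steps of 256 ticks */

    	/* accept once d1+d2 < delta/2048, i.e. error < ~488 ppm */
    	if (d1 + d2 < (delta >> 11))
    		printf("error bound met\n");

    	delta += (long long)(d2 - d1) / 2;	/* centre the error bars */
    	delta *= PIT_TICK_RATE;
    	delta /= i * 256 * 1000ull;		/* ticks -> kHz */
    	printf("TSC ~ %llu kHz\n", delta);	/* ~2 GHz with these numbers */
    	return 0;
    }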
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 1d3302cc2ddf..bef58b4982db 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c | |||
@@ -321,6 +321,16 @@ static void vmi_release_pmd(unsigned long pfn) | |||
321 | } | 321 | } |
322 | 322 | ||
323 | /* | 323 | /* |
324 | * We use the pgd_free hook for releasing the pgd page: | ||
325 | */ | ||
326 | static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
327 | { | ||
328 | unsigned long pfn = __pa(pgd) >> PAGE_SHIFT; | ||
329 | |||
330 | vmi_ops.release_page(pfn, VMI_PAGE_L2); | ||
331 | } | ||
332 | |||
333 | /* | ||
324 | * Helper macros for MMU update flags. We can defer updates until a flush | 334 | * Helper macros for MMU update flags. We can defer updates until a flush |
325 | * or page invalidation only if the update is to the current address space | 335 | * or page invalidation only if the update is to the current address space |
326 | * (otherwise, there is no flush). We must check against init_mm, since | 336 | * (otherwise, there is no flush). We must check against init_mm, since |
@@ -762,6 +772,7 @@ static inline int __init activate_vmi(void) | |||
762 | if (vmi_ops.release_page) { | 772 | if (vmi_ops.release_page) { |
763 | pv_mmu_ops.release_pte = vmi_release_pte; | 773 | pv_mmu_ops.release_pte = vmi_release_pte; |
764 | pv_mmu_ops.release_pmd = vmi_release_pmd; | 774 | pv_mmu_ops.release_pmd = vmi_release_pmd; |
775 | pv_mmu_ops.pgd_free = vmi_pgd_free; | ||
765 | } | 776 | } |
766 | 777 | ||
767 | /* Set linear is needed in all cases */ | 778 | /* Set linear is needed in all cases */ |
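PTE and PMD page-table pages already reach the hypervisor through the release_pte/release_pmd hooks, but the pgd page itself is freed via pgd_free(), so without this hook the VMI backend was never told the L2 page went away. The essence of the new hook, with comments added (sketch; assumes pgd pages sit in the direct map, so __pa() is valid on them):

    static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd)
    {
    	/* kernel virtual address -> physical frame number */
    	unsigned long pfn = __pa(pgd) >> PAGE_SHIFT;

    	/* tell the hypervisor this page no longer backs an L2 table */
    	vmi_ops.release_page(pfn, VMI_PAGE_L2);
    }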
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index c4c1f9e09402..e5b088fffa40 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c | |||
@@ -202,7 +202,7 @@ static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id) | |||
202 | static struct irqaction vmi_clock_action = { | 202 | static struct irqaction vmi_clock_action = { |
203 | .name = "vmi-timer", | 203 | .name = "vmi-timer", |
204 | .handler = vmi_timer_interrupt, | 204 | .handler = vmi_timer_interrupt, |
205 | .flags = IRQF_DISABLED | IRQF_NOBALANCING, | 205 | .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, |
206 | .mask = CPU_MASK_ALL, | 206 | .mask = CPU_MASK_ALL, |
207 | }; | 207 | }; |
208 | 208 | ||
@@ -283,10 +283,13 @@ void __devinit vmi_time_ap_init(void) | |||
283 | #endif | 283 | #endif |
284 | 284 | ||
285 | /** vmi clocksource */ | 285 | /** vmi clocksource */ |
286 | static struct clocksource clocksource_vmi; | ||
286 | 287 | ||
287 | static cycle_t read_real_cycles(void) | 288 | static cycle_t read_real_cycles(void) |
288 | { | 289 | { |
289 | return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); | 290 | cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); |
291 | return ret >= clocksource_vmi.cycle_last ? | ||
292 | ret : clocksource_vmi.cycle_last; | ||
290 | } | 293 | } |
291 | 294 | ||
292 | static struct clocksource clocksource_vmi = { | 295 | static struct clocksource clocksource_vmi = { |
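The clamp in read_real_cycles() keeps the clocksource monotonic when the hypervisor's cycle counter is observed to step backwards (e.g. across VCPU scheduling); the new forward declaration is needed because the read routine now references clocksource_vmi.cycle_last ahead of the structure's definition below. The general shape of the fix, as a sketch with hypothetical names:

    static struct clocksource my_cs;		/* forward declaration */

    static cycle_t read_clamped(void)
    {
    	cycle_t now = read_raw_counter();	/* hypothetical raw readout */

    	/* never report a value older than the last one handed out */
    	return now >= my_cs.cycle_last ? now : my_cs.cycle_last;
    }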