author		H. Peter Anvin <hpa@linux.intel.com>	2009-02-23 17:05:56 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2009-02-23 17:05:56 -0500
commit		dc731ca60954310be0993e8992d450c7089fd13d (patch)
tree		6a997916f963d9e6dfa76fc5564296f57c3f909e /arch/x86/kernel
parent		ec5b3d32437571b8a742069a4cfd04edb6b6eda5 (diff)
parent		20f4d6c3a2a23c5d7d9cc7f42fbb943ca7a03d1f (diff)
Merge branch 'x86/urgent' into x86/mce2
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/acpi/sleep.c			 4
-rw-r--r--	arch/x86/kernel/acpi/wakeup_64.S		30
-rw-r--r--	arch/x86/kernel/apm_32.c			 4
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/Kconfig		11
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/powernow-k8.c	40
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_64.c		 2
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_amd_64.c		 2
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_intel_64.c	 4
-rw-r--r--	arch/x86/kernel/ftrace.c			17
-rw-r--r--	arch/x86/kernel/hpet.c				12
-rw-r--r--	arch/x86/kernel/time_64.c			 2
-rw-r--r--	arch/x86/kernel/vmiclock_32.c			 7
12 files changed, 74 insertions, 61 deletions
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 707c1f6f95fa..a60c1f3bcb87 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -156,11 +156,11 @@ static int __init acpi_sleep_setup(char *str)
 #ifdef CONFIG_HIBERNATION
 		if (strncmp(str, "s4_nohwsig", 10) == 0)
 			acpi_no_s4_hw_signature();
+		if (strncmp(str, "s4_nonvs", 8) == 0)
+			acpi_s4_no_nvs();
 #endif
 		if (strncmp(str, "old_ordering", 12) == 0)
 			acpi_old_suspend_ordering();
-		if (strncmp(str, "s4_nonvs", 8) == 0)
-			acpi_s4_no_nvs();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
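Note: this change moves the "s4_nonvs" match under CONFIG_HIBERNATION, next to the other hibernation-only option, so the handler is only compiled when it can do something. For reference, the loop it sits in walks a comma-separated option string; below is a minimal userspace sketch of that walk, with printf() calls standing in for the ACPI handlers (the stand-ins are this sketch's assumption, not kernel code):

    #include <stdio.h>
    #include <string.h>

    /* Sketch of the option walk in acpi_sleep_setup(): match a known
     * token at the current position, then advance past the next comma
     * (and any spaces/tabs) to the following option. */
    static void parse_opts(char *str)
    {
        while (str != NULL && *str != '\0') {
            if (strncmp(str, "s4_nonvs", 8) == 0)
                printf("-> s4_nonvs\n");
            if (strncmp(str, "old_ordering", 12) == 0)
                printf("-> old_ordering\n");
            str = strchr(str, ',');
            if (str != NULL)
                str += strspn(str, ", \t");
        }
    }

    int main(void)
    {
        char opts[] = "old_ordering, s4_nonvs";
        parse_opts(opts);   /* prints both options in order */
        return 0;
    }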
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index bcc293423a70..96258d9dc974 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -13,7 +13,6 @@
  * Hooray, we are in Long 64-bit mode (but still running in low memory)
  */
 ENTRY(wakeup_long64)
-wakeup_long64:
 	movq	saved_magic, %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
@@ -34,16 +33,12 @@ wakeup_long64:
 
 	movq	saved_rip, %rax
 	jmp	*%rax
+ENDPROC(wakeup_long64)
 
 bogus_64_magic:
 	jmp	bogus_64_magic
 
-	.align 2
-	.p2align 4,,15
-.globl do_suspend_lowlevel
-	.type	do_suspend_lowlevel,@function
-do_suspend_lowlevel:
-.LFB5:
+ENTRY(do_suspend_lowlevel)
 	subq	$8, %rsp
 	xorl	%eax, %eax
 	call	save_processor_state
@@ -67,7 +62,7 @@ do_suspend_lowlevel:
 	pushfq
 	popq	pt_regs_flags(%rax)
 
-	movq	$.L97, saved_rip(%rip)
+	movq	$resume_point, saved_rip(%rip)
 
 	movq	%rsp, saved_rsp
 	movq	%rbp, saved_rbp
@@ -78,14 +73,12 @@ do_suspend_lowlevel:
 	addq	$8, %rsp
 	movl	$3, %edi
 	xorl	%eax, %eax
-	jmp	acpi_enter_sleep_state
-.L97:
-	.p2align 4,,7
-.L99:
-	.align 4
-	movl	$24, %eax
-	movw	%ax, %ds
+	call	acpi_enter_sleep_state
+	/* in case something went wrong, restore the machine status and go on */
+	jmp	resume_point
 
+	.align 4
+resume_point:
 	/* We don't restore %rax, it must be 0 anyway */
 	movq	$saved_context, %rax
 	movq	saved_context_cr4(%rax), %rbx
@@ -117,12 +110,9 @@ do_suspend_lowlevel:
 	xorl	%eax, %eax
 	addq	$8, %rsp
 	jmp	restore_processor_state
-.LFE5:
-.Lfe5:
-	.size	do_suspend_lowlevel, .Lfe5-do_suspend_lowlevel
+ENDPROC(do_suspend_lowlevel)
 
 .data
-ALIGN
 ENTRY(saved_rbp)	.quad	0
 ENTRY(saved_rsi)	.quad	0
 ENTRY(saved_rdi)	.quad	0
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 98807bb095ad..266ec6c18b6c 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1192,6 +1192,7 @@ static int suspend(int vetoable)
 	device_suspend(PMSG_SUSPEND);
 	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
+	sysdev_suspend(PMSG_SUSPEND);
 
 	local_irq_enable();
 
@@ -1208,6 +1209,7 @@ static int suspend(int vetoable)
 	if (err != APM_SUCCESS)
 		apm_error("suspend", err);
 	err = (err == APM_SUCCESS) ? 0 : -EIO;
+	sysdev_resume();
 	device_power_up(PMSG_RESUME);
 	local_irq_enable();
 	device_resume(PMSG_RESUME);
@@ -1228,6 +1230,7 @@ static void standby(void)
 
 	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
+	sysdev_suspend(PMSG_SUSPEND);
 	local_irq_enable();
 
 	err = set_system_power_state(APM_STATE_STANDBY);
@@ -1235,6 +1238,7 @@ static void standby(void)
 		apm_error("standby", err);
 
 	local_irq_disable();
+	sysdev_resume();
 	device_power_up(PMSG_RESUME);
 	local_irq_enable();
 }
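Note: both suspend() and standby() gain the sysdev suspend/resume calls that the core PM code had started to require: system-device state goes down last, after device_power_down(), and comes back first, before device_power_up(). A stubbed sketch of just that ordering follows; every function body here is a userspace placeholder named after the kernel call, and enter_firmware_sleep() is a hypothetical stand-in for set_system_power_state():

    #include <stdio.h>

    /* Placeholders: only the call ORDER is the point of this sketch. */
    static void device_power_down(void)    { puts("device_power_down"); }
    static void sysdev_suspend(void)       { puts("sysdev_suspend"); }
    static void enter_firmware_sleep(void) { puts("set_system_power_state"); }
    static void sysdev_resume(void)        { puts("sysdev_resume"); }
    static void device_power_up(void)      { puts("device_power_up"); }

    int main(void)
    {
        /* down: sysdev state is saved last, after ordinary devices... */
        device_power_down();
        sysdev_suspend();       /* added by this patch */
        enter_firmware_sleep();
        /* ...up: and restored first on the way back */
        sysdev_resume();        /* added by this patch */
        device_power_up();
        return 0;
    }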
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
index efae3b22a0ff..65792c2cc462 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
@@ -245,17 +245,6 @@ config X86_E_POWERSAVER
 
 comment "shared options"
 
-config X86_ACPI_CPUFREQ_PROC_INTF
-	bool "/proc/acpi/processor/../performance interface (deprecated)"
-	depends on PROC_FS
-	depends on X86_ACPI_CPUFREQ || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
-	help
-	  This enables the deprecated /proc/acpi/processor/../performance
-	  interface. While it is helpful for debugging, the generic,
-	  cross-architecture cpufreq interfaces should be used.
-
-	  If in doubt, say N.
-
 config X86_SPEEDSTEP_LIB
 	tristate
 	default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 5c28b37dea11..6428aa17b40e 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -939,10 +939,25 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
+static int get_transition_latency(struct powernow_k8_data *data)
+{
+	int max_latency = 0;
+	int i;
+	for (i = 0; i < data->acpi_data.state_count; i++) {
+		int cur_latency = data->acpi_data.states[i].transition_latency
+			+ data->acpi_data.states[i].bus_master_latency;
+		if (cur_latency > max_latency)
+			max_latency = cur_latency;
+	}
+	/* value in usecs, needs to be in nanoseconds */
+	return 1000 * max_latency;
+}
+
 #else
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
+static int get_transition_latency(struct powernow_k8_data *data) { return 0; }
 #endif /* CONFIG_X86_POWERNOW_K8_ACPI */
 
 /* Take a frequency, and issue the fid/vid transition command */
@@ -1142,8 +1157,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	data->cpu = pol->cpu;
 	data->currpstate = HW_PSTATE_INVALID;
 
-	rc = powernow_k8_cpu_init_acpi(data);
-	if (rc) {
+	if (powernow_k8_cpu_init_acpi(data)) {
 		/*
 		 * Use the PSB BIOS structure. This is only availabe on
 		 * an UP version, and is deprecated by AMD.
@@ -1161,19 +1175,28 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 			       "ACPI maintainers and complain to your BIOS "
 			       "vendor.\n");
 #endif
-			goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
 		if (pol->cpu != 0) {
 			printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
 			       "CPU other than CPU0. Complain to your BIOS "
 			       "vendor.\n");
-			goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
 		rc = find_psb_table(data);
 		if (rc) {
-			goto err_out;
+			kfree(data);
+			return -ENODEV;
 		}
-	}
+		/* Take a crude guess here.
+		 * That guess was in microseconds, so multiply with 1000 */
+		pol->cpuinfo.transition_latency = (
+			 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
+			 ((1 << data->irt) * 30)) * 1000;
+	} else /* ACPI _PSS objects available */
+		pol->cpuinfo.transition_latency = get_transition_latency(data);
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
@@ -1204,11 +1227,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
 	data->available_cores = pol->cpus;
 
-	/* Take a crude guess here.
-	 * That guess was in microseconds, so multiply with 1000 */
-	pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
-	    + (3 * (1 << data->irt) * 10)) * 1000;
-
 	if (cpu_family == CPU_HW_PSTATE)
 		pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
 	else
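Note: the driver now derives the transition latency from the ACPI _PSS data when it is available, falling back to the old crude RVO/VST guess only on the PSB path. get_transition_latency() takes the worst-case (transition + bus-master) latency over all P-states and converts microseconds to nanoseconds. A standalone sketch of that computation, with a made-up sample state table:

    #include <stdio.h>

    struct pstate {
        int transition_latency;    /* usecs, from ACPI _PSS */
        int bus_master_latency;    /* usecs, from ACPI _PSS */
    };

    /* Worst case over all P-states, converted usecs -> nsecs,
     * mirroring the added get_transition_latency(). */
    static int get_transition_latency(const struct pstate *states, int count)
    {
        int max_latency = 0;
        int i;
        for (i = 0; i < count; i++) {
            int cur = states[i].transition_latency +
                      states[i].bus_master_latency;
            if (cur > max_latency)
                max_latency = cur;
        }
        return 1000 * max_latency;
    }

    int main(void)
    {
        struct pstate ps[] = { { 100, 10 }, { 80, 40 }, { 60, 20 } };
        /* max(110, 120, 80) * 1000 = 120000 ns */
        printf("%d ns\n", get_transition_latency(ps, 3));
        return 0;
    }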
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 0625993bf955..a4a7c686ce90 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -614,7 +614,7 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c)
 
 }
 
-static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
+static void mce_cpu_features(struct cpuinfo_x86 *c)
 {
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 0069c653f4ed..e82c8208b81e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -121,7 +121,7 @@ static long threshold_restart_bank(void *_tr)
 }
 
 /* cpu init entry point, called from mce.c with preempt off */
-void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
+void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
 	unsigned int bank, block;
 	unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index 7f7f1015ef19..1b1491a76b55 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -30,7 +30,7 @@ asmlinkage void smp_thermal_interrupt(void)
 	irq_exit();
 }
 
-static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
+static void intel_init_thermal(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int tm2 = 0;
@@ -84,7 +84,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
 	return;
 }
 
-void __cpuinit mce_intel_feature_init(struct cpuinfo_x86 *c)
+void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
 	intel_init_thermal(c);
 }
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 1b43086b097a..231bdd3c5b1c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -488,20 +488,21 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	 * ignore such a protection.
 	 */
 	asm volatile(
-		"1: " _ASM_MOV " (%[parent_old]), %[old]\n"
-		"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
+		"1: " _ASM_MOV " (%[parent]), %[old]\n"
+		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
 		"   movl $0, %[faulted]\n"
+		"3:\n"
 
 		".section .fixup, \"ax\"\n"
-		"3: movl $1, %[faulted]\n"
+		"4: movl $1, %[faulted]\n"
+		"   jmp 3b\n"
 		".previous\n"
 
-		_ASM_EXTABLE(1b, 3b)
-		_ASM_EXTABLE(2b, 3b)
+		_ASM_EXTABLE(1b, 4b)
+		_ASM_EXTABLE(2b, 4b)
 
-		: [parent_replaced] "=r" (parent), [old] "=r" (old),
-		  [faulted] "=r" (faulted)
-		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: [old] "=r" (old), [faulted] "=r" (faulted)
+		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
 		: "memory"
 	);
 
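Note: two fixes in one asm block. First, the fixup code at label 4 now ends with "jmp 3b" back to the new label 3 after the fast path, instead of falling off the end of the .fixup section. Second, the operand scheme is simplified: 'parent' becomes a plain input register, with the store through it covered by the "memory" clobber rather than a fake matched output. The _ASM_EXTABLE exception-table plumbing exists only in the kernel, so the userspace sketch below (x86-64, GCC/Clang inline asm) reproduces only the named-operand scheme:

    #include <stdio.h>

    int main(void)
    {
        unsigned long slot = 0x1111;
        unsigned long *parent = &slot;
        unsigned long return_hooker = 0x2222;
        unsigned long old;

        /* Same operand pattern as the patched kernel asm: 'parent' is
         * an input-only register; the write through it is declared via
         * the "memory" clobber. */
        asm volatile("movq (%[parent]), %[old]\n\t"
                     "movq %[hooker], (%[parent])\n\t"
                     : [old] "=r" (old)
                     : [parent] "r" (parent), [hooker] "r" (return_hooker)
                     : "memory");

        printf("old=%#lx, *parent now=%#lx\n", old, slot);
        return 0;
    }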
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 5c8da2c2c185..a00545fe5cdd 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -899,7 +899,7 @@ static unsigned long hpet_rtc_flags;
 static int hpet_prev_update_sec;
 static struct rtc_time hpet_alarm_time;
 static unsigned long hpet_pie_count;
-static unsigned long hpet_t1_cmp;
+static u32 hpet_t1_cmp;
 static unsigned long hpet_default_delta;
 static unsigned long hpet_pie_delta;
 static unsigned long hpet_pie_limit;
@@ -907,6 +907,14 @@ static unsigned long hpet_pie_limit;
 static rtc_irq_handler irq_handler;
 
 /*
+ * Check that the hpet counter c1 is ahead of the c2
+ */
+static inline int hpet_cnt_ahead(u32 c1, u32 c2)
+{
+	return (s32)(c2 - c1) < 0;
+}
+
+/*
  * Registers a IRQ handler.
  */
 int hpet_register_irq_handler(rtc_irq_handler handler)
@@ -1077,7 +1085,7 @@ static void hpet_rtc_timer_reinit(void)
 		hpet_t1_cmp += delta;
 		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
 		lost_ints++;
-	} while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);
+	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));
 
 	if (lost_ints) {
 		if (hpet_rtc_flags & RTC_PIE)
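Note: the HPET comparator is a 32-bit register, so keeping hpet_t1_cmp in an unsigned long and comparing with a (long) cast breaks on 64-bit, where the subtraction no longer wraps at 32 bits. hpet_cnt_ahead() does the subtraction in u32 arithmetic and tests the sign of the 32-bit difference, which stays correct across counter wraparound. A standalone check with sample values near the wrap point (the values are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Same idea as hpet_cnt_ahead(): c1 is "ahead of" c2 iff the
     * 32-bit difference c2 - c1, read as signed, is negative. */
    static inline int cnt_ahead(uint32_t c1, uint32_t c2)
    {
        return (int32_t)(c2 - c1) < 0;
    }

    int main(void)
    {
        /* comparator already wrapped past zero, counter just below wrap:
         * the wrapped value still counts as ahead */
        printf("%d\n", cnt_ahead(0x00000005u, 0xfffffff0u)); /* 1 */
        printf("%d\n", cnt_ahead(0xfffffff0u, 0x00000005u)); /* 0 */
        return 0;
    }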
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index e6e695acd725..241ec3923f61 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -115,7 +115,7 @@ unsigned long __init calibrate_cpu(void)
 
 static struct irqaction irq0 = {
 	.handler = timer_interrupt,
-	.flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
+	.flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER,
 	.mask = CPU_MASK_NONE,
 	.name = "timer"
 };
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index c4c1f9e09402..e5b088fffa40 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -202,7 +202,7 @@ static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id)
 static struct irqaction vmi_clock_action = {
 	.name = "vmi-timer",
 	.handler = vmi_timer_interrupt,
-	.flags = IRQF_DISABLED | IRQF_NOBALANCING,
+	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
 	.mask = CPU_MASK_ALL,
 };
 
@@ -283,10 +283,13 @@ void __devinit vmi_time_ap_init(void)
 #endif
 
 /** vmi clocksource */
+static struct clocksource clocksource_vmi;
 
 static cycle_t read_real_cycles(void)
 {
-	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
+	cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
+	return ret >= clocksource_vmi.cycle_last ?
+		ret : clocksource_vmi.cycle_last;
 }
 
 static struct clocksource clocksource_vmi = {
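Note: the forward declaration of clocksource_vmi lets read_real_cycles() clamp the raw hypervisor cycle counter against cycle_last, so a counter that momentarily steps backwards can never make the clocksource appear to run in reverse. A minimal sketch of the clamp (the cycle_last bookkeeping is done by the timekeeping core in the kernel and is omitted here):

    #include <stdio.h>
    #include <stdint.h>

    /* Clamp a possibly-backward-stepping raw reading to the last value
     * recorded by the timekeeping core, as the patched
     * read_real_cycles() does. */
    static uint64_t clamped_read(uint64_t raw, uint64_t cycle_last)
    {
        return raw >= cycle_last ? raw : cycle_last;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)clamped_read(1000, 900)); /* 1000 */
        printf("%llu\n", (unsigned long long)clamped_read(800, 900));  /* 900: clamped */
        return 0;
    }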