Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/boot/compressed/aslr.c                |  9
-rw-r--r--  arch/x86/include/asm/tsc.h                     |  2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c               | 11
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h               |  1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c         | 11
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c  | 10
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c            | 48
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c             |  2
-rw-r--r--  arch/x86/kernel/pci-dma.c                      |  4
-rw-r--r--  arch/x86/kernel/tsc.c                          |  7
-rw-r--r--  arch/x86/kernel/tsc_msr.c                      | 30
-rw-r--r--  arch/x86/kvm/mmu.c                             |  1
-rw-r--r--  arch/x86/kvm/vmx.c                             |  2
-rw-r--r--  arch/x86/kvm/x86.c                             |  2

14 files changed, 86 insertions(+), 54 deletions(-)
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 90a21f430117..4dbf967da50d 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -111,7 +111,7 @@ struct mem_vector {
 };
 
 #define MEM_AVOID_MAX 5
-struct mem_vector mem_avoid[MEM_AVOID_MAX];
+static struct mem_vector mem_avoid[MEM_AVOID_MAX];
 
 static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
 {
@@ -180,7 +180,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
 }
 
 /* Does this memory vector overlap a known avoided area? */
-bool mem_avoid_overlap(struct mem_vector *img)
+static bool mem_avoid_overlap(struct mem_vector *img)
 {
 	int i;
 
@@ -192,8 +192,9 @@ bool mem_avoid_overlap(struct mem_vector *img)
 	return false;
 }
 
-unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN];
-unsigned long slot_max = 0;
+static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
+			   CONFIG_PHYSICAL_ALIGN];
+static unsigned long slot_max;
 
 static void slots_append(unsigned long addr)
 {
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 57ae63cd6ee2..94605c0e9cee 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -66,6 +66,6 @@ extern void tsc_save_sched_clock_state(void);
 extern void tsc_restore_sched_clock_state(void);
 
 /* MSR based TSC calibration for Intel Atom SoC platforms */
-int try_msr_calibrate_tsc(unsigned long *fast_calibrate);
+unsigned long try_msr_calibrate_tsc(void);
 
 #endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b88645191fe5..79f9f848bee4 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1192,6 +1192,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event_list[i]) {
 
+			if (i >= cpuc->n_events - cpuc->n_added)
+				--cpuc->n_added;
+
 			if (x86_pmu.put_event_constraints)
 				x86_pmu.put_event_constraints(cpuc, event);
 
@@ -1521,6 +1524,8 @@ static int __init init_hw_perf_events(void)
 
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
+	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+
 	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
 		quirk->func();
 
@@ -1534,7 +1539,6 @@ static int __init init_hw_perf_events(void)
 	__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
 			   0, x86_pmu.num_counters, 0, 0);
 
-	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
 	x86_pmu_format_group.attrs = x86_pmu.format_attrs;
 
 	if (x86_pmu.event_attrs)
@@ -1820,9 +1824,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 	if (ret)
 		return ret;
 
+	if (x86_pmu.attr_rdpmc_broken)
+		return -ENOTSUPP;
+
 	if (!!val != !!x86_pmu.attr_rdpmc) {
 		x86_pmu.attr_rdpmc = !!val;
-		smp_call_function(change_rdpmc, (void *)val, 1);
+		on_each_cpu(change_rdpmc, (void *)val, 1);
 	}
 
 	return count;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index c1a861829d81..4972c244d0bc 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -409,6 +409,7 @@ struct x86_pmu {
 	/*
 	 * sysfs attrs
 	 */
+	int		attr_rdpmc_broken;
 	int		attr_rdpmc;
 	struct attribute **format_attrs;
 	struct attribute **event_attrs;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 0fa4f242f050..aa333d966886 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1361,10 +1361,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	intel_pmu_disable_all();
 	handled = intel_pmu_drain_bts_buffer();
 	status = intel_pmu_get_status();
-	if (!status) {
-		intel_pmu_enable_all(0);
-		return handled;
-	}
+	if (!status)
+		goto done;
 
 	loops = 0;
 again:
@@ -2310,10 +2308,7 @@ __init int intel_pmu_init(void)
 	if (version > 1)
 		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
 
-	/*
-	 * v2 and above have a perf capabilities MSR
-	 */
-	if (version > 1) {
+	if (boot_cpu_has(X86_FEATURE_PDCM)) {
 		u64 capabilities;
 
 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 29c248799ced..c88f7f4b03ee 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -501,8 +501,11 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
@@ -1178,10 +1181,15 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index b1e2fe115323..7c1a0c07b607 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -231,31 +231,49 @@ static __initconst const struct x86_pmu p6_pmu = {
 
 };
 
+static __init void p6_pmu_rdpmc_quirk(void)
+{
+	if (boot_cpu_data.x86_mask < 9) {
+		/*
+		 * PPro erratum 26; fixed in stepping 9 and above.
+		 */
+		pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
+		x86_pmu.attr_rdpmc_broken = 1;
+		x86_pmu.attr_rdpmc = 0;
+	}
+}
+
 __init int p6_pmu_init(void)
 {
+	x86_pmu = p6_pmu;
+
 	switch (boot_cpu_data.x86_model) {
-	case 1:
-	case 3: /* Pentium Pro */
-	case 5:
-	case 6: /* Pentium II */
-	case 7:
-	case 8:
-	case 11: /* Pentium III */
-	case 9:
-	case 13:
-	/* Pentium M */
+	case 1: /* Pentium Pro */
+		x86_add_quirk(p6_pmu_rdpmc_quirk);
+		break;
+
+	case 3: /* Pentium II - Klamath */
+	case 5: /* Pentium II - Deschutes */
+	case 6: /* Pentium II - Mendocino */
 		break;
+
+	case 7: /* Pentium III - Katmai */
+	case 8: /* Pentium III - Coppermine */
+	case 10: /* Pentium III Xeon */
+	case 11: /* Pentium III - Tualatin */
+		break;
+
+	case 9: /* Pentium M - Banias */
+	case 13: /* Pentium M - Dothan */
+		break;
+
 	default:
-		pr_cont("unsupported p6 CPU model %d ",
-			boot_cpu_data.x86_model);
+		pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model);
 		return -ENODEV;
 	}
 
-	x86_pmu = p6_pmu;
-
 	memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
 		sizeof(hw_cache_event_ids));
 
-
 	return 0;
 }
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 4eabc160696f..679cef0791cd 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -279,5 +279,7 @@ void arch_crash_save_vmcoreinfo(void)
 	VMCOREINFO_SYMBOL(node_data);
 	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
 #endif
+	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
+			      (unsigned long)&_text - __START_KERNEL);
 }
 
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 872079a67e4d..f7d0672481fd 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,8 +100,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 		flag |= __GFP_ZERO;
 again:
 	page = NULL;
-	if (!(flag & GFP_ATOMIC))
+	/* CMA can be used only in the context which permits sleeping */
+	if (flag & __GFP_WAIT)
 		page = dma_alloc_from_contiguous(dev, count, get_order(size));
+	/* fallback */
 	if (!page)
 		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
 	if (!page)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index acb3b606613e..cfbe99f88830 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -653,13 +653,10 @@ unsigned long native_calibrate_tsc(void)
 
 	/* Calibrate TSC using MSR for Intel Atom SoCs */
 	local_irq_save(flags);
-	i = try_msr_calibrate_tsc(&fast_calibrate);
+	fast_calibrate = try_msr_calibrate_tsc();
 	local_irq_restore(flags);
-	if (i >= 0) {
-		if (i == 0)
-			pr_warn("Fast TSC calibration using MSR failed\n");
+	if (fast_calibrate)
 		return fast_calibrate;
-	}
 
 	local_irq_save(flags);
 	fast_calibrate = quick_pit_calibrate();
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 8b5434f4389f..92ae6acac8a7 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -53,7 +53,7 @@ static struct freq_desc freq_desc_tables[] = {
 	/* TNG */
 	{ 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } },
 	/* VLV2 */
-	{ 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
+	{ 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
 	/* ANN */
 	{ 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
 };
@@ -77,21 +77,18 @@ static int match_cpu(u8 family, u8 model)
 
 /*
  * Do MSR calibration only for known/supported CPUs.
- * Return values:
- * -1: CPU is unknown/unsupported for MSR based calibration
- * 0: CPU is known/supported, but calibration failed
- * 1: CPU is known/supported, and calibration succeeded
+ *
+ * Returns the calibration value or 0 if MSR calibration failed.
  */
-int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
+unsigned long try_msr_calibrate_tsc(void)
 {
-	int cpu_index;
 	u32 lo, hi, ratio, freq_id, freq;
+	unsigned long res;
+	int cpu_index;
 
 	cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
 	if (cpu_index < 0)
-		return -1;
-
-	*fast_calibrate = 0;
+		return 0;
 
 	if (freq_desc_tables[cpu_index].msr_plat) {
 		rdmsr(MSR_PLATFORM_INFO, lo, hi);
@@ -103,7 +100,7 @@ int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
 	pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);
 
 	if (!ratio)
-		return 0;
+		goto fail;
 
 	/* Get FSB FREQ ID */
 	rdmsr(MSR_FSB_FREQ, lo, hi);
@@ -112,16 +109,19 @@ int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
 	pr_info("Resolved frequency ID: %u, frequency: %u KHz\n",
 		freq_id, freq);
 	if (!freq)
-		return 0;
+		goto fail;
 
 	/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
-	*fast_calibrate = freq * ratio;
-	pr_info("TSC runs at %lu KHz\n", *fast_calibrate);
+	res = freq * ratio;
+	pr_info("TSC runs at %lu KHz\n", res);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	lapic_timer_frequency = (freq * 1000) / HZ;
 	pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency);
 #endif
+	return res;
 
-	return 1;
+fail:
+	pr_warn("Fast TSC calibration using MSR failed\n");
+	return 0;
 }
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e50425d0f5f7..9b531351a587 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2672,6 +2672,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			break;
 		}
 
+		drop_large_spte(vcpu, iterator.sptep);
 		if (!is_shadow_present_pte(*iterator.sptep)) {
 			u64 base_addr = iterator.addr;
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a06f101ef64b..392752834751 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6688,7 +6688,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	else if (is_page_fault(intr_info))
 		return enable_ept;
 	else if (is_no_device(intr_info) &&
-		 !(nested_read_cr0(vmcs12) & X86_CR0_TS))
+		 !(vmcs12->guest_cr0 & X86_CR0_TS))
 		return 0;
 	return vmcs12->exception_bitmap &
 			(1u << (intr_info & INTR_INFO_VECTOR_MASK));
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 39c28f09dfd5..2b8578432d5b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6186,7 +6186,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 		frag->len -= len;
 	}
 
-	if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
+	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
 		vcpu->mmio_needed = 0;
 
 		/* FIXME: return into emulator if single-stepping. */
