author     Linus Torvalds <torvalds@linux-foundation.org>  2011-11-20 17:57:43 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-11-20 17:57:43 -0500
commit     a4cc3889f7e2c3f2fd15b492822c889fed5e1800 (patch)
tree       d455a7392bc48aa44f61346e09b9cab63522d5b9 /arch/x86
parent     bb893d15b564f7711b60e0bc12966d049980582d (diff)
parent     95ef1e52922cf75b1ea2eae54ef886f2cc47eecb (diff)
Merge branch 'kvm-updates/3.2' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/3.2' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM guest: prevent tracing recursion with kvmclock
Revert "KVM: PPC: Add support for explicit HIOR setting"
KVM: VMX: Check for automatic switch msr table overflow
KVM: VMX: Add support for guest/host-only profiling
KVM: VMX: add support for switching of PERF_GLOBAL_CTRL
KVM: s390: announce SYNC_MMU
KVM: s390: Fix tprot locking
KVM: s390: handle SIGP sense running intercepts
KVM: s390: Fix RUNNING flag misinterpretation
Diffstat (limited to 'arch/x86')
 arch/x86/kernel/kvmclock.c |   5 +-
 arch/x86/kvm/vmx.c         | 131 +++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 122 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index c1a0188e29ae..44842d756b29 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -74,9 +74,10 @@ static cycle_t kvm_clock_read(void)
 	struct pvclock_vcpu_time_info *src;
 	cycle_t ret;
 
-	src = &get_cpu_var(hv_clock);
+	preempt_disable_notrace();
+	src = &__get_cpu_var(hv_clock);
 	ret = pvclock_clocksource_read(src);
-	put_cpu_var(hv_clock);
+	preempt_enable_notrace();
 	return ret;
 }
 
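Why this fixes the recursion: get_cpu_var()/put_cpu_var() go through the traced preempt_disable()/preempt_enable(), so using kvmclock as the ftrace clock re-entered the tracer from inside the clocksource. Consolidated, the patched function reads as follows (the same code as the hunk above, shown whole):

```c
static cycle_t kvm_clock_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;

	/* The _notrace variants are invisible to ftrace, so reading
	 * the clocksource from within the tracer cannot recurse. */
	preempt_disable_notrace();
	src = &__get_cpu_var(hv_clock);	/* raw per-cpu access, untraced */
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}
```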
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a0d6bd9ad442..579a0b51696a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -39,6 +39,7 @@
 #include <asm/mce.h>
 #include <asm/i387.h>
 #include <asm/xcr.h>
+#include <asm/perf_event.h>
 
 #include "trace.h"
 
@@ -118,7 +119,7 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
-#define NR_AUTOLOAD_MSRS 1
+#define NR_AUTOLOAD_MSRS 8
 #define VMCS02_POOL_SIZE 1
 
 struct vmcs {
@@ -622,6 +623,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
 static bool cpu_has_load_ia32_efer;
+static bool cpu_has_load_perf_global_ctrl;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -1191,15 +1193,34 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
+static void clear_atomic_switch_msr_special(unsigned long entry,
+		unsigned long exit)
+{
+	vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
+	vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 {
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
-	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-		vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-		vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-		return;
+	switch (msr) {
+	case MSR_EFER:
+		if (cpu_has_load_ia32_efer) {
+			clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+					VM_EXIT_LOAD_IA32_EFER);
+			return;
+		}
+		break;
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		if (cpu_has_load_perf_global_ctrl) {
+			clear_atomic_switch_msr_special(
+					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+			return;
+		}
+		break;
 	}
 
 	for (i = 0; i < m->nr; ++i)
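For reference, the fallback path both helpers share appends to a fixed-size per-vcpu autoload array whose entries use the hardware-defined MSR-load format. The definitions live earlier in vmx.c and are not part of this diff; the sketch below reflects the 3.2-era layout, and the exact field names should be treated as assumptions:

```c
/* Hardware-defined entry format for the VM-entry/VM-exit MSR-load
 * lists (see the VMX chapters of the Intel SDM). */
struct vmx_msr_entry {
	u32 index;	/* MSR number */
	u32 reserved;
	u64 value;	/* value loaded on entry (guest) or exit (host) */
};

/* Per-vcpu bookkeeping inside struct vcpu_vmx. NR_AUTOLOAD_MSRS is
 * bumped from 1 to 8 by this patch so EFER and the perf MSRs fit. */
struct msr_autoload {
	unsigned nr;					/* slots in use */
	struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];	/* loaded on VM entry */
	struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];	/* loaded on VM exit */
} msr_autoload;
```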
@@ -1215,25 +1236,55 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 }
 
+static void add_atomic_switch_msr_special(unsigned long entry,
+		unsigned long exit, unsigned long guest_val_vmcs,
+		unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+{
+	vmcs_write64(guest_val_vmcs, guest_val);
+	vmcs_write64(host_val_vmcs, host_val);
+	vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
+	vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 				  u64 guest_val, u64 host_val)
 {
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
-	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-		vmcs_write64(GUEST_IA32_EFER, guest_val);
-		vmcs_write64(HOST_IA32_EFER, host_val);
-		vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-		vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-		return;
+	switch (msr) {
+	case MSR_EFER:
+		if (cpu_has_load_ia32_efer) {
+			add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+					VM_EXIT_LOAD_IA32_EFER,
+					GUEST_IA32_EFER,
+					HOST_IA32_EFER,
+					guest_val, host_val);
+			return;
+		}
+		break;
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		if (cpu_has_load_perf_global_ctrl) {
+			add_atomic_switch_msr_special(
+					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
+					GUEST_IA32_PERF_GLOBAL_CTRL,
+					HOST_IA32_PERF_GLOBAL_CTRL,
+					guest_val, host_val);
+			return;
+		}
+		break;
 	}
 
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
 			break;
 
-	if (i == m->nr) {
+	if (i == NR_AUTOLOAD_MSRS) {
+		printk_once(KERN_WARNING"Not enough msr switch entries. "
+				"Can't add msr %x\n", msr);
+		return;
+	} else if (i == m->nr) {
 		++m->nr;
 		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
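The new overflow check is the counterpart to the NR_AUTOLOAD_MSRS bump above: with more producers the array can now legitimately fill up, and a full array must fail loudly rather than write past the hardware list. A hypothetical caller (not part of this patch, but following the same pattern atomic_switch_perf_msrs() uses later in this diff) would look like:

```c
/* Hypothetical illustration: switch EFER across VM entry/exit.
 * If guest and host agree, drop the entry so it does not occupy
 * one of the eight autoload slots; otherwise install the pair. */
static void example_switch_efer(struct vcpu_vmx *vmx,
				u64 guest_efer, u64 host_efer)
{
	if (guest_efer == host_efer)
		clear_atomic_switch_msr(vmx, MSR_EFER);
	else
		add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
}
```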
@@ -2455,6 +2506,42 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
 				   VM_EXIT_LOAD_IA32_EFER);
 
+	cpu_has_load_perf_global_ctrl =
+		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+				VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+				   VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+
+	/*
+	 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
+	 * but due to the errata below it can't be used. The workaround is
+	 * to use the msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
+	 *
+	 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
+	 *
+	 * AAK155 (model 26)
+	 * AAP115 (model 30)
+	 * AAT100 (model 37)
+	 * BC86,AAY89,BD102 (model 44)
+	 * BA97 (model 46)
+	 *
+	 */
+	if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
+		switch (boot_cpu_data.x86_model) {
+		case 26:
+		case 30:
+		case 37:
+		case 44:
+		case 46:
+			cpu_has_load_perf_global_ctrl = false;
+			printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
+					"does not work properly. Using workaround\n");
+			break;
+		default:
+			break;
+		}
+	}
+
 	return 0;
 }
 
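allow_1_setting() is defined earlier in vmx.c and is not shown in this diff. Per the VMX capability-MSR convention, the high 32 bits of MSR_IA32_VMX_ENTRY_CTLS/MSR_IA32_VMX_EXIT_CTLS report which control bits may be set to 1, so the probe is roughly the sketch below; treat it as a reconstruction, not the verbatim source:

```c
/* Sketch: a VM-entry/VM-exit control can be enabled iff its bit is
 * set in the allowed-1 half (high dword) of the capability MSR. */
static __init bool allow_1_setting(u32 msr, u32 ctl)
{
	u32 vmx_msr_low, vmx_msr_high;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);
	return vmx_msr_high & ctl;
}
```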
@@ -5968,6 +6055,24 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
 }
 
+static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+{
+	int i, nr_msrs;
+	struct perf_guest_switch_msr *msrs;
+
+	msrs = perf_guest_get_msrs(&nr_msrs);
+
+	if (!msrs)
+		return;
+
+	for (i = 0; i < nr_msrs; i++)
+		if (msrs[i].host == msrs[i].guest)
+			clear_atomic_switch_msr(vmx, msrs[i].msr);
+		else
+			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
+					msrs[i].host);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #define Q "q"
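perf_guest_get_msrs() comes from the perf side of this merge (the <asm/perf_event.h> half of the guest/host-only profiling work) and returns NULL when the PMU has nothing to switch. The layout implied by the loop above is the following; it is an assumption matching the usage here, not quoted from the header:

```c
/* Assumed layout, matching the usage in atomic_switch_perf_msrs():
 * one entry per MSR the PMU wants switched around guest execution. */
struct perf_guest_switch_msr {
	unsigned msr;	/* e.g. MSR_CORE_PERF_GLOBAL_CTRL */
	u64 host;	/* value in effect while the host runs */
	u64 guest;	/* value in effect while the guest runs */
};
```

When host and guest values coincide there is nothing to switch, so the entry is removed from the autoload list instead of occupying one of the eight slots; this also keeps the common no-profiling case free of extra VM-entry work.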
@@ -6017,6 +6122,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vmx_set_interrupt_shadow(vcpu, 0);
 
+	atomic_switch_perf_msrs(vmx);
+
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
 		/* Store host registers */