author    Gleb Natapov <gleb@redhat.com>  2011-10-05 08:01:22 -0400
committer Avi Kivity <avi@redhat.com>     2011-11-17 09:27:54 -0500
commit    8bf00a529967dafbbb210b377c38a15834d1e979 (patch)
tree      10b03537524c8dcc14f9398e4890a2b450691d20 /arch
parent    52e16b185fdd0aeb0522c203e944873e38ac1575 (diff)
KVM: VMX: add support for switching of PERF_GLOBAL_CTRL
Some CPUs have special support for switching the PERF_GLOBAL_CTRL MSR
across VM entry and exit. Add logic to detect whether such support
exists and works properly, and extend the MSR-switching code to use it
when available. Also extend the number of generic MSR-switching entries
to 8.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
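For context, a minimal caller-side sketch of how the API changed by this
patch would be used. This is not part of the patch; the wrapper name
vmx_switch_perf_global_ctrl is hypothetical, while add_atomic_switch_msr()
and MSR_CORE_PERF_GLOBAL_CTRL are real identifiers from the diff below:

        /*
         * Hypothetical usage sketch: ask for an atomic guest/host switch
         * of IA32_PERF_GLOBAL_CTRL around VM entry/exit.
         */
        static void vmx_switch_perf_global_ctrl(struct vcpu_vmx *vmx,
                                                u64 guest_val, u64 host_val)
        {
                /*
                 * With this patch, add_atomic_switch_msr() uses the dedicated
                 * VM_{ENTRY,EXIT}_LOAD_IA32_PERF_GLOBAL_CTRL controls when
                 * cpu_has_load_perf_global_ctrl is set, and otherwise falls
                 * back to the generic MSR-autoload lists (now 8 entries).
                 */
                add_atomic_switch_msr(vmx, MSR_CORE_PERF_GLOBAL_CTRL,
                                      guest_val, host_val);
        }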
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/vmx.c | 104
1 file changed, 93 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a0d6bd9ad442..55e849b52d9e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -118,7 +118,7 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
-#define NR_AUTOLOAD_MSRS 1
+#define NR_AUTOLOAD_MSRS 8
 #define VMCS02_POOL_SIZE 1
 
 struct vmcs {
@@ -622,6 +622,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
 static bool cpu_has_load_ia32_efer;
+static bool cpu_has_load_perf_global_ctrl;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -1191,15 +1192,34 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
+static void clear_atomic_switch_msr_special(unsigned long entry,
+		unsigned long exit)
+{
+	vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
+	vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 {
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
-	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-		vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-		vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-		return;
+	switch (msr) {
+	case MSR_EFER:
+		if (cpu_has_load_ia32_efer) {
+			clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+					VM_EXIT_LOAD_IA32_EFER);
+			return;
+		}
+		break;
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		if (cpu_has_load_perf_global_ctrl) {
+			clear_atomic_switch_msr_special(
+					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+			return;
+		}
+		break;
 	}
 
 	for (i = 0; i < m->nr; ++i)
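For reference, the vmcs_set_bits()/vmcs_clear_bits() helpers called by the
new *_special() functions are plain read-modify-write wrappers over the
VMCS accessors. A sketch consistent with the vmx.c of this era (verify the
exact bodies against the tree):

        static void vmcs_clear_bits(unsigned long field, u32 mask)
        {
                /* Read the VMCS field and clear the requested control bits. */
                vmcs_writel(field, vmcs_readl(field) & ~mask);
        }

        static void vmcs_set_bits(unsigned long field, u32 mask)
        {
                /* Read the VMCS field and set the requested control bits. */
                vmcs_writel(field, vmcs_readl(field) | mask);
        }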
@@ -1215,18 +1235,44 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 }
 
+static void add_atomic_switch_msr_special(unsigned long entry,
+		unsigned long exit, unsigned long guest_val_vmcs,
+		unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+{
+	vmcs_write64(guest_val_vmcs, guest_val);
+	vmcs_write64(host_val_vmcs, host_val);
+	vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
+	vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 				  u64 guest_val, u64 host_val)
 {
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
-	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-		vmcs_write64(GUEST_IA32_EFER, guest_val);
-		vmcs_write64(HOST_IA32_EFER, host_val);
-		vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-		vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-		return;
+	switch (msr) {
+	case MSR_EFER:
+		if (cpu_has_load_ia32_efer) {
+			add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+					VM_EXIT_LOAD_IA32_EFER,
+					GUEST_IA32_EFER,
+					HOST_IA32_EFER,
+					guest_val, host_val);
+			return;
+		}
+		break;
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		if (cpu_has_load_perf_global_ctrl) {
+			add_atomic_switch_msr_special(
+					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
+					GUEST_IA32_PERF_GLOBAL_CTRL,
+					HOST_IA32_PERF_GLOBAL_CTRL,
+					guest_val, host_val);
+			return;
+		}
+		break;
 	}
 
 	for (i = 0; i < m->nr; ++i)
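The generic fallback path walks vmx->msr_autoload, whose capacity
NR_AUTOLOAD_MSRS this patch raises from 1 to 8. A sketch of the state
involved, assuming the struct layout of vmx.c from this period (field and
type names should be checked against the tree):

        struct msr_autoload {
                unsigned nr;                                  /* entries in use */
                struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS]; /* loaded on VM entry */
                struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];  /* loaded on VM exit */
        } msr_autoload;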
@@ -2455,6 +2501,42 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
 				   VM_EXIT_LOAD_IA32_EFER);
 
+	cpu_has_load_perf_global_ctrl =
+		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+				VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+				   VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+
+	/*
+	 * Some CPUs support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL,
+	 * but due to the errata below it cannot be used. The workaround is
+	 * to use the MSR-load mechanism to switch IA32_PERF_GLOBAL_CTRL.
+	 *
+	 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
+	 *
+	 * AAK155             (model 26)
+	 * AAP115             (model 30)
+	 * AAT100             (model 37)
+	 * BC86, AAY89, BD102 (model 44)
+	 * BA97               (model 46)
+	 *
+	 */
+	if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
+		switch (boot_cpu_data.x86_model) {
+		case 26:
+		case 30:
+		case 37:
+		case 44:
+		case 46:
+			cpu_has_load_perf_global_ctrl = false;
+			printk_once(KERN_WARNING "kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
+					"does not work properly. Using workaround\n");
+			break;
+		default:
+			break;
+		}
+	}
+
 	return 0;
 }
 
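allow_1_setting(), used above to probe the VMX capability MSRs, already
exists in vmx.c; it reports whether a given control bit is allowed to be 1.
A sketch of its expected semantics, based on the vmx.c of this era (the
exact body should be verified against the tree):

        static __init bool allow_1_setting(u32 msr, u32 ctl)
        {
                u32 vmx_msr_low, vmx_msr_high;

                /*
                 * The high word of a VMX capability MSR reports the
                 * allowed-1 settings for the corresponding controls.
                 */
                rdmsr(msr, vmx_msr_low, vmx_msr_high);
                return vmx_msr_high & ctl;
        }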