path: root/arch/x86/kvm
author     Joerg Roedel <joerg.roedel@amd.com>    2008-02-13 12:58:46 -0500
committer  Avi Kivity <avi@qumranet.com>          2008-04-27 04:53:21 -0400
commit     f65c229c3e7743c6654c16b9ec6248466b5eef21 (patch)
tree       0326e1e3e7a407c8271d2ae5427c300e6e741a39 /arch/x86/kvm
parent     e6101a96c9efb74c98bba6322d4c5ea89e47e0fe (diff)
KVM: SVM: allocate the MSR permission map per VCPU
This patch changes the kvm-amd module to allocate the SVM MSR permission map per VCPU instead of using one global map for all VCPUs. This gives us the flexibility to allow specific guests direct access to MSRs that would otherwise be virtualized, which is required for LBR virtualization.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Markus Rechberger <markus.rechberger@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
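With the permission map hanging off struct vcpu_svm instead of a module-global, per-guest MSR policy becomes possible. As a hedged illustration only (the helper name and the choice of MSRs below are assumptions, not part of this patch), a follow-up change for LBR virtualization could open the last-branch-record MSRs up for a single VCPU by clearing their intercept bits in that VCPU's map:

/*
 * Illustrative sketch, not in this patch: let one guest read and write
 * the LBR MSRs directly while every other guest stays intercepted.
 * read = 1, write = 1 means "do not intercept" for set_msr_interception().
 */
static void svm_enable_lbr_passthrough(struct vcpu_svm *svm)
{
        set_msr_interception(svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}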
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/kvm_svm.h   2
-rw-r--r--  arch/x86/kvm/svm.c      67
2 files changed, 34 insertions(+), 35 deletions(-)
diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h
index ecdfe97e4635..65ef0fc2c036 100644
--- a/arch/x86/kvm/kvm_svm.h
+++ b/arch/x86/kvm/kvm_svm.h
@@ -39,6 +39,8 @@ struct vcpu_svm {
         unsigned long host_db_regs[NUM_DB_REGS];
         unsigned long host_dr6;
         unsigned long host_dr7;
+
+        u32 *msrpm;
 };
 
 #endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d934819733ce..281a2ffe1224 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -65,7 +65,6 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 }
 
 unsigned long iopm_base;
-unsigned long msrpm_base;
 
 struct kvm_ldttss_desc {
         u16 limit0;
@@ -370,12 +369,29 @@ static void set_msr_interception(u32 *msrpm, unsigned msr,
         BUG();
 }
 
+static void svm_vcpu_init_msrpm(u32 *msrpm)
+{
+        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
+
+#ifdef CONFIG_X86_64
+        set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
+        set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
+        set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
+        set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
+        set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
+        set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
+#endif
+        set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
+        set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
+        set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
+        set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
+}
+
 static __init int svm_hardware_setup(void)
 {
         int cpu;
         struct page *iopm_pages;
-        struct page *msrpm_pages;
-        void *iopm_va, *msrpm_va;
+        void *iopm_va;
         int r;
 
         iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
@@ -388,37 +404,13 @@ static __init int svm_hardware_setup(void)
         clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
         iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
 
-
-        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
-
-        r = -ENOMEM;
-        if (!msrpm_pages)
-                goto err_1;
-
-        msrpm_va = page_address(msrpm_pages);
-        memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
-        msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;
-
-#ifdef CONFIG_X86_64
-        set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
-        set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
-        set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
-        set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
-        set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
-        set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
-#endif
-        set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
-        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
-        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
-        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);
-
         if (boot_cpu_has(X86_FEATURE_NX))
                 kvm_enable_efer_bits(EFER_NX);
 
         for_each_online_cpu(cpu) {
                 r = svm_cpu_init(cpu);
                 if (r)
-                        goto err_2;
+                        goto err;
         }
 
         svm_features = cpuid_edx(SVM_CPUID_FUNC);
@@ -438,10 +430,7 @@ static __init int svm_hardware_setup(void)
 
         return 0;
 
-err_2:
-        __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
-        msrpm_base = 0;
-err_1:
+err:
         __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
         iopm_base = 0;
         return r;
@@ -449,9 +438,8 @@ err_1:
 
 static __exit void svm_hardware_unsetup(void)
 {
-        __free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
         __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
-        iopm_base = msrpm_base = 0;
+        iopm_base = 0;
 }
 
 static void init_seg(struct vmcb_seg *seg)
@@ -536,7 +524,7 @@ static void init_vmcb(struct vcpu_svm *svm)
                 (1ULL << INTERCEPT_MWAIT);
 
         control->iopm_base_pa = iopm_base;
-        control->msrpm_base_pa = msrpm_base;
+        control->msrpm_base_pa = __pa(svm->msrpm);
         control->tsc_offset = 0;
         control->int_ctl = V_INTR_MASKING_MASK;
 
@@ -615,6 +603,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 {
         struct vcpu_svm *svm;
         struct page *page;
+        struct page *msrpm_pages;
         int err;
 
         svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
@@ -633,6 +622,13 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
                 goto uninit;
         }
 
+        err = -ENOMEM;
+        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+        if (!msrpm_pages)
+                goto uninit;
+        svm->msrpm = page_address(msrpm_pages);
+        svm_vcpu_init_msrpm(svm->msrpm);
+
         svm->vmcb = page_address(page);
         clear_page(svm->vmcb);
         svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
@@ -661,6 +657,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
         struct vcpu_svm *svm = to_svm(vcpu);
 
         __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
         kvm_vcpu_uninit(vcpu);
         kmem_cache_free(kvm_vcpu_cache, svm);
 }
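A note on the (1, 1) arguments used throughout the diff: in the SVM MSR permission map each covered MSR owns a pair of bits, one for reads and one for writes, and a set bit forces an intercept on the matching access. svm_vcpu_init_msrpm() therefore memsets the map to 0xff (intercept everything) and then punches holes for the handful of MSRs the guest may touch directly. The range bookkeeping inside set_msr_interception() is not shown in this diff; the snippet below is a simplified, assumed sketch of the bit arithmetic (helper name and flat offset parameter are illustrative, kernel types assumed):

/*
 * Simplified sketch (assumed helper): update the two permission bits for
 * one MSR at a flat bit-pair offset within the map. A cleared pair means
 * the access is passed straight through to the hardware.
 */
static void set_msrpm_pair(u32 *msrpm, u32 offset, int read, int write)
{
        u32 bitpos = offset * 2;                /* two bits per MSR */
        u32 *word  = msrpm + bitpos / 32;       /* 16 MSRs per u32 */
        u32 shift  = bitpos % 32;
        u32 mask   = (read ? 0 : 1) | (write ? 0 : 2);

        *word = (*word & ~(3u << shift)) | (mask << shift);
}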