aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/svm.c
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2010-03-01 09:34:37 -0500
committerAvi Kivity <avi@redhat.com>2010-05-17 05:15:12 -0400
commit323c3d809b8bd42d6d557c734d4bdfdefa110445 (patch)
treeac96b682183194f70e10be5dd85d34cef205e7ba /arch/x86/kvm/svm.c
parentac72a9b733995cb3ef538000f6309b5e724aa469 (diff)
KVM: SVM: Optimize nested svm msrpm merging
This patch optimizes the way the msrpm of the host and the guest are merged. The old code merged the 2 msrpm pages completely. This code needed to touch 24kb of memory for that operation. The optimized variant this patch introduces merges only the parts where the host msrpm may contain zero bits. This reduces the amount of memory which is touched to 48 bytes. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--arch/x86/kvm/svm.c80
1 files changed, 71 insertions, 9 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a079550d3886..45a287e51e18 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -93,6 +93,9 @@ struct nested_state {
93 93
94}; 94};
95 95
96#define MSRPM_OFFSETS 16
97static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
98
96struct vcpu_svm { 99struct vcpu_svm {
97 struct kvm_vcpu vcpu; 100 struct kvm_vcpu vcpu;
98 struct vmcb *vmcb; 101 struct vmcb *vmcb;
@@ -510,6 +513,49 @@ static void svm_vcpu_init_msrpm(u32 *msrpm)
510 } 513 }
511} 514}
512 515
516static void add_msr_offset(u32 offset)
517{
518 int i;
519
520 for (i = 0; i < MSRPM_OFFSETS; ++i) {
521
522 /* Offset already in list? */
523 if (msrpm_offsets[i] == offset)
524 return;
525
526 /* Slot used by another offset? */
527 if (msrpm_offsets[i] != MSR_INVALID)
528 continue;
529
530 /* Add offset to list */
531 msrpm_offsets[i] = offset;
532
533 return;
534 }
535
536 /*
537 * If this BUG triggers the msrpm_offsets table has an overflow. Just
538 * increase MSRPM_OFFSETS in this case.
539 */
540 BUG();
541}
542
543static void init_msrpm_offsets(void)
544{
545 int i;
546
547 memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
548
549 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
550 u32 offset;
551
552 offset = svm_msrpm_offset(direct_access_msrs[i].index);
553 BUG_ON(offset == MSR_INVALID);
554
555 add_msr_offset(offset);
556 }
557}
558
513static void svm_enable_lbrv(struct vcpu_svm *svm) 559static void svm_enable_lbrv(struct vcpu_svm *svm)
514{ 560{
515 u32 *msrpm = svm->msrpm; 561 u32 *msrpm = svm->msrpm;
@@ -548,6 +594,8 @@ static __init int svm_hardware_setup(void)
548 memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER)); 594 memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
549 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; 595 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
550 596
597 init_msrpm_offsets();
598
551 if (boot_cpu_has(X86_FEATURE_NX)) 599 if (boot_cpu_has(X86_FEATURE_NX))
552 kvm_enable_efer_bits(EFER_NX); 600 kvm_enable_efer_bits(EFER_NX);
553 601
@@ -811,6 +859,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
811 svm_vcpu_init_msrpm(svm->msrpm); 859 svm_vcpu_init_msrpm(svm->msrpm);
812 860
813 svm->nested.msrpm = page_address(nested_msrpm_pages); 861 svm->nested.msrpm = page_address(nested_msrpm_pages);
862 svm_vcpu_init_msrpm(svm->nested.msrpm);
814 863
815 svm->vmcb = page_address(page); 864 svm->vmcb = page_address(page);
816 clear_page(svm->vmcb); 865 clear_page(svm->vmcb);
@@ -1888,20 +1937,33 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1888 1937
1889static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) 1938static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
1890{ 1939{
1891 u32 *nested_msrpm; 1940 /*
1892 struct page *page; 1941 * This function merges the msr permission bitmaps of kvm and the
1942 * nested vmcb. It is omptimized in that it only merges the parts where
1943 * the kvm msr permission bitmap may contain zero bits
1944 */
1893 int i; 1945 int i;
1894 1946
1895 nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page); 1947 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
1896 if (!nested_msrpm) 1948 return true;
1897 return false;
1898 1949
1899 for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++) 1950 for (i = 0; i < MSRPM_OFFSETS; i++) {
1900 svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i]; 1951 u32 value, p;
1952 u64 offset;
1901 1953
1902 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm); 1954 if (msrpm_offsets[i] == 0xffffffff)
1955 break;
1903 1956
1904 nested_svm_unmap(page); 1957 offset = svm->nested.vmcb_msrpm + msrpm_offsets[i];
1958 p = msrpm_offsets[i] / 4;
1959
1960 if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
1961 return false;
1962
1963 svm->nested.msrpm[p] = svm->msrpm[p] | value;
1964 }
1965
1966 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
1905 1967
1906 return true; 1968 return true;
1907} 1969}