author     Andrew Honig <ahonig@google.com>     2013-03-29 12:35:21 -0400
committer  Gleb Natapov <gleb@redhat.com>       2013-04-07 06:05:35 -0400
commit     8f964525a121f2ff2df948dac908dcc65be21b5b
tree       1986d7677a1cae8f639c91812da2d8c6ed5bba26 /arch
parent     09a6e1f4ad32243989b30485f78985c0923284cd
KVM: Allow cross page reads and writes from cached translations.
This patch adds support to the kvm_gfn_to_hva_cache_init function for
reads and writes that cross a page boundary. If the range falls within
a single memslot, the cached access remains a fast operation. If the
range is split between two memslots, the slower kvm_read_guest and
kvm_write_guest paths are used instead.
Tested: against the kvm_clock unit tests.
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
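
Note: the generic half of this change lives in the common KVM code and is not
part of the diffstat below, which is limited to 'arch'. As a rough sketch of
the approach the message above describes, assuming the post-patch signature
kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, len) visible in the hunks that follow,
the cache-init helper records the requested length, keeps the fast
single-memslot mapping when the whole range fits, and otherwise clears the
cached memslot so later cached accesses fall back to kvm_read_guest /
kvm_write_guest. This is an illustration of the idea, not the verbatim
upstream implementation:

int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
	gfn_t nr_pages_avail;

	ghc->gpa = gpa;
	ghc->len = len;
	ghc->generation = slots->generation;
	ghc->memslot = __gfn_to_memslot(slots, start_gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
		/* Fast path: the whole range maps contiguously in one memslot. */
		ghc->hva += offset_in_page(gpa);
	} else {
		/*
		 * The range crosses a memslot boundary.  Validate every page
		 * now, then clear ->memslot so the cached read/write helpers
		 * fall back to the slower kvm_read_guest()/kvm_write_guest().
		 */
		while (start_gfn <= end_gfn) {
			ghc->memslot = __gfn_to_memslot(slots, start_gfn);
			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
						   &nr_pages_avail);
			if (kvm_is_error_hva(ghc->hva))
				return -EFAULT;
			start_gfn += nr_pages_avail;
		}
		ghc->memslot = NULL;
	}
	return 0;
}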
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/lapic.c |  2
-rw-r--r--  arch/x86/kvm/x86.c   | 13
2 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 02b51dd4e4ad..f77df1c5de6e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
 	if (!pv_eoi_enabled(vcpu))
 		return 0;
 	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
-					 addr);
+					 addr, sizeof(u8));
 }
 
 void kvm_lapic_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f19ac0aca60d..e1721324c271 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 		return 0;
 	}
 
-	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+					sizeof(u32)))
 		return 1;
 
 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 		gpa_offset = data & ~(PAGE_MASK | 1);
 
-		/* Check that the address is 32-byte aligned. */
-		if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
-			break;
-
 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
-			&vcpu->arch.pv_time, data & ~1ULL))
+			&vcpu->arch.pv_time, data & ~1ULL,
+			sizeof(struct pvclock_vcpu_time_info)))
 			vcpu->arch.pv_time_enabled = false;
 		else
 			vcpu->arch.pv_time_enabled = true;
@@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 
 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-					data & KVM_STEAL_VALID_BITS))
+					data & KVM_STEAL_VALID_BITS,
+					sizeof(struct kvm_steal_time)))
 			return 1;
 
 		vcpu->arch.st.msr_val = data;
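
For completeness, a hedged usage sketch of the updated interface as exercised
by the x86.c hunks above: the caller passes the size of the structure it will
access at cache-init time, and the existing kvm_write_guest_cached() helper
then takes the slow uncached path on its own if that range turned out to span
two memslots. The local variables below are illustrative only; 'data' stands
in for the MSR payload handled by kvm_set_msr_common() above.

	/* Illustrative only: cache a guest pvclock area that may cross a page. */
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_time;
	gpa_t gpa = data & ~1ULL;	/* strip the enable bit from the MSR value */
	struct pvclock_vcpu_time_info hv_clock = { };

	/* Register the full structure once; fails if any page is unmapped. */
	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa,
				      sizeof(struct pvclock_vcpu_time_info)))
		vcpu->arch.pv_time_enabled = false;

	/* On each clock update: fast copy if cached, kvm_write_guest() otherwise. */
	kvm_write_guest_cached(vcpu->kvm, ghc, &hv_clock, sizeof(hv_clock));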