author     Paolo Bonzini <pbonzini@redhat.com>    2017-05-02 10:20:18 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>    2017-05-03 10:30:26 -0400
commit     4e335d9e7ddbcf83d03e7fbe65797ebed2272c18 (patch)
tree       ef791b6623e77a8d3abef05ff0410709def329d8
parent     ee5f7d79a80550179b258417442b7bdbccaf476a (diff)
Revert "KVM: Support vCPU-based gfn->hva cache"
This reverts commit bbd6411513aa8ef3ea02abab61318daf87c1af1e.

I've been sitting on this revert for too long and it unfortunately
missed 4.11.  It's also the reason why I haven't merged ring-based
dirty tracking for 4.12.

Using kvm_vcpu_memslots in kvm_gfn_to_hva_cache_init and
kvm_vcpu_write_guest_offset_cached means that the MSR value can now
be used to access SMRAM, simply by making it point to an SMRAM
physical address.  This is problematic because it lets the guest OS
overwrite memory that it shouldn't be able to touch.

Cc: stable@vger.kernel.org
Fixes: bbd6411513aa8ef3ea02abab61318daf87c1af1e
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
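Background for the fix (editor's note, not part of the commit message): the danger comes from which memslot set the gfn->hva cache is resolved against. The sketch below is paraphrased from include/linux/kvm_host.h and the x86 definitions of this period; it is simplified for illustration and is not the verbatim kernel source.

/* VM-wide lookup: always address space 0, i.e. ordinary guest RAM. */
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

/*
 * vCPU-based lookup: picks the address space the vCPU currently sees.
 * On x86 the address-space id is derived from the vCPU's SMM flag, so a
 * vCPU in SMM resolves gfns against the SMRAM memslots instead.
 */
static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);	/* SMM flag on x86 */

	return __kvm_memslots(vcpu->kvm, as_id);
}

Because the cached gpa comes from a guest-writable MSR (vapic, pv_eoi, steal time, pvclock, async-pf), initializing the cache through the vCPU-based variant would let the guest aim those kernel-side cached writes at an SMRAM physical address; the revert goes back to kvm_memslots() so cached accesses stay in the normal guest address space.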
-rw-r--r--  arch/x86/kvm/lapic.c       22
-rw-r--r--  arch/x86/kvm/x86.c         41
-rw-r--r--  include/linux/kvm_host.h   16
-rw-r--r--  virt/kvm/kvm_main.c        34
4 files changed, 58 insertions, 55 deletions
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bad6a25067bc..9fa5b8164961 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -529,14 +529,16 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 
 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
 {
-	return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
-					   sizeof(val));
+
+	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
+				      sizeof(val));
 }
 
 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
 {
-	return kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, val,
-					  sizeof(*val));
+
+	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
+				      sizeof(*val));
 }
 
 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
@@ -2285,8 +2287,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
 		return;
 
-	if (kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
-				  sizeof(u32)))
+	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+				  sizeof(u32)))
 		return;
 
 	apic_set_tpr(vcpu->arch.apic, data & 0xff);
@@ -2338,14 +2340,14 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
 		max_isr = 0;
 	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
-				sizeof(u32));
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+				sizeof(u32));
 }
 
 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 {
 	if (vapic_addr) {
-		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
 					&vcpu->arch.apic->vapic_cache,
 					vapic_addr, sizeof(u32)))
 			return -EINVAL;
@@ -2439,7 +2441,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
 	vcpu->arch.pv_eoi.msr_val = data;
 	if (!pv_eoi_enabled(vcpu))
 		return 0;
-	return kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.pv_eoi.data,
+	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
 					 addr, sizeof(u8));
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2fe9aa116288..b38a302858a0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1785,7 +1785,7 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct pvclock_vcpu_time_info guest_hv_clock;
 
-	if (unlikely(kvm_vcpu_read_guest_cached(v, &vcpu->pv_time,
+	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
 		&guest_hv_clock, sizeof(guest_hv_clock))))
 		return;
 
@@ -1806,9 +1806,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
-	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
 				&vcpu->hv_clock,
 				sizeof(vcpu->hv_clock.version));
 
 	smp_wmb();
 
@@ -1822,16 +1822,16 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 
 	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
 
-	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
 				&vcpu->hv_clock,
 				sizeof(vcpu->hv_clock));
 
 	smp_wmb();
 
 	vcpu->hv_clock.version++;
-	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
 				&vcpu->hv_clock,
 				sizeof(vcpu->hv_clock.version));
 }
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
@@ -2064,7 +2064,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 		return 0;
 	}
 
-	if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.apf.data, gpa,
+	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
 					sizeof(u32)))
 		return 1;
 
@@ -2083,7 +2083,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	if (unlikely(kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.st.stime,
+	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
 		return;
 
@@ -2094,7 +2094,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.st.steal.version += 1;
 
-	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
 	smp_wmb();
@@ -2103,14 +2103,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		vcpu->arch.st.last_steal;
 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
 
-	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
 	smp_wmb();
 
 	vcpu->arch.st.steal.version += 1;
 
-	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
@@ -2215,7 +2215,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!(data & 1))
 			break;
 
-		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
 		     &vcpu->arch.pv_time, data & ~1ULL,
 		     sizeof(struct pvclock_vcpu_time_info)))
 			vcpu->arch.pv_time_enabled = false;
@@ -2236,7 +2236,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (data & KVM_STEAL_RESERVED_MASK)
 			return 1;
 
-		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.st.stime,
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
 						data & KVM_STEAL_VALID_BITS,
 						sizeof(struct kvm_steal_time)))
 			return 1;
@@ -2858,7 +2858,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.st.steal.preempted = 1;
 
-	kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
+	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
 			&vcpu->arch.st.steal.preempted,
 			offsetof(struct kvm_steal_time, preempted),
 			sizeof(vcpu->arch.st.steal.preempted));
@@ -8527,8 +8527,9 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 
 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
 {
-	return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
-				      sizeof(val));
+
+	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+				      sizeof(val));
 }
 
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 25cf258a1c9b..3727afdf614d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -650,18 +650,18 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
 			  unsigned long len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
-int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len);
 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 			 int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 		    unsigned long len);
-int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len);
-int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, int offset, unsigned long len);
-int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6281cc2446d5..4c4d3fe10654 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1975,18 +1975,18 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
 	return 0;
 }
 
-int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
+	struct kvm_memslots *slots = kvm_memslots(kvm);
 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva_cache_init);
+EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
 
-int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, int offset, unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
+	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 	gpa_t gpa = ghc->gpa + offset;
 
@@ -1996,7 +1996,7 @@ int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_
 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
 	if (unlikely(!ghc->memslot))
-		return kvm_vcpu_write_guest(vcpu, gpa, data, len);
+		return kvm_write_guest(kvm, gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
@@ -2008,19 +2008,19 @@ int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached);
+EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
 
-int kvm_vcpu_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len)
 {
-	return kvm_vcpu_write_guest_offset_cached(vcpu, ghc, data, 0, len);
+	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
 
-int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
+	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 
 	BUG_ON(len > ghc->len);
@@ -2029,7 +2029,7 @@ int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *g
 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
 	if (unlikely(!ghc->memslot))
-		return kvm_vcpu_read_guest(vcpu, ghc->gpa, data, len);
+		return kvm_read_guest(kvm, ghc->gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
@@ -2040,7 +2040,7 @@ int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *g
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
 
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {