about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorChris Wright <chrisw@sous-sol.org>2011-11-01 20:31:18 -0400
committerAvi Kivity <avi@redhat.com>2011-12-27 04:17:10 -0500
commitfb92045843a8cd99c7b843d9b567a680a3854ba1 (patch)
treead527bd2c48896b379f63f0c114b08b2a8a81c96 /arch
parent5202397df819d3c5a3f201bd4af6b86542115fb6 (diff)
KVM: MMU: remove KVM host pv mmu support
The host side pv mmu support has been marked for feature removal in January 2011. It's not in use, is slower than shadow or hardware assisted paging, and a maintenance burden. It's November 2011, time to remove it.

Signed-off-by: Chris Wright <chrisw@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/kvm_host.h13
-rw-r--r--arch/x86/kvm/mmu.c135
-rw-r--r--arch/x86/kvm/x86.c12
3 files changed, 0 insertions, 160 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c1f19de8b51c..6d8326409974 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -244,13 +244,6 @@ struct kvm_mmu_page {
244 struct rcu_head rcu; 244 struct rcu_head rcu;
245}; 245};
246 246
247struct kvm_pv_mmu_op_buffer {
248 void *ptr;
249 unsigned len;
250 unsigned processed;
251 char buf[512] __aligned(sizeof(long));
252};
253
254struct kvm_pio_request { 247struct kvm_pio_request {
255 unsigned long count; 248 unsigned long count;
256 int in; 249 int in;
@@ -347,10 +340,6 @@ struct kvm_vcpu_arch {
347 */ 340 */
348 struct kvm_mmu *walk_mmu; 341 struct kvm_mmu *walk_mmu;
349 342
350 /* only needed in kvm_pv_mmu_op() path, but it's hot so
351 * put it here to avoid allocation */
352 struct kvm_pv_mmu_op_buffer mmu_op_buffer;
353
354 struct kvm_mmu_memory_cache mmu_pte_list_desc_cache; 343 struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
355 struct kvm_mmu_memory_cache mmu_page_cache; 344 struct kvm_mmu_memory_cache mmu_page_cache;
356 struct kvm_mmu_memory_cache mmu_page_header_cache; 345 struct kvm_mmu_memory_cache mmu_page_header_cache;
@@ -667,8 +656,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
667 656
668int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 657int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
669 const void *val, int bytes); 658 const void *val, int bytes);
670int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
671 gpa_t addr, unsigned long *ret);
672u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn); 659u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
673 660
674extern bool tdp_enabled; 661extern bool tdp_enabled;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e9534cec003f..a9b3a32bed08 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2028,20 +2028,6 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2028} 2028}
2029EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page); 2029EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
2030 2030
2031static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
2032{
2033 struct kvm_mmu_page *sp;
2034 struct hlist_node *node;
2035 LIST_HEAD(invalid_list);
2036
2037 for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
2038 pgprintk("%s: zap %llx %x\n",
2039 __func__, gfn, sp->role.word);
2040 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2041 }
2042 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2043}
2044
2045static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) 2031static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
2046{ 2032{
2047 int slot = memslot_id(kvm, gfn); 2033 int slot = memslot_id(kvm, gfn);
@@ -4004,127 +3990,6 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
4004 return nr_mmu_pages; 3990 return nr_mmu_pages;
4005} 3991}
4006 3992
4007static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
4008 unsigned len)
4009{
4010 if (len > buffer->len)
4011 return NULL;
4012 return buffer->ptr;
4013}
4014
4015static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
4016 unsigned len)
4017{
4018 void *ret;
4019
4020 ret = pv_mmu_peek_buffer(buffer, len);
4021 if (!ret)
4022 return ret;
4023 buffer->ptr += len;
4024 buffer->len -= len;
4025 buffer->processed += len;
4026 return ret;
4027}
4028
4029static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
4030 gpa_t addr, gpa_t value)
4031{
4032 int bytes = 8;
4033 int r;
4034
4035 if (!is_long_mode(vcpu) && !is_pae(vcpu))
4036 bytes = 4;
4037
4038 r = mmu_topup_memory_caches(vcpu);
4039 if (r)
4040 return r;
4041
4042 if (!emulator_write_phys(vcpu, addr, &value, bytes))
4043 return -EFAULT;
4044
4045 return 1;
4046}
4047
4048static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
4049{
4050 (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
4051 return 1;
4052}
4053
4054static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
4055{
4056 spin_lock(&vcpu->kvm->mmu_lock);
4057 mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
4058 spin_unlock(&vcpu->kvm->mmu_lock);
4059 return 1;
4060}
4061
4062static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
4063 struct kvm_pv_mmu_op_buffer *buffer)
4064{
4065 struct kvm_mmu_op_header *header;
4066
4067 header = pv_mmu_peek_buffer(buffer, sizeof *header);
4068 if (!header)
4069 return 0;
4070 switch (header->op) {
4071 case KVM_MMU_OP_WRITE_PTE: {
4072 struct kvm_mmu_op_write_pte *wpte;
4073
4074 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
4075 if (!wpte)
4076 return 0;
4077 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
4078 wpte->pte_val);
4079 }
4080 case KVM_MMU_OP_FLUSH_TLB: {
4081 struct kvm_mmu_op_flush_tlb *ftlb;
4082
4083 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
4084 if (!ftlb)
4085 return 0;
4086 return kvm_pv_mmu_flush_tlb(vcpu);
4087 }
4088 case KVM_MMU_OP_RELEASE_PT: {
4089 struct kvm_mmu_op_release_pt *rpt;
4090
4091 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
4092 if (!rpt)
4093 return 0;
4094 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
4095 }
4096 default: return 0;
4097 }
4098}
4099
4100int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
4101 gpa_t addr, unsigned long *ret)
4102{
4103 int r;
4104 struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
4105
4106 buffer->ptr = buffer->buf;
4107 buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
4108 buffer->processed = 0;
4109
4110 r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
4111 if (r)
4112 goto out;
4113
4114 while (buffer->len) {
4115 r = kvm_pv_mmu_op_one(vcpu, buffer);
4116 if (r < 0)
4117 goto out;
4118 if (r == 0)
4119 break;
4120 }
4121
4122 r = 1;
4123out:
4124 *ret = buffer->processed;
4125 return r;
4126}
4127
4128int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) 3993int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
4129{ 3994{
4130 struct kvm_shadow_walk_iterator iterator; 3995 struct kvm_shadow_walk_iterator iterator;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9c980ce26e61..a3b25a524c9b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5273,15 +5273,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
5273} 5273}
5274EXPORT_SYMBOL_GPL(kvm_emulate_halt); 5274EXPORT_SYMBOL_GPL(kvm_emulate_halt);
5275 5275
5276static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
5277 unsigned long a1)
5278{
5279 if (is_long_mode(vcpu))
5280 return a0;
5281 else
5282 return a0 | ((gpa_t)a1 << 32);
5283}
5284
5285int kvm_hv_hypercall(struct kvm_vcpu *vcpu) 5276int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
5286{ 5277{
5287 u64 param, ingpa, outgpa, ret; 5278 u64 param, ingpa, outgpa, ret;
@@ -5377,9 +5368,6 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
5377 case KVM_HC_VAPIC_POLL_IRQ: 5368 case KVM_HC_VAPIC_POLL_IRQ:
5378 ret = 0; 5369 ret = 0;
5379 break; 5370 break;
5380 case KVM_HC_MMU_OP:
5381 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
5382 break;
5383 default: 5371 default:
5384 ret = -KVM_ENOSYS; 5372 ret = -KVM_ENOSYS;
5385 break; 5373 break;