author		Gleb Natapov <gleb@redhat.com>	2010-02-16 03:51:48 -0500
committer	Avi Kivity <avi@redhat.com>	2010-04-25 05:27:28 -0400
commit		89a27f4d0e042a2fa3391a76b652aec3e16ef200 (patch)
tree		d2cf954c066c6f5fbd51a15a439b63d1dba53edd /arch/x86/kvm/x86.c
parent		679613442f84702c06a80f2320cb8a50089200bc (diff)
KVM: use desc_ptr struct instead of kvm private descriptor_table
x86 arch defines desc_ptr for idt/gdt pointers, no need to define another structure in kvm code.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3c4ca98ad27f..274a8e39bca7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -225,7 +225,7 @@ static void drop_user_return_notifiers(void *ignore)
 
 unsigned long segment_base(u16 selector)
 {
-	struct descriptor_table gdt;
+	struct desc_ptr gdt;
 	struct desc_struct *d;
 	unsigned long table_base;
 	unsigned long v;
@@ -234,7 +234,7 @@ unsigned long segment_base(u16 selector)
 		return 0;
 
 	kvm_get_gdt(&gdt);
-	table_base = gdt.base;
+	table_base = gdt.address;
 
 	if (selector & 4) { /* from ldt */
 		u16 ldt_selector = kvm_read_ldt();
@@ -3949,14 +3949,14 @@ static u64 mk_cr_64(u64 curr_cr, u32 new_val)
 
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
-	struct descriptor_table dt = { limit, base };
+	struct desc_ptr dt = { limit, base };
 
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 }
 
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
-	struct descriptor_table dt = { limit, base };
+	struct desc_ptr dt = { limit, base };
 
 	kvm_x86_ops->set_idt(vcpu, &dt);
 }
@@ -4581,7 +4581,7 @@ EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	struct descriptor_table dt;
+	struct desc_ptr dt;
 
 	vcpu_load(vcpu);
 
@@ -4596,11 +4596,11 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
 	kvm_x86_ops->get_idt(vcpu, &dt);
-	sregs->idt.limit = dt.limit;
-	sregs->idt.base = dt.base;
+	sregs->idt.limit = dt.size;
+	sregs->idt.base = dt.address;
 	kvm_x86_ops->get_gdt(vcpu, &dt);
-	sregs->gdt.limit = dt.limit;
-	sregs->gdt.base = dt.base;
+	sregs->gdt.limit = dt.size;
+	sregs->gdt.base = dt.address;
 
 	sregs->cr0 = kvm_read_cr0(vcpu);
 	sregs->cr2 = vcpu->arch.cr2;
@@ -4672,7 +4672,7 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
 
 static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
 					  u16 selector,
-					  struct descriptor_table *dtable)
+					  struct desc_ptr *dtable)
 {
 	if (selector & 1 << 2) {
 		struct kvm_segment kvm_seg;
@@ -4680,10 +4680,10 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
 		kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
 
 		if (kvm_seg.unusable)
-			dtable->limit = 0;
+			dtable->size = 0;
 		else
-			dtable->limit = kvm_seg.limit;
-		dtable->base = kvm_seg.base;
+			dtable->size = kvm_seg.limit;
+		dtable->address = kvm_seg.base;
 	}
 	else
 		kvm_x86_ops->get_gdt(vcpu, dtable);
@@ -4693,7 +4693,7 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
-	struct descriptor_table dtable;
+	struct desc_ptr dtable;
 	u16 index = selector >> 3;
 	int ret;
 	u32 err;
@@ -4701,7 +4701,7 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
 	get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
-	if (dtable.limit < index * 8 + 7) {
+	if (dtable.size < index * 8 + 7) {
 		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
@@ -4718,14 +4718,14 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
-	struct descriptor_table dtable;
+	struct desc_ptr dtable;
 	u16 index = selector >> 3;
 
 	get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
-	if (dtable.limit < index * 8 + 7)
+	if (dtable.size < index * 8 + 7)
 		return 1;
-	return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
+	return kvm_write_guest_virt(dtable.address + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
 }
 
 static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
@@ -5204,15 +5204,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 {
 	int mmu_reset_needed = 0;
 	int pending_vec, max_bits;
-	struct descriptor_table dt;
+	struct desc_ptr dt;
 
 	vcpu_load(vcpu);
 
-	dt.limit = sregs->idt.limit;
-	dt.base = sregs->idt.base;
+	dt.size = sregs->idt.limit;
+	dt.address = sregs->idt.base;
 	kvm_x86_ops->set_idt(vcpu, &dt);
-	dt.limit = sregs->gdt.limit;
-	dt.base = sregs->gdt.base;
+	dt.size = sregs->gdt.limit;
+	dt.address = sregs->gdt.base;
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 
 	vcpu->arch.cr2 = sregs->cr2;
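As a usage note, here is a minimal standalone sketch (not part of the patch; the sketch_* names are hypothetical) of reading the host GDT pointer through the renamed fields, wrapping the sgdt instruction the way KVM's kvm_get_gdt() helper does:

	/* Hypothetical helper: store the host GDT register into a desc_ptr. */
	static inline void sketch_store_gdt(struct desc_ptr *dt)
	{
		asm volatile("sgdt %0" : "=m" (*dt));
	}

	static unsigned long sketch_gdt_base(void)
	{
		struct desc_ptr gdt;

		sketch_store_gdt(&gdt);
		return gdt.address;	/* was gdt.base with the old KVM struct */
	}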