aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2010-02-16 03:51:48 -0500
committerAvi Kivity <avi@redhat.com>2010-04-25 05:27:28 -0400
commit89a27f4d0e042a2fa3391a76b652aec3e16ef200 (patch)
treed2cf954c066c6f5fbd51a15a439b63d1dba53edd /arch
parent679613442f84702c06a80f2320cb8a50089200bc (diff)
KVM: use desc_ptr struct instead of kvm private descriptor_table
x86 arch defines desc_ptr for idt/gdt pointers, no need to define another structure in kvm code.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/kvm_host.h17
-rw-r--r--arch/x86/kvm/svm.c28
-rw-r--r--arch/x86/kvm/vmx.c36
-rw-r--r--arch/x86/kvm/x86.c46
4 files changed, 61 insertions, 66 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 06d9e79ca37d..cf392dfb8000 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -461,11 +461,6 @@ struct kvm_vcpu_stat {
461 u32 nmi_injections; 461 u32 nmi_injections;
462}; 462};
463 463
464struct descriptor_table {
465 u16 limit;
466 unsigned long base;
467} __attribute__((packed));
468
469struct kvm_x86_ops { 464struct kvm_x86_ops {
470 int (*cpu_has_kvm_support)(void); /* __init */ 465 int (*cpu_has_kvm_support)(void); /* __init */
471 int (*disabled_by_bios)(void); /* __init */ 466 int (*disabled_by_bios)(void); /* __init */
@@ -503,10 +498,10 @@ struct kvm_x86_ops {
503 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); 498 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
504 void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); 499 void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
505 void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); 500 void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
506 void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 501 void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
507 void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 502 void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
508 void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 503 void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
509 void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 504 void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
510 int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest); 505 int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
511 int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value); 506 int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
512 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); 507 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
@@ -724,12 +719,12 @@ static inline void kvm_load_ldt(u16 sel)
724 asm("lldt %0" : : "rm"(sel)); 719 asm("lldt %0" : : "rm"(sel));
725} 720}
726 721
727static inline void kvm_get_idt(struct descriptor_table *table) 722static inline void kvm_get_idt(struct desc_ptr *table)
728{ 723{
729 asm("sidt %0" : "=m"(*table)); 724 asm("sidt %0" : "=m"(*table));
730} 725}
731 726
732static inline void kvm_get_gdt(struct descriptor_table *table) 727static inline void kvm_get_gdt(struct desc_ptr *table)
733{ 728{
734 asm("sgdt %0" : "=m"(*table)); 729 asm("sgdt %0" : "=m"(*table));
735} 730}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2ba58206812a..77fa2e3053b5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -319,7 +319,7 @@ static int svm_hardware_enable(void *garbage)
319 319
320 struct svm_cpu_data *sd; 320 struct svm_cpu_data *sd;
321 uint64_t efer; 321 uint64_t efer;
322 struct descriptor_table gdt_descr; 322 struct desc_ptr gdt_descr;
323 struct desc_struct *gdt; 323 struct desc_struct *gdt;
324 int me = raw_smp_processor_id(); 324 int me = raw_smp_processor_id();
325 325
@@ -345,7 +345,7 @@ static int svm_hardware_enable(void *garbage)
345 sd->next_asid = sd->max_asid + 1; 345 sd->next_asid = sd->max_asid + 1;
346 346
347 kvm_get_gdt(&gdt_descr); 347 kvm_get_gdt(&gdt_descr);
348 gdt = (struct desc_struct *)gdt_descr.base; 348 gdt = (struct desc_struct *)gdt_descr.address;
349 sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); 349 sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
350 350
351 wrmsrl(MSR_EFER, efer | EFER_SVME); 351 wrmsrl(MSR_EFER, efer | EFER_SVME);
@@ -936,36 +936,36 @@ static int svm_get_cpl(struct kvm_vcpu *vcpu)
936 return save->cpl; 936 return save->cpl;
937} 937}
938 938
939static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 939static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
940{ 940{
941 struct vcpu_svm *svm = to_svm(vcpu); 941 struct vcpu_svm *svm = to_svm(vcpu);
942 942
943 dt->limit = svm->vmcb->save.idtr.limit; 943 dt->size = svm->vmcb->save.idtr.limit;
944 dt->base = svm->vmcb->save.idtr.base; 944 dt->address = svm->vmcb->save.idtr.base;
945} 945}
946 946
947static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 947static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
948{ 948{
949 struct vcpu_svm *svm = to_svm(vcpu); 949 struct vcpu_svm *svm = to_svm(vcpu);
950 950
951 svm->vmcb->save.idtr.limit = dt->limit; 951 svm->vmcb->save.idtr.limit = dt->size;
952 svm->vmcb->save.idtr.base = dt->base ; 952 svm->vmcb->save.idtr.base = dt->address ;
953} 953}
954 954
955static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 955static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
956{ 956{
957 struct vcpu_svm *svm = to_svm(vcpu); 957 struct vcpu_svm *svm = to_svm(vcpu);
958 958
959 dt->limit = svm->vmcb->save.gdtr.limit; 959 dt->size = svm->vmcb->save.gdtr.limit;
960 dt->base = svm->vmcb->save.gdtr.base; 960 dt->address = svm->vmcb->save.gdtr.base;
961} 961}
962 962
963static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 963static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
964{ 964{
965 struct vcpu_svm *svm = to_svm(vcpu); 965 struct vcpu_svm *svm = to_svm(vcpu);
966 966
967 svm->vmcb->save.gdtr.limit = dt->limit; 967 svm->vmcb->save.gdtr.limit = dt->size;
968 svm->vmcb->save.gdtr.base = dt->base ; 968 svm->vmcb->save.gdtr.base = dt->address ;
969} 969}
970 970
971static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) 971static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bc933cfb4e66..68f895b00450 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -600,11 +600,11 @@ static void reload_tss(void)
600 /* 600 /*
601 * VT restores TR but not its size. Useless. 601 * VT restores TR but not its size. Useless.
602 */ 602 */
603 struct descriptor_table gdt; 603 struct desc_ptr gdt;
604 struct desc_struct *descs; 604 struct desc_struct *descs;
605 605
606 kvm_get_gdt(&gdt); 606 kvm_get_gdt(&gdt);
607 descs = (void *)gdt.base; 607 descs = (void *)gdt.address;
608 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ 608 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
609 load_TR_desc(); 609 load_TR_desc();
610} 610}
@@ -758,7 +758,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
758 } 758 }
759 759
760 if (vcpu->cpu != cpu) { 760 if (vcpu->cpu != cpu) {
761 struct descriptor_table dt; 761 struct desc_ptr dt;
762 unsigned long sysenter_esp; 762 unsigned long sysenter_esp;
763 763
764 vcpu->cpu = cpu; 764 vcpu->cpu = cpu;
@@ -768,7 +768,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
768 */ 768 */
769 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ 769 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
770 kvm_get_gdt(&dt); 770 kvm_get_gdt(&dt);
771 vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */ 771 vmcs_writel(HOST_GDTR_BASE, dt.address); /* 22.2.4 */
772 772
773 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); 773 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
774 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ 774 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
@@ -1934,28 +1934,28 @@ static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1934 *l = (ar >> 13) & 1; 1934 *l = (ar >> 13) & 1;
1935} 1935}
1936 1936
1937static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 1937static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1938{ 1938{
1939 dt->limit = vmcs_read32(GUEST_IDTR_LIMIT); 1939 dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
1940 dt->base = vmcs_readl(GUEST_IDTR_BASE); 1940 dt->address = vmcs_readl(GUEST_IDTR_BASE);
1941} 1941}
1942 1942
1943static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 1943static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1944{ 1944{
1945 vmcs_write32(GUEST_IDTR_LIMIT, dt->limit); 1945 vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
1946 vmcs_writel(GUEST_IDTR_BASE, dt->base); 1946 vmcs_writel(GUEST_IDTR_BASE, dt->address);
1947} 1947}
1948 1948
1949static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 1949static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1950{ 1950{
1951 dt->limit = vmcs_read32(GUEST_GDTR_LIMIT); 1951 dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
1952 dt->base = vmcs_readl(GUEST_GDTR_BASE); 1952 dt->address = vmcs_readl(GUEST_GDTR_BASE);
1953} 1953}
1954 1954
1955static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 1955static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1956{ 1956{
1957 vmcs_write32(GUEST_GDTR_LIMIT, dt->limit); 1957 vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
1958 vmcs_writel(GUEST_GDTR_BASE, dt->base); 1958 vmcs_writel(GUEST_GDTR_BASE, dt->address);
1959} 1959}
1960 1960
1961static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) 1961static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
@@ -2334,7 +2334,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2334 u32 junk; 2334 u32 junk;
2335 u64 host_pat, tsc_this, tsc_base; 2335 u64 host_pat, tsc_this, tsc_base;
2336 unsigned long a; 2336 unsigned long a;
2337 struct descriptor_table dt; 2337 struct desc_ptr dt;
2338 int i; 2338 int i;
2339 unsigned long kvm_vmx_return; 2339 unsigned long kvm_vmx_return;
2340 u32 exec_control; 2340 u32 exec_control;
@@ -2416,7 +2416,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2416 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ 2416 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
2417 2417
2418 kvm_get_idt(&dt); 2418 kvm_get_idt(&dt);
2419 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */ 2419 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
2420 2420
2421 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); 2421 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
2422 vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */ 2422 vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3c4ca98ad27f..274a8e39bca7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -225,7 +225,7 @@ static void drop_user_return_notifiers(void *ignore)
225 225
226unsigned long segment_base(u16 selector) 226unsigned long segment_base(u16 selector)
227{ 227{
228 struct descriptor_table gdt; 228 struct desc_ptr gdt;
229 struct desc_struct *d; 229 struct desc_struct *d;
230 unsigned long table_base; 230 unsigned long table_base;
231 unsigned long v; 231 unsigned long v;
@@ -234,7 +234,7 @@ unsigned long segment_base(u16 selector)
234 return 0; 234 return 0;
235 235
236 kvm_get_gdt(&gdt); 236 kvm_get_gdt(&gdt);
237 table_base = gdt.base; 237 table_base = gdt.address;
238 238
239 if (selector & 4) { /* from ldt */ 239 if (selector & 4) { /* from ldt */
240 u16 ldt_selector = kvm_read_ldt(); 240 u16 ldt_selector = kvm_read_ldt();
@@ -3949,14 +3949,14 @@ static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3949 3949
3950void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base) 3950void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3951{ 3951{
3952 struct descriptor_table dt = { limit, base }; 3952 struct desc_ptr dt = { limit, base };
3953 3953
3954 kvm_x86_ops->set_gdt(vcpu, &dt); 3954 kvm_x86_ops->set_gdt(vcpu, &dt);
3955} 3955}
3956 3956
3957void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base) 3957void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3958{ 3958{
3959 struct descriptor_table dt = { limit, base }; 3959 struct desc_ptr dt = { limit, base };
3960 3960
3961 kvm_x86_ops->set_idt(vcpu, &dt); 3961 kvm_x86_ops->set_idt(vcpu, &dt);
3962} 3962}
@@ -4581,7 +4581,7 @@ EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
4581int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 4581int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4582 struct kvm_sregs *sregs) 4582 struct kvm_sregs *sregs)
4583{ 4583{
4584 struct descriptor_table dt; 4584 struct desc_ptr dt;
4585 4585
4586 vcpu_load(vcpu); 4586 vcpu_load(vcpu);
4587 4587
@@ -4596,11 +4596,11 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4596 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 4596 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
4597 4597
4598 kvm_x86_ops->get_idt(vcpu, &dt); 4598 kvm_x86_ops->get_idt(vcpu, &dt);
4599 sregs->idt.limit = dt.limit; 4599 sregs->idt.limit = dt.size;
4600 sregs->idt.base = dt.base; 4600 sregs->idt.base = dt.address;
4601 kvm_x86_ops->get_gdt(vcpu, &dt); 4601 kvm_x86_ops->get_gdt(vcpu, &dt);
4602 sregs->gdt.limit = dt.limit; 4602 sregs->gdt.limit = dt.size;
4603 sregs->gdt.base = dt.base; 4603 sregs->gdt.base = dt.address;
4604 4604
4605 sregs->cr0 = kvm_read_cr0(vcpu); 4605 sregs->cr0 = kvm_read_cr0(vcpu);
4606 sregs->cr2 = vcpu->arch.cr2; 4606 sregs->cr2 = vcpu->arch.cr2;
@@ -4672,7 +4672,7 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
4672 4672
4673static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu, 4673static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
4674 u16 selector, 4674 u16 selector,
4675 struct descriptor_table *dtable) 4675 struct desc_ptr *dtable)
4676{ 4676{
4677 if (selector & 1 << 2) { 4677 if (selector & 1 << 2) {
4678 struct kvm_segment kvm_seg; 4678 struct kvm_segment kvm_seg;
@@ -4680,10 +4680,10 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
4680 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR); 4680 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
4681 4681
4682 if (kvm_seg.unusable) 4682 if (kvm_seg.unusable)
4683 dtable->limit = 0; 4683 dtable->size = 0;
4684 else 4684 else
4685 dtable->limit = kvm_seg.limit; 4685 dtable->size = kvm_seg.limit;
4686 dtable->base = kvm_seg.base; 4686 dtable->address = kvm_seg.base;
4687 } 4687 }
4688 else 4688 else
4689 kvm_x86_ops->get_gdt(vcpu, dtable); 4689 kvm_x86_ops->get_gdt(vcpu, dtable);
@@ -4693,7 +4693,7 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
4693static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 4693static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4694 struct desc_struct *seg_desc) 4694 struct desc_struct *seg_desc)
4695{ 4695{
4696 struct descriptor_table dtable; 4696 struct desc_ptr dtable;
4697 u16 index = selector >> 3; 4697 u16 index = selector >> 3;
4698 int ret; 4698 int ret;
4699 u32 err; 4699 u32 err;
@@ -4701,7 +4701,7 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4701 4701
4702 get_segment_descriptor_dtable(vcpu, selector, &dtable); 4702 get_segment_descriptor_dtable(vcpu, selector, &dtable);
4703 4703
4704 if (dtable.limit < index * 8 + 7) { 4704 if (dtable.size < index * 8 + 7) {
4705 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); 4705 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
4706 return X86EMUL_PROPAGATE_FAULT; 4706 return X86EMUL_PROPAGATE_FAULT;
4707 } 4707 }
@@ -4718,14 +4718,14 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4718static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 4718static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4719 struct desc_struct *seg_desc) 4719 struct desc_struct *seg_desc)
4720{ 4720{
4721 struct descriptor_table dtable; 4721 struct desc_ptr dtable;
4722 u16 index = selector >> 3; 4722 u16 index = selector >> 3;
4723 4723
4724 get_segment_descriptor_dtable(vcpu, selector, &dtable); 4724 get_segment_descriptor_dtable(vcpu, selector, &dtable);
4725 4725
4726 if (dtable.limit < index * 8 + 7) 4726 if (dtable.size < index * 8 + 7)
4727 return 1; 4727 return 1;
4728 return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL); 4728 return kvm_write_guest_virt(dtable.address + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
4729} 4729}
4730 4730
4731static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu, 4731static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
@@ -5204,15 +5204,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5204{ 5204{
5205 int mmu_reset_needed = 0; 5205 int mmu_reset_needed = 0;
5206 int pending_vec, max_bits; 5206 int pending_vec, max_bits;
5207 struct descriptor_table dt; 5207 struct desc_ptr dt;
5208 5208
5209 vcpu_load(vcpu); 5209 vcpu_load(vcpu);
5210 5210
5211 dt.limit = sregs->idt.limit; 5211 dt.size = sregs->idt.limit;
5212 dt.base = sregs->idt.base; 5212 dt.address = sregs->idt.base;
5213 kvm_x86_ops->set_idt(vcpu, &dt); 5213 kvm_x86_ops->set_idt(vcpu, &dt);
5214 dt.limit = sregs->gdt.limit; 5214 dt.size = sregs->gdt.limit;
5215 dt.base = sregs->gdt.base; 5215 dt.address = sregs->gdt.base;
5216 kvm_x86_ops->set_gdt(vcpu, &dt); 5216 kvm_x86_ops->set_gdt(vcpu, &dt);
5217 5217
5218 vcpu->arch.cr2 = sregs->cr2; 5218 vcpu->arch.cr2 = sregs->cr2;