author	Zhang Xiantao <xiantao.zhang@intel.com>	2007-12-13 10:50:52 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:58:09 -0500
commit	ad312c7c79f781c822e37effe41307503a2bb85b (patch)
tree	d979bfb70e76ada58b79b456c61a0507a8f0847d /drivers/kvm
parent	682c59a3f3f211ed555b17144f2d82eb8286a1db (diff)
KVM: Portability: Introduce kvm_vcpu_arch
Move all the architecture-specific fields in kvm_vcpu into a new struct kvm_vcpu_arch.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
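For orientation, this is roughly what the split looks like. It is a minimal sketch only: the field names are taken from the hunks below, but the array size, the generic fields shown, and the exact layout are illustrative, not the full definitions from x86.h.

#include <linux/types.h>

struct kvm;
struct kvm_lapic;

/* x86-only vCPU state, split out of struct kvm_vcpu by this patch
 * (abridged; the real struct in x86.h carries many more fields). */
struct kvm_vcpu_arch {
	unsigned long regs[16];		/* indexed by VCPU_REGS_RAX, ...; size illustrative */
	unsigned long rip;
	unsigned long cr0, cr2, cr3, cr4;
	u64 shadow_efer;
	u64 apic_base;
	struct kvm_lapic *apic;		/* in-kernel local APIC, may be NULL */
	int mp_state;
	u32 sipi_vector;
	/* ... mmu, pdptrs[], irq_pending/irq_summary, pio_data, ... */
};

/* The portable part keeps only architecture-independent members plus
 * an embedded arch struct. */
struct kvm_vcpu {
	struct kvm *kvm;
	int vcpu_id;
	/* ... run, wq, stat and other generic fields ... */
	struct kvm_vcpu_arch arch;	/* old vcpu->X is now vcpu->arch.X */
};

The diff below is then mechanical: vcpu->cr0 becomes vcpu->arch.cr0, vcpu->apic becomes vcpu->arch.apic, and the SVM/VMX code reaches the same state through svm->vcpu.arch.* and vmx->vcpu.arch.*.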
Diffstat (limited to 'drivers/kvm')
-rw-r--r--	drivers/kvm/ioapic.c	10
-rw-r--r--	drivers/kvm/kvm_main.c	2
-rw-r--r--	drivers/kvm/lapic.c	91
-rw-r--r--	drivers/kvm/mmu.c	142
-rw-r--r--	drivers/kvm/paging_tmpl.h	16
-rw-r--r--	drivers/kvm/svm.c	122
-rw-r--r--	drivers/kvm/vmx.c	213
-rw-r--r--	drivers/kvm/x86.c	522
-rw-r--r--	drivers/kvm/x86.h	25
-rw-r--r--	drivers/kvm/x86_emulate.c	24
10 files changed, 586 insertions(+), 581 deletions(-)
diff --git a/drivers/kvm/ioapic.c b/drivers/kvm/ioapic.c
index e7debfafca50..04910368c251 100644
--- a/drivers/kvm/ioapic.c
+++ b/drivers/kvm/ioapic.c
@@ -158,7 +158,7 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 	if (dest_mode == 0) {	/* Physical mode. */
 		if (dest == 0xFF) {	/* Broadcast. */
 			for (i = 0; i < KVM_MAX_VCPUS; ++i)
-				if (kvm->vcpus[i] && kvm->vcpus[i]->apic)
+				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
 					mask |= 1 << i;
 			return mask;
 		}
@@ -166,8 +166,8 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 			vcpu = kvm->vcpus[i];
 			if (!vcpu)
 				continue;
-			if (kvm_apic_match_physical_addr(vcpu->apic, dest)) {
-				if (vcpu->apic)
+			if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
+				if (vcpu->arch.apic)
 					mask = 1 << i;
 				break;
 			}
@@ -177,8 +177,8 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 			vcpu = kvm->vcpus[i];
 			if (!vcpu)
 				continue;
-			if (vcpu->apic &&
-			    kvm_apic_match_logical_addr(vcpu->apic, dest))
+			if (vcpu->arch.apic &&
+			    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
 				mask |= 1 << vcpu->vcpu_id;
 		}
 	ioapic_debug("mask %x\n", mask);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 1a03b67eb921..ae2a1bf640bc 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -670,7 +670,7 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (vmf->pgoff == 0)
 		page = virt_to_page(vcpu->run);
 	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
-		page = virt_to_page(vcpu->pio_data);
+		page = virt_to_page(vcpu->arch.pio_data);
 	else
 		return VM_FAULT_SIGBUS;
 	get_page(page);
diff --git a/drivers/kvm/lapic.c b/drivers/kvm/lapic.c
index 466c37f02e85..5c9f46784c26 100644
--- a/drivers/kvm/lapic.c
+++ b/drivers/kvm/lapic.c
@@ -58,6 +58,7 @@
 
 #define VEC_POS(v) ((v) & (32 - 1))
 #define REG_POS(v) (((v) >> 5) << 4)
+
 static inline u32 apic_get_reg(struct kvm_lapic *apic, int reg_off)
 {
 	return *((u32 *) (apic->regs + reg_off));
@@ -90,7 +91,7 @@ static inline void apic_clear_vector(int vec, void *bitmap)
 
 static inline int apic_hw_enabled(struct kvm_lapic *apic)
 {
-	return (apic)->vcpu->apic_base & MSR_IA32_APICBASE_ENABLE;
+	return (apic)->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
 }
 
 static inline int apic_sw_enabled(struct kvm_lapic *apic)
@@ -174,7 +175,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	int highest_irr;
 
 	if (!apic)
@@ -187,7 +188,7 @@ EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
 
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (!apic_test_and_set_irr(vec, apic)) {
 		/* a new pending irq is set in IRR */
@@ -272,7 +273,7 @@ static int apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 			   int short_hand, int dest, int dest_mode)
 {
 	int result = 0;
-	struct kvm_lapic *target = vcpu->apic;
+	struct kvm_lapic *target = vcpu->arch.apic;
 
 	apic_debug("target %p, source %p, dest 0x%x, "
 		   "dest_mode 0x%x, short_hand 0x%x",
@@ -339,10 +340,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		} else
 			apic_clear_vector(vector, apic->regs + APIC_TMR);
 
-		if (vcpu->mp_state == VCPU_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
 			kvm_vcpu_kick(vcpu);
-		else if (vcpu->mp_state == VCPU_MP_STATE_HALTED) {
-			vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+		else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
+			vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
 			if (waitqueue_active(&vcpu->wq))
 				wake_up_interruptible(&vcpu->wq);
 		}
@@ -363,11 +364,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 
 	case APIC_DM_INIT:
 		if (level) {
-			if (vcpu->mp_state == VCPU_MP_STATE_RUNNABLE)
+			if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
 				printk(KERN_DEBUG
 				       "INIT on a runnable vcpu %d\n",
 				       vcpu->vcpu_id);
-			vcpu->mp_state = VCPU_MP_STATE_INIT_RECEIVED;
+			vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED;
 			kvm_vcpu_kick(vcpu);
 		} else {
 			printk(KERN_DEBUG
@@ -380,9 +381,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	case APIC_DM_STARTUP:
 		printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
 		       vcpu->vcpu_id, vector);
-		if (vcpu->mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
-			vcpu->sipi_vector = vector;
-			vcpu->mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
+		if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
+			vcpu->arch.sipi_vector = vector;
+			vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
 			if (waitqueue_active(&vcpu->wq))
 				wake_up_interruptible(&vcpu->wq);
 		}
@@ -411,7 +412,7 @@ static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
 			next = 0;
 		if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
 			continue;
-		apic = kvm->vcpus[next]->apic;
+		apic = kvm->vcpus[next]->arch.apic;
 		if (apic && apic_enabled(apic))
 			break;
 		apic = NULL;
@@ -482,12 +483,12 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 		if (!vcpu)
 			continue;
 
-		if (vcpu->apic &&
+		if (vcpu->arch.apic &&
 		    apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
 			if (delivery_mode == APIC_DM_LOWEST)
 				set_bit(vcpu->vcpu_id, &lpr_map);
 			else
-				__apic_accept_irq(vcpu->apic, delivery_mode,
+				__apic_accept_irq(vcpu->arch.apic, delivery_mode,
 						  vector, level, trig_mode);
 		}
 	}
@@ -495,7 +496,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 	if (delivery_mode == APIC_DM_LOWEST) {
 		target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
 		if (target != NULL)
-			__apic_accept_irq(target->apic, delivery_mode,
+			__apic_accept_irq(target->arch.apic, delivery_mode,
 					  vector, level, trig_mode);
 	}
 }
@@ -772,15 +773,15 @@ static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr)
 
 void kvm_free_lapic(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->apic)
+	if (!vcpu->arch.apic)
 		return;
 
-	hrtimer_cancel(&vcpu->apic->timer.dev);
+	hrtimer_cancel(&vcpu->arch.apic->timer.dev);
 
-	if (vcpu->apic->regs_page)
-		__free_page(vcpu->apic->regs_page);
+	if (vcpu->arch.apic->regs_page)
+		__free_page(vcpu->arch.apic->regs_page);
 
-	kfree(vcpu->apic);
+	kfree(vcpu->arch.apic);
 }
 
 /*
@@ -791,7 +792,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
 
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (!apic)
 		return;
@@ -800,7 +801,7 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	u64 tpr;
 
 	if (!apic)
@@ -813,29 +814,29 @@ EXPORT_SYMBOL_GPL(kvm_lapic_get_cr8);
 
 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (!apic) {
 		value |= MSR_IA32_APICBASE_BSP;
-		vcpu->apic_base = value;
+		vcpu->arch.apic_base = value;
 		return;
 	}
 	if (apic->vcpu->vcpu_id)
 		value &= ~MSR_IA32_APICBASE_BSP;
 
-	vcpu->apic_base = value;
-	apic->base_address = apic->vcpu->apic_base &
+	vcpu->arch.apic_base = value;
+	apic->base_address = apic->vcpu->arch.apic_base &
 			     MSR_IA32_APICBASE_BASE;
 
 	/* with FSB delivery interrupt, we can restart APIC functionality */
 	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
-		   "0x%lx.\n", apic->vcpu->apic_base, apic->base_address);
+		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
 
 }
 
 u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu)
 {
-	return vcpu->apic_base;
+	return vcpu->arch.apic_base;
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_get_base);
 
@@ -847,7 +848,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 	apic_debug("%s\n", __FUNCTION__);
 
 	ASSERT(vcpu);
-	apic = vcpu->apic;
+	apic = vcpu->arch.apic;
 	ASSERT(apic != NULL);
 
 	/* Stop the timer in case it's a reset to an active apic */
@@ -878,19 +879,19 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 	update_divide_count(apic);
 	atomic_set(&apic->timer.pending, 0);
 	if (vcpu->vcpu_id == 0)
-		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
+		vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
 	apic_update_ppr(apic);
 
 	apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
 		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__,
 		   vcpu, kvm_apic_id(apic),
-		   vcpu->apic_base, apic->base_address);
+		   vcpu->arch.apic_base, apic->base_address);
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_reset);
 
 int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	int ret = 0;
 
 	if (!apic)
@@ -915,7 +916,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
 
 	atomic_inc(&apic->timer.pending);
 	if (waitqueue_active(q)) {
-		apic->vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+		apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
 		wake_up_interruptible(q);
 	}
 	if (apic_lvtt_period(apic)) {
@@ -961,7 +962,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	if (!apic)
 		goto nomem;
 
-	vcpu->apic = apic;
+	vcpu->arch.apic = apic;
 
 	apic->regs_page = alloc_page(GFP_KERNEL);
 	if (apic->regs_page == NULL) {
@@ -976,7 +977,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	hrtimer_init(&apic->timer.dev, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	apic->timer.dev.function = apic_timer_fn;
 	apic->base_address = APIC_DEFAULT_PHYS_BASE;
-	vcpu->apic_base = APIC_DEFAULT_PHYS_BASE;
+	vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
 
 	kvm_lapic_reset(vcpu);
 	apic->dev.read = apic_mmio_read;
@@ -994,7 +995,7 @@ EXPORT_SYMBOL_GPL(kvm_create_lapic);
 
 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	int highest_irr;
 
 	if (!apic || !apic_enabled(apic))
@@ -1010,11 +1011,11 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 {
-	u32 lvt0 = apic_get_reg(vcpu->apic, APIC_LVT0);
+	u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
 	int r = 0;
 
 	if (vcpu->vcpu_id == 0) {
-		if (!apic_hw_enabled(vcpu->apic))
+		if (!apic_hw_enabled(vcpu->arch.apic))
 			r = 1;
 		if ((lvt0 & APIC_LVT_MASKED) == 0 &&
 		    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
@@ -1025,7 +1026,7 @@ int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (apic && apic_lvt_enabled(apic, APIC_LVTT) &&
 	    atomic_read(&apic->timer.pending) > 0) {
@@ -1036,7 +1037,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 
 void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec)
 		apic->timer.last_update = ktime_add_ns(
@@ -1047,7 +1048,7 @@ void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
 {
 	int vector = kvm_apic_has_interrupt(vcpu);
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (vector == -1)
 		return -1;
@@ -1060,9 +1061,9 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
 
 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	apic->base_address = vcpu->apic_base &
+	apic->base_address = vcpu->arch.apic_base &
 			     MSR_IA32_APICBASE_BASE;
 	apic_set_reg(apic, APIC_LVR, APIC_VERSION);
 	apic_update_ppr(apic);
@@ -1073,7 +1074,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 
 void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	struct hrtimer *timer;
 
 	if (!apic)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 92ac0d1106b4..da1dedb497b8 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr0 & X86_CR0_WP;
+	return vcpu->arch.cr0 & X86_CR0_WP;
 }
 
 static int is_cpuid_PSE36(void)
@@ -190,7 +190,7 @@ static int is_cpuid_PSE36(void)
 
 static int is_nx(struct kvm_vcpu *vcpu)
 {
-	return vcpu->shadow_efer & EFER_NX;
+	return vcpu->arch.shadow_efer & EFER_NX;
 }
 
 static int is_present_pte(unsigned long pte)
@@ -292,18 +292,18 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 	int r;
 
 	kvm_mmu_free_some_pages(vcpu);
-	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
 				   pte_chain_cache, 4);
 	if (r)
 		goto out;
-	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
 				   rmap_desc_cache, 1);
 	if (r)
 		goto out;
-	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
+	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
 	if (r)
 		goto out;
-	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
 				   mmu_page_header_cache, 4);
 out:
 	return r;
@@ -311,10 +311,10 @@ out:
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
-	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
-	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
-	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
+	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -330,7 +330,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 
 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
 {
-	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
+	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
 				      sizeof(struct kvm_pte_chain));
 }
 
@@ -341,7 +341,7 @@ static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 
 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
 {
-	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
+	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
 				      sizeof(struct kvm_rmap_desc));
 }
 
@@ -568,9 +568,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	if (!vcpu->kvm->n_free_mmu_pages)
 		return NULL;
 
-	sp = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache, sizeof *sp);
-	sp->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
-	sp->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
+	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 	list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
 	ASSERT(is_empty_shadow_page(sp->spt));
@@ -692,11 +692,11 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	struct hlist_node *node;
 
 	role.word = 0;
-	role.glevels = vcpu->mmu.root_level;
+	role.glevels = vcpu->arch.mmu.root_level;
 	role.level = level;
 	role.metaphysical = metaphysical;
 	role.access = access;
-	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
@@ -718,7 +718,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link, bucket);
-	vcpu->mmu.prefetch_page(vcpu, sp);
+	vcpu->arch.mmu.prefetch_page(vcpu, sp);
 	if (!metaphysical)
 		rmap_write_protect(vcpu->kvm, gfn);
 	if (new_page)
@@ -768,7 +768,7 @@ static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
 
 	for (i = 0; i < KVM_MAX_VCPUS; ++i)
 		if (kvm->vcpus[i])
-			kvm->vcpus[i]->last_pte_updated = NULL;
+			kvm->vcpus[i]->arch.last_pte_updated = NULL;
 }
 
 static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -875,7 +875,7 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
 	if (gpa == UNMAPPED_GVA)
 		return NULL;
@@ -962,7 +962,7 @@ unshadowed:
 	else
 		kvm_release_page_clean(page);
 	if (!ptwrite || !*ptwrite)
-		vcpu->last_pte_updated = shadow_pte;
+		vcpu->arch.last_pte_updated = shadow_pte;
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -972,7 +972,7 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
 	int level = PT32E_ROOT_LEVEL;
-	hpa_t table_addr = vcpu->mmu.root_hpa;
+	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
 	int pt_write = 0;
 
 	for (; ; level--) {
@@ -1024,29 +1024,29 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 	int i;
 	struct kvm_mmu_page *sp;
 
-	if (!VALID_PAGE(vcpu->mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 #ifdef CONFIG_X86_64
-	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-		hpa_t root = vcpu->mmu.root_hpa;
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		hpa_t root = vcpu->arch.mmu.root_hpa;
 
 		sp = page_header(root);
 		--sp->root_count;
-		vcpu->mmu.root_hpa = INVALID_PAGE;
+		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 		return;
 	}
 #endif
 	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
 		if (root) {
 			root &= PT64_BASE_ADDR_MASK;
 			sp = page_header(root);
 			--sp->root_count;
 		}
-		vcpu->mmu.pae_root[i] = INVALID_PAGE;
+		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 	}
-	vcpu->mmu.root_hpa = INVALID_PAGE;
+	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
@@ -1055,41 +1055,41 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	gfn_t root_gfn;
 	struct kvm_mmu_page *sp;
 
-	root_gfn = vcpu->cr3 >> PAGE_SHIFT;
+	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
 #ifdef CONFIG_X86_64
-	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-		hpa_t root = vcpu->mmu.root_hpa;
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		hpa_t root = vcpu->arch.mmu.root_hpa;
 
 		ASSERT(!VALID_PAGE(root));
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
 				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
-		vcpu->mmu.root_hpa = root;
+		vcpu->arch.mmu.root_hpa = root;
 		return;
 	}
 #endif
 	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
 		ASSERT(!VALID_PAGE(root));
-		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
-			if (!is_present_pte(vcpu->pdptrs[i])) {
-				vcpu->mmu.pae_root[i] = 0;
+		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
+			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
+				vcpu->arch.mmu.pae_root[i] = 0;
 				continue;
 			}
-			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
-		} else if (vcpu->mmu.root_level == 0)
+			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+		} else if (vcpu->arch.mmu.root_level == 0)
 			root_gfn = 0;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, !is_paging(vcpu),
 				      ACC_ALL, NULL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
-		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
+		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
 	}
-	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
+	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
@@ -1109,7 +1109,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 		return r;
 
 	ASSERT(vcpu);
-	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	gfn = gva >> PAGE_SHIFT;
 
@@ -1124,7 +1124,7 @@ static void nonpaging_free(struct kvm_vcpu *vcpu)
 
 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = &vcpu->mmu;
+	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	context->new_cr3 = nonpaging_new_cr3;
 	context->page_fault = nonpaging_page_fault;
@@ -1171,7 +1171,7 @@ static void paging_free(struct kvm_vcpu *vcpu)
 
 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
-	struct kvm_mmu *context = &vcpu->mmu;
+	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	ASSERT(is_pae(vcpu));
 	context->new_cr3 = paging_new_cr3;
@@ -1192,7 +1192,7 @@ static int paging64_init_context(struct kvm_vcpu *vcpu)
 
 static int paging32_init_context(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = &vcpu->mmu;
+	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging32_page_fault;
@@ -1213,7 +1213,7 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	if (!is_paging(vcpu))
 		return nonpaging_init_context(vcpu);
@@ -1228,9 +1228,9 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
-	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
-		vcpu->mmu.free(vcpu);
-		vcpu->mmu.root_hpa = INVALID_PAGE;
+	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+		vcpu->arch.mmu.free(vcpu);
+		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 	}
 }
 
@@ -1250,7 +1250,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	mmu_alloc_roots(vcpu);
-	kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
 	kvm_mmu_flush_tlb(vcpu);
 out:
 	mutex_unlock(&vcpu->kvm->lock);
@@ -1323,7 +1323,7 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
 
 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
-	u64 *spte = vcpu->last_pte_updated;
+	u64 *spte = vcpu->arch.last_pte_updated;
 
 	return !!(spte && (*spte & PT_ACCESSED_MASK));
 }
@@ -1350,15 +1350,15 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
-	if (gfn == vcpu->last_pt_write_gfn
+	if (gfn == vcpu->arch.last_pt_write_gfn
 	    && !last_updated_pte_accessed(vcpu)) {
-		++vcpu->last_pt_write_count;
-		if (vcpu->last_pt_write_count >= 3)
+		++vcpu->arch.last_pt_write_count;
+		if (vcpu->arch.last_pt_write_count >= 3)
 			flooded = 1;
 	} else {
-		vcpu->last_pt_write_gfn = gfn;
-		vcpu->last_pt_write_count = 1;
-		vcpu->last_pte_updated = NULL;
+		vcpu->arch.last_pt_write_gfn = gfn;
+		vcpu->arch.last_pt_write_count = 1;
+		vcpu->arch.last_pte_updated = NULL;
 	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &vcpu->kvm->mmu_page_hash[index];
@@ -1420,7 +1420,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
 	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
@@ -1443,7 +1443,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 	enum emulation_result er;
 
 	mutex_lock(&vcpu->kvm->lock);
-	r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
+	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
 	if (r < 0)
 		goto out;
 
@@ -1486,7 +1486,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 	}
-	free_page((unsigned long)vcpu->mmu.pae_root);
+	free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
 
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -1508,9 +1508,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
 	if (!page)
 		goto error_1;
-	vcpu->mmu.pae_root = page_address(page);
+	vcpu->arch.mmu.pae_root = page_address(page);
 	for (i = 0; i < 4; ++i)
-		vcpu->mmu.pae_root[i] = INVALID_PAGE;
+		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 
 	return 0;
 
@@ -1522,7 +1522,7 @@ error_1:
 int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	return alloc_mmu_pages(vcpu);
 }
@@ -1530,7 +1530,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	return init_kvm_mmu(vcpu);
 }
@@ -1659,11 +1659,11 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 			printk(KERN_ERR "audit: (%s) nontrapping pte"
 			       " in nonleaf level: levels %d gva %lx"
 			       " level %d pte %llx\n", audit_msg,
-			       vcpu->mmu.root_level, va, level, ent);
+			       vcpu->arch.mmu.root_level, va, level, ent);
 
 			audit_mappings_page(vcpu, ent, va, level - 1);
 		} else {
-			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
+			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
 			struct page *page = gpa_to_page(vcpu, gpa);
 			hpa_t hpa = page_to_phys(page);
 
@@ -1671,7 +1671,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
 				printk(KERN_ERR "xx audit error: (%s) levels %d"
 				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
-				       audit_msg, vcpu->mmu.root_level,
+				       audit_msg, vcpu->arch.mmu.root_level,
 				       va, gpa, hpa, ent,
 				       is_shadow_present_pte(ent));
 			else if (ent == shadow_notrap_nonpresent_pte
@@ -1688,13 +1688,13 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
 {
 	unsigned i;
 
-	if (vcpu->mmu.root_level == 4)
-		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
+	if (vcpu->arch.mmu.root_level == 4)
+		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
 	else
 		for (i = 0; i < 4; ++i)
-			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
+			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
 				audit_mappings_page(vcpu,
-						    vcpu->mmu.pae_root[i],
+						    vcpu->arch.mmu.pae_root[i],
 						    i << 30,
 						    2);
 }
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index fb19596c9589..56b88f7e83ef 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -129,11 +129,11 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 
 	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
 walk:
-	walker->level = vcpu->mmu.root_level;
-	pte = vcpu->cr3;
+	walker->level = vcpu->arch.mmu.root_level;
+	pte = vcpu->arch.cr3;
 #if PTTYPE == 64
 	if (!is_long_mode(vcpu)) {
-		pte = vcpu->pdptrs[(addr >> 30) & 3];
+		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
 		if (!is_present_pte(pte))
 			goto not_present;
 		--walker->level;
@@ -275,10 +275,10 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (!is_present_pte(walker->ptes[walker->level - 1]))
 		return NULL;
 
-	shadow_addr = vcpu->mmu.root_hpa;
-	level = vcpu->mmu.shadow_root_level;
+	shadow_addr = vcpu->arch.mmu.root_hpa;
+	level = vcpu->arch.mmu.shadow_root_level;
 	if (level == PT32E_ROOT_LEVEL) {
-		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
+		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
 		shadow_addr &= PT64_BASE_ADDR_MASK;
 		--level;
 	}
@@ -380,7 +380,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __FUNCTION__);
 		inject_page_fault(vcpu, addr, walker.error_code);
-		vcpu->last_pt_write_count = 0; /* reset fork detector */
+		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 		return 0;
 	}
 
@@ -390,7 +390,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		 shadow_pte, *shadow_pte, write_pt);
 
 	if (!write_pt)
-		vcpu->last_pt_write_count = 0; /* reset fork detector */
+		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 
 	/*
 	 * mmio: emulate if accessible, otherwise its a guest fault.
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index ef21804a5c5c..7888638c02e8 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -99,20 +99,20 @@ static inline u32 svm_has(u32 feat)
 
 static inline u8 pop_irq(struct kvm_vcpu *vcpu)
 {
-	int word_index = __ffs(vcpu->irq_summary);
-	int bit_index = __ffs(vcpu->irq_pending[word_index]);
+	int word_index = __ffs(vcpu->arch.irq_summary);
+	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
 	int irq = word_index * BITS_PER_LONG + bit_index;
 
-	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
-	if (!vcpu->irq_pending[word_index])
-		clear_bit(word_index, &vcpu->irq_summary);
+	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
+	if (!vcpu->arch.irq_pending[word_index])
+		clear_bit(word_index, &vcpu->arch.irq_summary);
 	return irq;
 }
 
 static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
 {
-	set_bit(irq, vcpu->irq_pending);
-	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
+	set_bit(irq, vcpu->arch.irq_pending);
+	set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
 }
 
 static inline void clgi(void)
@@ -185,7 +185,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		efer &= ~EFER_LME;
 
 	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
-	vcpu->shadow_efer = efer;
+	vcpu->arch.shadow_efer = efer;
 }
 
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -227,10 +227,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 		       svm->vmcb->save.rip,
 		       svm->next_rip);
 
-	vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
+	vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
 	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
 
-	vcpu->interrupt_window_open = 1;
+	vcpu->arch.interrupt_window_open = 1;
 }
 
 static int has_svm(void)
@@ -559,8 +559,8 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	if (vcpu->vcpu_id != 0) {
 		svm->vmcb->save.rip = 0;
-		svm->vmcb->save.cs.base = svm->vcpu.sipi_vector << 12;
-		svm->vmcb->save.cs.selector = svm->vcpu.sipi_vector << 8;
+		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
+		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
 	}
 
 	return 0;
@@ -597,9 +597,9 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	fx_init(&svm->vcpu);
 	svm->vcpu.fpu_active = 1;
-	svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (svm->vcpu.vcpu_id == 0)
-		svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
+		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
 
 	return &svm->vcpu;
 
@@ -633,7 +633,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 * increasing TSC.
 		 */
 		rdtscll(tsc_this);
-		delta = vcpu->host_tsc - tsc_this;
+		delta = vcpu->arch.host_tsc - tsc_this;
 		svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
 		kvm_migrate_apic_timer(vcpu);
@@ -652,7 +652,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
-	rdtscll(vcpu->host_tsc);
+	rdtscll(vcpu->arch.host_tsc);
 }
 
 static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
@@ -663,17 +663,17 @@ static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-	vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-	vcpu->rip = svm->vmcb->save.rip;
+	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+	vcpu->arch.rip = svm->vmcb->save.rip;
 }
 
 static void svm_decache_regs(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
-	svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
-	svm->vmcb->save.rip = vcpu->rip;
+	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+	svm->vmcb->save.rip = vcpu->arch.rip;
 }
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -771,24 +771,24 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 #ifdef CONFIG_X86_64
-	if (vcpu->shadow_efer & EFER_LME) {
+	if (vcpu->arch.shadow_efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
-			vcpu->shadow_efer |= EFER_LMA;
+			vcpu->arch.shadow_efer |= EFER_LMA;
 			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
 		}
 
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
-			vcpu->shadow_efer &= ~EFER_LMA;
+			vcpu->arch.shadow_efer &= ~EFER_LMA;
 			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
 		}
 	}
 #endif
-	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
+	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
 		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 		vcpu->fpu_active = 1;
 	}
 
-	vcpu->cr0 = cr0;
+	vcpu->arch.cr0 = cr0;
 	cr0 |= X86_CR0_PG | X86_CR0_WP;
 	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
@@ -796,7 +796,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	vcpu->cr4 = cr4;
+	vcpu->arch.cr4 = cr4;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
 }
 
@@ -901,7 +901,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 		svm->db_regs[dr] = value;
 		return;
 	case 4 ... 5:
-		if (vcpu->cr4 & X86_CR4_DE) {
+		if (vcpu->arch.cr4 & X86_CR4_DE) {
 			*exception = UD_VECTOR;
 			return;
 		}
@@ -950,7 +950,7 @@ static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(svm->vcpu.cr0 & X86_CR0_TS))
+	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
 		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
 	svm->vcpu.fpu_active = 1;
 
@@ -1103,14 +1103,14 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 
 static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
+	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data;
 
 	if (svm_get_msr(&svm->vcpu, ecx, &data))
 		kvm_inject_gp(&svm->vcpu, 0);
 	else {
 		svm->vmcb->save.rax = data & 0xffffffff;
-		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
+		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
 		svm->next_rip = svm->vmcb->save.rip + 2;
 		skip_emulated_instruction(&svm->vcpu);
 	}
@@ -1176,9 +1176,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
 static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
+	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vmcb->save.rax & -1u)
-		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
+		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
 		kvm_inject_gp(&svm->vcpu, 0);
@@ -1205,7 +1205,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
 	 * possible
 	 */
 	if (kvm_run->request_interrupt_window &&
-	    !svm->vcpu.irq_summary) {
+	    !svm->vcpu.arch.irq_summary) {
 		++svm->vcpu.stat.irq_window_exits;
 		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 		return 0;
@@ -1382,20 +1382,20 @@ static void kvm_reput_irq(struct vcpu_svm *svm)
 		push_irq(&svm->vcpu, control->int_vector);
 	}
 
-	svm->vcpu.interrupt_window_open =
+	svm->vcpu.arch.interrupt_window_open =
 		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
 }
 
 static void svm_do_inject_vector(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
-	int word_index = __ffs(vcpu->irq_summary);
-	int bit_index = __ffs(vcpu->irq_pending[word_index]);
+	int word_index = __ffs(vcpu->arch.irq_summary);
+	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
 	int irq = word_index * BITS_PER_LONG + bit_index;
 
-	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
-	if (!vcpu->irq_pending[word_index])
-		clear_bit(word_index, &vcpu->irq_summary);
+	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
+	if (!vcpu->arch.irq_pending[word_index])
+		clear_bit(word_index, &vcpu->arch.irq_summary);
 	svm_inject_irq(svm, irq);
 }
 
@@ -1405,11 +1405,11 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_control_area *control = &svm->vmcb->control;
 
-	svm->vcpu.interrupt_window_open =
+	svm->vcpu.arch.interrupt_window_open =
 		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
 		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
 
-	if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
+	if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
 		/*
 		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
 		 */
@@ -1418,8 +1418,8 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1418 /* 1418 /*
1419 * Interrupts blocked. Wait for unblock. 1419 * Interrupts blocked. Wait for unblock.
1420 */ 1420 */
1421 if (!svm->vcpu.interrupt_window_open && 1421 if (!svm->vcpu.arch.interrupt_window_open &&
1422 (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) 1422 (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
1423 control->intercept |= 1ULL << INTERCEPT_VINTR; 1423 control->intercept |= 1ULL << INTERCEPT_VINTR;
1424 else 1424 else
1425 control->intercept &= ~(1ULL << INTERCEPT_VINTR); 1425 control->intercept &= ~(1ULL << INTERCEPT_VINTR);
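
do_interrupt_requests encodes the interrupt-window test: injection is only attempted when the guest's RFLAGS.IF is set and the VMCB reports no interrupt shadow (the instruction after STI or MOV SS). A condensed sketch of that predicate, using the VMCB fields referenced above:

/* Sketch of the window test computed above from VMCB state. */
static bool sketch_irq_window_open(struct vcpu_svm *svm)
{
	return !(svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
	       (svm->vmcb->save.rflags & X86_EFLAGS_IF);
}

When the window is closed but an interrupt is pending (or userspace asked to be notified), the VINTR intercept is armed so that the next opening causes a VM exit.
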
@@ -1471,7 +1471,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1471 svm->host_cr2 = kvm_read_cr2(); 1471 svm->host_cr2 = kvm_read_cr2();
1472 svm->host_dr6 = read_dr6(); 1472 svm->host_dr6 = read_dr6();
1473 svm->host_dr7 = read_dr7(); 1473 svm->host_dr7 = read_dr7();
1474 svm->vmcb->save.cr2 = vcpu->cr2; 1474 svm->vmcb->save.cr2 = vcpu->arch.cr2;
1475 1475
1476 if (svm->vmcb->save.dr7 & 0xff) { 1476 if (svm->vmcb->save.dr7 & 0xff) {
1477 write_dr7(0); 1477 write_dr7(0);
@@ -1563,21 +1563,21 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1563 : 1563 :
1564 : [svm]"a"(svm), 1564 : [svm]"a"(svm),
1565 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), 1565 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
1566 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])), 1566 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
1567 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])), 1567 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
1568 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])), 1568 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
1569 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])), 1569 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
1570 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])), 1570 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
1571 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP])) 1571 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
1572#ifdef CONFIG_X86_64 1572#ifdef CONFIG_X86_64
1573 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])), 1573 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
1574 [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])), 1574 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
1575 [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])), 1575 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
1576 [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])), 1576 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
1577 [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])), 1577 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
1578 [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])), 1578 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
1579 [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])), 1579 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
1580 [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15])) 1580 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
1581#endif 1581#endif
1582 : "cc", "memory" 1582 : "cc", "memory"
1583#ifdef CONFIG_X86_64 1583#ifdef CONFIG_X86_64
@@ -1591,7 +1591,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1591 if ((svm->vmcb->save.dr7 & 0xff)) 1591 if ((svm->vmcb->save.dr7 & 0xff))
1592 load_db_regs(svm->host_db_regs); 1592 load_db_regs(svm->host_db_regs);
1593 1593
1594 vcpu->cr2 = svm->vmcb->save.cr2; 1594 vcpu->arch.cr2 = svm->vmcb->save.cr2;
1595 1595
1596 write_dr6(svm->host_dr6); 1596 write_dr6(svm->host_dr6);
1597 write_dr7(svm->host_dr7); 1597 write_dr7(svm->host_dr7);
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 83084348581a..cf78ebb2f36e 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -247,7 +247,7 @@ static void __vcpu_clear(void *arg)
247 vmcs_clear(vmx->vmcs); 247 vmcs_clear(vmx->vmcs);
248 if (per_cpu(current_vmcs, cpu) == vmx->vmcs) 248 if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
249 per_cpu(current_vmcs, cpu) = NULL; 249 per_cpu(current_vmcs, cpu) = NULL;
250 rdtscll(vmx->vcpu.host_tsc); 250 rdtscll(vmx->vcpu.arch.host_tsc);
251} 251}
252 252
253static void vcpu_clear(struct vcpu_vmx *vmx) 253static void vcpu_clear(struct vcpu_vmx *vmx)
@@ -343,7 +343,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
343 eb |= 1u << NM_VECTOR; 343 eb |= 1u << NM_VECTOR;
344 if (vcpu->guest_debug.enabled) 344 if (vcpu->guest_debug.enabled)
345 eb |= 1u << 1; 345 eb |= 1u << 1;
346 if (vcpu->rmode.active) 346 if (vcpu->arch.rmode.active)
347 eb = ~0; 347 eb = ~0;
348 vmcs_write32(EXCEPTION_BITMAP, eb); 348 vmcs_write32(EXCEPTION_BITMAP, eb);
349} 349}
@@ -528,7 +528,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
528 * Make sure the time stamp counter is monotonous. 528 * Make sure the time stamp counter is monotonous.
529 */ 529 */
530 rdtscll(tsc_this); 530 rdtscll(tsc_this);
531 delta = vcpu->host_tsc - tsc_this; 531 delta = vcpu->arch.host_tsc - tsc_this;
532 vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta); 532 vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
533 } 533 }
534} 534}
@@ -544,7 +544,7 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
544 return; 544 return;
545 vcpu->fpu_active = 1; 545 vcpu->fpu_active = 1;
546 vmcs_clear_bits(GUEST_CR0, X86_CR0_TS); 546 vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
547 if (vcpu->cr0 & X86_CR0_TS) 547 if (vcpu->arch.cr0 & X86_CR0_TS)
548 vmcs_set_bits(GUEST_CR0, X86_CR0_TS); 548 vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
549 update_exception_bitmap(vcpu); 549 update_exception_bitmap(vcpu);
550} 550}
@@ -570,7 +570,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
570 570
571static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 571static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
572{ 572{
573 if (vcpu->rmode.active) 573 if (vcpu->arch.rmode.active)
574 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 574 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
575 vmcs_writel(GUEST_RFLAGS, rflags); 575 vmcs_writel(GUEST_RFLAGS, rflags);
576} 576}
@@ -592,7 +592,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
592 if (interruptibility & 3) 592 if (interruptibility & 3)
593 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 593 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
594 interruptibility & ~3); 594 interruptibility & ~3);
595 vcpu->interrupt_window_open = 1; 595 vcpu->arch.interrupt_window_open = 1;
596} 596}
597 597
598static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, 598static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -661,7 +661,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
661 * if efer.sce is enabled. 661 * if efer.sce is enabled.
662 */ 662 */
663 index = __find_msr_index(vmx, MSR_K6_STAR); 663 index = __find_msr_index(vmx, MSR_K6_STAR);
664 if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE)) 664 if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
665 move_msr_up(vmx, index, save_nmsrs++); 665 move_msr_up(vmx, index, save_nmsrs++);
666 } 666 }
667#endif 667#endif
@@ -805,12 +805,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
805 805
806/* 806/*
807 * Sync the rsp and rip registers into the vcpu structure. This allows 807 * Sync the rsp and rip registers into the vcpu structure. This allows
808 * registers to be accessed by indexing vcpu->regs. 808 * registers to be accessed by indexing vcpu->arch.regs.
809 */ 809 */
810static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu) 810static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
811{ 811{
812 vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); 812 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
813 vcpu->rip = vmcs_readl(GUEST_RIP); 813 vcpu->arch.rip = vmcs_readl(GUEST_RIP);
814} 814}
815 815
816/* 816/*
@@ -819,8 +819,8 @@ static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
819 */ 819 */
820static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu) 820static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
821{ 821{
822 vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]); 822 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
823 vmcs_writel(GUEST_RIP, vcpu->rip); 823 vmcs_writel(GUEST_RIP, vcpu->arch.rip);
824} 824}
825 825
826static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) 826static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
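
The comment above describes the pattern this patch touches most often: vcpu_load_rsp_rip() mirrors guest RSP/RIP from the VMCS into the vcpu->arch cache so handlers can index vcpu->arch.regs[] directly, and vcpu_put_rsp_rip() writes any modifications back. A sketch of a typical caller (the handler body is hypothetical; handle_cr further down shows the real usage):

/* Sketch: bracket register access with the load/put pair. */
static void sketch_reg_access(struct kvm_vcpu *vcpu)
{
	vcpu_load_rsp_rip(vcpu);		/* VMCS -> arch.regs / arch.rip */
	vcpu->arch.regs[VCPU_REGS_RSP] -= 8;	/* e.g. emulate a push */
	vcpu_put_rsp_rip(vcpu);			/* arch.regs / arch.rip -> VMCS */
}
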
@@ -1111,15 +1111,15 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
1111{ 1111{
1112 unsigned long flags; 1112 unsigned long flags;
1113 1113
1114 vcpu->rmode.active = 0; 1114 vcpu->arch.rmode.active = 0;
1115 1115
1116 vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base); 1116 vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
1117 vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit); 1117 vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
1118 vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar); 1118 vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar);
1119 1119
1120 flags = vmcs_readl(GUEST_RFLAGS); 1120 flags = vmcs_readl(GUEST_RFLAGS);
1121 flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); 1121 flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1122 flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT); 1122 flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
1123 vmcs_writel(GUEST_RFLAGS, flags); 1123 vmcs_writel(GUEST_RFLAGS, flags);
1124 1124
1125 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | 1125 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1127,10 +1127,10 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
1127 1127
1128 update_exception_bitmap(vcpu); 1128 update_exception_bitmap(vcpu);
1129 1129
1130 fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es); 1130 fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
1131 fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds); 1131 fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
1132 fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs); 1132 fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
1133 fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs); 1133 fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
1134 1134
1135 vmcs_write16(GUEST_SS_SELECTOR, 0); 1135 vmcs_write16(GUEST_SS_SELECTOR, 0);
1136 vmcs_write32(GUEST_SS_AR_BYTES, 0x93); 1136 vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
@@ -1168,19 +1168,20 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
1168{ 1168{
1169 unsigned long flags; 1169 unsigned long flags;
1170 1170
1171 vcpu->rmode.active = 1; 1171 vcpu->arch.rmode.active = 1;
1172 1172
1173 vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE); 1173 vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
1174 vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm)); 1174 vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
1175 1175
1176 vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT); 1176 vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
1177 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); 1177 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
1178 1178
1179 vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES); 1179 vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
1180 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 1180 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1181 1181
1182 flags = vmcs_readl(GUEST_RFLAGS); 1182 flags = vmcs_readl(GUEST_RFLAGS);
1183 vcpu->rmode.save_iopl = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; 1183 vcpu->arch.rmode.save_iopl
1184 = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1184 1185
1185 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 1186 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1186 1187
@@ -1198,10 +1199,10 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
1198 vmcs_writel(GUEST_CS_BASE, 0xf0000); 1199 vmcs_writel(GUEST_CS_BASE, 0xf0000);
1199 vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4); 1200 vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
1200 1201
1201 fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es); 1202 fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
1202 fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds); 1203 fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
1203 fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs); 1204 fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
1204 fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs); 1205 fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
1205 1206
1206 kvm_mmu_reset_context(vcpu); 1207 kvm_mmu_reset_context(vcpu);
1207 init_rmode_tss(vcpu->kvm); 1208 init_rmode_tss(vcpu->kvm);
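
enter_rmode() and enter_pmode() round-trip the guest's IOPL around the vm86-based real-mode emulation: entering real mode stashes the IOPL field in arch.rmode.save_iopl and forces IOPL plus VM in RFLAGS, while leaving it masks those bits back out and restores the saved value. A minimal sketch of the two flag transforms, assuming the same save slot:

/* Sketch: RFLAGS handling around the vm86 real-mode hack. */
static unsigned long sketch_flags_for_rmode(struct kvm_vcpu *vcpu, unsigned long flags)
{
	vcpu->arch.rmode.save_iopl = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return flags | X86_EFLAGS_IOPL | X86_EFLAGS_VM;
}

static unsigned long sketch_flags_for_pmode(struct kvm_vcpu *vcpu, unsigned long flags)
{
	flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
	return flags | (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
}
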
@@ -1222,7 +1223,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
1222 | AR_TYPE_BUSY_64_TSS); 1223 | AR_TYPE_BUSY_64_TSS);
1223 } 1224 }
1224 1225
1225 vcpu->shadow_efer |= EFER_LMA; 1226 vcpu->arch.shadow_efer |= EFER_LMA;
1226 1227
1227 find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME; 1228 find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
1228 vmcs_write32(VM_ENTRY_CONTROLS, 1229 vmcs_write32(VM_ENTRY_CONTROLS,
@@ -1232,7 +1233,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
1232 1233
1233static void exit_lmode(struct kvm_vcpu *vcpu) 1234static void exit_lmode(struct kvm_vcpu *vcpu)
1234{ 1235{
1235 vcpu->shadow_efer &= ~EFER_LMA; 1236 vcpu->arch.shadow_efer &= ~EFER_LMA;
1236 1237
1237 vmcs_write32(VM_ENTRY_CONTROLS, 1238 vmcs_write32(VM_ENTRY_CONTROLS,
1238 vmcs_read32(VM_ENTRY_CONTROLS) 1239 vmcs_read32(VM_ENTRY_CONTROLS)
@@ -1243,22 +1244,22 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
1243 1244
1244static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) 1245static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
1245{ 1246{
1246 vcpu->cr4 &= KVM_GUEST_CR4_MASK; 1247 vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
1247 vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK; 1248 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
1248} 1249}
1249 1250
1250static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 1251static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1251{ 1252{
1252 vmx_fpu_deactivate(vcpu); 1253 vmx_fpu_deactivate(vcpu);
1253 1254
1254 if (vcpu->rmode.active && (cr0 & X86_CR0_PE)) 1255 if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
1255 enter_pmode(vcpu); 1256 enter_pmode(vcpu);
1256 1257
1257 if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE)) 1258 if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
1258 enter_rmode(vcpu); 1259 enter_rmode(vcpu);
1259 1260
1260#ifdef CONFIG_X86_64 1261#ifdef CONFIG_X86_64
1261 if (vcpu->shadow_efer & EFER_LME) { 1262 if (vcpu->arch.shadow_efer & EFER_LME) {
1262 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) 1263 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
1263 enter_lmode(vcpu); 1264 enter_lmode(vcpu);
1264 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) 1265 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
@@ -1269,7 +1270,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1269 vmcs_writel(CR0_READ_SHADOW, cr0); 1270 vmcs_writel(CR0_READ_SHADOW, cr0);
1270 vmcs_writel(GUEST_CR0, 1271 vmcs_writel(GUEST_CR0,
1271 (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON); 1272 (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
1272 vcpu->cr0 = cr0; 1273 vcpu->arch.cr0 = cr0;
1273 1274
1274 if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE)) 1275 if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
1275 vmx_fpu_activate(vcpu); 1276 vmx_fpu_activate(vcpu);
@@ -1278,16 +1279,16 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1278static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 1279static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1279{ 1280{
1280 vmcs_writel(GUEST_CR3, cr3); 1281 vmcs_writel(GUEST_CR3, cr3);
1281 if (vcpu->cr0 & X86_CR0_PE) 1282 if (vcpu->arch.cr0 & X86_CR0_PE)
1282 vmx_fpu_deactivate(vcpu); 1283 vmx_fpu_deactivate(vcpu);
1283} 1284}
1284 1285
1285static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1286static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1286{ 1287{
1287 vmcs_writel(CR4_READ_SHADOW, cr4); 1288 vmcs_writel(CR4_READ_SHADOW, cr4);
1288 vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ? 1289 vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ?
1289 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON)); 1290 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
1290 vcpu->cr4 = cr4; 1291 vcpu->arch.cr4 = cr4;
1291} 1292}
1292 1293
1293#ifdef CONFIG_X86_64 1294#ifdef CONFIG_X86_64
@@ -1297,7 +1298,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1297 struct vcpu_vmx *vmx = to_vmx(vcpu); 1298 struct vcpu_vmx *vmx = to_vmx(vcpu);
1298 struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); 1299 struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
1299 1300
1300 vcpu->shadow_efer = efer; 1301 vcpu->arch.shadow_efer = efer;
1301 if (efer & EFER_LMA) { 1302 if (efer & EFER_LMA) {
1302 vmcs_write32(VM_ENTRY_CONTROLS, 1303 vmcs_write32(VM_ENTRY_CONTROLS,
1303 vmcs_read32(VM_ENTRY_CONTROLS) | 1304 vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1374,17 +1375,17 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
1374 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 1375 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1375 u32 ar; 1376 u32 ar;
1376 1377
1377 if (vcpu->rmode.active && seg == VCPU_SREG_TR) { 1378 if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
1378 vcpu->rmode.tr.selector = var->selector; 1379 vcpu->arch.rmode.tr.selector = var->selector;
1379 vcpu->rmode.tr.base = var->base; 1380 vcpu->arch.rmode.tr.base = var->base;
1380 vcpu->rmode.tr.limit = var->limit; 1381 vcpu->arch.rmode.tr.limit = var->limit;
1381 vcpu->rmode.tr.ar = vmx_segment_access_rights(var); 1382 vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
1382 return; 1383 return;
1383 } 1384 }
1384 vmcs_writel(sf->base, var->base); 1385 vmcs_writel(sf->base, var->base);
1385 vmcs_write32(sf->limit, var->limit); 1386 vmcs_write32(sf->limit, var->limit);
1386 vmcs_write16(sf->selector, var->selector); 1387 vmcs_write16(sf->selector, var->selector);
1387 if (vcpu->rmode.active && var->s) { 1388 if (vcpu->arch.rmode.active && var->s) {
1388 /* 1389 /*
1389 * Hack real-mode segments into vm86 compatibility. 1390 * Hack real-mode segments into vm86 compatibility.
1390 */ 1391 */
@@ -1613,9 +1614,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
1613 goto out; 1614 goto out;
1614 } 1615 }
1615 1616
1616 vmx->vcpu.rmode.active = 0; 1617 vmx->vcpu.arch.rmode.active = 0;
1617 1618
1618 vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val(); 1619 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
1619 set_cr8(&vmx->vcpu, 0); 1620 set_cr8(&vmx->vcpu, 0);
1620 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; 1621 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
1621 if (vmx->vcpu.vcpu_id == 0) 1622 if (vmx->vcpu.vcpu_id == 0)
@@ -1632,8 +1633,8 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
1632 vmcs_write16(GUEST_CS_SELECTOR, 0xf000); 1633 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
1633 vmcs_writel(GUEST_CS_BASE, 0x000f0000); 1634 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
1634 } else { 1635 } else {
1635 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.sipi_vector << 8); 1636 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
1636 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.sipi_vector << 12); 1637 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
1637 } 1638 }
1638 vmcs_write32(GUEST_CS_LIMIT, 0xffff); 1639 vmcs_write32(GUEST_CS_LIMIT, 0xffff);
1639 vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); 1640 vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
@@ -1691,7 +1692,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
1691 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); 1692 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
1692 if (vm_need_tpr_shadow(vmx->vcpu.kvm)) 1693 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
1693 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 1694 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
1694 page_to_phys(vmx->vcpu.apic->regs_page)); 1695 page_to_phys(vmx->vcpu.arch.apic->regs_page));
1695 vmcs_write32(TPR_THRESHOLD, 0); 1696 vmcs_write32(TPR_THRESHOLD, 0);
1696 } 1697 }
1697 1698
@@ -1699,8 +1700,8 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
1699 vmcs_write64(APIC_ACCESS_ADDR, 1700 vmcs_write64(APIC_ACCESS_ADDR,
1700 page_to_phys(vmx->vcpu.kvm->apic_access_page)); 1701 page_to_phys(vmx->vcpu.kvm->apic_access_page));
1701 1702
1702 vmx->vcpu.cr0 = 0x60000010; 1703 vmx->vcpu.arch.cr0 = 0x60000010;
1703 vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */ 1704 vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
1704 vmx_set_cr4(&vmx->vcpu, 0); 1705 vmx_set_cr4(&vmx->vcpu, 0);
1705#ifdef CONFIG_X86_64 1706#ifdef CONFIG_X86_64
1706 vmx_set_efer(&vmx->vcpu, 0); 1707 vmx_set_efer(&vmx->vcpu, 0);
@@ -1718,7 +1719,7 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
1718{ 1719{
1719 struct vcpu_vmx *vmx = to_vmx(vcpu); 1720 struct vcpu_vmx *vmx = to_vmx(vcpu);
1720 1721
1721 if (vcpu->rmode.active) { 1722 if (vcpu->arch.rmode.active) {
1722 vmx->rmode.irq.pending = true; 1723 vmx->rmode.irq.pending = true;
1723 vmx->rmode.irq.vector = irq; 1724 vmx->rmode.irq.vector = irq;
1724 vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP); 1725 vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
@@ -1734,13 +1735,13 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
1734 1735
1735static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) 1736static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1736{ 1737{
1737 int word_index = __ffs(vcpu->irq_summary); 1738 int word_index = __ffs(vcpu->arch.irq_summary);
1738 int bit_index = __ffs(vcpu->irq_pending[word_index]); 1739 int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
1739 int irq = word_index * BITS_PER_LONG + bit_index; 1740 int irq = word_index * BITS_PER_LONG + bit_index;
1740 1741
1741 clear_bit(bit_index, &vcpu->irq_pending[word_index]); 1742 clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
1742 if (!vcpu->irq_pending[word_index]) 1743 if (!vcpu->arch.irq_pending[word_index])
1743 clear_bit(word_index, &vcpu->irq_summary); 1744 clear_bit(word_index, &vcpu->arch.irq_summary);
1744 vmx_inject_irq(vcpu, irq); 1745 vmx_inject_irq(vcpu, irq);
1745} 1746}
1746 1747
@@ -1750,12 +1751,12 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1750{ 1751{
1751 u32 cpu_based_vm_exec_control; 1752 u32 cpu_based_vm_exec_control;
1752 1753
1753 vcpu->interrupt_window_open = 1754 vcpu->arch.interrupt_window_open =
1754 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && 1755 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
1755 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); 1756 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
1756 1757
1757 if (vcpu->interrupt_window_open && 1758 if (vcpu->arch.interrupt_window_open &&
1758 vcpu->irq_summary && 1759 vcpu->arch.irq_summary &&
1759 !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK)) 1760 !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
1760 /* 1761 /*
1761 * If interrupts enabled, and not blocked by sti or mov ss. Good. 1762 * If interrupts enabled, and not blocked by sti or mov ss. Good.
@@ -1763,8 +1764,8 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1763 kvm_do_inject_irq(vcpu); 1764 kvm_do_inject_irq(vcpu);
1764 1765
1765 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 1766 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
1766 if (!vcpu->interrupt_window_open && 1767 if (!vcpu->arch.interrupt_window_open &&
1767 (vcpu->irq_summary || kvm_run->request_interrupt_window)) 1768 (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
1768 /* 1769 /*
1769 * Interrupts blocked. Wait for unblock. 1770 * Interrupts blocked. Wait for unblock.
1770 */ 1771 */
@@ -1812,7 +1813,7 @@ static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
1812static int handle_rmode_exception(struct kvm_vcpu *vcpu, 1813static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1813 int vec, u32 err_code) 1814 int vec, u32 err_code)
1814{ 1815{
1815 if (!vcpu->rmode.active) 1816 if (!vcpu->arch.rmode.active)
1816 return 0; 1817 return 0;
1817 1818
1818 /* 1819 /*
@@ -1843,8 +1844,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1843 1844
1844 if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) { 1845 if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
1845 int irq = vect_info & VECTORING_INFO_VECTOR_MASK; 1846 int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
1846 set_bit(irq, vcpu->irq_pending); 1847 set_bit(irq, vcpu->arch.irq_pending);
1847 set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary); 1848 set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
1848 } 1849 }
1849 1850
1850 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */ 1851 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
@@ -1871,11 +1872,11 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1871 return kvm_mmu_page_fault(vcpu, cr2, error_code); 1872 return kvm_mmu_page_fault(vcpu, cr2, error_code);
1872 } 1873 }
1873 1874
1874 if (vcpu->rmode.active && 1875 if (vcpu->arch.rmode.active &&
1875 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK, 1876 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
1876 error_code)) { 1877 error_code)) {
1877 if (vcpu->halt_request) { 1878 if (vcpu->arch.halt_request) {
1878 vcpu->halt_request = 0; 1879 vcpu->arch.halt_request = 0;
1879 return kvm_emulate_halt(vcpu); 1880 return kvm_emulate_halt(vcpu);
1880 } 1881 }
1881 return 1; 1882 return 1;
@@ -1956,22 +1957,22 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1956 switch (cr) { 1957 switch (cr) {
1957 case 0: 1958 case 0:
1958 vcpu_load_rsp_rip(vcpu); 1959 vcpu_load_rsp_rip(vcpu);
1959 set_cr0(vcpu, vcpu->regs[reg]); 1960 set_cr0(vcpu, vcpu->arch.regs[reg]);
1960 skip_emulated_instruction(vcpu); 1961 skip_emulated_instruction(vcpu);
1961 return 1; 1962 return 1;
1962 case 3: 1963 case 3:
1963 vcpu_load_rsp_rip(vcpu); 1964 vcpu_load_rsp_rip(vcpu);
1964 set_cr3(vcpu, vcpu->regs[reg]); 1965 set_cr3(vcpu, vcpu->arch.regs[reg]);
1965 skip_emulated_instruction(vcpu); 1966 skip_emulated_instruction(vcpu);
1966 return 1; 1967 return 1;
1967 case 4: 1968 case 4:
1968 vcpu_load_rsp_rip(vcpu); 1969 vcpu_load_rsp_rip(vcpu);
1969 set_cr4(vcpu, vcpu->regs[reg]); 1970 set_cr4(vcpu, vcpu->arch.regs[reg]);
1970 skip_emulated_instruction(vcpu); 1971 skip_emulated_instruction(vcpu);
1971 return 1; 1972 return 1;
1972 case 8: 1973 case 8:
1973 vcpu_load_rsp_rip(vcpu); 1974 vcpu_load_rsp_rip(vcpu);
1974 set_cr8(vcpu, vcpu->regs[reg]); 1975 set_cr8(vcpu, vcpu->arch.regs[reg]);
1975 skip_emulated_instruction(vcpu); 1976 skip_emulated_instruction(vcpu);
1976 if (irqchip_in_kernel(vcpu->kvm)) 1977 if (irqchip_in_kernel(vcpu->kvm))
1977 return 1; 1978 return 1;
@@ -1982,8 +1983,8 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1982 case 2: /* clts */ 1983 case 2: /* clts */
1983 vcpu_load_rsp_rip(vcpu); 1984 vcpu_load_rsp_rip(vcpu);
1984 vmx_fpu_deactivate(vcpu); 1985 vmx_fpu_deactivate(vcpu);
1985 vcpu->cr0 &= ~X86_CR0_TS; 1986 vcpu->arch.cr0 &= ~X86_CR0_TS;
1986 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0); 1987 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
1987 vmx_fpu_activate(vcpu); 1988 vmx_fpu_activate(vcpu);
1988 skip_emulated_instruction(vcpu); 1989 skip_emulated_instruction(vcpu);
1989 return 1; 1990 return 1;
@@ -1991,13 +1992,13 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1991 switch (cr) { 1992 switch (cr) {
1992 case 3: 1993 case 3:
1993 vcpu_load_rsp_rip(vcpu); 1994 vcpu_load_rsp_rip(vcpu);
1994 vcpu->regs[reg] = vcpu->cr3; 1995 vcpu->arch.regs[reg] = vcpu->arch.cr3;
1995 vcpu_put_rsp_rip(vcpu); 1996 vcpu_put_rsp_rip(vcpu);
1996 skip_emulated_instruction(vcpu); 1997 skip_emulated_instruction(vcpu);
1997 return 1; 1998 return 1;
1998 case 8: 1999 case 8:
1999 vcpu_load_rsp_rip(vcpu); 2000 vcpu_load_rsp_rip(vcpu);
2000 vcpu->regs[reg] = get_cr8(vcpu); 2001 vcpu->arch.regs[reg] = get_cr8(vcpu);
2001 vcpu_put_rsp_rip(vcpu); 2002 vcpu_put_rsp_rip(vcpu);
2002 skip_emulated_instruction(vcpu); 2003 skip_emulated_instruction(vcpu);
2003 return 1; 2004 return 1;
@@ -2043,7 +2044,7 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2043 default: 2044 default:
2044 val = 0; 2045 val = 0;
2045 } 2046 }
2046 vcpu->regs[reg] = val; 2047 vcpu->arch.regs[reg] = val;
2047 } else { 2048 } else {
2048 /* mov to dr */ 2049 /* mov to dr */
2049 } 2050 }
@@ -2060,7 +2061,7 @@ static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2060 2061
2061static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2062static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2062{ 2063{
2063 u32 ecx = vcpu->regs[VCPU_REGS_RCX]; 2064 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
2064 u64 data; 2065 u64 data;
2065 2066
2066 if (vmx_get_msr(vcpu, ecx, &data)) { 2067 if (vmx_get_msr(vcpu, ecx, &data)) {
@@ -2069,17 +2070,17 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2069 } 2070 }
2070 2071
2071 /* FIXME: handling of bits 32:63 of rax, rdx */ 2072 /* FIXME: handling of bits 32:63 of rax, rdx */
2072 vcpu->regs[VCPU_REGS_RAX] = data & -1u; 2073 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
2073 vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u; 2074 vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
2074 skip_emulated_instruction(vcpu); 2075 skip_emulated_instruction(vcpu);
2075 return 1; 2076 return 1;
2076} 2077}
2077 2078
2078static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2079static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2079{ 2080{
2080 u32 ecx = vcpu->regs[VCPU_REGS_RCX]; 2081 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
2081 u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u) 2082 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
2082 | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32); 2083 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
2083 2084
2084 if (vmx_set_msr(vcpu, ecx, data) != 0) { 2085 if (vmx_set_msr(vcpu, ecx, data) != 0) {
2085 kvm_inject_gp(vcpu, 0); 2086 kvm_inject_gp(vcpu, 0);
@@ -2110,7 +2111,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
2110 * possible 2111 * possible
2111 */ 2112 */
2112 if (kvm_run->request_interrupt_window && 2113 if (kvm_run->request_interrupt_window &&
2113 !vcpu->irq_summary) { 2114 !vcpu->arch.irq_summary) {
2114 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 2115 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
2115 ++vcpu->stat.irq_window_exits; 2116 ++vcpu->stat.irq_window_exits;
2116 return 0; 2117 return 0;
@@ -2270,7 +2271,7 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
2270 if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) { 2271 if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
2271 if ((idtv_info_field & VECTORING_INFO_TYPE_MASK) 2272 if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
2272 == INTR_TYPE_EXT_INTR 2273 == INTR_TYPE_EXT_INTR
2273 && vcpu->rmode.active) { 2274 && vcpu->arch.rmode.active) {
2274 u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK; 2275 u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;
2275 2276
2276 vmx_inject_irq(vcpu, vect); 2277 vmx_inject_irq(vcpu, vect);
@@ -2424,24 +2425,24 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2424 : : "c"(vmx), "d"((unsigned long)HOST_RSP), 2425 : : "c"(vmx), "d"((unsigned long)HOST_RSP),
2425 [launched]"i"(offsetof(struct vcpu_vmx, launched)), 2426 [launched]"i"(offsetof(struct vcpu_vmx, launched)),
2426 [fail]"i"(offsetof(struct vcpu_vmx, fail)), 2427 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
2427 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RAX])), 2428 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
2428 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RBX])), 2429 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
2429 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RCX])), 2430 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
2430 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RDX])), 2431 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
2431 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RSI])), 2432 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
2432 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RDI])), 2433 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
2433 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RBP])), 2434 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
2434#ifdef CONFIG_X86_64 2435#ifdef CONFIG_X86_64
2435 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R8])), 2436 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
2436 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R9])), 2437 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
2437 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R10])), 2438 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
2438 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R11])), 2439 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
2439 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R12])), 2440 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
2440 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R13])), 2441 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
2441 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R14])), 2442 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
2442 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R15])), 2443 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
2443#endif 2444#endif
2444 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.cr2)) 2445 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
2445 : "cc", "memory" 2446 : "cc", "memory"
2446#ifdef CONFIG_X86_64 2447#ifdef CONFIG_X86_64
2447 , "rbx", "rdi", "rsi" 2448 , "rbx", "rdi", "rsi"
@@ -2455,7 +2456,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2455 if (vmx->rmode.irq.pending) 2456 if (vmx->rmode.irq.pending)
2456 fixup_rmode_irq(vmx); 2457 fixup_rmode_irq(vmx);
2457 2458
2458 vcpu->interrupt_window_open = 2459 vcpu->arch.interrupt_window_open =
2459 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0; 2460 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
2460 2461
2461 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 2462 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 3b79684a3c0c..5a2f33a84e4f 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -113,9 +113,9 @@ EXPORT_SYMBOL_GPL(segment_base);
113u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) 113u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
114{ 114{
115 if (irqchip_in_kernel(vcpu->kvm)) 115 if (irqchip_in_kernel(vcpu->kvm))
116 return vcpu->apic_base; 116 return vcpu->arch.apic_base;
117 else 117 else
118 return vcpu->apic_base; 118 return vcpu->arch.apic_base;
119} 119}
120EXPORT_SYMBOL_GPL(kvm_get_apic_base); 120EXPORT_SYMBOL_GPL(kvm_get_apic_base);
121 121
@@ -125,16 +125,16 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
125 if (irqchip_in_kernel(vcpu->kvm)) 125 if (irqchip_in_kernel(vcpu->kvm))
126 kvm_lapic_set_base(vcpu, data); 126 kvm_lapic_set_base(vcpu, data);
127 else 127 else
128 vcpu->apic_base = data; 128 vcpu->arch.apic_base = data;
129} 129}
130EXPORT_SYMBOL_GPL(kvm_set_apic_base); 130EXPORT_SYMBOL_GPL(kvm_set_apic_base);
131 131
132void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) 132void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
133{ 133{
134 WARN_ON(vcpu->exception.pending); 134 WARN_ON(vcpu->arch.exception.pending);
135 vcpu->exception.pending = true; 135 vcpu->arch.exception.pending = true;
136 vcpu->exception.has_error_code = false; 136 vcpu->arch.exception.has_error_code = false;
137 vcpu->exception.nr = nr; 137 vcpu->arch.exception.nr = nr;
138} 138}
139EXPORT_SYMBOL_GPL(kvm_queue_exception); 139EXPORT_SYMBOL_GPL(kvm_queue_exception);
140 140
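
kvm_queue_exception() above, together with kvm_queue_exception_e() and __queue_exception() in the next hunk, records a pending exception in vcpu->arch.exception so the vendor backend can deliver it on the next guest entry; only one exception may be pending at a time, hence the WARN_ON. A usage sketch with a hypothetical call site (assuming the usual GP_VECTOR constant; kvm_inject_gp(), used throughout this patch, is effectively this call for #GP):

/* Sketch: a failed privileged operation queues #GP with error code 0
 * for delivery at the next entry. */
static void sketch_fail_with_gp(struct kvm_vcpu *vcpu)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
}
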
@@ -142,32 +142,32 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
142 u32 error_code) 142 u32 error_code)
143{ 143{
144 ++vcpu->stat.pf_guest; 144 ++vcpu->stat.pf_guest;
145 if (vcpu->exception.pending && vcpu->exception.nr == PF_VECTOR) { 145 if (vcpu->arch.exception.pending && vcpu->arch.exception.nr == PF_VECTOR) {
146 printk(KERN_DEBUG "kvm: inject_page_fault:" 146 printk(KERN_DEBUG "kvm: inject_page_fault:"
147 " double fault 0x%lx\n", addr); 147 " double fault 0x%lx\n", addr);
148 vcpu->exception.nr = DF_VECTOR; 148 vcpu->arch.exception.nr = DF_VECTOR;
149 vcpu->exception.error_code = 0; 149 vcpu->arch.exception.error_code = 0;
150 return; 150 return;
151 } 151 }
152 vcpu->cr2 = addr; 152 vcpu->arch.cr2 = addr;
153 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code); 153 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
154} 154}
155 155
156void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) 156void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
157{ 157{
158 WARN_ON(vcpu->exception.pending); 158 WARN_ON(vcpu->arch.exception.pending);
159 vcpu->exception.pending = true; 159 vcpu->arch.exception.pending = true;
160 vcpu->exception.has_error_code = true; 160 vcpu->arch.exception.has_error_code = true;
161 vcpu->exception.nr = nr; 161 vcpu->arch.exception.nr = nr;
162 vcpu->exception.error_code = error_code; 162 vcpu->arch.exception.error_code = error_code;
163} 163}
164EXPORT_SYMBOL_GPL(kvm_queue_exception_e); 164EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
165 165
166static void __queue_exception(struct kvm_vcpu *vcpu) 166static void __queue_exception(struct kvm_vcpu *vcpu)
167{ 167{
168 kvm_x86_ops->queue_exception(vcpu, vcpu->exception.nr, 168 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
169 vcpu->exception.has_error_code, 169 vcpu->arch.exception.has_error_code,
170 vcpu->exception.error_code); 170 vcpu->arch.exception.error_code);
171} 171}
172 172
173/* 173/*
@@ -179,7 +179,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
179 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; 179 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
180 int i; 180 int i;
181 int ret; 181 int ret;
182 u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)]; 182 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
183 183
184 mutex_lock(&vcpu->kvm->lock); 184 mutex_lock(&vcpu->kvm->lock);
185 ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte, 185 ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
@@ -196,7 +196,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
196 } 196 }
197 ret = 1; 197 ret = 1;
198 198
199 memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs)); 199 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
200out: 200out:
201 mutex_unlock(&vcpu->kvm->lock); 201 mutex_unlock(&vcpu->kvm->lock);
202 202
@@ -205,7 +205,7 @@ out:
205 205
206static bool pdptrs_changed(struct kvm_vcpu *vcpu) 206static bool pdptrs_changed(struct kvm_vcpu *vcpu)
207{ 207{
208 u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)]; 208 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
209 bool changed = true; 209 bool changed = true;
210 int r; 210 int r;
211 211
@@ -213,10 +213,10 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
213 return false; 213 return false;
214 214
215 mutex_lock(&vcpu->kvm->lock); 215 mutex_lock(&vcpu->kvm->lock);
216 r = kvm_read_guest(vcpu->kvm, vcpu->cr3 & ~31u, pdpte, sizeof(pdpte)); 216 r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
217 if (r < 0) 217 if (r < 0)
218 goto out; 218 goto out;
219 changed = memcmp(pdpte, vcpu->pdptrs, sizeof(pdpte)) != 0; 219 changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
220out: 220out:
221 mutex_unlock(&vcpu->kvm->lock); 221 mutex_unlock(&vcpu->kvm->lock);
222 222
@@ -227,7 +227,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
227{ 227{
228 if (cr0 & CR0_RESERVED_BITS) { 228 if (cr0 & CR0_RESERVED_BITS) {
229 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", 229 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
230 cr0, vcpu->cr0); 230 cr0, vcpu->arch.cr0);
231 kvm_inject_gp(vcpu, 0); 231 kvm_inject_gp(vcpu, 0);
232 return; 232 return;
233 } 233 }
@@ -247,7 +247,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
247 247
248 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { 248 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
249#ifdef CONFIG_X86_64 249#ifdef CONFIG_X86_64
250 if ((vcpu->shadow_efer & EFER_LME)) { 250 if ((vcpu->arch.shadow_efer & EFER_LME)) {
251 int cs_db, cs_l; 251 int cs_db, cs_l;
252 252
253 if (!is_pae(vcpu)) { 253 if (!is_pae(vcpu)) {
@@ -266,7 +266,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
266 } 266 }
267 } else 267 } else
268#endif 268#endif
269 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) { 269 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
270 printk(KERN_DEBUG "set_cr0: #GP, pdptrs " 270 printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
271 "reserved bits\n"); 271 "reserved bits\n");
272 kvm_inject_gp(vcpu, 0); 272 kvm_inject_gp(vcpu, 0);
@@ -276,7 +276,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
276 } 276 }
277 277
278 kvm_x86_ops->set_cr0(vcpu, cr0); 278 kvm_x86_ops->set_cr0(vcpu, cr0);
279 vcpu->cr0 = cr0; 279 vcpu->arch.cr0 = cr0;
280 280
281 mutex_lock(&vcpu->kvm->lock); 281 mutex_lock(&vcpu->kvm->lock);
282 kvm_mmu_reset_context(vcpu); 282 kvm_mmu_reset_context(vcpu);
@@ -287,7 +287,7 @@ EXPORT_SYMBOL_GPL(set_cr0);
287 287
288void lmsw(struct kvm_vcpu *vcpu, unsigned long msw) 288void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
289{ 289{
290 set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f)); 290 set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
291} 291}
292EXPORT_SYMBOL_GPL(lmsw); 292EXPORT_SYMBOL_GPL(lmsw);
293 293
@@ -307,7 +307,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
307 return; 307 return;
308 } 308 }
309 } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE) 309 } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
310 && !load_pdptrs(vcpu, vcpu->cr3)) { 310 && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
311 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); 311 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
312 kvm_inject_gp(vcpu, 0); 312 kvm_inject_gp(vcpu, 0);
313 return; 313 return;
@@ -319,7 +319,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
319 return; 319 return;
320 } 320 }
321 kvm_x86_ops->set_cr4(vcpu, cr4); 321 kvm_x86_ops->set_cr4(vcpu, cr4);
322 vcpu->cr4 = cr4; 322 vcpu->arch.cr4 = cr4;
323 mutex_lock(&vcpu->kvm->lock); 323 mutex_lock(&vcpu->kvm->lock);
324 kvm_mmu_reset_context(vcpu); 324 kvm_mmu_reset_context(vcpu);
325 mutex_unlock(&vcpu->kvm->lock); 325 mutex_unlock(&vcpu->kvm->lock);
@@ -328,7 +328,7 @@ EXPORT_SYMBOL_GPL(set_cr4);
328 328
329void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 329void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
330{ 330{
331 if (cr3 == vcpu->cr3 && !pdptrs_changed(vcpu)) { 331 if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
332 kvm_mmu_flush_tlb(vcpu); 332 kvm_mmu_flush_tlb(vcpu);
333 return; 333 return;
334 } 334 }
@@ -373,8 +373,8 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
373 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) 373 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
374 kvm_inject_gp(vcpu, 0); 374 kvm_inject_gp(vcpu, 0);
375 else { 375 else {
376 vcpu->cr3 = cr3; 376 vcpu->arch.cr3 = cr3;
377 vcpu->mmu.new_cr3(vcpu); 377 vcpu->arch.mmu.new_cr3(vcpu);
378 } 378 }
379 mutex_unlock(&vcpu->kvm->lock); 379 mutex_unlock(&vcpu->kvm->lock);
380} 380}
@@ -390,7 +390,7 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
390 if (irqchip_in_kernel(vcpu->kvm)) 390 if (irqchip_in_kernel(vcpu->kvm))
391 kvm_lapic_set_tpr(vcpu, cr8); 391 kvm_lapic_set_tpr(vcpu, cr8);
392 else 392 else
393 vcpu->cr8 = cr8; 393 vcpu->arch.cr8 = cr8;
394} 394}
395EXPORT_SYMBOL_GPL(set_cr8); 395EXPORT_SYMBOL_GPL(set_cr8);
396 396
@@ -399,7 +399,7 @@ unsigned long get_cr8(struct kvm_vcpu *vcpu)
399 if (irqchip_in_kernel(vcpu->kvm)) 399 if (irqchip_in_kernel(vcpu->kvm))
400 return kvm_lapic_get_cr8(vcpu); 400 return kvm_lapic_get_cr8(vcpu);
401 else 401 else
402 return vcpu->cr8; 402 return vcpu->arch.cr8;
403} 403}
404EXPORT_SYMBOL_GPL(get_cr8); 404EXPORT_SYMBOL_GPL(get_cr8);
405 405
@@ -437,7 +437,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
437 } 437 }
438 438
439 if (is_paging(vcpu) 439 if (is_paging(vcpu)
440 && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) { 440 && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
441 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n"); 441 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
442 kvm_inject_gp(vcpu, 0); 442 kvm_inject_gp(vcpu, 0);
443 return; 443 return;
@@ -446,9 +446,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
446 kvm_x86_ops->set_efer(vcpu, efer); 446 kvm_x86_ops->set_efer(vcpu, efer);
447 447
448 efer &= ~EFER_LMA; 448 efer &= ~EFER_LMA;
449 efer |= vcpu->shadow_efer & EFER_LMA; 449 efer |= vcpu->arch.shadow_efer & EFER_LMA;
450 450
451 vcpu->shadow_efer = efer; 451 vcpu->arch.shadow_efer = efer;
452} 452}
453 453
454#endif 454#endif
@@ -496,7 +496,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
496 kvm_set_apic_base(vcpu, data); 496 kvm_set_apic_base(vcpu, data);
497 break; 497 break;
498 case MSR_IA32_MISC_ENABLE: 498 case MSR_IA32_MISC_ENABLE:
499 vcpu->ia32_misc_enable_msr = data; 499 vcpu->arch.ia32_misc_enable_msr = data;
500 break; 500 break;
501 default: 501 default:
502 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr); 502 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
@@ -550,11 +550,11 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
550 data = kvm_get_apic_base(vcpu); 550 data = kvm_get_apic_base(vcpu);
551 break; 551 break;
552 case MSR_IA32_MISC_ENABLE: 552 case MSR_IA32_MISC_ENABLE:
553 data = vcpu->ia32_misc_enable_msr; 553 data = vcpu->arch.ia32_misc_enable_msr;
554 break; 554 break;
555#ifdef CONFIG_X86_64 555#ifdef CONFIG_X86_64
556 case MSR_EFER: 556 case MSR_EFER:
557 data = vcpu->shadow_efer; 557 data = vcpu->arch.shadow_efer;
558 break; 558 break;
559#endif 559#endif
560 default: 560 default:
@@ -760,8 +760,8 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
760 struct kvm_cpuid_entry2 *e, *entry; 760 struct kvm_cpuid_entry2 *e, *entry;
761 761
762 entry = NULL; 762 entry = NULL;
763 for (i = 0; i < vcpu->cpuid_nent; ++i) { 763 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
764 e = &vcpu->cpuid_entries[i]; 764 e = &vcpu->arch.cpuid_entries[i];
765 if (e->function == 0x80000001) { 765 if (e->function == 0x80000001) {
766 entry = e; 766 entry = e;
767 break; 767 break;
@@ -793,18 +793,18 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
793 cpuid->nent * sizeof(struct kvm_cpuid_entry))) 793 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
794 goto out_free; 794 goto out_free;
795 for (i = 0; i < cpuid->nent; i++) { 795 for (i = 0; i < cpuid->nent; i++) {
796 vcpu->cpuid_entries[i].function = cpuid_entries[i].function; 796 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
797 vcpu->cpuid_entries[i].eax = cpuid_entries[i].eax; 797 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
798 vcpu->cpuid_entries[i].ebx = cpuid_entries[i].ebx; 798 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
799 vcpu->cpuid_entries[i].ecx = cpuid_entries[i].ecx; 799 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
800 vcpu->cpuid_entries[i].edx = cpuid_entries[i].edx; 800 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
801 vcpu->cpuid_entries[i].index = 0; 801 vcpu->arch.cpuid_entries[i].index = 0;
802 vcpu->cpuid_entries[i].flags = 0; 802 vcpu->arch.cpuid_entries[i].flags = 0;
803 vcpu->cpuid_entries[i].padding[0] = 0; 803 vcpu->arch.cpuid_entries[i].padding[0] = 0;
804 vcpu->cpuid_entries[i].padding[1] = 0; 804 vcpu->arch.cpuid_entries[i].padding[1] = 0;
805 vcpu->cpuid_entries[i].padding[2] = 0; 805 vcpu->arch.cpuid_entries[i].padding[2] = 0;
806 } 806 }
807 vcpu->cpuid_nent = cpuid->nent; 807 vcpu->arch.cpuid_nent = cpuid->nent;
808 cpuid_fix_nx_cap(vcpu); 808 cpuid_fix_nx_cap(vcpu);
809 r = 0; 809 r = 0;
810 810
@@ -824,10 +824,10 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
824 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) 824 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
825 goto out; 825 goto out;
826 r = -EFAULT; 826 r = -EFAULT;
827 if (copy_from_user(&vcpu->cpuid_entries, entries, 827 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
828 cpuid->nent * sizeof(struct kvm_cpuid_entry2))) 828 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
829 goto out; 829 goto out;
830 vcpu->cpuid_nent = cpuid->nent; 830 vcpu->arch.cpuid_nent = cpuid->nent;
831 return 0; 831 return 0;
832 832
833out: 833out:
@@ -841,16 +841,16 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
841 int r; 841 int r;
842 842
843 r = -E2BIG; 843 r = -E2BIG;
844 if (cpuid->nent < vcpu->cpuid_nent) 844 if (cpuid->nent < vcpu->arch.cpuid_nent)
845 goto out; 845 goto out;
846 r = -EFAULT; 846 r = -EFAULT;
847 if (copy_to_user(entries, &vcpu->cpuid_entries, 847 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
848 vcpu->cpuid_nent * sizeof(struct kvm_cpuid_entry2))) 848 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
849 goto out; 849 goto out;
850 return 0; 850 return 0;
851 851
852out: 852out:
853 cpuid->nent = vcpu->cpuid_nent; 853 cpuid->nent = vcpu->arch.cpuid_nent;
854 return r; 854 return r;
855} 855}
856 856
@@ -1021,7 +1021,7 @@ static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
1021 struct kvm_lapic_state *s) 1021 struct kvm_lapic_state *s)
1022{ 1022{
1023 vcpu_load(vcpu); 1023 vcpu_load(vcpu);
1024 memcpy(s->regs, vcpu->apic->regs, sizeof *s); 1024 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
1025 vcpu_put(vcpu); 1025 vcpu_put(vcpu);
1026 1026
1027 return 0; 1027 return 0;
@@ -1031,7 +1031,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1031 struct kvm_lapic_state *s) 1031 struct kvm_lapic_state *s)
1032{ 1032{
1033 vcpu_load(vcpu); 1033 vcpu_load(vcpu);
1034 memcpy(vcpu->apic->regs, s->regs, sizeof *s); 1034 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
1035 kvm_apic_post_state_restore(vcpu); 1035 kvm_apic_post_state_restore(vcpu);
1036 vcpu_put(vcpu); 1036 vcpu_put(vcpu);
1037 1037
@@ -1047,8 +1047,8 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1047 return -ENXIO; 1047 return -ENXIO;
1048 vcpu_load(vcpu); 1048 vcpu_load(vcpu);
1049 1049
1050 set_bit(irq->irq, vcpu->irq_pending); 1050 set_bit(irq->irq, vcpu->arch.irq_pending);
1051 set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary); 1051 set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
1052 1052
1053 vcpu_put(vcpu); 1053 vcpu_put(vcpu);
1054 1054
@@ -1499,8 +1499,8 @@ static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
1499{ 1499{
1500 struct kvm_io_device *dev; 1500 struct kvm_io_device *dev;
1501 1501
1502 if (vcpu->apic) { 1502 if (vcpu->arch.apic) {
1503 dev = &vcpu->apic->dev; 1503 dev = &vcpu->arch.apic->dev;
1504 if (dev->in_range(dev, addr)) 1504 if (dev->in_range(dev, addr))
1505 return dev; 1505 return dev;
1506 } 1506 }
@@ -1527,7 +1527,7 @@ int emulator_read_std(unsigned long addr,
1527 void *data = val; 1527 void *data = val;
1528 1528
1529 while (bytes) { 1529 while (bytes) {
1530 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 1530 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1531 unsigned offset = addr & (PAGE_SIZE-1); 1531 unsigned offset = addr & (PAGE_SIZE-1);
1532 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset); 1532 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
1533 int ret; 1533 int ret;
@@ -1561,7 +1561,7 @@ static int emulator_read_emulated(unsigned long addr,
1561 return X86EMUL_CONTINUE; 1561 return X86EMUL_CONTINUE;
1562 } 1562 }
1563 1563
1564 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 1564 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1565 1565
1566 /* For APIC access vmexit */ 1566 /* For APIC access vmexit */
1567 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 1567 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1609,7 +1609,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
1609 struct kvm_vcpu *vcpu) 1609 struct kvm_vcpu *vcpu)
1610{ 1610{
1611 struct kvm_io_device *mmio_dev; 1611 struct kvm_io_device *mmio_dev;
1612 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 1612 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1613 1613
1614 if (gpa == UNMAPPED_GVA) { 1614 if (gpa == UNMAPPED_GVA) {
1615 kvm_inject_page_fault(vcpu, addr, 2); 1615 kvm_inject_page_fault(vcpu, addr, 2);
@@ -1678,7 +1678,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
1678#ifndef CONFIG_X86_64 1678#ifndef CONFIG_X86_64
1679 /* guests cmpxchg8b have to be emulated atomically */ 1679 /* guests cmpxchg8b have to be emulated atomically */
1680 if (bytes == 8) { 1680 if (bytes == 8) {
1681 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 1681 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1682 struct page *page; 1682 struct page *page;
1683 char *addr; 1683 char *addr;
1684 u64 val; 1684 u64 val;
@@ -1715,7 +1715,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
1715 1715
1716int emulate_clts(struct kvm_vcpu *vcpu) 1716int emulate_clts(struct kvm_vcpu *vcpu)
1717{ 1717{
1718 kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS); 1718 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
1719 return X86EMUL_CONTINUE; 1719 return X86EMUL_CONTINUE;
1720} 1720}
1721 1721
@@ -1750,7 +1750,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
1750{ 1750{
1751 static int reported; 1751 static int reported;
1752 u8 opcodes[4]; 1752 u8 opcodes[4];
1753 unsigned long rip = vcpu->rip; 1753 unsigned long rip = vcpu->arch.rip;
1754 unsigned long rip_linear; 1754 unsigned long rip_linear;
1755 1755
1756 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS); 1756 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
@@ -1781,46 +1781,46 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1781{ 1781{
1782 int r; 1782 int r;
1783 1783
1784 vcpu->mmio_fault_cr2 = cr2; 1784 vcpu->arch.mmio_fault_cr2 = cr2;
1785 kvm_x86_ops->cache_regs(vcpu); 1785 kvm_x86_ops->cache_regs(vcpu);
1786 1786
1787 vcpu->mmio_is_write = 0; 1787 vcpu->mmio_is_write = 0;
1788 vcpu->pio.string = 0; 1788 vcpu->arch.pio.string = 0;
1789 1789
1790 if (!no_decode) { 1790 if (!no_decode) {
1791 int cs_db, cs_l; 1791 int cs_db, cs_l;
1792 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 1792 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
1793 1793
1794 vcpu->emulate_ctxt.vcpu = vcpu; 1794 vcpu->arch.emulate_ctxt.vcpu = vcpu;
1795 vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu); 1795 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
1796 vcpu->emulate_ctxt.mode = 1796 vcpu->arch.emulate_ctxt.mode =
1797 (vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM) 1797 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
1798 ? X86EMUL_MODE_REAL : cs_l 1798 ? X86EMUL_MODE_REAL : cs_l
1799 ? X86EMUL_MODE_PROT64 : cs_db 1799 ? X86EMUL_MODE_PROT64 : cs_db
1800 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; 1800 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
1801 1801
1802 if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) { 1802 if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
1803 vcpu->emulate_ctxt.cs_base = 0; 1803 vcpu->arch.emulate_ctxt.cs_base = 0;
1804 vcpu->emulate_ctxt.ds_base = 0; 1804 vcpu->arch.emulate_ctxt.ds_base = 0;
1805 vcpu->emulate_ctxt.es_base = 0; 1805 vcpu->arch.emulate_ctxt.es_base = 0;
1806 vcpu->emulate_ctxt.ss_base = 0; 1806 vcpu->arch.emulate_ctxt.ss_base = 0;
1807 } else { 1807 } else {
1808 vcpu->emulate_ctxt.cs_base = 1808 vcpu->arch.emulate_ctxt.cs_base =
1809 get_segment_base(vcpu, VCPU_SREG_CS); 1809 get_segment_base(vcpu, VCPU_SREG_CS);
1810 vcpu->emulate_ctxt.ds_base = 1810 vcpu->arch.emulate_ctxt.ds_base =
1811 get_segment_base(vcpu, VCPU_SREG_DS); 1811 get_segment_base(vcpu, VCPU_SREG_DS);
1812 vcpu->emulate_ctxt.es_base = 1812 vcpu->arch.emulate_ctxt.es_base =
1813 get_segment_base(vcpu, VCPU_SREG_ES); 1813 get_segment_base(vcpu, VCPU_SREG_ES);
1814 vcpu->emulate_ctxt.ss_base = 1814 vcpu->arch.emulate_ctxt.ss_base =
1815 get_segment_base(vcpu, VCPU_SREG_SS); 1815 get_segment_base(vcpu, VCPU_SREG_SS);
1816 } 1816 }
1817 1817
1818 vcpu->emulate_ctxt.gs_base = 1818 vcpu->arch.emulate_ctxt.gs_base =
1819 get_segment_base(vcpu, VCPU_SREG_GS); 1819 get_segment_base(vcpu, VCPU_SREG_GS);
1820 vcpu->emulate_ctxt.fs_base = 1820 vcpu->arch.emulate_ctxt.fs_base =
1821 get_segment_base(vcpu, VCPU_SREG_FS); 1821 get_segment_base(vcpu, VCPU_SREG_FS);
1822 1822
1823 r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops); 1823 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
1824 ++vcpu->stat.insn_emulation; 1824 ++vcpu->stat.insn_emulation;
1825 if (r) { 1825 if (r) {
1826 ++vcpu->stat.insn_emulation_fail; 1826 ++vcpu->stat.insn_emulation_fail;
@@ -1830,9 +1830,9 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1830 } 1830 }
1831 } 1831 }
1832 1832
1833 r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops); 1833 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
1834 1834
1835 if (vcpu->pio.string) 1835 if (vcpu->arch.pio.string)
1836 return EMULATE_DO_MMIO; 1836 return EMULATE_DO_MMIO;
1837 1837
1838 if ((r || vcpu->mmio_is_write) && run) { 1838 if ((r || vcpu->mmio_is_write) && run) {
@@ -1854,7 +1854,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1854 } 1854 }
1855 1855
1856 kvm_x86_ops->decache_regs(vcpu); 1856 kvm_x86_ops->decache_regs(vcpu);
1857 kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags); 1857 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
1858 1858
1859 if (vcpu->mmio_is_write) { 1859 if (vcpu->mmio_is_write) {
1860 vcpu->mmio_needed = 0; 1860 vcpu->mmio_needed = 0;
@@ -1869,33 +1869,33 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
1869{ 1869{
1870 int i; 1870 int i;
1871 1871
1872 for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i) 1872 for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
1873 if (vcpu->pio.guest_pages[i]) { 1873 if (vcpu->arch.pio.guest_pages[i]) {
1874 kvm_release_page_dirty(vcpu->pio.guest_pages[i]); 1874 kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
1875 vcpu->pio.guest_pages[i] = NULL; 1875 vcpu->arch.pio.guest_pages[i] = NULL;
1876 } 1876 }
1877} 1877}
1878 1878
1879static int pio_copy_data(struct kvm_vcpu *vcpu) 1879static int pio_copy_data(struct kvm_vcpu *vcpu)
1880{ 1880{
1881 void *p = vcpu->pio_data; 1881 void *p = vcpu->arch.pio_data;
1882 void *q; 1882 void *q;
1883 unsigned bytes; 1883 unsigned bytes;
1884 int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1; 1884 int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;
1885 1885
1886 q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE, 1886 q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
1887 PAGE_KERNEL); 1887 PAGE_KERNEL);
1888 if (!q) { 1888 if (!q) {
1889 free_pio_guest_pages(vcpu); 1889 free_pio_guest_pages(vcpu);
1890 return -ENOMEM; 1890 return -ENOMEM;
1891 } 1891 }
1892 q += vcpu->pio.guest_page_offset; 1892 q += vcpu->arch.pio.guest_page_offset;
1893 bytes = vcpu->pio.size * vcpu->pio.cur_count; 1893 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
1894 if (vcpu->pio.in) 1894 if (vcpu->arch.pio.in)
1895 memcpy(q, p, bytes); 1895 memcpy(q, p, bytes);
1896 else 1896 else
1897 memcpy(p, q, bytes); 1897 memcpy(p, q, bytes);
1898 q -= vcpu->pio.guest_page_offset; 1898 q -= vcpu->arch.pio.guest_page_offset;
1899 vunmap(q); 1899 vunmap(q);
1900 free_pio_guest_pages(vcpu); 1900 free_pio_guest_pages(vcpu);
1901 return 0; 1901 return 0;
@@ -1903,7 +1903,7 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
1903 1903
1904int complete_pio(struct kvm_vcpu *vcpu) 1904int complete_pio(struct kvm_vcpu *vcpu)
1905{ 1905{
1906 struct kvm_pio_request *io = &vcpu->pio; 1906 struct kvm_pio_request *io = &vcpu->arch.pio;
1907 long delta; 1907 long delta;
1908 int r; 1908 int r;
1909 1909
@@ -1911,7 +1911,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
1911 1911
1912 if (!io->string) { 1912 if (!io->string) {
1913 if (io->in) 1913 if (io->in)
1914 memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data, 1914 memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
1915 io->size); 1915 io->size);
1916 } else { 1916 } else {
1917 if (io->in) { 1917 if (io->in) {
@@ -1929,15 +1929,15 @@ int complete_pio(struct kvm_vcpu *vcpu)
1929 * The size of the register should really depend on 1929 * The size of the register should really depend on
1930 * current address size. 1930 * current address size.
1931 */ 1931 */
1932 vcpu->regs[VCPU_REGS_RCX] -= delta; 1932 vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
1933 } 1933 }
1934 if (io->down) 1934 if (io->down)
1935 delta = -delta; 1935 delta = -delta;
1936 delta *= io->size; 1936 delta *= io->size;
1937 if (io->in) 1937 if (io->in)
1938 vcpu->regs[VCPU_REGS_RDI] += delta; 1938 vcpu->arch.regs[VCPU_REGS_RDI] += delta;
1939 else 1939 else
1940 vcpu->regs[VCPU_REGS_RSI] += delta; 1940 vcpu->arch.regs[VCPU_REGS_RSI] += delta;
1941 } 1941 }
1942 1942
1943 kvm_x86_ops->decache_regs(vcpu); 1943 kvm_x86_ops->decache_regs(vcpu);
@@ -1955,13 +1955,13 @@ static void kernel_pio(struct kvm_io_device *pio_dev,
1955 /* TODO: String I/O for in kernel device */ 1955 /* TODO: String I/O for in kernel device */
1956 1956
1957 mutex_lock(&vcpu->kvm->lock); 1957 mutex_lock(&vcpu->kvm->lock);
1958 if (vcpu->pio.in) 1958 if (vcpu->arch.pio.in)
1959 kvm_iodevice_read(pio_dev, vcpu->pio.port, 1959 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
1960 vcpu->pio.size, 1960 vcpu->arch.pio.size,
1961 pd); 1961 pd);
1962 else 1962 else
1963 kvm_iodevice_write(pio_dev, vcpu->pio.port, 1963 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
1964 vcpu->pio.size, 1964 vcpu->arch.pio.size,
1965 pd); 1965 pd);
1966 mutex_unlock(&vcpu->kvm->lock); 1966 mutex_unlock(&vcpu->kvm->lock);
1967} 1967}
@@ -1969,8 +1969,8 @@ static void kernel_pio(struct kvm_io_device *pio_dev,
1969static void pio_string_write(struct kvm_io_device *pio_dev, 1969static void pio_string_write(struct kvm_io_device *pio_dev,
1970 struct kvm_vcpu *vcpu) 1970 struct kvm_vcpu *vcpu)
1971{ 1971{
1972 struct kvm_pio_request *io = &vcpu->pio; 1972 struct kvm_pio_request *io = &vcpu->arch.pio;
1973 void *pd = vcpu->pio_data; 1973 void *pd = vcpu->arch.pio_data;
1974 int i; 1974 int i;
1975 1975
1976 mutex_lock(&vcpu->kvm->lock); 1976 mutex_lock(&vcpu->kvm->lock);
@@ -1996,25 +1996,25 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1996 1996
1997 vcpu->run->exit_reason = KVM_EXIT_IO; 1997 vcpu->run->exit_reason = KVM_EXIT_IO;
1998 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 1998 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1999 vcpu->run->io.size = vcpu->pio.size = size; 1999 vcpu->run->io.size = vcpu->arch.pio.size = size;
2000 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 2000 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2001 vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1; 2001 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2002 vcpu->run->io.port = vcpu->pio.port = port; 2002 vcpu->run->io.port = vcpu->arch.pio.port = port;
2003 vcpu->pio.in = in; 2003 vcpu->arch.pio.in = in;
2004 vcpu->pio.string = 0; 2004 vcpu->arch.pio.string = 0;
2005 vcpu->pio.down = 0; 2005 vcpu->arch.pio.down = 0;
2006 vcpu->pio.guest_page_offset = 0; 2006 vcpu->arch.pio.guest_page_offset = 0;
2007 vcpu->pio.rep = 0; 2007 vcpu->arch.pio.rep = 0;
2008 2008
2009 kvm_x86_ops->cache_regs(vcpu); 2009 kvm_x86_ops->cache_regs(vcpu);
2010 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4); 2010 memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
2011 kvm_x86_ops->decache_regs(vcpu); 2011 kvm_x86_ops->decache_regs(vcpu);
2012 2012
2013 kvm_x86_ops->skip_emulated_instruction(vcpu); 2013 kvm_x86_ops->skip_emulated_instruction(vcpu);
2014 2014
2015 pio_dev = vcpu_find_pio_dev(vcpu, port); 2015 pio_dev = vcpu_find_pio_dev(vcpu, port);
2016 if (pio_dev) { 2016 if (pio_dev) {
2017 kernel_pio(pio_dev, vcpu, vcpu->pio_data); 2017 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
2018 complete_pio(vcpu); 2018 complete_pio(vcpu);
2019 return 1; 2019 return 1;
2020 } 2020 }
@@ -2034,15 +2034,15 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2034 2034
2035 vcpu->run->exit_reason = KVM_EXIT_IO; 2035 vcpu->run->exit_reason = KVM_EXIT_IO;
2036 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 2036 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2037 vcpu->run->io.size = vcpu->pio.size = size; 2037 vcpu->run->io.size = vcpu->arch.pio.size = size;
2038 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 2038 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2039 vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count; 2039 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2040 vcpu->run->io.port = vcpu->pio.port = port; 2040 vcpu->run->io.port = vcpu->arch.pio.port = port;
2041 vcpu->pio.in = in; 2041 vcpu->arch.pio.in = in;
2042 vcpu->pio.string = 1; 2042 vcpu->arch.pio.string = 1;
2043 vcpu->pio.down = down; 2043 vcpu->arch.pio.down = down;
2044 vcpu->pio.guest_page_offset = offset_in_page(address); 2044 vcpu->arch.pio.guest_page_offset = offset_in_page(address);
2045 vcpu->pio.rep = rep; 2045 vcpu->arch.pio.rep = rep;
2046 2046
2047 if (!count) { 2047 if (!count) {
2048 kvm_x86_ops->skip_emulated_instruction(vcpu); 2048 kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2072,15 +2072,15 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2072 return 1; 2072 return 1;
2073 } 2073 }
2074 vcpu->run->io.count = now; 2074 vcpu->run->io.count = now;
2075 vcpu->pio.cur_count = now; 2075 vcpu->arch.pio.cur_count = now;
2076 2076
2077 if (vcpu->pio.cur_count == vcpu->pio.count) 2077 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
2078 kvm_x86_ops->skip_emulated_instruction(vcpu); 2078 kvm_x86_ops->skip_emulated_instruction(vcpu);
2079 2079
2080 for (i = 0; i < nr_pages; ++i) { 2080 for (i = 0; i < nr_pages; ++i) {
2081 mutex_lock(&vcpu->kvm->lock); 2081 mutex_lock(&vcpu->kvm->lock);
2082 page = gva_to_page(vcpu, address + i * PAGE_SIZE); 2082 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
2083 vcpu->pio.guest_pages[i] = page; 2083 vcpu->arch.pio.guest_pages[i] = page;
2084 mutex_unlock(&vcpu->kvm->lock); 2084 mutex_unlock(&vcpu->kvm->lock);
2085 if (!page) { 2085 if (!page) {
2086 kvm_inject_gp(vcpu, 0); 2086 kvm_inject_gp(vcpu, 0);
@@ -2090,13 +2090,13 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2090 } 2090 }
2091 2091
2092 pio_dev = vcpu_find_pio_dev(vcpu, port); 2092 pio_dev = vcpu_find_pio_dev(vcpu, port);
2093 if (!vcpu->pio.in) { 2093 if (!vcpu->arch.pio.in) {
2094 /* string PIO write */ 2094 /* string PIO write */
2095 ret = pio_copy_data(vcpu); 2095 ret = pio_copy_data(vcpu);
2096 if (ret >= 0 && pio_dev) { 2096 if (ret >= 0 && pio_dev) {
2097 pio_string_write(pio_dev, vcpu); 2097 pio_string_write(pio_dev, vcpu);
2098 complete_pio(vcpu); 2098 complete_pio(vcpu);
2099 if (vcpu->pio.count == 0) 2099 if (vcpu->arch.pio.count == 0)
2100 ret = 1; 2100 ret = 1;
2101 } 2101 }
2102 } else if (pio_dev) 2102 } else if (pio_dev)
@@ -2156,9 +2156,9 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2156{ 2156{
2157 ++vcpu->stat.halt_exits; 2157 ++vcpu->stat.halt_exits;
2158 if (irqchip_in_kernel(vcpu->kvm)) { 2158 if (irqchip_in_kernel(vcpu->kvm)) {
2159 vcpu->mp_state = VCPU_MP_STATE_HALTED; 2159 vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
2160 kvm_vcpu_block(vcpu); 2160 kvm_vcpu_block(vcpu);
2161 if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE) 2161 if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
2162 return -EINTR; 2162 return -EINTR;
2163 return 1; 2163 return 1;
2164 } else { 2164 } else {
@@ -2174,11 +2174,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2174 2174
2175 kvm_x86_ops->cache_regs(vcpu); 2175 kvm_x86_ops->cache_regs(vcpu);
2176 2176
2177 nr = vcpu->regs[VCPU_REGS_RAX]; 2177 nr = vcpu->arch.regs[VCPU_REGS_RAX];
2178 a0 = vcpu->regs[VCPU_REGS_RBX]; 2178 a0 = vcpu->arch.regs[VCPU_REGS_RBX];
2179 a1 = vcpu->regs[VCPU_REGS_RCX]; 2179 a1 = vcpu->arch.regs[VCPU_REGS_RCX];
2180 a2 = vcpu->regs[VCPU_REGS_RDX]; 2180 a2 = vcpu->arch.regs[VCPU_REGS_RDX];
2181 a3 = vcpu->regs[VCPU_REGS_RSI]; 2181 a3 = vcpu->arch.regs[VCPU_REGS_RSI];
2182 2182
2183 if (!is_long_mode(vcpu)) { 2183 if (!is_long_mode(vcpu)) {
2184 nr &= 0xFFFFFFFF; 2184 nr &= 0xFFFFFFFF;
@@ -2193,7 +2193,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2193 ret = -KVM_ENOSYS; 2193 ret = -KVM_ENOSYS;
2194 break; 2194 break;
2195 } 2195 }
2196 vcpu->regs[VCPU_REGS_RAX] = ret; 2196 vcpu->arch.regs[VCPU_REGS_RAX] = ret;
2197 kvm_x86_ops->decache_regs(vcpu); 2197 kvm_x86_ops->decache_regs(vcpu);
2198 return 0; 2198 return 0;
2199} 2199}
@@ -2215,7 +2215,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2215 2215
2216 kvm_x86_ops->cache_regs(vcpu); 2216 kvm_x86_ops->cache_regs(vcpu);
2217 kvm_x86_ops->patch_hypercall(vcpu, instruction); 2217 kvm_x86_ops->patch_hypercall(vcpu, instruction);
2218 if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu) 2218 if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
2219 != X86EMUL_CONTINUE) 2219 != X86EMUL_CONTINUE)
2220 ret = -EFAULT; 2220 ret = -EFAULT;
2221 2221
@@ -2255,13 +2255,13 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2255 kvm_x86_ops->decache_cr4_guest_bits(vcpu); 2255 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2256 switch (cr) { 2256 switch (cr) {
2257 case 0: 2257 case 0:
2258 return vcpu->cr0; 2258 return vcpu->arch.cr0;
2259 case 2: 2259 case 2:
2260 return vcpu->cr2; 2260 return vcpu->arch.cr2;
2261 case 3: 2261 case 3:
2262 return vcpu->cr3; 2262 return vcpu->arch.cr3;
2263 case 4: 2263 case 4:
2264 return vcpu->cr4; 2264 return vcpu->arch.cr4;
2265 case 8: 2265 case 8:
2266 return get_cr8(vcpu); 2266 return get_cr8(vcpu);
2267 default: 2267 default:
@@ -2275,17 +2275,17 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2275{ 2275{
2276 switch (cr) { 2276 switch (cr) {
2277 case 0: 2277 case 0:
2278 set_cr0(vcpu, mk_cr_64(vcpu->cr0, val)); 2278 set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
2279 *rflags = kvm_x86_ops->get_rflags(vcpu); 2279 *rflags = kvm_x86_ops->get_rflags(vcpu);
2280 break; 2280 break;
2281 case 2: 2281 case 2:
2282 vcpu->cr2 = val; 2282 vcpu->arch.cr2 = val;
2283 break; 2283 break;
2284 case 3: 2284 case 3:
2285 set_cr3(vcpu, val); 2285 set_cr3(vcpu, val);
2286 break; 2286 break;
2287 case 4: 2287 case 4:
2288 set_cr4(vcpu, mk_cr_64(vcpu->cr4, val)); 2288 set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
2289 break; 2289 break;
2290 case 8: 2290 case 8:
2291 set_cr8(vcpu, val & 0xfUL); 2291 set_cr8(vcpu, val & 0xfUL);
@@ -2297,13 +2297,13 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2297 2297
2298static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) 2298static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2299{ 2299{
2300 struct kvm_cpuid_entry2 *e = &vcpu->cpuid_entries[i]; 2300 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2301 int j, nent = vcpu->cpuid_nent; 2301 int j, nent = vcpu->arch.cpuid_nent;
2302 2302
2303 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; 2303 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2304 /* when no next entry is found, the current entry[i] is reselected */ 2304 /* when no next entry is found, the current entry[i] is reselected */
2305 for (j = i + 1; j == i; j = (j + 1) % nent) { 2305 for (j = i + 1; j == i; j = (j + 1) % nent) {
2306 struct kvm_cpuid_entry2 *ej = &vcpu->cpuid_entries[j]; 2306 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
2307 if (ej->function == e->function) { 2307 if (ej->function == e->function) {
2308 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; 2308 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2309 return j; 2309 return j;
@@ -2334,15 +2334,15 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2334 struct kvm_cpuid_entry2 *e, *best; 2334 struct kvm_cpuid_entry2 *e, *best;
2335 2335
2336 kvm_x86_ops->cache_regs(vcpu); 2336 kvm_x86_ops->cache_regs(vcpu);
2337 function = vcpu->regs[VCPU_REGS_RAX]; 2337 function = vcpu->arch.regs[VCPU_REGS_RAX];
2338 index = vcpu->regs[VCPU_REGS_RCX]; 2338 index = vcpu->arch.regs[VCPU_REGS_RCX];
2339 vcpu->regs[VCPU_REGS_RAX] = 0; 2339 vcpu->arch.regs[VCPU_REGS_RAX] = 0;
2340 vcpu->regs[VCPU_REGS_RBX] = 0; 2340 vcpu->arch.regs[VCPU_REGS_RBX] = 0;
2341 vcpu->regs[VCPU_REGS_RCX] = 0; 2341 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2342 vcpu->regs[VCPU_REGS_RDX] = 0; 2342 vcpu->arch.regs[VCPU_REGS_RDX] = 0;
2343 best = NULL; 2343 best = NULL;
2344 for (i = 0; i < vcpu->cpuid_nent; ++i) { 2344 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2345 e = &vcpu->cpuid_entries[i]; 2345 e = &vcpu->arch.cpuid_entries[i];
2346 if (is_matching_cpuid_entry(e, function, index)) { 2346 if (is_matching_cpuid_entry(e, function, index)) {
2347 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) 2347 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
2348 move_to_next_stateful_cpuid_entry(vcpu, i); 2348 move_to_next_stateful_cpuid_entry(vcpu, i);
@@ -2357,10 +2357,10 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2357 best = e; 2357 best = e;
2358 } 2358 }
2359 if (best) { 2359 if (best) {
2360 vcpu->regs[VCPU_REGS_RAX] = best->eax; 2360 vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
2361 vcpu->regs[VCPU_REGS_RBX] = best->ebx; 2361 vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
2362 vcpu->regs[VCPU_REGS_RCX] = best->ecx; 2362 vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
2363 vcpu->regs[VCPU_REGS_RDX] = best->edx; 2363 vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
2364 } 2364 }
2365 kvm_x86_ops->decache_regs(vcpu); 2365 kvm_x86_ops->decache_regs(vcpu);
2366 kvm_x86_ops->skip_emulated_instruction(vcpu); 2366 kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2376,9 +2376,9 @@ EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
2376static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu, 2376static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2377 struct kvm_run *kvm_run) 2377 struct kvm_run *kvm_run)
2378{ 2378{
2379 return (!vcpu->irq_summary && 2379 return (!vcpu->arch.irq_summary &&
2380 kvm_run->request_interrupt_window && 2380 kvm_run->request_interrupt_window &&
2381 vcpu->interrupt_window_open && 2381 vcpu->arch.interrupt_window_open &&
2382 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF)); 2382 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2383} 2383}
2384 2384
@@ -2392,22 +2392,22 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2392 kvm_run->ready_for_interrupt_injection = 1; 2392 kvm_run->ready_for_interrupt_injection = 1;
2393 else 2393 else
2394 kvm_run->ready_for_interrupt_injection = 2394 kvm_run->ready_for_interrupt_injection =
2395 (vcpu->interrupt_window_open && 2395 (vcpu->arch.interrupt_window_open &&
2396 vcpu->irq_summary == 0); 2396 vcpu->arch.irq_summary == 0);
2397} 2397}
2398 2398
2399static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2399static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2400{ 2400{
2401 int r; 2401 int r;
2402 2402
2403 if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) { 2403 if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
2404 pr_debug("vcpu %d received sipi with vector # %x\n", 2404 pr_debug("vcpu %d received sipi with vector # %x\n",
2405 vcpu->vcpu_id, vcpu->sipi_vector); 2405 vcpu->vcpu_id, vcpu->arch.sipi_vector);
2406 kvm_lapic_reset(vcpu); 2406 kvm_lapic_reset(vcpu);
2407 r = kvm_x86_ops->vcpu_reset(vcpu); 2407 r = kvm_x86_ops->vcpu_reset(vcpu);
2408 if (r) 2408 if (r)
2409 return r; 2409 return r;
2410 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE; 2410 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
2411 } 2411 }
2412 2412
2413preempted: 2413preempted:
@@ -2437,7 +2437,7 @@ again:
2437 goto out; 2437 goto out;
2438 } 2438 }
2439 2439
2440 if (vcpu->exception.pending) 2440 if (vcpu->arch.exception.pending)
2441 __queue_exception(vcpu); 2441 __queue_exception(vcpu);
2442 else if (irqchip_in_kernel(vcpu->kvm)) 2442 else if (irqchip_in_kernel(vcpu->kvm))
2443 kvm_x86_ops->inject_pending_irq(vcpu); 2443 kvm_x86_ops->inject_pending_irq(vcpu);
@@ -2475,11 +2475,11 @@ again:
2475 */ 2475 */
2476 if (unlikely(prof_on == KVM_PROFILING)) { 2476 if (unlikely(prof_on == KVM_PROFILING)) {
2477 kvm_x86_ops->cache_regs(vcpu); 2477 kvm_x86_ops->cache_regs(vcpu);
2478 profile_hit(KVM_PROFILING, (void *)vcpu->rip); 2478 profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
2479 } 2479 }
2480 2480
2481 if (vcpu->exception.pending && kvm_x86_ops->exception_injected(vcpu)) 2481 if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
2482 vcpu->exception.pending = false; 2482 vcpu->arch.exception.pending = false;
2483 2483
2484 r = kvm_x86_ops->handle_exit(kvm_run, vcpu); 2484 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
2485 2485
@@ -2512,7 +2512,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2512 2512
2513 vcpu_load(vcpu); 2513 vcpu_load(vcpu);
2514 2514
2515 if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) { 2515 if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
2516 kvm_vcpu_block(vcpu); 2516 kvm_vcpu_block(vcpu);
2517 vcpu_put(vcpu); 2517 vcpu_put(vcpu);
2518 return -EAGAIN; 2518 return -EAGAIN;
@@ -2525,7 +2525,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2525 if (!irqchip_in_kernel(vcpu->kvm)) 2525 if (!irqchip_in_kernel(vcpu->kvm))
2526 set_cr8(vcpu, kvm_run->cr8); 2526 set_cr8(vcpu, kvm_run->cr8);
2527 2527
2528 if (vcpu->pio.cur_count) { 2528 if (vcpu->arch.pio.cur_count) {
2529 r = complete_pio(vcpu); 2529 r = complete_pio(vcpu);
2530 if (r) 2530 if (r)
2531 goto out; 2531 goto out;
@@ -2536,7 +2536,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2536 vcpu->mmio_read_completed = 1; 2536 vcpu->mmio_read_completed = 1;
2537 vcpu->mmio_needed = 0; 2537 vcpu->mmio_needed = 0;
2538 r = emulate_instruction(vcpu, kvm_run, 2538 r = emulate_instruction(vcpu, kvm_run,
2539 vcpu->mmio_fault_cr2, 0, 1); 2539 vcpu->arch.mmio_fault_cr2, 0, 1);
2540 if (r == EMULATE_DO_MMIO) { 2540 if (r == EMULATE_DO_MMIO) {
2541 /* 2541 /*
2542 * Read-modify-write. Back to userspace. 2542 * Read-modify-write. Back to userspace.
@@ -2548,7 +2548,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2548#endif 2548#endif
2549 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) { 2549 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
2550 kvm_x86_ops->cache_regs(vcpu); 2550 kvm_x86_ops->cache_regs(vcpu);
2551 vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret; 2551 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
2552 kvm_x86_ops->decache_regs(vcpu); 2552 kvm_x86_ops->decache_regs(vcpu);
2553 } 2553 }
2554 2554
@@ -2568,26 +2568,26 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2568 2568
2569 kvm_x86_ops->cache_regs(vcpu); 2569 kvm_x86_ops->cache_regs(vcpu);
2570 2570
2571 regs->rax = vcpu->regs[VCPU_REGS_RAX]; 2571 regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
2572 regs->rbx = vcpu->regs[VCPU_REGS_RBX]; 2572 regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
2573 regs->rcx = vcpu->regs[VCPU_REGS_RCX]; 2573 regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
2574 regs->rdx = vcpu->regs[VCPU_REGS_RDX]; 2574 regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
2575 regs->rsi = vcpu->regs[VCPU_REGS_RSI]; 2575 regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
2576 regs->rdi = vcpu->regs[VCPU_REGS_RDI]; 2576 regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
2577 regs->rsp = vcpu->regs[VCPU_REGS_RSP]; 2577 regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
2578 regs->rbp = vcpu->regs[VCPU_REGS_RBP]; 2578 regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
2579#ifdef CONFIG_X86_64 2579#ifdef CONFIG_X86_64
2580 regs->r8 = vcpu->regs[VCPU_REGS_R8]; 2580 regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
2581 regs->r9 = vcpu->regs[VCPU_REGS_R9]; 2581 regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
2582 regs->r10 = vcpu->regs[VCPU_REGS_R10]; 2582 regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
2583 regs->r11 = vcpu->regs[VCPU_REGS_R11]; 2583 regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
2584 regs->r12 = vcpu->regs[VCPU_REGS_R12]; 2584 regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
2585 regs->r13 = vcpu->regs[VCPU_REGS_R13]; 2585 regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
2586 regs->r14 = vcpu->regs[VCPU_REGS_R14]; 2586 regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
2587 regs->r15 = vcpu->regs[VCPU_REGS_R15]; 2587 regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
2588#endif 2588#endif
2589 2589
2590 regs->rip = vcpu->rip; 2590 regs->rip = vcpu->arch.rip;
2591 regs->rflags = kvm_x86_ops->get_rflags(vcpu); 2591 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
2592 2592
2593 /* 2593 /*
@@ -2605,26 +2605,26 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2605{ 2605{
2606 vcpu_load(vcpu); 2606 vcpu_load(vcpu);
2607 2607
2608 vcpu->regs[VCPU_REGS_RAX] = regs->rax; 2608 vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
2609 vcpu->regs[VCPU_REGS_RBX] = regs->rbx; 2609 vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
2610 vcpu->regs[VCPU_REGS_RCX] = regs->rcx; 2610 vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
2611 vcpu->regs[VCPU_REGS_RDX] = regs->rdx; 2611 vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
2612 vcpu->regs[VCPU_REGS_RSI] = regs->rsi; 2612 vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
2613 vcpu->regs[VCPU_REGS_RDI] = regs->rdi; 2613 vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
2614 vcpu->regs[VCPU_REGS_RSP] = regs->rsp; 2614 vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
2615 vcpu->regs[VCPU_REGS_RBP] = regs->rbp; 2615 vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
2616#ifdef CONFIG_X86_64 2616#ifdef CONFIG_X86_64
2617 vcpu->regs[VCPU_REGS_R8] = regs->r8; 2617 vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
2618 vcpu->regs[VCPU_REGS_R9] = regs->r9; 2618 vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
2619 vcpu->regs[VCPU_REGS_R10] = regs->r10; 2619 vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
2620 vcpu->regs[VCPU_REGS_R11] = regs->r11; 2620 vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
2621 vcpu->regs[VCPU_REGS_R12] = regs->r12; 2621 vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
2622 vcpu->regs[VCPU_REGS_R13] = regs->r13; 2622 vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
2623 vcpu->regs[VCPU_REGS_R14] = regs->r14; 2623 vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
2624 vcpu->regs[VCPU_REGS_R15] = regs->r15; 2624 vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
2625#endif 2625#endif
2626 2626
2627 vcpu->rip = regs->rip; 2627 vcpu->arch.rip = regs->rip;
2628 kvm_x86_ops->set_rflags(vcpu, regs->rflags); 2628 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
2629 2629
2630 kvm_x86_ops->decache_regs(vcpu); 2630 kvm_x86_ops->decache_regs(vcpu);
@@ -2676,12 +2676,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2676 sregs->gdt.base = dt.base; 2676 sregs->gdt.base = dt.base;
2677 2677
2678 kvm_x86_ops->decache_cr4_guest_bits(vcpu); 2678 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2679 sregs->cr0 = vcpu->cr0; 2679 sregs->cr0 = vcpu->arch.cr0;
2680 sregs->cr2 = vcpu->cr2; 2680 sregs->cr2 = vcpu->arch.cr2;
2681 sregs->cr3 = vcpu->cr3; 2681 sregs->cr3 = vcpu->arch.cr3;
2682 sregs->cr4 = vcpu->cr4; 2682 sregs->cr4 = vcpu->arch.cr4;
2683 sregs->cr8 = get_cr8(vcpu); 2683 sregs->cr8 = get_cr8(vcpu);
2684 sregs->efer = vcpu->shadow_efer; 2684 sregs->efer = vcpu->arch.shadow_efer;
2685 sregs->apic_base = kvm_get_apic_base(vcpu); 2685 sregs->apic_base = kvm_get_apic_base(vcpu);
2686 2686
2687 if (irqchip_in_kernel(vcpu->kvm)) { 2687 if (irqchip_in_kernel(vcpu->kvm)) {
@@ -2692,7 +2692,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2692 set_bit(pending_vec, 2692 set_bit(pending_vec,
2693 (unsigned long *)sregs->interrupt_bitmap); 2693 (unsigned long *)sregs->interrupt_bitmap);
2694 } else 2694 } else
2695 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending, 2695 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
2696 sizeof sregs->interrupt_bitmap); 2696 sizeof sregs->interrupt_bitmap);
2697 2697
2698 vcpu_put(vcpu); 2698 vcpu_put(vcpu);
@@ -2722,13 +2722,13 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2722 dt.base = sregs->gdt.base; 2722 dt.base = sregs->gdt.base;
2723 kvm_x86_ops->set_gdt(vcpu, &dt); 2723 kvm_x86_ops->set_gdt(vcpu, &dt);
2724 2724
2725 vcpu->cr2 = sregs->cr2; 2725 vcpu->arch.cr2 = sregs->cr2;
2726 mmu_reset_needed |= vcpu->cr3 != sregs->cr3; 2726 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
2727 vcpu->cr3 = sregs->cr3; 2727 vcpu->arch.cr3 = sregs->cr3;
2728 2728
2729 set_cr8(vcpu, sregs->cr8); 2729 set_cr8(vcpu, sregs->cr8);
2730 2730
2731 mmu_reset_needed |= vcpu->shadow_efer != sregs->efer; 2731 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
2732#ifdef CONFIG_X86_64 2732#ifdef CONFIG_X86_64
2733 kvm_x86_ops->set_efer(vcpu, sregs->efer); 2733 kvm_x86_ops->set_efer(vcpu, sregs->efer);
2734#endif 2734#endif
@@ -2736,25 +2736,25 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2736 2736
2737 kvm_x86_ops->decache_cr4_guest_bits(vcpu); 2737 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2738 2738
2739 mmu_reset_needed |= vcpu->cr0 != sregs->cr0; 2739 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
2740 vcpu->cr0 = sregs->cr0; 2740 vcpu->arch.cr0 = sregs->cr0;
2741 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); 2741 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
2742 2742
2743 mmu_reset_needed |= vcpu->cr4 != sregs->cr4; 2743 mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
2744 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 2744 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
2745 if (!is_long_mode(vcpu) && is_pae(vcpu)) 2745 if (!is_long_mode(vcpu) && is_pae(vcpu))
2746 load_pdptrs(vcpu, vcpu->cr3); 2746 load_pdptrs(vcpu, vcpu->arch.cr3);
2747 2747
2748 if (mmu_reset_needed) 2748 if (mmu_reset_needed)
2749 kvm_mmu_reset_context(vcpu); 2749 kvm_mmu_reset_context(vcpu);
2750 2750
2751 if (!irqchip_in_kernel(vcpu->kvm)) { 2751 if (!irqchip_in_kernel(vcpu->kvm)) {
2752 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap, 2752 memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
2753 sizeof vcpu->irq_pending); 2753 sizeof vcpu->arch.irq_pending);
2754 vcpu->irq_summary = 0; 2754 vcpu->arch.irq_summary = 0;
2755 for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i) 2755 for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
2756 if (vcpu->irq_pending[i]) 2756 if (vcpu->arch.irq_pending[i])
2757 __set_bit(i, &vcpu->irq_summary); 2757 __set_bit(i, &vcpu->arch.irq_summary);
2758 } else { 2758 } else {
2759 max_bits = (sizeof sregs->interrupt_bitmap) << 3; 2759 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
2760 pending_vec = find_first_bit( 2760 pending_vec = find_first_bit(
@@ -2829,7 +2829,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2829 2829
2830 vcpu_load(vcpu); 2830 vcpu_load(vcpu);
2831 mutex_lock(&vcpu->kvm->lock); 2831 mutex_lock(&vcpu->kvm->lock);
2832 gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr); 2832 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
2833 tr->physical_address = gpa; 2833 tr->physical_address = gpa;
2834 tr->valid = gpa != UNMAPPED_GVA; 2834 tr->valid = gpa != UNMAPPED_GVA;
2835 tr->writeable = 1; 2835 tr->writeable = 1;
@@ -2842,7 +2842,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2842 2842
2843int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 2843int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2844{ 2844{
2845 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image; 2845 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
2846 2846
2847 vcpu_load(vcpu); 2847 vcpu_load(vcpu);
2848 2848
@@ -2862,7 +2862,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2862 2862
2863int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 2863int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2864{ 2864{
2865 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image; 2865 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
2866 2866
2867 vcpu_load(vcpu); 2867 vcpu_load(vcpu);
2868 2868
@@ -2886,16 +2886,16 @@ void fx_init(struct kvm_vcpu *vcpu)
2886 2886
2887 /* Initialize guest FPU by resetting ours and saving into guest's */ 2887 /* Initialize guest FPU by resetting ours and saving into guest's */
2888 preempt_disable(); 2888 preempt_disable();
2889 fx_save(&vcpu->host_fx_image); 2889 fx_save(&vcpu->arch.host_fx_image);
2890 fpu_init(); 2890 fpu_init();
2891 fx_save(&vcpu->guest_fx_image); 2891 fx_save(&vcpu->arch.guest_fx_image);
2892 fx_restore(&vcpu->host_fx_image); 2892 fx_restore(&vcpu->arch.host_fx_image);
2893 preempt_enable(); 2893 preempt_enable();
2894 2894
2895 vcpu->cr0 |= X86_CR0_ET; 2895 vcpu->arch.cr0 |= X86_CR0_ET;
2896 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space); 2896 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
2897 vcpu->guest_fx_image.mxcsr = 0x1f80; 2897 vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
2898 memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask, 2898 memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
2899 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask); 2899 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
2900} 2900}
2901EXPORT_SYMBOL_GPL(fx_init); 2901EXPORT_SYMBOL_GPL(fx_init);
@@ -2906,8 +2906,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
2906 return; 2906 return;
2907 2907
2908 vcpu->guest_fpu_loaded = 1; 2908 vcpu->guest_fpu_loaded = 1;
2909 fx_save(&vcpu->host_fx_image); 2909 fx_save(&vcpu->arch.host_fx_image);
2910 fx_restore(&vcpu->guest_fx_image); 2910 fx_restore(&vcpu->arch.guest_fx_image);
2911} 2911}
2912EXPORT_SYMBOL_GPL(kvm_load_guest_fpu); 2912EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
2913 2913
@@ -2917,8 +2917,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
2917 return; 2917 return;
2918 2918
2919 vcpu->guest_fpu_loaded = 0; 2919 vcpu->guest_fpu_loaded = 0;
2920 fx_save(&vcpu->guest_fx_image); 2920 fx_save(&vcpu->arch.guest_fx_image);
2921 fx_restore(&vcpu->host_fx_image); 2921 fx_restore(&vcpu->arch.host_fx_image);
2922 ++vcpu->stat.fpu_reload; 2922 ++vcpu->stat.fpu_reload;
2923} 2923}
2924EXPORT_SYMBOL_GPL(kvm_put_guest_fpu); 2924EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
@@ -2939,7 +2939,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2939 int r; 2939 int r;
2940 2940
2941 /* We do fxsave: this must be aligned. */ 2941 /* We do fxsave: this must be aligned. */
2942 BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF); 2942 BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
2943 2943
2944 vcpu_load(vcpu); 2944 vcpu_load(vcpu);
2945 r = kvm_arch_vcpu_reset(vcpu); 2945 r = kvm_arch_vcpu_reset(vcpu);
@@ -3003,18 +3003,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
3003 BUG_ON(vcpu->kvm == NULL); 3003 BUG_ON(vcpu->kvm == NULL);
3004 kvm = vcpu->kvm; 3004 kvm = vcpu->kvm;
3005 3005
3006 vcpu->mmu.root_hpa = INVALID_PAGE; 3006 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3007 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0) 3007 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
3008 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE; 3008 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
3009 else 3009 else
3010 vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED; 3010 vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
3011 3011
3012 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 3012 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3013 if (!page) { 3013 if (!page) {
3014 r = -ENOMEM; 3014 r = -ENOMEM;
3015 goto fail; 3015 goto fail;
3016 } 3016 }
3017 vcpu->pio_data = page_address(page); 3017 vcpu->arch.pio_data = page_address(page);
3018 3018
3019 r = kvm_mmu_create(vcpu); 3019 r = kvm_mmu_create(vcpu);
3020 if (r < 0) 3020 if (r < 0)
@@ -3031,7 +3031,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
3031fail_mmu_destroy: 3031fail_mmu_destroy:
3032 kvm_mmu_destroy(vcpu); 3032 kvm_mmu_destroy(vcpu);
3033fail_free_pio_data: 3033fail_free_pio_data:
3034 free_page((unsigned long)vcpu->pio_data); 3034 free_page((unsigned long)vcpu->arch.pio_data);
3035fail: 3035fail:
3036 return r; 3036 return r;
3037} 3037}
@@ -3040,7 +3040,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
3040{ 3040{
3041 kvm_free_lapic(vcpu); 3041 kvm_free_lapic(vcpu);
3042 kvm_mmu_destroy(vcpu); 3042 kvm_mmu_destroy(vcpu);
3043 free_page((unsigned long)vcpu->pio_data); 3043 free_page((unsigned long)vcpu->arch.pio_data);
3044} 3044}
3045 3045
3046struct kvm *kvm_arch_create_vm(void) 3046struct kvm *kvm_arch_create_vm(void)
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 0fc7020aa1a5..0e01ac75268c 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -92,8 +92,7 @@ enum {
92 92
93#include "x86_emulate.h" 93#include "x86_emulate.h"
94 94
95struct kvm_vcpu { 95struct kvm_vcpu_arch {
96 KVM_VCPU_COMM;
97 u64 host_tsc; 96 u64 host_tsc;
98 int interrupt_window_open; 97 int interrupt_window_open;
99 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ 98 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
@@ -130,7 +129,6 @@ struct kvm_vcpu {
130 int last_pt_write_count; 129 int last_pt_write_count;
131 u64 *last_pte_updated; 130 u64 *last_pte_updated;
132 131
133
134 struct i387_fxsave_struct host_fx_image; 132 struct i387_fxsave_struct host_fx_image;
135 struct i387_fxsave_struct guest_fx_image; 133 struct i387_fxsave_struct guest_fx_image;
136 134
@@ -159,12 +157,17 @@ struct kvm_vcpu {
159 157
160 int cpuid_nent; 158 int cpuid_nent;
161 struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES]; 159 struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
162
163 /* emulate context */ 160 /* emulate context */
164 161
165 struct x86_emulate_ctxt emulate_ctxt; 162 struct x86_emulate_ctxt emulate_ctxt;
166}; 163};
167 164
165struct kvm_vcpu {
166 KVM_VCPU_COMM;
167
168 struct kvm_vcpu_arch arch;
169};
170
168struct descriptor_table { 171struct descriptor_table {
169 u16 limit; 172 u16 limit;
170 unsigned long base; 173 unsigned long base;
@@ -339,7 +342,7 @@ static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
339 342
340static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) 343static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
341{ 344{
342 if (likely(vcpu->mmu.root_hpa != INVALID_PAGE)) 345 if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
343 return 0; 346 return 0;
344 347
345 return kvm_mmu_load(vcpu); 348 return kvm_mmu_load(vcpu);
@@ -348,7 +351,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
348static inline int is_long_mode(struct kvm_vcpu *vcpu) 351static inline int is_long_mode(struct kvm_vcpu *vcpu)
349{ 352{
350#ifdef CONFIG_X86_64 353#ifdef CONFIG_X86_64
351 return vcpu->shadow_efer & EFER_LME; 354 return vcpu->arch.shadow_efer & EFER_LME;
352#else 355#else
353 return 0; 356 return 0;
354#endif 357#endif
@@ -356,17 +359,17 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
356 359
357static inline int is_pae(struct kvm_vcpu *vcpu) 360static inline int is_pae(struct kvm_vcpu *vcpu)
358{ 361{
359 return vcpu->cr4 & X86_CR4_PAE; 362 return vcpu->arch.cr4 & X86_CR4_PAE;
360} 363}
361 364
362static inline int is_pse(struct kvm_vcpu *vcpu) 365static inline int is_pse(struct kvm_vcpu *vcpu)
363{ 366{
364 return vcpu->cr4 & X86_CR4_PSE; 367 return vcpu->arch.cr4 & X86_CR4_PSE;
365} 368}
366 369
367static inline int is_paging(struct kvm_vcpu *vcpu) 370static inline int is_paging(struct kvm_vcpu *vcpu)
368{ 371{
369 return vcpu->cr0 & X86_CR0_PG; 372 return vcpu->arch.cr0 & X86_CR0_PG;
370} 373}
371 374
372int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); 375int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -489,8 +492,8 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
489 492
490static inline int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 493static inline int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
491{ 494{
492 return vcpu->mp_state == VCPU_MP_STATE_RUNNABLE 495 return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
493 || vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED; 496 || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
494} 497}
495 498
496#endif 499#endif
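The x86.h hunks above carry the core of the change: the x86-specific fields leave struct kvm_vcpu and move into the new struct kvm_vcpu_arch, which is embedded as the single member arch. A rough, abbreviated sketch of the resulting layout (illustrative only, not the literal header contents; the full field list and types are in the hunks above):

	struct kvm_vcpu_arch {
		u64 host_tsc;
		unsigned long cr0, cr2, cr3, cr4;	/* formerly vcpu->crN */
		unsigned long rip;			/* formerly vcpu->rip */
		unsigned long regs[NR_VCPU_REGS];	/* formerly vcpu->regs[] */
		struct kvm_pio_request pio;		/* formerly vcpu->pio */
		struct x86_emulate_ctxt emulate_ctxt;	/* formerly vcpu->emulate_ctxt */
		/* ... remaining x86-specific state ... */
	};

	struct kvm_vcpu {
		KVM_VCPU_COMM;			/* architecture-neutral fields */
		struct kvm_vcpu_arch arch;	/* all x86-specific state */
	};

Call sites are converted mechanically, one level of indirection deeper, e.g.:

	static inline int is_paging(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.cr0 & X86_CR0_PG;	/* was: vcpu->cr0 & X86_CR0_PG */
	}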
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 0a6ab06fde01..50b133f68743 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -769,8 +769,8 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
769 /* Shadow copy of register state. Committed on successful emulation. */ 769 /* Shadow copy of register state. Committed on successful emulation. */
770 770
771 memset(c, 0, sizeof(struct decode_cache)); 771 memset(c, 0, sizeof(struct decode_cache));
772 c->eip = ctxt->vcpu->rip; 772 c->eip = ctxt->vcpu->arch.rip;
773 memcpy(c->regs, ctxt->vcpu->regs, sizeof c->regs); 773 memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
774 774
775 switch (mode) { 775 switch (mode) {
776 case X86EMUL_MODE_REAL: 776 case X86EMUL_MODE_REAL:
@@ -1226,7 +1226,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1226 * modify them. 1226 * modify them.
1227 */ 1227 */
1228 1228
1229 memcpy(c->regs, ctxt->vcpu->regs, sizeof c->regs); 1229 memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
1230 saved_eip = c->eip; 1230 saved_eip = c->eip;
1231 1231
1232 if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs)) 1232 if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
@@ -1235,7 +1235,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1235 if (c->rep_prefix && (c->d & String)) { 1235 if (c->rep_prefix && (c->d & String)) {
1236 /* All REP prefixes have the same first termination condition */ 1236 /* All REP prefixes have the same first termination condition */
1237 if (c->regs[VCPU_REGS_RCX] == 0) { 1237 if (c->regs[VCPU_REGS_RCX] == 0) {
1238 ctxt->vcpu->rip = c->eip; 1238 ctxt->vcpu->arch.rip = c->eip;
1239 goto done; 1239 goto done;
1240 } 1240 }
1241 /* The second termination condition only applies for REPE 1241 /* The second termination condition only applies for REPE
@@ -1249,17 +1249,17 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1249 (c->b == 0xae) || (c->b == 0xaf)) { 1249 (c->b == 0xae) || (c->b == 0xaf)) {
1250 if ((c->rep_prefix == REPE_PREFIX) && 1250 if ((c->rep_prefix == REPE_PREFIX) &&
1251 ((ctxt->eflags & EFLG_ZF) == 0)) { 1251 ((ctxt->eflags & EFLG_ZF) == 0)) {
1252 ctxt->vcpu->rip = c->eip; 1252 ctxt->vcpu->arch.rip = c->eip;
1253 goto done; 1253 goto done;
1254 } 1254 }
1255 if ((c->rep_prefix == REPNE_PREFIX) && 1255 if ((c->rep_prefix == REPNE_PREFIX) &&
1256 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) { 1256 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
1257 ctxt->vcpu->rip = c->eip; 1257 ctxt->vcpu->arch.rip = c->eip;
1258 goto done; 1258 goto done;
1259 } 1259 }
1260 } 1260 }
1261 c->regs[VCPU_REGS_RCX]--; 1261 c->regs[VCPU_REGS_RCX]--;
1262 c->eip = ctxt->vcpu->rip; 1262 c->eip = ctxt->vcpu->arch.rip;
1263 } 1263 }
1264 1264
1265 if (c->src.type == OP_MEM) { 1265 if (c->src.type == OP_MEM) {
@@ -1628,7 +1628,7 @@ special_insn:
1628 c->dst.type = OP_NONE; /* Disable writeback. */ 1628 c->dst.type = OP_NONE; /* Disable writeback. */
1629 break; 1629 break;
1630 case 0xf4: /* hlt */ 1630 case 0xf4: /* hlt */
1631 ctxt->vcpu->halt_request = 1; 1631 ctxt->vcpu->arch.halt_request = 1;
1632 goto done; 1632 goto done;
1633 case 0xf5: /* cmc */ 1633 case 0xf5: /* cmc */
1634 /* complement carry flag from eflags reg */ 1634 /* complement carry flag from eflags reg */
@@ -1665,8 +1665,8 @@ writeback:
1665 goto done; 1665 goto done;
1666 1666
1667 /* Commit shadow register state. */ 1667 /* Commit shadow register state. */
1668 memcpy(ctxt->vcpu->regs, c->regs, sizeof c->regs); 1668 memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
1669 ctxt->vcpu->rip = c->eip; 1669 ctxt->vcpu->arch.rip = c->eip;
1670 1670
1671done: 1671done:
1672 if (rc == X86EMUL_UNHANDLEABLE) { 1672 if (rc == X86EMUL_UNHANDLEABLE) {
@@ -1783,7 +1783,7 @@ twobyte_insn:
1783 rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data); 1783 rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
1784 if (rc) { 1784 if (rc) {
1785 kvm_inject_gp(ctxt->vcpu, 0); 1785 kvm_inject_gp(ctxt->vcpu, 0);
1786 c->eip = ctxt->vcpu->rip; 1786 c->eip = ctxt->vcpu->arch.rip;
1787 } 1787 }
1788 rc = X86EMUL_CONTINUE; 1788 rc = X86EMUL_CONTINUE;
1789 c->dst.type = OP_NONE; 1789 c->dst.type = OP_NONE;
@@ -1793,7 +1793,7 @@ twobyte_insn:
1793 rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data); 1793 rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
1794 if (rc) { 1794 if (rc) {
1795 kvm_inject_gp(ctxt->vcpu, 0); 1795 kvm_inject_gp(ctxt->vcpu, 0);
1796 c->eip = ctxt->vcpu->rip; 1796 c->eip = ctxt->vcpu->arch.rip;
1797 } else { 1797 } else {
1798 c->regs[VCPU_REGS_RAX] = (u32)msr_data; 1798 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
1799 c->regs[VCPU_REGS_RDX] = msr_data >> 32; 1799 c->regs[VCPU_REGS_RDX] = msr_data >> 32;