Diffstat (limited to 'drivers/kvm/x86.c')
-rw-r--r--  drivers/kvm/x86.c  522
1 file changed, 261 insertions(+), 261 deletions(-)
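
What follows is one mechanical change repeated throughout the file: the x86-specific per-vcpu state that used to live directly in struct kvm_vcpu is now reached through an embedded arch member, so vcpu->cr0 becomes vcpu->arch.cr0, vcpu->pio.count becomes vcpu->arch.pio.count, and so on, while machine-independent fields (for example vcpu->kvm, vcpu->run and vcpu->stat) stay where they were. The structure definitions themselves change in a header that is not part of this diff; purely as a sketch, with the field list inferred from the accesses visible below rather than from the real header, the new layout presumably looks something like this:

    /*
     * Sketch only: x86-specific vcpu state split out of struct kvm_vcpu.
     * Field names are inferred from the accesses in this diff; the
     * authoritative definition lives in the arch header, not here.
     */
    struct kvm_vcpu_arch {
            unsigned long cr0, cr2, cr3, cr4, cr8;
            u64 shadow_efer;
            u64 apic_base;
            struct kvm_lapic *apic;              /* in-kernel local APIC, if any */
            unsigned long rip;
            unsigned long regs[NR_VCPU_REGS];    /* cached general-purpose registers */
            u64 pdptrs[4];                       /* PAE page-directory pointers */
            struct kvm_mmu mmu;
            struct kvm_pio_request pio;
            void *pio_data;
            struct {                             /* pending exception, see kvm_queue_exception() */
                    bool pending;
                    bool has_error_code;
                    unsigned nr;
                    u32 error_code;
            } exception;
            /* ... plus irq_pending/irq_summary, mp_state, emulate_ctxt,
             * cpuid_entries/cpuid_nent, the FPU images, and the other
             * fields converted below. */
    };

    struct kvm_vcpu {
            struct kvm *kvm;
            int vcpu_id;
            struct kvm_run *run;
            /* machine-independent fields, unchanged by this patch ... */
            struct kvm_vcpu_arch arch;           /* all x86-specific state now here */
    };
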
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 3b79684a3c0c..5a2f33a84e4f 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -113,9 +113,9 @@ EXPORT_SYMBOL_GPL(segment_base);
113u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) 113u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
114{ 114{
115 if (irqchip_in_kernel(vcpu->kvm)) 115 if (irqchip_in_kernel(vcpu->kvm))
116 return vcpu->apic_base; 116 return vcpu->arch.apic_base;
117 else 117 else
118 return vcpu->apic_base; 118 return vcpu->arch.apic_base;
119} 119}
120EXPORT_SYMBOL_GPL(kvm_get_apic_base); 120EXPORT_SYMBOL_GPL(kvm_get_apic_base);
121 121
@@ -125,16 +125,16 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
125 if (irqchip_in_kernel(vcpu->kvm)) 125 if (irqchip_in_kernel(vcpu->kvm))
126 kvm_lapic_set_base(vcpu, data); 126 kvm_lapic_set_base(vcpu, data);
127 else 127 else
128 vcpu->apic_base = data; 128 vcpu->arch.apic_base = data;
129} 129}
130EXPORT_SYMBOL_GPL(kvm_set_apic_base); 130EXPORT_SYMBOL_GPL(kvm_set_apic_base);
131 131
132void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) 132void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
133{ 133{
134 WARN_ON(vcpu->exception.pending); 134 WARN_ON(vcpu->arch.exception.pending);
135 vcpu->exception.pending = true; 135 vcpu->arch.exception.pending = true;
136 vcpu->exception.has_error_code = false; 136 vcpu->arch.exception.has_error_code = false;
137 vcpu->exception.nr = nr; 137 vcpu->arch.exception.nr = nr;
138} 138}
139EXPORT_SYMBOL_GPL(kvm_queue_exception); 139EXPORT_SYMBOL_GPL(kvm_queue_exception);
140 140
@@ -142,32 +142,32 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
142 u32 error_code) 142 u32 error_code)
143{ 143{
144 ++vcpu->stat.pf_guest; 144 ++vcpu->stat.pf_guest;
145 if (vcpu->exception.pending && vcpu->exception.nr == PF_VECTOR) { 145 if (vcpu->arch.exception.pending && vcpu->arch.exception.nr == PF_VECTOR) {
146 printk(KERN_DEBUG "kvm: inject_page_fault:" 146 printk(KERN_DEBUG "kvm: inject_page_fault:"
147 " double fault 0x%lx\n", addr); 147 " double fault 0x%lx\n", addr);
148 vcpu->exception.nr = DF_VECTOR; 148 vcpu->arch.exception.nr = DF_VECTOR;
149 vcpu->exception.error_code = 0; 149 vcpu->arch.exception.error_code = 0;
150 return; 150 return;
151 } 151 }
152 vcpu->cr2 = addr; 152 vcpu->arch.cr2 = addr;
153 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code); 153 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
154} 154}
155 155
156void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) 156void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
157{ 157{
158 WARN_ON(vcpu->exception.pending); 158 WARN_ON(vcpu->arch.exception.pending);
159 vcpu->exception.pending = true; 159 vcpu->arch.exception.pending = true;
160 vcpu->exception.has_error_code = true; 160 vcpu->arch.exception.has_error_code = true;
161 vcpu->exception.nr = nr; 161 vcpu->arch.exception.nr = nr;
162 vcpu->exception.error_code = error_code; 162 vcpu->arch.exception.error_code = error_code;
163} 163}
164EXPORT_SYMBOL_GPL(kvm_queue_exception_e); 164EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
165 165
166static void __queue_exception(struct kvm_vcpu *vcpu) 166static void __queue_exception(struct kvm_vcpu *vcpu)
167{ 167{
168 kvm_x86_ops->queue_exception(vcpu, vcpu->exception.nr, 168 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
169 vcpu->exception.has_error_code, 169 vcpu->arch.exception.has_error_code,
170 vcpu->exception.error_code); 170 vcpu->arch.exception.error_code);
171} 171}
172 172
173/* 173/*
@@ -179,7 +179,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
179 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; 179 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
180 int i; 180 int i;
181 int ret; 181 int ret;
182 u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)]; 182 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
183 183
184 mutex_lock(&vcpu->kvm->lock); 184 mutex_lock(&vcpu->kvm->lock);
185 ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte, 185 ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
@@ -196,7 +196,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
196 } 196 }
197 ret = 1; 197 ret = 1;
198 198
199 memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs)); 199 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
200out: 200out:
201 mutex_unlock(&vcpu->kvm->lock); 201 mutex_unlock(&vcpu->kvm->lock);
202 202
@@ -205,7 +205,7 @@ out:
205 205
206static bool pdptrs_changed(struct kvm_vcpu *vcpu) 206static bool pdptrs_changed(struct kvm_vcpu *vcpu)
207{ 207{
208 u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)]; 208 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
209 bool changed = true; 209 bool changed = true;
210 int r; 210 int r;
211 211
@@ -213,10 +213,10 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
213 return false; 213 return false;
214 214
215 mutex_lock(&vcpu->kvm->lock); 215 mutex_lock(&vcpu->kvm->lock);
216 r = kvm_read_guest(vcpu->kvm, vcpu->cr3 & ~31u, pdpte, sizeof(pdpte)); 216 r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
217 if (r < 0) 217 if (r < 0)
218 goto out; 218 goto out;
219 changed = memcmp(pdpte, vcpu->pdptrs, sizeof(pdpte)) != 0; 219 changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
220out: 220out:
221 mutex_unlock(&vcpu->kvm->lock); 221 mutex_unlock(&vcpu->kvm->lock);
222 222
@@ -227,7 +227,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
227{ 227{
228 if (cr0 & CR0_RESERVED_BITS) { 228 if (cr0 & CR0_RESERVED_BITS) {
229 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", 229 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
230 cr0, vcpu->cr0); 230 cr0, vcpu->arch.cr0);
231 kvm_inject_gp(vcpu, 0); 231 kvm_inject_gp(vcpu, 0);
232 return; 232 return;
233 } 233 }
@@ -247,7 +247,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
247 247
248 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { 248 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
249#ifdef CONFIG_X86_64 249#ifdef CONFIG_X86_64
250 if ((vcpu->shadow_efer & EFER_LME)) { 250 if ((vcpu->arch.shadow_efer & EFER_LME)) {
251 int cs_db, cs_l; 251 int cs_db, cs_l;
252 252
253 if (!is_pae(vcpu)) { 253 if (!is_pae(vcpu)) {
@@ -266,7 +266,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
266 } 266 }
267 } else 267 } else
268#endif 268#endif
269 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) { 269 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
270 printk(KERN_DEBUG "set_cr0: #GP, pdptrs " 270 printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
271 "reserved bits\n"); 271 "reserved bits\n");
272 kvm_inject_gp(vcpu, 0); 272 kvm_inject_gp(vcpu, 0);
@@ -276,7 +276,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
276 } 276 }
277 277
278 kvm_x86_ops->set_cr0(vcpu, cr0); 278 kvm_x86_ops->set_cr0(vcpu, cr0);
279 vcpu->cr0 = cr0; 279 vcpu->arch.cr0 = cr0;
280 280
281 mutex_lock(&vcpu->kvm->lock); 281 mutex_lock(&vcpu->kvm->lock);
282 kvm_mmu_reset_context(vcpu); 282 kvm_mmu_reset_context(vcpu);
@@ -287,7 +287,7 @@ EXPORT_SYMBOL_GPL(set_cr0);
287 287
288void lmsw(struct kvm_vcpu *vcpu, unsigned long msw) 288void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
289{ 289{
290 set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f)); 290 set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
291} 291}
292EXPORT_SYMBOL_GPL(lmsw); 292EXPORT_SYMBOL_GPL(lmsw);
293 293
@@ -307,7 +307,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
307 return; 307 return;
308 } 308 }
309 } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE) 309 } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
310 && !load_pdptrs(vcpu, vcpu->cr3)) { 310 && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
311 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); 311 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
312 kvm_inject_gp(vcpu, 0); 312 kvm_inject_gp(vcpu, 0);
313 return; 313 return;
@@ -319,7 +319,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
319 return; 319 return;
320 } 320 }
321 kvm_x86_ops->set_cr4(vcpu, cr4); 321 kvm_x86_ops->set_cr4(vcpu, cr4);
322 vcpu->cr4 = cr4; 322 vcpu->arch.cr4 = cr4;
323 mutex_lock(&vcpu->kvm->lock); 323 mutex_lock(&vcpu->kvm->lock);
324 kvm_mmu_reset_context(vcpu); 324 kvm_mmu_reset_context(vcpu);
325 mutex_unlock(&vcpu->kvm->lock); 325 mutex_unlock(&vcpu->kvm->lock);
@@ -328,7 +328,7 @@ EXPORT_SYMBOL_GPL(set_cr4);
328 328
329void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 329void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
330{ 330{
331 if (cr3 == vcpu->cr3 && !pdptrs_changed(vcpu)) { 331 if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
332 kvm_mmu_flush_tlb(vcpu); 332 kvm_mmu_flush_tlb(vcpu);
333 return; 333 return;
334 } 334 }
@@ -373,8 +373,8 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
373 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) 373 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
374 kvm_inject_gp(vcpu, 0); 374 kvm_inject_gp(vcpu, 0);
375 else { 375 else {
376 vcpu->cr3 = cr3; 376 vcpu->arch.cr3 = cr3;
377 vcpu->mmu.new_cr3(vcpu); 377 vcpu->arch.mmu.new_cr3(vcpu);
378 } 378 }
379 mutex_unlock(&vcpu->kvm->lock); 379 mutex_unlock(&vcpu->kvm->lock);
380} 380}
@@ -390,7 +390,7 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
390 if (irqchip_in_kernel(vcpu->kvm)) 390 if (irqchip_in_kernel(vcpu->kvm))
391 kvm_lapic_set_tpr(vcpu, cr8); 391 kvm_lapic_set_tpr(vcpu, cr8);
392 else 392 else
393 vcpu->cr8 = cr8; 393 vcpu->arch.cr8 = cr8;
394} 394}
395EXPORT_SYMBOL_GPL(set_cr8); 395EXPORT_SYMBOL_GPL(set_cr8);
396 396
@@ -399,7 +399,7 @@ unsigned long get_cr8(struct kvm_vcpu *vcpu)
399 if (irqchip_in_kernel(vcpu->kvm)) 399 if (irqchip_in_kernel(vcpu->kvm))
400 return kvm_lapic_get_cr8(vcpu); 400 return kvm_lapic_get_cr8(vcpu);
401 else 401 else
402 return vcpu->cr8; 402 return vcpu->arch.cr8;
403} 403}
404EXPORT_SYMBOL_GPL(get_cr8); 404EXPORT_SYMBOL_GPL(get_cr8);
405 405
@@ -437,7 +437,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
437 } 437 }
438 438
439 if (is_paging(vcpu) 439 if (is_paging(vcpu)
440 && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) { 440 && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
441 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n"); 441 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
442 kvm_inject_gp(vcpu, 0); 442 kvm_inject_gp(vcpu, 0);
443 return; 443 return;
@@ -446,9 +446,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
446 kvm_x86_ops->set_efer(vcpu, efer); 446 kvm_x86_ops->set_efer(vcpu, efer);
447 447
448 efer &= ~EFER_LMA; 448 efer &= ~EFER_LMA;
449 efer |= vcpu->shadow_efer & EFER_LMA; 449 efer |= vcpu->arch.shadow_efer & EFER_LMA;
450 450
451 vcpu->shadow_efer = efer; 451 vcpu->arch.shadow_efer = efer;
452} 452}
453 453
454#endif 454#endif
@@ -496,7 +496,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
496 kvm_set_apic_base(vcpu, data); 496 kvm_set_apic_base(vcpu, data);
497 break; 497 break;
498 case MSR_IA32_MISC_ENABLE: 498 case MSR_IA32_MISC_ENABLE:
499 vcpu->ia32_misc_enable_msr = data; 499 vcpu->arch.ia32_misc_enable_msr = data;
500 break; 500 break;
501 default: 501 default:
502 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr); 502 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
@@ -550,11 +550,11 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
550 data = kvm_get_apic_base(vcpu); 550 data = kvm_get_apic_base(vcpu);
551 break; 551 break;
552 case MSR_IA32_MISC_ENABLE: 552 case MSR_IA32_MISC_ENABLE:
553 data = vcpu->ia32_misc_enable_msr; 553 data = vcpu->arch.ia32_misc_enable_msr;
554 break; 554 break;
555#ifdef CONFIG_X86_64 555#ifdef CONFIG_X86_64
556 case MSR_EFER: 556 case MSR_EFER:
557 data = vcpu->shadow_efer; 557 data = vcpu->arch.shadow_efer;
558 break; 558 break;
559#endif 559#endif
560 default: 560 default:
@@ -760,8 +760,8 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
760 struct kvm_cpuid_entry2 *e, *entry; 760 struct kvm_cpuid_entry2 *e, *entry;
761 761
762 entry = NULL; 762 entry = NULL;
763 for (i = 0; i < vcpu->cpuid_nent; ++i) { 763 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
764 e = &vcpu->cpuid_entries[i]; 764 e = &vcpu->arch.cpuid_entries[i];
765 if (e->function == 0x80000001) { 765 if (e->function == 0x80000001) {
766 entry = e; 766 entry = e;
767 break; 767 break;
@@ -793,18 +793,18 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
793 cpuid->nent * sizeof(struct kvm_cpuid_entry))) 793 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
794 goto out_free; 794 goto out_free;
795 for (i = 0; i < cpuid->nent; i++) { 795 for (i = 0; i < cpuid->nent; i++) {
796 vcpu->cpuid_entries[i].function = cpuid_entries[i].function; 796 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
797 vcpu->cpuid_entries[i].eax = cpuid_entries[i].eax; 797 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
798 vcpu->cpuid_entries[i].ebx = cpuid_entries[i].ebx; 798 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
799 vcpu->cpuid_entries[i].ecx = cpuid_entries[i].ecx; 799 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
800 vcpu->cpuid_entries[i].edx = cpuid_entries[i].edx; 800 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
801 vcpu->cpuid_entries[i].index = 0; 801 vcpu->arch.cpuid_entries[i].index = 0;
802 vcpu->cpuid_entries[i].flags = 0; 802 vcpu->arch.cpuid_entries[i].flags = 0;
803 vcpu->cpuid_entries[i].padding[0] = 0; 803 vcpu->arch.cpuid_entries[i].padding[0] = 0;
804 vcpu->cpuid_entries[i].padding[1] = 0; 804 vcpu->arch.cpuid_entries[i].padding[1] = 0;
805 vcpu->cpuid_entries[i].padding[2] = 0; 805 vcpu->arch.cpuid_entries[i].padding[2] = 0;
806 } 806 }
807 vcpu->cpuid_nent = cpuid->nent; 807 vcpu->arch.cpuid_nent = cpuid->nent;
808 cpuid_fix_nx_cap(vcpu); 808 cpuid_fix_nx_cap(vcpu);
809 r = 0; 809 r = 0;
810 810
@@ -824,10 +824,10 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
824 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) 824 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
825 goto out; 825 goto out;
826 r = -EFAULT; 826 r = -EFAULT;
827 if (copy_from_user(&vcpu->cpuid_entries, entries, 827 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
828 cpuid->nent * sizeof(struct kvm_cpuid_entry2))) 828 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
829 goto out; 829 goto out;
830 vcpu->cpuid_nent = cpuid->nent; 830 vcpu->arch.cpuid_nent = cpuid->nent;
831 return 0; 831 return 0;
832 832
833out: 833out:
@@ -841,16 +841,16 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
841 int r; 841 int r;
842 842
843 r = -E2BIG; 843 r = -E2BIG;
844 if (cpuid->nent < vcpu->cpuid_nent) 844 if (cpuid->nent < vcpu->arch.cpuid_nent)
845 goto out; 845 goto out;
846 r = -EFAULT; 846 r = -EFAULT;
847 if (copy_to_user(entries, &vcpu->cpuid_entries, 847 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
848 vcpu->cpuid_nent * sizeof(struct kvm_cpuid_entry2))) 848 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
849 goto out; 849 goto out;
850 return 0; 850 return 0;
851 851
852out: 852out:
853 cpuid->nent = vcpu->cpuid_nent; 853 cpuid->nent = vcpu->arch.cpuid_nent;
854 return r; 854 return r;
855} 855}
856 856
@@ -1021,7 +1021,7 @@ static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
1021 struct kvm_lapic_state *s) 1021 struct kvm_lapic_state *s)
1022{ 1022{
1023 vcpu_load(vcpu); 1023 vcpu_load(vcpu);
1024 memcpy(s->regs, vcpu->apic->regs, sizeof *s); 1024 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
1025 vcpu_put(vcpu); 1025 vcpu_put(vcpu);
1026 1026
1027 return 0; 1027 return 0;
@@ -1031,7 +1031,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1031 struct kvm_lapic_state *s) 1031 struct kvm_lapic_state *s)
1032{ 1032{
1033 vcpu_load(vcpu); 1033 vcpu_load(vcpu);
1034 memcpy(vcpu->apic->regs, s->regs, sizeof *s); 1034 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
1035 kvm_apic_post_state_restore(vcpu); 1035 kvm_apic_post_state_restore(vcpu);
1036 vcpu_put(vcpu); 1036 vcpu_put(vcpu);
1037 1037
@@ -1047,8 +1047,8 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1047 return -ENXIO; 1047 return -ENXIO;
1048 vcpu_load(vcpu); 1048 vcpu_load(vcpu);
1049 1049
1050 set_bit(irq->irq, vcpu->irq_pending); 1050 set_bit(irq->irq, vcpu->arch.irq_pending);
1051 set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary); 1051 set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
1052 1052
1053 vcpu_put(vcpu); 1053 vcpu_put(vcpu);
1054 1054
@@ -1499,8 +1499,8 @@ static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
1499{ 1499{
1500 struct kvm_io_device *dev; 1500 struct kvm_io_device *dev;
1501 1501
1502 if (vcpu->apic) { 1502 if (vcpu->arch.apic) {
1503 dev = &vcpu->apic->dev; 1503 dev = &vcpu->arch.apic->dev;
1504 if (dev->in_range(dev, addr)) 1504 if (dev->in_range(dev, addr))
1505 return dev; 1505 return dev;
1506 } 1506 }
@@ -1527,7 +1527,7 @@ int emulator_read_std(unsigned long addr,
1527 void *data = val; 1527 void *data = val;
1528 1528
1529 while (bytes) { 1529 while (bytes) {
1530 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 1530 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1531 unsigned offset = addr & (PAGE_SIZE-1); 1531 unsigned offset = addr & (PAGE_SIZE-1);
1532 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset); 1532 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
1533 int ret; 1533 int ret;
@@ -1561,7 +1561,7 @@ static int emulator_read_emulated(unsigned long addr,
1561 return X86EMUL_CONTINUE; 1561 return X86EMUL_CONTINUE;
1562 } 1562 }
1563 1563
1564 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 1564 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1565 1565
1566 /* For APIC access vmexit */ 1566 /* For APIC access vmexit */
1567 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 1567 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1609,7 +1609,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
1609 struct kvm_vcpu *vcpu) 1609 struct kvm_vcpu *vcpu)
1610{ 1610{
1611 struct kvm_io_device *mmio_dev; 1611 struct kvm_io_device *mmio_dev;
1612 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 1612 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1613 1613
1614 if (gpa == UNMAPPED_GVA) { 1614 if (gpa == UNMAPPED_GVA) {
1615 kvm_inject_page_fault(vcpu, addr, 2); 1615 kvm_inject_page_fault(vcpu, addr, 2);
@@ -1678,7 +1678,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
1678#ifndef CONFIG_X86_64 1678#ifndef CONFIG_X86_64
1679 /* guests cmpxchg8b have to be emulated atomically */ 1679 /* guests cmpxchg8b have to be emulated atomically */
1680 if (bytes == 8) { 1680 if (bytes == 8) {
1681 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 1681 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1682 struct page *page; 1682 struct page *page;
1683 char *addr; 1683 char *addr;
1684 u64 val; 1684 u64 val;
@@ -1715,7 +1715,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
1715 1715
1716int emulate_clts(struct kvm_vcpu *vcpu) 1716int emulate_clts(struct kvm_vcpu *vcpu)
1717{ 1717{
1718 kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS); 1718 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
1719 return X86EMUL_CONTINUE; 1719 return X86EMUL_CONTINUE;
1720} 1720}
1721 1721
@@ -1750,7 +1750,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
1750{ 1750{
1751 static int reported; 1751 static int reported;
1752 u8 opcodes[4]; 1752 u8 opcodes[4];
1753 unsigned long rip = vcpu->rip; 1753 unsigned long rip = vcpu->arch.rip;
1754 unsigned long rip_linear; 1754 unsigned long rip_linear;
1755 1755
1756 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS); 1756 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
@@ -1781,46 +1781,46 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1781{ 1781{
1782 int r; 1782 int r;
1783 1783
1784 vcpu->mmio_fault_cr2 = cr2; 1784 vcpu->arch.mmio_fault_cr2 = cr2;
1785 kvm_x86_ops->cache_regs(vcpu); 1785 kvm_x86_ops->cache_regs(vcpu);
1786 1786
1787 vcpu->mmio_is_write = 0; 1787 vcpu->mmio_is_write = 0;
1788 vcpu->pio.string = 0; 1788 vcpu->arch.pio.string = 0;
1789 1789
1790 if (!no_decode) { 1790 if (!no_decode) {
1791 int cs_db, cs_l; 1791 int cs_db, cs_l;
1792 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 1792 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
1793 1793
1794 vcpu->emulate_ctxt.vcpu = vcpu; 1794 vcpu->arch.emulate_ctxt.vcpu = vcpu;
1795 vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu); 1795 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
1796 vcpu->emulate_ctxt.mode = 1796 vcpu->arch.emulate_ctxt.mode =
1797 (vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM) 1797 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
1798 ? X86EMUL_MODE_REAL : cs_l 1798 ? X86EMUL_MODE_REAL : cs_l
1799 ? X86EMUL_MODE_PROT64 : cs_db 1799 ? X86EMUL_MODE_PROT64 : cs_db
1800 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; 1800 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
1801 1801
1802 if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) { 1802 if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
1803 vcpu->emulate_ctxt.cs_base = 0; 1803 vcpu->arch.emulate_ctxt.cs_base = 0;
1804 vcpu->emulate_ctxt.ds_base = 0; 1804 vcpu->arch.emulate_ctxt.ds_base = 0;
1805 vcpu->emulate_ctxt.es_base = 0; 1805 vcpu->arch.emulate_ctxt.es_base = 0;
1806 vcpu->emulate_ctxt.ss_base = 0; 1806 vcpu->arch.emulate_ctxt.ss_base = 0;
1807 } else { 1807 } else {
1808 vcpu->emulate_ctxt.cs_base = 1808 vcpu->arch.emulate_ctxt.cs_base =
1809 get_segment_base(vcpu, VCPU_SREG_CS); 1809 get_segment_base(vcpu, VCPU_SREG_CS);
1810 vcpu->emulate_ctxt.ds_base = 1810 vcpu->arch.emulate_ctxt.ds_base =
1811 get_segment_base(vcpu, VCPU_SREG_DS); 1811 get_segment_base(vcpu, VCPU_SREG_DS);
1812 vcpu->emulate_ctxt.es_base = 1812 vcpu->arch.emulate_ctxt.es_base =
1813 get_segment_base(vcpu, VCPU_SREG_ES); 1813 get_segment_base(vcpu, VCPU_SREG_ES);
1814 vcpu->emulate_ctxt.ss_base = 1814 vcpu->arch.emulate_ctxt.ss_base =
1815 get_segment_base(vcpu, VCPU_SREG_SS); 1815 get_segment_base(vcpu, VCPU_SREG_SS);
1816 } 1816 }
1817 1817
1818 vcpu->emulate_ctxt.gs_base = 1818 vcpu->arch.emulate_ctxt.gs_base =
1819 get_segment_base(vcpu, VCPU_SREG_GS); 1819 get_segment_base(vcpu, VCPU_SREG_GS);
1820 vcpu->emulate_ctxt.fs_base = 1820 vcpu->arch.emulate_ctxt.fs_base =
1821 get_segment_base(vcpu, VCPU_SREG_FS); 1821 get_segment_base(vcpu, VCPU_SREG_FS);
1822 1822
1823 r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops); 1823 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
1824 ++vcpu->stat.insn_emulation; 1824 ++vcpu->stat.insn_emulation;
1825 if (r) { 1825 if (r) {
1826 ++vcpu->stat.insn_emulation_fail; 1826 ++vcpu->stat.insn_emulation_fail;
@@ -1830,9 +1830,9 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1830 } 1830 }
1831 } 1831 }
1832 1832
1833 r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops); 1833 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
1834 1834
1835 if (vcpu->pio.string) 1835 if (vcpu->arch.pio.string)
1836 return EMULATE_DO_MMIO; 1836 return EMULATE_DO_MMIO;
1837 1837
1838 if ((r || vcpu->mmio_is_write) && run) { 1838 if ((r || vcpu->mmio_is_write) && run) {
@@ -1854,7 +1854,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1854 } 1854 }
1855 1855
1856 kvm_x86_ops->decache_regs(vcpu); 1856 kvm_x86_ops->decache_regs(vcpu);
1857 kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags); 1857 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
1858 1858
1859 if (vcpu->mmio_is_write) { 1859 if (vcpu->mmio_is_write) {
1860 vcpu->mmio_needed = 0; 1860 vcpu->mmio_needed = 0;
@@ -1869,33 +1869,33 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
1869{ 1869{
1870 int i; 1870 int i;
1871 1871
1872 for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i) 1872 for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
1873 if (vcpu->pio.guest_pages[i]) { 1873 if (vcpu->arch.pio.guest_pages[i]) {
1874 kvm_release_page_dirty(vcpu->pio.guest_pages[i]); 1874 kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
1875 vcpu->pio.guest_pages[i] = NULL; 1875 vcpu->arch.pio.guest_pages[i] = NULL;
1876 } 1876 }
1877} 1877}
1878 1878
1879static int pio_copy_data(struct kvm_vcpu *vcpu) 1879static int pio_copy_data(struct kvm_vcpu *vcpu)
1880{ 1880{
1881 void *p = vcpu->pio_data; 1881 void *p = vcpu->arch.pio_data;
1882 void *q; 1882 void *q;
1883 unsigned bytes; 1883 unsigned bytes;
1884 int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1; 1884 int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;
1885 1885
1886 q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE, 1886 q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
1887 PAGE_KERNEL); 1887 PAGE_KERNEL);
1888 if (!q) { 1888 if (!q) {
1889 free_pio_guest_pages(vcpu); 1889 free_pio_guest_pages(vcpu);
1890 return -ENOMEM; 1890 return -ENOMEM;
1891 } 1891 }
1892 q += vcpu->pio.guest_page_offset; 1892 q += vcpu->arch.pio.guest_page_offset;
1893 bytes = vcpu->pio.size * vcpu->pio.cur_count; 1893 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
1894 if (vcpu->pio.in) 1894 if (vcpu->arch.pio.in)
1895 memcpy(q, p, bytes); 1895 memcpy(q, p, bytes);
1896 else 1896 else
1897 memcpy(p, q, bytes); 1897 memcpy(p, q, bytes);
1898 q -= vcpu->pio.guest_page_offset; 1898 q -= vcpu->arch.pio.guest_page_offset;
1899 vunmap(q); 1899 vunmap(q);
1900 free_pio_guest_pages(vcpu); 1900 free_pio_guest_pages(vcpu);
1901 return 0; 1901 return 0;
@@ -1903,7 +1903,7 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
1903 1903
1904int complete_pio(struct kvm_vcpu *vcpu) 1904int complete_pio(struct kvm_vcpu *vcpu)
1905{ 1905{
1906 struct kvm_pio_request *io = &vcpu->pio; 1906 struct kvm_pio_request *io = &vcpu->arch.pio;
1907 long delta; 1907 long delta;
1908 int r; 1908 int r;
1909 1909
@@ -1911,7 +1911,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
1911 1911
1912 if (!io->string) { 1912 if (!io->string) {
1913 if (io->in) 1913 if (io->in)
1914 memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data, 1914 memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
1915 io->size); 1915 io->size);
1916 } else { 1916 } else {
1917 if (io->in) { 1917 if (io->in) {
@@ -1929,15 +1929,15 @@ int complete_pio(struct kvm_vcpu *vcpu)
1929 * The size of the register should really depend on 1929 * The size of the register should really depend on
1930 * current address size. 1930 * current address size.
1931 */ 1931 */
1932 vcpu->regs[VCPU_REGS_RCX] -= delta; 1932 vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
1933 } 1933 }
1934 if (io->down) 1934 if (io->down)
1935 delta = -delta; 1935 delta = -delta;
1936 delta *= io->size; 1936 delta *= io->size;
1937 if (io->in) 1937 if (io->in)
1938 vcpu->regs[VCPU_REGS_RDI] += delta; 1938 vcpu->arch.regs[VCPU_REGS_RDI] += delta;
1939 else 1939 else
1940 vcpu->regs[VCPU_REGS_RSI] += delta; 1940 vcpu->arch.regs[VCPU_REGS_RSI] += delta;
1941 } 1941 }
1942 1942
1943 kvm_x86_ops->decache_regs(vcpu); 1943 kvm_x86_ops->decache_regs(vcpu);
@@ -1955,13 +1955,13 @@ static void kernel_pio(struct kvm_io_device *pio_dev,
1955 /* TODO: String I/O for in kernel device */ 1955 /* TODO: String I/O for in kernel device */
1956 1956
1957 mutex_lock(&vcpu->kvm->lock); 1957 mutex_lock(&vcpu->kvm->lock);
1958 if (vcpu->pio.in) 1958 if (vcpu->arch.pio.in)
1959 kvm_iodevice_read(pio_dev, vcpu->pio.port, 1959 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
1960 vcpu->pio.size, 1960 vcpu->arch.pio.size,
1961 pd); 1961 pd);
1962 else 1962 else
1963 kvm_iodevice_write(pio_dev, vcpu->pio.port, 1963 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
1964 vcpu->pio.size, 1964 vcpu->arch.pio.size,
1965 pd); 1965 pd);
1966 mutex_unlock(&vcpu->kvm->lock); 1966 mutex_unlock(&vcpu->kvm->lock);
1967} 1967}
@@ -1969,8 +1969,8 @@ static void kernel_pio(struct kvm_io_device *pio_dev,
1969static void pio_string_write(struct kvm_io_device *pio_dev, 1969static void pio_string_write(struct kvm_io_device *pio_dev,
1970 struct kvm_vcpu *vcpu) 1970 struct kvm_vcpu *vcpu)
1971{ 1971{
1972 struct kvm_pio_request *io = &vcpu->pio; 1972 struct kvm_pio_request *io = &vcpu->arch.pio;
1973 void *pd = vcpu->pio_data; 1973 void *pd = vcpu->arch.pio_data;
1974 int i; 1974 int i;
1975 1975
1976 mutex_lock(&vcpu->kvm->lock); 1976 mutex_lock(&vcpu->kvm->lock);
@@ -1996,25 +1996,25 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1996 1996
1997 vcpu->run->exit_reason = KVM_EXIT_IO; 1997 vcpu->run->exit_reason = KVM_EXIT_IO;
1998 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 1998 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1999 vcpu->run->io.size = vcpu->pio.size = size; 1999 vcpu->run->io.size = vcpu->arch.pio.size = size;
2000 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 2000 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2001 vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1; 2001 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2002 vcpu->run->io.port = vcpu->pio.port = port; 2002 vcpu->run->io.port = vcpu->arch.pio.port = port;
2003 vcpu->pio.in = in; 2003 vcpu->arch.pio.in = in;
2004 vcpu->pio.string = 0; 2004 vcpu->arch.pio.string = 0;
2005 vcpu->pio.down = 0; 2005 vcpu->arch.pio.down = 0;
2006 vcpu->pio.guest_page_offset = 0; 2006 vcpu->arch.pio.guest_page_offset = 0;
2007 vcpu->pio.rep = 0; 2007 vcpu->arch.pio.rep = 0;
2008 2008
2009 kvm_x86_ops->cache_regs(vcpu); 2009 kvm_x86_ops->cache_regs(vcpu);
2010 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4); 2010 memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
2011 kvm_x86_ops->decache_regs(vcpu); 2011 kvm_x86_ops->decache_regs(vcpu);
2012 2012
2013 kvm_x86_ops->skip_emulated_instruction(vcpu); 2013 kvm_x86_ops->skip_emulated_instruction(vcpu);
2014 2014
2015 pio_dev = vcpu_find_pio_dev(vcpu, port); 2015 pio_dev = vcpu_find_pio_dev(vcpu, port);
2016 if (pio_dev) { 2016 if (pio_dev) {
2017 kernel_pio(pio_dev, vcpu, vcpu->pio_data); 2017 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
2018 complete_pio(vcpu); 2018 complete_pio(vcpu);
2019 return 1; 2019 return 1;
2020 } 2020 }
@@ -2034,15 +2034,15 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2034 2034
2035 vcpu->run->exit_reason = KVM_EXIT_IO; 2035 vcpu->run->exit_reason = KVM_EXIT_IO;
2036 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 2036 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2037 vcpu->run->io.size = vcpu->pio.size = size; 2037 vcpu->run->io.size = vcpu->arch.pio.size = size;
2038 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 2038 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2039 vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count; 2039 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2040 vcpu->run->io.port = vcpu->pio.port = port; 2040 vcpu->run->io.port = vcpu->arch.pio.port = port;
2041 vcpu->pio.in = in; 2041 vcpu->arch.pio.in = in;
2042 vcpu->pio.string = 1; 2042 vcpu->arch.pio.string = 1;
2043 vcpu->pio.down = down; 2043 vcpu->arch.pio.down = down;
2044 vcpu->pio.guest_page_offset = offset_in_page(address); 2044 vcpu->arch.pio.guest_page_offset = offset_in_page(address);
2045 vcpu->pio.rep = rep; 2045 vcpu->arch.pio.rep = rep;
2046 2046
2047 if (!count) { 2047 if (!count) {
2048 kvm_x86_ops->skip_emulated_instruction(vcpu); 2048 kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2072,15 +2072,15 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2072 return 1; 2072 return 1;
2073 } 2073 }
2074 vcpu->run->io.count = now; 2074 vcpu->run->io.count = now;
2075 vcpu->pio.cur_count = now; 2075 vcpu->arch.pio.cur_count = now;
2076 2076
2077 if (vcpu->pio.cur_count == vcpu->pio.count) 2077 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
2078 kvm_x86_ops->skip_emulated_instruction(vcpu); 2078 kvm_x86_ops->skip_emulated_instruction(vcpu);
2079 2079
2080 for (i = 0; i < nr_pages; ++i) { 2080 for (i = 0; i < nr_pages; ++i) {
2081 mutex_lock(&vcpu->kvm->lock); 2081 mutex_lock(&vcpu->kvm->lock);
2082 page = gva_to_page(vcpu, address + i * PAGE_SIZE); 2082 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
2083 vcpu->pio.guest_pages[i] = page; 2083 vcpu->arch.pio.guest_pages[i] = page;
2084 mutex_unlock(&vcpu->kvm->lock); 2084 mutex_unlock(&vcpu->kvm->lock);
2085 if (!page) { 2085 if (!page) {
2086 kvm_inject_gp(vcpu, 0); 2086 kvm_inject_gp(vcpu, 0);
@@ -2090,13 +2090,13 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2090 } 2090 }
2091 2091
2092 pio_dev = vcpu_find_pio_dev(vcpu, port); 2092 pio_dev = vcpu_find_pio_dev(vcpu, port);
2093 if (!vcpu->pio.in) { 2093 if (!vcpu->arch.pio.in) {
2094 /* string PIO write */ 2094 /* string PIO write */
2095 ret = pio_copy_data(vcpu); 2095 ret = pio_copy_data(vcpu);
2096 if (ret >= 0 && pio_dev) { 2096 if (ret >= 0 && pio_dev) {
2097 pio_string_write(pio_dev, vcpu); 2097 pio_string_write(pio_dev, vcpu);
2098 complete_pio(vcpu); 2098 complete_pio(vcpu);
2099 if (vcpu->pio.count == 0) 2099 if (vcpu->arch.pio.count == 0)
2100 ret = 1; 2100 ret = 1;
2101 } 2101 }
2102 } else if (pio_dev) 2102 } else if (pio_dev)
@@ -2156,9 +2156,9 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2156{ 2156{
2157 ++vcpu->stat.halt_exits; 2157 ++vcpu->stat.halt_exits;
2158 if (irqchip_in_kernel(vcpu->kvm)) { 2158 if (irqchip_in_kernel(vcpu->kvm)) {
2159 vcpu->mp_state = VCPU_MP_STATE_HALTED; 2159 vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
2160 kvm_vcpu_block(vcpu); 2160 kvm_vcpu_block(vcpu);
2161 if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE) 2161 if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
2162 return -EINTR; 2162 return -EINTR;
2163 return 1; 2163 return 1;
2164 } else { 2164 } else {
@@ -2174,11 +2174,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2174 2174
2175 kvm_x86_ops->cache_regs(vcpu); 2175 kvm_x86_ops->cache_regs(vcpu);
2176 2176
2177 nr = vcpu->regs[VCPU_REGS_RAX]; 2177 nr = vcpu->arch.regs[VCPU_REGS_RAX];
2178 a0 = vcpu->regs[VCPU_REGS_RBX]; 2178 a0 = vcpu->arch.regs[VCPU_REGS_RBX];
2179 a1 = vcpu->regs[VCPU_REGS_RCX]; 2179 a1 = vcpu->arch.regs[VCPU_REGS_RCX];
2180 a2 = vcpu->regs[VCPU_REGS_RDX]; 2180 a2 = vcpu->arch.regs[VCPU_REGS_RDX];
2181 a3 = vcpu->regs[VCPU_REGS_RSI]; 2181 a3 = vcpu->arch.regs[VCPU_REGS_RSI];
2182 2182
2183 if (!is_long_mode(vcpu)) { 2183 if (!is_long_mode(vcpu)) {
2184 nr &= 0xFFFFFFFF; 2184 nr &= 0xFFFFFFFF;
@@ -2193,7 +2193,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2193 ret = -KVM_ENOSYS; 2193 ret = -KVM_ENOSYS;
2194 break; 2194 break;
2195 } 2195 }
2196 vcpu->regs[VCPU_REGS_RAX] = ret; 2196 vcpu->arch.regs[VCPU_REGS_RAX] = ret;
2197 kvm_x86_ops->decache_regs(vcpu); 2197 kvm_x86_ops->decache_regs(vcpu);
2198 return 0; 2198 return 0;
2199} 2199}
@@ -2215,7 +2215,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2215 2215
2216 kvm_x86_ops->cache_regs(vcpu); 2216 kvm_x86_ops->cache_regs(vcpu);
2217 kvm_x86_ops->patch_hypercall(vcpu, instruction); 2217 kvm_x86_ops->patch_hypercall(vcpu, instruction);
2218 if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu) 2218 if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
2219 != X86EMUL_CONTINUE) 2219 != X86EMUL_CONTINUE)
2220 ret = -EFAULT; 2220 ret = -EFAULT;
2221 2221
@@ -2255,13 +2255,13 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2255 kvm_x86_ops->decache_cr4_guest_bits(vcpu); 2255 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2256 switch (cr) { 2256 switch (cr) {
2257 case 0: 2257 case 0:
2258 return vcpu->cr0; 2258 return vcpu->arch.cr0;
2259 case 2: 2259 case 2:
2260 return vcpu->cr2; 2260 return vcpu->arch.cr2;
2261 case 3: 2261 case 3:
2262 return vcpu->cr3; 2262 return vcpu->arch.cr3;
2263 case 4: 2263 case 4:
2264 return vcpu->cr4; 2264 return vcpu->arch.cr4;
2265 case 8: 2265 case 8:
2266 return get_cr8(vcpu); 2266 return get_cr8(vcpu);
2267 default: 2267 default:
@@ -2275,17 +2275,17 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2275{ 2275{
2276 switch (cr) { 2276 switch (cr) {
2277 case 0: 2277 case 0:
2278 set_cr0(vcpu, mk_cr_64(vcpu->cr0, val)); 2278 set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
2279 *rflags = kvm_x86_ops->get_rflags(vcpu); 2279 *rflags = kvm_x86_ops->get_rflags(vcpu);
2280 break; 2280 break;
2281 case 2: 2281 case 2:
2282 vcpu->cr2 = val; 2282 vcpu->arch.cr2 = val;
2283 break; 2283 break;
2284 case 3: 2284 case 3:
2285 set_cr3(vcpu, val); 2285 set_cr3(vcpu, val);
2286 break; 2286 break;
2287 case 4: 2287 case 4:
2288 set_cr4(vcpu, mk_cr_64(vcpu->cr4, val)); 2288 set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
2289 break; 2289 break;
2290 case 8: 2290 case 8:
2291 set_cr8(vcpu, val & 0xfUL); 2291 set_cr8(vcpu, val & 0xfUL);
@@ -2297,13 +2297,13 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2297 2297
2298static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) 2298static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2299{ 2299{
2300 struct kvm_cpuid_entry2 *e = &vcpu->cpuid_entries[i]; 2300 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2301 int j, nent = vcpu->cpuid_nent; 2301 int j, nent = vcpu->arch.cpuid_nent;
2302 2302
2303 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; 2303 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2304 /* when no next entry is found, the current entry[i] is reselected */ 2304 /* when no next entry is found, the current entry[i] is reselected */
2305 for (j = i + 1; j == i; j = (j + 1) % nent) { 2305 for (j = i + 1; j == i; j = (j + 1) % nent) {
2306 struct kvm_cpuid_entry2 *ej = &vcpu->cpuid_entries[j]; 2306 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
2307 if (ej->function == e->function) { 2307 if (ej->function == e->function) {
2308 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; 2308 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2309 return j; 2309 return j;
@@ -2334,15 +2334,15 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2334 struct kvm_cpuid_entry2 *e, *best; 2334 struct kvm_cpuid_entry2 *e, *best;
2335 2335
2336 kvm_x86_ops->cache_regs(vcpu); 2336 kvm_x86_ops->cache_regs(vcpu);
2337 function = vcpu->regs[VCPU_REGS_RAX]; 2337 function = vcpu->arch.regs[VCPU_REGS_RAX];
2338 index = vcpu->regs[VCPU_REGS_RCX]; 2338 index = vcpu->arch.regs[VCPU_REGS_RCX];
2339 vcpu->regs[VCPU_REGS_RAX] = 0; 2339 vcpu->arch.regs[VCPU_REGS_RAX] = 0;
2340 vcpu->regs[VCPU_REGS_RBX] = 0; 2340 vcpu->arch.regs[VCPU_REGS_RBX] = 0;
2341 vcpu->regs[VCPU_REGS_RCX] = 0; 2341 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2342 vcpu->regs[VCPU_REGS_RDX] = 0; 2342 vcpu->arch.regs[VCPU_REGS_RDX] = 0;
2343 best = NULL; 2343 best = NULL;
2344 for (i = 0; i < vcpu->cpuid_nent; ++i) { 2344 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2345 e = &vcpu->cpuid_entries[i]; 2345 e = &vcpu->arch.cpuid_entries[i];
2346 if (is_matching_cpuid_entry(e, function, index)) { 2346 if (is_matching_cpuid_entry(e, function, index)) {
2347 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) 2347 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
2348 move_to_next_stateful_cpuid_entry(vcpu, i); 2348 move_to_next_stateful_cpuid_entry(vcpu, i);
@@ -2357,10 +2357,10 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2357 best = e; 2357 best = e;
2358 } 2358 }
2359 if (best) { 2359 if (best) {
2360 vcpu->regs[VCPU_REGS_RAX] = best->eax; 2360 vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
2361 vcpu->regs[VCPU_REGS_RBX] = best->ebx; 2361 vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
2362 vcpu->regs[VCPU_REGS_RCX] = best->ecx; 2362 vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
2363 vcpu->regs[VCPU_REGS_RDX] = best->edx; 2363 vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
2364 } 2364 }
2365 kvm_x86_ops->decache_regs(vcpu); 2365 kvm_x86_ops->decache_regs(vcpu);
2366 kvm_x86_ops->skip_emulated_instruction(vcpu); 2366 kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2376,9 +2376,9 @@ EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
2376static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu, 2376static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2377 struct kvm_run *kvm_run) 2377 struct kvm_run *kvm_run)
2378{ 2378{
2379 return (!vcpu->irq_summary && 2379 return (!vcpu->arch.irq_summary &&
2380 kvm_run->request_interrupt_window && 2380 kvm_run->request_interrupt_window &&
2381 vcpu->interrupt_window_open && 2381 vcpu->arch.interrupt_window_open &&
2382 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF)); 2382 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2383} 2383}
2384 2384
@@ -2392,22 +2392,22 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2392 kvm_run->ready_for_interrupt_injection = 1; 2392 kvm_run->ready_for_interrupt_injection = 1;
2393 else 2393 else
2394 kvm_run->ready_for_interrupt_injection = 2394 kvm_run->ready_for_interrupt_injection =
2395 (vcpu->interrupt_window_open && 2395 (vcpu->arch.interrupt_window_open &&
2396 vcpu->irq_summary == 0); 2396 vcpu->arch.irq_summary == 0);
2397} 2397}
2398 2398
2399static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2399static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2400{ 2400{
2401 int r; 2401 int r;
2402 2402
2403 if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) { 2403 if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
2404 pr_debug("vcpu %d received sipi with vector # %x\n", 2404 pr_debug("vcpu %d received sipi with vector # %x\n",
2405 vcpu->vcpu_id, vcpu->sipi_vector); 2405 vcpu->vcpu_id, vcpu->arch.sipi_vector);
2406 kvm_lapic_reset(vcpu); 2406 kvm_lapic_reset(vcpu);
2407 r = kvm_x86_ops->vcpu_reset(vcpu); 2407 r = kvm_x86_ops->vcpu_reset(vcpu);
2408 if (r) 2408 if (r)
2409 return r; 2409 return r;
2410 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE; 2410 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
2411 } 2411 }
2412 2412
2413preempted: 2413preempted:
@@ -2437,7 +2437,7 @@ again:
2437 goto out; 2437 goto out;
2438 } 2438 }
2439 2439
2440 if (vcpu->exception.pending) 2440 if (vcpu->arch.exception.pending)
2441 __queue_exception(vcpu); 2441 __queue_exception(vcpu);
2442 else if (irqchip_in_kernel(vcpu->kvm)) 2442 else if (irqchip_in_kernel(vcpu->kvm))
2443 kvm_x86_ops->inject_pending_irq(vcpu); 2443 kvm_x86_ops->inject_pending_irq(vcpu);
@@ -2475,11 +2475,11 @@ again:
2475 */ 2475 */
2476 if (unlikely(prof_on == KVM_PROFILING)) { 2476 if (unlikely(prof_on == KVM_PROFILING)) {
2477 kvm_x86_ops->cache_regs(vcpu); 2477 kvm_x86_ops->cache_regs(vcpu);
2478 profile_hit(KVM_PROFILING, (void *)vcpu->rip); 2478 profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
2479 } 2479 }
2480 2480
2481 if (vcpu->exception.pending && kvm_x86_ops->exception_injected(vcpu)) 2481 if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
2482 vcpu->exception.pending = false; 2482 vcpu->arch.exception.pending = false;
2483 2483
2484 r = kvm_x86_ops->handle_exit(kvm_run, vcpu); 2484 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
2485 2485
@@ -2512,7 +2512,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2512 2512
2513 vcpu_load(vcpu); 2513 vcpu_load(vcpu);
2514 2514
2515 if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) { 2515 if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
2516 kvm_vcpu_block(vcpu); 2516 kvm_vcpu_block(vcpu);
2517 vcpu_put(vcpu); 2517 vcpu_put(vcpu);
2518 return -EAGAIN; 2518 return -EAGAIN;
@@ -2525,7 +2525,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2525 if (!irqchip_in_kernel(vcpu->kvm)) 2525 if (!irqchip_in_kernel(vcpu->kvm))
2526 set_cr8(vcpu, kvm_run->cr8); 2526 set_cr8(vcpu, kvm_run->cr8);
2527 2527
2528 if (vcpu->pio.cur_count) { 2528 if (vcpu->arch.pio.cur_count) {
2529 r = complete_pio(vcpu); 2529 r = complete_pio(vcpu);
2530 if (r) 2530 if (r)
2531 goto out; 2531 goto out;
@@ -2536,7 +2536,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2536 vcpu->mmio_read_completed = 1; 2536 vcpu->mmio_read_completed = 1;
2537 vcpu->mmio_needed = 0; 2537 vcpu->mmio_needed = 0;
2538 r = emulate_instruction(vcpu, kvm_run, 2538 r = emulate_instruction(vcpu, kvm_run,
2539 vcpu->mmio_fault_cr2, 0, 1); 2539 vcpu->arch.mmio_fault_cr2, 0, 1);
2540 if (r == EMULATE_DO_MMIO) { 2540 if (r == EMULATE_DO_MMIO) {
2541 /* 2541 /*
2542 * Read-modify-write. Back to userspace. 2542 * Read-modify-write. Back to userspace.
@@ -2548,7 +2548,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2548#endif 2548#endif
2549 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) { 2549 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
2550 kvm_x86_ops->cache_regs(vcpu); 2550 kvm_x86_ops->cache_regs(vcpu);
2551 vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret; 2551 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
2552 kvm_x86_ops->decache_regs(vcpu); 2552 kvm_x86_ops->decache_regs(vcpu);
2553 } 2553 }
2554 2554
@@ -2568,26 +2568,26 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2568 2568
2569 kvm_x86_ops->cache_regs(vcpu); 2569 kvm_x86_ops->cache_regs(vcpu);
2570 2570
2571 regs->rax = vcpu->regs[VCPU_REGS_RAX]; 2571 regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
2572 regs->rbx = vcpu->regs[VCPU_REGS_RBX]; 2572 regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
2573 regs->rcx = vcpu->regs[VCPU_REGS_RCX]; 2573 regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
2574 regs->rdx = vcpu->regs[VCPU_REGS_RDX]; 2574 regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
2575 regs->rsi = vcpu->regs[VCPU_REGS_RSI]; 2575 regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
2576 regs->rdi = vcpu->regs[VCPU_REGS_RDI]; 2576 regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
2577 regs->rsp = vcpu->regs[VCPU_REGS_RSP]; 2577 regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
2578 regs->rbp = vcpu->regs[VCPU_REGS_RBP]; 2578 regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
2579#ifdef CONFIG_X86_64 2579#ifdef CONFIG_X86_64
2580 regs->r8 = vcpu->regs[VCPU_REGS_R8]; 2580 regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
2581 regs->r9 = vcpu->regs[VCPU_REGS_R9]; 2581 regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
2582 regs->r10 = vcpu->regs[VCPU_REGS_R10]; 2582 regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
2583 regs->r11 = vcpu->regs[VCPU_REGS_R11]; 2583 regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
2584 regs->r12 = vcpu->regs[VCPU_REGS_R12]; 2584 regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
2585 regs->r13 = vcpu->regs[VCPU_REGS_R13]; 2585 regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
2586 regs->r14 = vcpu->regs[VCPU_REGS_R14]; 2586 regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
2587 regs->r15 = vcpu->regs[VCPU_REGS_R15]; 2587 regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
2588#endif 2588#endif
2589 2589
2590 regs->rip = vcpu->rip; 2590 regs->rip = vcpu->arch.rip;
2591 regs->rflags = kvm_x86_ops->get_rflags(vcpu); 2591 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
2592 2592
2593 /* 2593 /*
@@ -2605,26 +2605,26 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2605{ 2605{
2606 vcpu_load(vcpu); 2606 vcpu_load(vcpu);
2607 2607
2608 vcpu->regs[VCPU_REGS_RAX] = regs->rax; 2608 vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
2609 vcpu->regs[VCPU_REGS_RBX] = regs->rbx; 2609 vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
2610 vcpu->regs[VCPU_REGS_RCX] = regs->rcx; 2610 vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
2611 vcpu->regs[VCPU_REGS_RDX] = regs->rdx; 2611 vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
2612 vcpu->regs[VCPU_REGS_RSI] = regs->rsi; 2612 vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
2613 vcpu->regs[VCPU_REGS_RDI] = regs->rdi; 2613 vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
2614 vcpu->regs[VCPU_REGS_RSP] = regs->rsp; 2614 vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
2615 vcpu->regs[VCPU_REGS_RBP] = regs->rbp; 2615 vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
2616#ifdef CONFIG_X86_64 2616#ifdef CONFIG_X86_64
2617 vcpu->regs[VCPU_REGS_R8] = regs->r8; 2617 vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
2618 vcpu->regs[VCPU_REGS_R9] = regs->r9; 2618 vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
2619 vcpu->regs[VCPU_REGS_R10] = regs->r10; 2619 vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
2620 vcpu->regs[VCPU_REGS_R11] = regs->r11; 2620 vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
2621 vcpu->regs[VCPU_REGS_R12] = regs->r12; 2621 vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
2622 vcpu->regs[VCPU_REGS_R13] = regs->r13; 2622 vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
2623 vcpu->regs[VCPU_REGS_R14] = regs->r14; 2623 vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
2624 vcpu->regs[VCPU_REGS_R15] = regs->r15; 2624 vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
2625#endif 2625#endif
2626 2626
2627 vcpu->rip = regs->rip; 2627 vcpu->arch.rip = regs->rip;
2628 kvm_x86_ops->set_rflags(vcpu, regs->rflags); 2628 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
2629 2629
2630 kvm_x86_ops->decache_regs(vcpu); 2630 kvm_x86_ops->decache_regs(vcpu);
@@ -2676,12 +2676,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2676 sregs->gdt.base = dt.base; 2676 sregs->gdt.base = dt.base;
2677 2677
2678 kvm_x86_ops->decache_cr4_guest_bits(vcpu); 2678 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2679 sregs->cr0 = vcpu->cr0; 2679 sregs->cr0 = vcpu->arch.cr0;
2680 sregs->cr2 = vcpu->cr2; 2680 sregs->cr2 = vcpu->arch.cr2;
2681 sregs->cr3 = vcpu->cr3; 2681 sregs->cr3 = vcpu->arch.cr3;
2682 sregs->cr4 = vcpu->cr4; 2682 sregs->cr4 = vcpu->arch.cr4;
2683 sregs->cr8 = get_cr8(vcpu); 2683 sregs->cr8 = get_cr8(vcpu);
2684 sregs->efer = vcpu->shadow_efer; 2684 sregs->efer = vcpu->arch.shadow_efer;
2685 sregs->apic_base = kvm_get_apic_base(vcpu); 2685 sregs->apic_base = kvm_get_apic_base(vcpu);
2686 2686
2687 if (irqchip_in_kernel(vcpu->kvm)) { 2687 if (irqchip_in_kernel(vcpu->kvm)) {
@@ -2692,7 +2692,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2692 set_bit(pending_vec, 2692 set_bit(pending_vec,
2693 (unsigned long *)sregs->interrupt_bitmap); 2693 (unsigned long *)sregs->interrupt_bitmap);
2694 } else 2694 } else
2695 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending, 2695 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
2696 sizeof sregs->interrupt_bitmap); 2696 sizeof sregs->interrupt_bitmap);
2697 2697
2698 vcpu_put(vcpu); 2698 vcpu_put(vcpu);
@@ -2722,13 +2722,13 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2722 dt.base = sregs->gdt.base; 2722 dt.base = sregs->gdt.base;
2723 kvm_x86_ops->set_gdt(vcpu, &dt); 2723 kvm_x86_ops->set_gdt(vcpu, &dt);
2724 2724
2725 vcpu->cr2 = sregs->cr2; 2725 vcpu->arch.cr2 = sregs->cr2;
2726 mmu_reset_needed |= vcpu->cr3 != sregs->cr3; 2726 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
2727 vcpu->cr3 = sregs->cr3; 2727 vcpu->arch.cr3 = sregs->cr3;
2728 2728
2729 set_cr8(vcpu, sregs->cr8); 2729 set_cr8(vcpu, sregs->cr8);
2730 2730
2731 mmu_reset_needed |= vcpu->shadow_efer != sregs->efer; 2731 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
2732#ifdef CONFIG_X86_64 2732#ifdef CONFIG_X86_64
2733 kvm_x86_ops->set_efer(vcpu, sregs->efer); 2733 kvm_x86_ops->set_efer(vcpu, sregs->efer);
2734#endif 2734#endif
@@ -2736,25 +2736,25 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2736 2736
2737 kvm_x86_ops->decache_cr4_guest_bits(vcpu); 2737 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2738 2738
2739 mmu_reset_needed |= vcpu->cr0 != sregs->cr0; 2739 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
2740 vcpu->cr0 = sregs->cr0; 2740 vcpu->arch.cr0 = sregs->cr0;
2741 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); 2741 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
2742 2742
2743 mmu_reset_needed |= vcpu->cr4 != sregs->cr4; 2743 mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
2744 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 2744 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
2745 if (!is_long_mode(vcpu) && is_pae(vcpu)) 2745 if (!is_long_mode(vcpu) && is_pae(vcpu))
2746 load_pdptrs(vcpu, vcpu->cr3); 2746 load_pdptrs(vcpu, vcpu->arch.cr3);
2747 2747
2748 if (mmu_reset_needed) 2748 if (mmu_reset_needed)
2749 kvm_mmu_reset_context(vcpu); 2749 kvm_mmu_reset_context(vcpu);
2750 2750
2751 if (!irqchip_in_kernel(vcpu->kvm)) { 2751 if (!irqchip_in_kernel(vcpu->kvm)) {
2752 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap, 2752 memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
2753 sizeof vcpu->irq_pending); 2753 sizeof vcpu->arch.irq_pending);
2754 vcpu->irq_summary = 0; 2754 vcpu->arch.irq_summary = 0;
2755 for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i) 2755 for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
2756 if (vcpu->irq_pending[i]) 2756 if (vcpu->arch.irq_pending[i])
2757 __set_bit(i, &vcpu->irq_summary); 2757 __set_bit(i, &vcpu->arch.irq_summary);
2758 } else { 2758 } else {
2759 max_bits = (sizeof sregs->interrupt_bitmap) << 3; 2759 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
2760 pending_vec = find_first_bit( 2760 pending_vec = find_first_bit(
@@ -2829,7 +2829,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2829 2829
2830 vcpu_load(vcpu); 2830 vcpu_load(vcpu);
2831 mutex_lock(&vcpu->kvm->lock); 2831 mutex_lock(&vcpu->kvm->lock);
2832 gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr); 2832 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
2833 tr->physical_address = gpa; 2833 tr->physical_address = gpa;
2834 tr->valid = gpa != UNMAPPED_GVA; 2834 tr->valid = gpa != UNMAPPED_GVA;
2835 tr->writeable = 1; 2835 tr->writeable = 1;
@@ -2842,7 +2842,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2842 2842
2843int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 2843int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2844{ 2844{
2845 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image; 2845 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
2846 2846
2847 vcpu_load(vcpu); 2847 vcpu_load(vcpu);
2848 2848
@@ -2862,7 +2862,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2862 2862
2863int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 2863int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2864{ 2864{
2865 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image; 2865 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
2866 2866
2867 vcpu_load(vcpu); 2867 vcpu_load(vcpu);
2868 2868
@@ -2886,16 +2886,16 @@ void fx_init(struct kvm_vcpu *vcpu)
2886 2886
2887 /* Initialize guest FPU by resetting ours and saving into guest's */ 2887 /* Initialize guest FPU by resetting ours and saving into guest's */
2888 preempt_disable(); 2888 preempt_disable();
2889 fx_save(&vcpu->host_fx_image); 2889 fx_save(&vcpu->arch.host_fx_image);
2890 fpu_init(); 2890 fpu_init();
2891 fx_save(&vcpu->guest_fx_image); 2891 fx_save(&vcpu->arch.guest_fx_image);
2892 fx_restore(&vcpu->host_fx_image); 2892 fx_restore(&vcpu->arch.host_fx_image);
2893 preempt_enable(); 2893 preempt_enable();
2894 2894
2895 vcpu->cr0 |= X86_CR0_ET; 2895 vcpu->arch.cr0 |= X86_CR0_ET;
2896 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space); 2896 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
2897 vcpu->guest_fx_image.mxcsr = 0x1f80; 2897 vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
2898 memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask, 2898 memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
2899 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask); 2899 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
2900} 2900}
2901EXPORT_SYMBOL_GPL(fx_init); 2901EXPORT_SYMBOL_GPL(fx_init);
@@ -2906,8 +2906,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
2906 return; 2906 return;
2907 2907
2908 vcpu->guest_fpu_loaded = 1; 2908 vcpu->guest_fpu_loaded = 1;
2909 fx_save(&vcpu->host_fx_image); 2909 fx_save(&vcpu->arch.host_fx_image);
2910 fx_restore(&vcpu->guest_fx_image); 2910 fx_restore(&vcpu->arch.guest_fx_image);
2911} 2911}
2912EXPORT_SYMBOL_GPL(kvm_load_guest_fpu); 2912EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
2913 2913
@@ -2917,8 +2917,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
2917 return; 2917 return;
2918 2918
2919 vcpu->guest_fpu_loaded = 0; 2919 vcpu->guest_fpu_loaded = 0;
2920 fx_save(&vcpu->guest_fx_image); 2920 fx_save(&vcpu->arch.guest_fx_image);
2921 fx_restore(&vcpu->host_fx_image); 2921 fx_restore(&vcpu->arch.host_fx_image);
2922 ++vcpu->stat.fpu_reload; 2922 ++vcpu->stat.fpu_reload;
2923} 2923}
2924EXPORT_SYMBOL_GPL(kvm_put_guest_fpu); 2924EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
@@ -2939,7 +2939,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2939 int r; 2939 int r;
2940 2940
2941 /* We do fxsave: this must be aligned. */ 2941 /* We do fxsave: this must be aligned. */
2942 BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF); 2942 BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
2943 2943
2944 vcpu_load(vcpu); 2944 vcpu_load(vcpu);
2945 r = kvm_arch_vcpu_reset(vcpu); 2945 r = kvm_arch_vcpu_reset(vcpu);
@@ -3003,18 +3003,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
3003 BUG_ON(vcpu->kvm == NULL); 3003 BUG_ON(vcpu->kvm == NULL);
3004 kvm = vcpu->kvm; 3004 kvm = vcpu->kvm;
3005 3005
3006 vcpu->mmu.root_hpa = INVALID_PAGE; 3006 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3007 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0) 3007 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
3008 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE; 3008 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
3009 else 3009 else
3010 vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED; 3010 vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
3011 3011
3012 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 3012 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3013 if (!page) { 3013 if (!page) {
3014 r = -ENOMEM; 3014 r = -ENOMEM;
3015 goto fail; 3015 goto fail;
3016 } 3016 }
3017 vcpu->pio_data = page_address(page); 3017 vcpu->arch.pio_data = page_address(page);
3018 3018
3019 r = kvm_mmu_create(vcpu); 3019 r = kvm_mmu_create(vcpu);
3020 if (r < 0) 3020 if (r < 0)
@@ -3031,7 +3031,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
3031fail_mmu_destroy: 3031fail_mmu_destroy:
3032 kvm_mmu_destroy(vcpu); 3032 kvm_mmu_destroy(vcpu);
3033fail_free_pio_data: 3033fail_free_pio_data:
3034 free_page((unsigned long)vcpu->pio_data); 3034 free_page((unsigned long)vcpu->arch.pio_data);
3035fail: 3035fail:
3036 return r; 3036 return r;
3037} 3037}
@@ -3040,7 +3040,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
3040{ 3040{
3041 kvm_free_lapic(vcpu); 3041 kvm_free_lapic(vcpu);
3042 kvm_mmu_destroy(vcpu); 3042 kvm_mmu_destroy(vcpu);
3043 free_page((unsigned long)vcpu->pio_data); 3043 free_page((unsigned long)vcpu->arch.pio_data);
3044} 3044}
3045 3045
3046struct kvm *kvm_arch_create_vm(void) 3046struct kvm *kvm_arch_create_vm(void)