Diffstat (limited to 'drivers/kvm/kvm_main.c')

 drivers/kvm/kvm_main.c | 160 ++++++++++++++++----------------
 1 file changed, 80 insertions(+), 80 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8da13a462e3c..9bfa1bcd26e9 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -53,7 +53,7 @@ static LIST_HEAD(vm_list);
 
 static cpumask_t cpus_hardware_enabled;
 
-struct kvm_arch_ops *kvm_arch_ops;
+struct kvm_x86_ops *kvm_x86_ops;
 struct kmem_cache *kvm_vcpu_cache;
 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
 
@@ -182,14 +182,14 @@ static void vcpu_load(struct kvm_vcpu *vcpu)
 	mutex_lock(&vcpu->mutex);
 	cpu = get_cpu();
 	preempt_notifier_register(&vcpu->preempt_notifier);
-	kvm_arch_ops->vcpu_load(vcpu, cpu);
+	kvm_x86_ops->vcpu_load(vcpu, cpu);
 	put_cpu();
 }
 
 static void vcpu_put(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
-	kvm_arch_ops->vcpu_put(vcpu);
+	kvm_x86_ops->vcpu_put(vcpu);
 	preempt_notifier_unregister(&vcpu->preempt_notifier);
 	preempt_enable();
 	mutex_unlock(&vcpu->mutex);
@@ -374,7 +374,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
 		kvm_unload_vcpu_mmu(kvm->vcpus[i]);
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
-			kvm_arch_ops->vcpu_free(kvm->vcpus[i]);
+			kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
 			kvm->vcpus[i] = NULL;
 		}
 	}
@@ -405,7 +405,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
 
 static void inject_gp(struct kvm_vcpu *vcpu)
 {
-	kvm_arch_ops->inject_gp(vcpu, 0);
+	kvm_x86_ops->inject_gp(vcpu, 0);
 }
 
 /*
@@ -480,7 +480,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 				inject_gp(vcpu);
 				return;
 			}
-			kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 			if (cs_l) {
 				printk(KERN_DEBUG "set_cr0: #GP, start paging "
 				       "in long mode while CS.L == 1\n");
@@ -499,7 +499,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 	}
 
-	kvm_arch_ops->set_cr0(vcpu, cr0);
+	kvm_x86_ops->set_cr0(vcpu, cr0);
 	vcpu->cr0 = cr0;
 
 	mutex_lock(&vcpu->kvm->lock);
@@ -542,7 +542,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		inject_gp(vcpu);
 		return;
 	}
-	kvm_arch_ops->set_cr4(vcpu, cr4);
+	kvm_x86_ops->set_cr4(vcpu, cr4);
 	vcpu->cr4 = cr4;
 	mutex_lock(&vcpu->kvm->lock);
 	kvm_mmu_reset_context(vcpu);
@@ -1134,7 +1134,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 
 	if (gpa == UNMAPPED_GVA) {
-		kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
+		kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
@@ -1197,7 +1197,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 
 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
-	return kvm_arch_ops->get_segment_base(vcpu, seg);
+	return kvm_x86_ops->get_segment_base(vcpu, seg);
 }
 
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
@@ -1208,7 +1208,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
 	vcpu->cr0 &= ~X86_CR0_TS;
-	kvm_arch_ops->set_cr0(vcpu, vcpu->cr0);
+	kvm_x86_ops->set_cr0(vcpu, vcpu->cr0);
 	return X86EMUL_CONTINUE;
 }
 
@@ -1218,7 +1218,7 @@ int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest)
 
 	switch (dr) {
 	case 0 ... 3:
-		*dest = kvm_arch_ops->get_dr(vcpu, dr);
+		*dest = kvm_x86_ops->get_dr(vcpu, dr);
 		return X86EMUL_CONTINUE;
 	default:
 		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
@@ -1231,7 +1231,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
 	int exception;
 
-	kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
+	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
 	if (exception) {
 		/* FIXME: better handling */
 		return X86EMUL_UNHANDLEABLE;
@@ -1277,12 +1277,12 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 	int cs_db, cs_l;
 
 	vcpu->mmio_fault_cr2 = cr2;
-	kvm_arch_ops->cache_regs(vcpu);
+	kvm_x86_ops->cache_regs(vcpu);
 
-	kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
 	emulate_ctxt.vcpu = vcpu;
-	emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
+	emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
 	emulate_ctxt.cr2 = cr2;
 	emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
 		? X86EMUL_MODE_REAL : cs_l
@@ -1328,8 +1328,8 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 		return EMULATE_DO_MMIO;
 	}
 
-	kvm_arch_ops->decache_regs(vcpu);
-	kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);
+	kvm_x86_ops->decache_regs(vcpu);
+	kvm_x86_ops->set_rflags(vcpu, emulate_ctxt.eflags);
 
 	if (vcpu->mmio_is_write) {
 		vcpu->mmio_needed = 0;
@@ -1386,7 +1386,7 @@ int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
 
-	kvm_arch_ops->cache_regs(vcpu);
+	kvm_x86_ops->cache_regs(vcpu);
 	ret = -KVM_EINVAL;
 #ifdef CONFIG_X86_64
 	if (is_long_mode(vcpu)) {
@@ -1419,11 +1419,11 @@ int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		run->hypercall.args[5] = a5;
 		run->hypercall.ret = ret;
 		run->hypercall.longmode = is_long_mode(vcpu);
-		kvm_arch_ops->decache_regs(vcpu);
+		kvm_x86_ops->decache_regs(vcpu);
 		return 0;
 	}
 	vcpu->regs[VCPU_REGS_RAX] = ret;
-	kvm_arch_ops->decache_regs(vcpu);
+	kvm_x86_ops->decache_regs(vcpu);
 	return 1;
 }
 EXPORT_SYMBOL_GPL(kvm_hypercall);
@@ -1437,26 +1437,26 @@ void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
 	struct descriptor_table dt = { limit, base };
 
-	kvm_arch_ops->set_gdt(vcpu, &dt);
+	kvm_x86_ops->set_gdt(vcpu, &dt);
 }
 
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
 	struct descriptor_table dt = { limit, base };
 
-	kvm_arch_ops->set_idt(vcpu, &dt);
+	kvm_x86_ops->set_idt(vcpu, &dt);
 }
 
 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
 		   unsigned long *rflags)
 {
 	lmsw(vcpu, msw);
-	*rflags = kvm_arch_ops->get_rflags(vcpu);
+	*rflags = kvm_x86_ops->get_rflags(vcpu);
 }
 
 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 {
-	kvm_arch_ops->decache_cr4_guest_bits(vcpu);
+	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
 	switch (cr) {
 	case 0:
 		return vcpu->cr0;
@@ -1478,7 +1478,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 	switch (cr) {
 	case 0:
 		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
-		*rflags = kvm_arch_ops->get_rflags(vcpu);
+		*rflags = kvm_x86_ops->get_rflags(vcpu);
 		break;
 	case 2:
 		vcpu->cr2 = val;
@@ -1552,7 +1552,7 @@ static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
 	mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
 	hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
 				KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
-	kvm_arch_ops->patch_hypercall(vcpu, hypercall);
+	kvm_x86_ops->patch_hypercall(vcpu, hypercall);
 	kunmap_atomic(hypercall, KM_USER1);
 
 	para_state->ret = 0;
@@ -1619,7 +1619,7 @@ EXPORT_SYMBOL_GPL(kvm_get_msr_common);
  */
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
-	return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
+	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
 }
 
 #ifdef CONFIG_X86_64
@@ -1640,7 +1640,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		return;
 	}
 
-	kvm_arch_ops->set_efer(vcpu, efer);
+	kvm_x86_ops->set_efer(vcpu, efer);
 
 	efer &= ~EFER_LMA;
 	efer |= vcpu->shadow_efer & EFER_LMA;
@@ -1697,7 +1697,7 @@ EXPORT_SYMBOL_GPL(kvm_set_msr_common);
  */
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
-	return kvm_arch_ops->set_msr(vcpu, msr_index, data);
+	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
 }
 
 void kvm_resched(struct kvm_vcpu *vcpu)
@@ -1714,7 +1714,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 	u32 function;
 	struct kvm_cpuid_entry *e, *best;
 
-	kvm_arch_ops->cache_regs(vcpu);
+	kvm_x86_ops->cache_regs(vcpu);
 	function = vcpu->regs[VCPU_REGS_RAX];
 	vcpu->regs[VCPU_REGS_RAX] = 0;
 	vcpu->regs[VCPU_REGS_RBX] = 0;
@@ -1740,8 +1740,8 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
 		vcpu->regs[VCPU_REGS_RDX] = best->edx;
 	}
-	kvm_arch_ops->decache_regs(vcpu);
-	kvm_arch_ops->skip_emulated_instruction(vcpu);
+	kvm_x86_ops->decache_regs(vcpu);
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
@@ -1776,7 +1776,7 @@ static int complete_pio(struct kvm_vcpu *vcpu)
 	long delta;
 	int r;
 
-	kvm_arch_ops->cache_regs(vcpu);
+	kvm_x86_ops->cache_regs(vcpu);
 
 	if (!io->string) {
 		if (io->in)
@@ -1786,7 +1786,7 @@ static int complete_pio(struct kvm_vcpu *vcpu)
 		if (io->in) {
 			r = pio_copy_data(vcpu);
 			if (r) {
-				kvm_arch_ops->cache_regs(vcpu);
+				kvm_x86_ops->cache_regs(vcpu);
 				return r;
 			}
 		}
@@ -1809,13 +1809,13 @@ static int complete_pio(struct kvm_vcpu *vcpu)
 			vcpu->regs[VCPU_REGS_RSI] += delta;
 	}
 
-	kvm_arch_ops->decache_regs(vcpu);
+	kvm_x86_ops->decache_regs(vcpu);
 
 	io->count -= io->cur_count;
 	io->cur_count = 0;
 
 	if (!io->count)
-		kvm_arch_ops->skip_emulated_instruction(vcpu);
+		kvm_x86_ops->skip_emulated_instruction(vcpu);
 	return 0;
 }
 
@@ -1871,9 +1871,9 @@ int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	vcpu->pio.guest_page_offset = 0;
 	vcpu->pio.rep = 0;
 
-	kvm_arch_ops->cache_regs(vcpu);
+	kvm_x86_ops->cache_regs(vcpu);
 	memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
-	kvm_arch_ops->decache_regs(vcpu);
+	kvm_x86_ops->decache_regs(vcpu);
 
 	pio_dev = vcpu_find_pio_dev(vcpu, port);
 	if (pio_dev) {
@@ -1908,7 +1908,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	vcpu->pio.rep = rep;
 
 	if (!count) {
-		kvm_arch_ops->skip_emulated_instruction(vcpu);
+		kvm_x86_ops->skip_emulated_instruction(vcpu);
 		return 1;
 	}
 
@@ -2012,12 +2012,12 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
-		kvm_arch_ops->cache_regs(vcpu);
+		kvm_x86_ops->cache_regs(vcpu);
 		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
-		kvm_arch_ops->decache_regs(vcpu);
+		kvm_x86_ops->decache_regs(vcpu);
 	}
 
-	r = kvm_arch_ops->run(vcpu, kvm_run);
+	r = kvm_x86_ops->run(vcpu, kvm_run);
 
 out:
 	if (vcpu->sigset_active)
@@ -2032,7 +2032,7 @@ static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
 {
 	vcpu_load(vcpu);
 
-	kvm_arch_ops->cache_regs(vcpu);
+	kvm_x86_ops->cache_regs(vcpu);
 
 	regs->rax = vcpu->regs[VCPU_REGS_RAX];
 	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
@@ -2054,7 +2054,7 @@ static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
 #endif
 
 	regs->rip = vcpu->rip;
-	regs->rflags = kvm_arch_ops->get_rflags(vcpu);
+	regs->rflags = kvm_x86_ops->get_rflags(vcpu);
 
 	/*
 	 * Don't leak debug flags in case they were set for guest debugging
@@ -2092,9 +2092,9 @@ static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
 #endif
 
 	vcpu->rip = regs->rip;
-	kvm_arch_ops->set_rflags(vcpu, regs->rflags);
+	kvm_x86_ops->set_rflags(vcpu, regs->rflags);
 
-	kvm_arch_ops->decache_regs(vcpu);
+	kvm_x86_ops->decache_regs(vcpu);
 
 	vcpu_put(vcpu);
 
@@ -2104,7 +2104,7 @@ static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
 static void get_segment(struct kvm_vcpu *vcpu,
 			struct kvm_segment *var, int seg)
 {
-	return kvm_arch_ops->get_segment(vcpu, var, seg);
+	return kvm_x86_ops->get_segment(vcpu, var, seg);
 }
 
 static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
@@ -2125,14 +2125,14 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
 	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
-	kvm_arch_ops->get_idt(vcpu, &dt);
+	kvm_x86_ops->get_idt(vcpu, &dt);
 	sregs->idt.limit = dt.limit;
 	sregs->idt.base = dt.base;
-	kvm_arch_ops->get_gdt(vcpu, &dt);
+	kvm_x86_ops->get_gdt(vcpu, &dt);
 	sregs->gdt.limit = dt.limit;
 	sregs->gdt.base = dt.base;
 
-	kvm_arch_ops->decache_cr4_guest_bits(vcpu);
+	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
 	sregs->cr0 = vcpu->cr0;
 	sregs->cr2 = vcpu->cr2;
 	sregs->cr3 = vcpu->cr3;
@@ -2144,7 +2144,7 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	if (irqchip_in_kernel(vcpu->kvm)) {
 		memset(sregs->interrupt_bitmap, 0,
 		       sizeof sregs->interrupt_bitmap);
-		pending_vec = kvm_arch_ops->get_irq(vcpu);
+		pending_vec = kvm_x86_ops->get_irq(vcpu);
 		if (pending_vec >= 0)
 			set_bit(pending_vec, (unsigned long *)sregs->interrupt_bitmap);
 	} else
@@ -2159,7 +2159,7 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 static void set_segment(struct kvm_vcpu *vcpu,
 			struct kvm_segment *var, int seg)
 {
-	return kvm_arch_ops->set_segment(vcpu, var, seg);
+	return kvm_x86_ops->set_segment(vcpu, var, seg);
 }
 
 static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -2173,10 +2173,10 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	dt.limit = sregs->idt.limit;
 	dt.base = sregs->idt.base;
-	kvm_arch_ops->set_idt(vcpu, &dt);
+	kvm_x86_ops->set_idt(vcpu, &dt);
 	dt.limit = sregs->gdt.limit;
 	dt.base = sregs->gdt.base;
-	kvm_arch_ops->set_gdt(vcpu, &dt);
+	kvm_x86_ops->set_gdt(vcpu, &dt);
 
 	vcpu->cr2 = sregs->cr2;
 	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
@@ -2186,18 +2186,18 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
 #ifdef CONFIG_X86_64
-	kvm_arch_ops->set_efer(vcpu, sregs->efer);
+	kvm_x86_ops->set_efer(vcpu, sregs->efer);
 #endif
 	kvm_set_apic_base(vcpu, sregs->apic_base);
 
-	kvm_arch_ops->decache_cr4_guest_bits(vcpu);
+	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
 
 	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
 	vcpu->cr0 = sregs->cr0;
-	kvm_arch_ops->set_cr0(vcpu, sregs->cr0);
+	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
 
 	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
-	kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
+	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
 	if (!is_long_mode(vcpu) && is_pae(vcpu))
 		load_pdptrs(vcpu, vcpu->cr3);
 
@@ -2218,7 +2218,7 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 			max_bits);
 		/* Only pending external irq is handled here */
 		if (pending_vec < max_bits) {
-			kvm_arch_ops->set_irq(vcpu, pending_vec);
+			kvm_x86_ops->set_irq(vcpu, pending_vec);
 			printk("Set back pending irq %d\n", pending_vec);
 		}
 	}
@@ -2411,7 +2411,7 @@ static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
 
 	vcpu_load(vcpu);
 
-	r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
+	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
 
 	vcpu_put(vcpu);
 
@@ -2493,7 +2493,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 	if (!valid_vcpu(n))
 		return -EINVAL;
 
-	vcpu = kvm_arch_ops->vcpu_create(kvm, n);
+	vcpu = kvm_x86_ops->vcpu_create(kvm, n);
 	if (IS_ERR(vcpu))
 		return PTR_ERR(vcpu);
 
@@ -2534,7 +2534,7 @@ mmu_unload:
 	vcpu_put(vcpu);
 
 free_vcpu:
-	kvm_arch_ops->vcpu_free(vcpu);
+	kvm_x86_ops->vcpu_free(vcpu);
 	return r;
 }
 
@@ -3163,7 +3163,7 @@ static void decache_vcpus_on_cpu(int cpu)
 		 */
 		if (mutex_trylock(&vcpu->mutex)) {
 			if (vcpu->cpu == cpu) {
-				kvm_arch_ops->vcpu_decache(vcpu);
+				kvm_x86_ops->vcpu_decache(vcpu);
 				vcpu->cpu = -1;
 			}
 			mutex_unlock(&vcpu->mutex);
@@ -3179,7 +3179,7 @@ static void hardware_enable(void *junk)
 	if (cpu_isset(cpu, cpus_hardware_enabled))
 		return;
 	cpu_set(cpu, cpus_hardware_enabled);
-	kvm_arch_ops->hardware_enable(NULL);
+	kvm_x86_ops->hardware_enable(NULL);
 }
 
 static void hardware_disable(void *junk)
@@ -3190,7 +3190,7 @@ static void hardware_disable(void *junk)
 		return;
 	cpu_clear(cpu, cpus_hardware_enabled);
 	decache_vcpus_on_cpu(cpu);
-	kvm_arch_ops->hardware_disable(NULL);
+	kvm_x86_ops->hardware_disable(NULL);
 }
 
 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
@@ -3358,7 +3358,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
 {
 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-	kvm_arch_ops->vcpu_load(vcpu, cpu);
+	kvm_x86_ops->vcpu_load(vcpu, cpu);
 }
 
 static void kvm_sched_out(struct preempt_notifier *pn,
@@ -3366,16 +3366,16 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 {
 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-	kvm_arch_ops->vcpu_put(vcpu);
+	kvm_x86_ops->vcpu_put(vcpu);
 }
 
-int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
+int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
 		  struct module *module)
 {
 	int r;
 	int cpu;
 
-	if (kvm_arch_ops) {
+	if (kvm_x86_ops) {
 		printk(KERN_ERR "kvm: already loaded the other module\n");
 		return -EEXIST;
 	}
@@ -3389,15 +3389,15 @@ int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
 		return -EOPNOTSUPP;
 	}
 
-	kvm_arch_ops = ops;
+	kvm_x86_ops = ops;
 
-	r = kvm_arch_ops->hardware_setup();
+	r = kvm_x86_ops->hardware_setup();
 	if (r < 0)
 		goto out;
 
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu,
-				kvm_arch_ops->check_processor_compatibility,
+				kvm_x86_ops->check_processor_compatibility,
 				&r, 0, 1);
 		if (r < 0)
 			goto out_free_0;
@@ -3450,13 +3450,13 @@ out_free_2:
 out_free_1:
 	on_each_cpu(hardware_disable, NULL, 0, 1);
 out_free_0:
-	kvm_arch_ops->hardware_unsetup();
+	kvm_x86_ops->hardware_unsetup();
 out:
-	kvm_arch_ops = NULL;
+	kvm_x86_ops = NULL;
 	return r;
 }
 
-void kvm_exit_arch(void)
+void kvm_exit_x86(void)
 {
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
@@ -3465,8 +3465,8 @@ void kvm_exit_arch(void)
 	unregister_reboot_notifier(&kvm_reboot_notifier);
 	unregister_cpu_notifier(&kvm_cpu_notifier);
 	on_each_cpu(hardware_disable, NULL, 0, 1);
-	kvm_arch_ops->hardware_unsetup();
-	kvm_arch_ops = NULL;
+	kvm_x86_ops->hardware_unsetup();
+	kvm_x86_ops = NULL;
 }
 
 static __init int kvm_init(void)
@@ -3509,5 +3509,5 @@ static __exit void kvm_exit(void)
 module_init(kvm_init)
 module_exit(kvm_exit)
 
-EXPORT_SYMBOL_GPL(kvm_init_arch);
-EXPORT_SYMBOL_GPL(kvm_exit_arch);
+EXPORT_SYMBOL_GPL(kvm_init_x86);
+EXPORT_SYMBOL_GPL(kvm_exit_x86);
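Note: a minimal sketch of how an architecture backend would register through the renamed entry points. Only struct kvm_x86_ops, kvm_init_x86(), kvm_exit_x86(), and kvm_vcpu_cache come from the diff above; the module name, the empty ops table, and the vcpu size argument are hypothetical stand-ins for what a real backend (the VMX or SVM module) supplies.

/*
 * Hypothetical example module; not part of this commit.  A real
 * backend fills in every callback that kvm_main.c invokes through
 * kvm_x86_ops (vcpu_create, vcpu_load, run, set_cr0, ...).
 */
#include <linux/module.h>
#include "kvm.h"

static struct kvm_x86_ops demo_x86_ops = {
	/* .hardware_setup = demo_hardware_setup, etc. */
};

static int __init demo_init(void)
{
	/* vcpu_size tells kvm_main how large the backend's vcpu
	 * container is; kvm_vcpu_cache is sized from it. */
	return kvm_init_x86(&demo_x86_ops, sizeof(struct kvm_vcpu),
			    THIS_MODULE);
}

static void __exit demo_exit(void)
{
	kvm_exit_x86();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");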