about summary refs log tree commit diff stats
path: root/drivers/kvm/svm.c
diff options
context:
space:
mode:
author    Avi Kivity <avi@qumranet.com>  2007-09-10 11:10:54 -0400
committer Avi Kivity <avi@qumranet.com>  2007-10-13 04:18:28 -0400
commit    04d2cc7780d48a212843e38d46402d97fa1f4774 (patch)
tree      a209131bad59abcf574abbaae23145db3c4005e0  /drivers/kvm/svm.c
parent    29bd8a78082f2d7e2165a735f50b5c716ef3213b (diff)
KVM: Move main vcpu loop into subarch independent code
This simplifies adding new code as well as reducing overall code size.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/svm.c')
-rw-r--r--  drivers/kvm/svm.c  |  142
1 file changed, 39 insertions(+), 103 deletions(-)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 7b22d396c149..95681ea16382 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -22,7 +22,6 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/highmem.h> 24#include <linux/highmem.h>
25#include <linux/profile.h>
26#include <linux/sched.h> 25#include <linux/sched.h>
27 26
28#include <asm/desc.h> 27#include <asm/desc.h>
@@ -50,6 +49,8 @@ MODULE_LICENSE("GPL");
50#define SVM_FEATURE_LBRV (1 << 1) 49#define SVM_FEATURE_LBRV (1 << 1)
51#define SVM_DEATURE_SVML (1 << 2) 50#define SVM_DEATURE_SVML (1 << 2)
52 51
52static void kvm_reput_irq(struct vcpu_svm *svm);
53
53static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) 54static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
54{ 55{
55 return container_of(vcpu, struct vcpu_svm, vcpu); 56 return container_of(vcpu, struct vcpu_svm, vcpu);
@@ -555,6 +556,13 @@ static void init_vmcb(struct vmcb *vmcb)
555 /* rdx = ?? */ 556 /* rdx = ?? */
556} 557}
557 558
559static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
560{
561 struct vcpu_svm *svm = to_svm(vcpu);
562
563 init_vmcb(svm->vmcb);
564}
565
558static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) 566static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
559{ 567{
560 struct vcpu_svm *svm; 568 struct vcpu_svm *svm;
@@ -1252,10 +1260,20 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
1252}; 1260};
1253 1261
1254 1262
1255static int handle_exit(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1263static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1256{ 1264{
1265 struct vcpu_svm *svm = to_svm(vcpu);
1257 u32 exit_code = svm->vmcb->control.exit_code; 1266 u32 exit_code = svm->vmcb->control.exit_code;
1258 1267
1268 kvm_reput_irq(svm);
1269
1270 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
1271 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1272 kvm_run->fail_entry.hardware_entry_failure_reason
1273 = svm->vmcb->control.exit_code;
1274 return 0;
1275 }
1276
1259 if (is_external_interrupt(svm->vmcb->control.exit_int_info) && 1277 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
1260 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR) 1278 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
1261 printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x " 1279 printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
@@ -1313,11 +1331,11 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
1313 svm_inject_irq(svm, irq); 1331 svm_inject_irq(svm, irq);
1314} 1332}
1315 1333
1316static void svm_intr_assist(struct vcpu_svm *svm) 1334static void svm_intr_assist(struct kvm_vcpu *vcpu)
1317{ 1335{
1336 struct vcpu_svm *svm = to_svm(vcpu);
1318 struct vmcb *vmcb = svm->vmcb; 1337 struct vmcb *vmcb = svm->vmcb;
1319 int intr_vector = -1; 1338 int intr_vector = -1;
1320 struct kvm_vcpu *vcpu = &svm->vcpu;
1321 1339
1322 kvm_inject_pending_timer_irqs(vcpu); 1340 kvm_inject_pending_timer_irqs(vcpu);
1323 if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) && 1341 if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
@@ -1376,9 +1394,10 @@ static void svm_do_inject_vector(struct vcpu_svm *svm)
1376 svm_inject_irq(svm, irq); 1394 svm_inject_irq(svm, irq);
1377} 1395}
1378 1396
1379static void do_interrupt_requests(struct vcpu_svm *svm, 1397static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1380 struct kvm_run *kvm_run) 1398 struct kvm_run *kvm_run)
1381{ 1399{
1400 struct vcpu_svm *svm = to_svm(vcpu);
1382 struct vmcb_control_area *control = &svm->vmcb->control; 1401 struct vmcb_control_area *control = &svm->vmcb->control;
1383 1402
1384 svm->vcpu.interrupt_window_open = 1403 svm->vcpu.interrupt_window_open =
@@ -1401,35 +1420,6 @@ static void do_interrupt_requests(struct vcpu_svm *svm,
1401 control->intercept &= ~(1ULL << INTERCEPT_VINTR); 1420 control->intercept &= ~(1ULL << INTERCEPT_VINTR);
1402} 1421}
1403 1422
1404static void post_kvm_run_save(struct vcpu_svm *svm,
1405 struct kvm_run *kvm_run)
1406{
1407 if (irqchip_in_kernel(svm->vcpu.kvm))
1408 kvm_run->ready_for_interrupt_injection = 1;
1409 else
1410 kvm_run->ready_for_interrupt_injection =
1411 (svm->vcpu.interrupt_window_open &&
1412 svm->vcpu.irq_summary == 0);
1413 kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
1414 kvm_run->cr8 = get_cr8(&svm->vcpu);
1415 kvm_run->apic_base = kvm_get_apic_base(&svm->vcpu);
1416}
1417
1418/*
1419 * Check if userspace requested an interrupt window, and that the
1420 * interrupt window is open.
1421 *
1422 * No need to exit to userspace if we already have an interrupt queued.
1423 */
1424static int dm_request_for_irq_injection(struct vcpu_svm *svm,
1425 struct kvm_run *kvm_run)
1426{
1427 return (!svm->vcpu.irq_summary &&
1428 kvm_run->request_interrupt_window &&
1429 svm->vcpu.interrupt_window_open &&
1430 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
1431}
1432
1433static void save_db_regs(unsigned long *db_regs) 1423static void save_db_regs(unsigned long *db_regs)
1434{ 1424{
1435 asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0])); 1425 asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
@@ -1451,38 +1441,16 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu)
1451 force_new_asid(vcpu); 1441 force_new_asid(vcpu);
1452} 1442}
1453 1443
1454static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1444static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
1445{
1446}
1447
1448static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1455{ 1449{
1456 struct vcpu_svm *svm = to_svm(vcpu); 1450 struct vcpu_svm *svm = to_svm(vcpu);
1457 u16 fs_selector; 1451 u16 fs_selector;
1458 u16 gs_selector; 1452 u16 gs_selector;
1459 u16 ldt_selector; 1453 u16 ldt_selector;
1460 int r;
1461
1462again:
1463 r = kvm_mmu_reload(vcpu);
1464 if (unlikely(r))
1465 return r;
1466
1467 clgi();
1468
1469 if (signal_pending(current)) {
1470 stgi();
1471 ++vcpu->stat.signal_exits;
1472 post_kvm_run_save(svm, kvm_run);
1473 kvm_run->exit_reason = KVM_EXIT_INTR;
1474 return -EINTR;
1475 }
1476
1477 if (irqchip_in_kernel(vcpu->kvm))
1478 svm_intr_assist(svm);
1479 else if (!vcpu->mmio_read_completed)
1480 do_interrupt_requests(svm, kvm_run);
1481
1482 vcpu->guest_mode = 1;
1483 if (vcpu->requests)
1484 if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
1485 svm_flush_tlb(vcpu);
1486 1454
1487 pre_svm_run(svm); 1455 pre_svm_run(svm);
1488 1456
@@ -1501,10 +1469,9 @@ again:
1501 load_db_regs(svm->db_regs); 1469 load_db_regs(svm->db_regs);
1502 } 1470 }
1503 1471
1504 if (vcpu->fpu_active) { 1472 clgi();
1505 fx_save(&vcpu->host_fx_image); 1473
1506 fx_restore(&vcpu->guest_fx_image); 1474 local_irq_enable();
1507 }
1508 1475
1509 asm volatile ( 1476 asm volatile (
1510#ifdef CONFIG_X86_64 1477#ifdef CONFIG_X86_64
@@ -1612,12 +1579,9 @@ again:
1612#endif 1579#endif
1613 : "cc", "memory" ); 1580 : "cc", "memory" );
1614 1581
1615 vcpu->guest_mode = 0; 1582 local_irq_disable();
1616 1583
1617 if (vcpu->fpu_active) { 1584 stgi();
1618 fx_save(&vcpu->guest_fx_image);
1619 fx_restore(&vcpu->host_fx_image);
1620 }
1621 1585
1622 if ((svm->vmcb->save.dr7 & 0xff)) 1586 if ((svm->vmcb->save.dr7 & 0xff))
1623 load_db_regs(svm->host_db_regs); 1587 load_db_regs(svm->host_db_regs);
@@ -1635,40 +1599,7 @@ again:
1635 1599
1636 reload_tss(vcpu); 1600 reload_tss(vcpu);
1637 1601
1638 /*
1639 * Profile KVM exit RIPs:
1640 */
1641 if (unlikely(prof_on == KVM_PROFILING))
1642 profile_hit(KVM_PROFILING,
1643 (void *)(unsigned long)svm->vmcb->save.rip);
1644
1645 stgi();
1646
1647 kvm_reput_irq(svm);
1648
1649 svm->next_rip = 0; 1602 svm->next_rip = 0;
1650
1651 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
1652 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1653 kvm_run->fail_entry.hardware_entry_failure_reason
1654 = svm->vmcb->control.exit_code;
1655 post_kvm_run_save(svm, kvm_run);
1656 return 0;
1657 }
1658
1659 r = handle_exit(svm, kvm_run);
1660 if (r > 0) {
1661 if (dm_request_for_irq_injection(svm, kvm_run)) {
1662 ++vcpu->stat.request_irq_exits;
1663 post_kvm_run_save(svm, kvm_run);
1664 kvm_run->exit_reason = KVM_EXIT_INTR;
1665 return -EINTR;
1666 }
1667 kvm_resched(vcpu);
1668 goto again;
1669 }
1670 post_kvm_run_save(svm, kvm_run);
1671 return r;
1672} 1603}
1673 1604
1674static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) 1605static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -1752,7 +1683,9 @@ static struct kvm_x86_ops svm_x86_ops = {
1752 1683
1753 .vcpu_create = svm_create_vcpu, 1684 .vcpu_create = svm_create_vcpu,
1754 .vcpu_free = svm_free_vcpu, 1685 .vcpu_free = svm_free_vcpu,
1686 .vcpu_reset = svm_vcpu_reset,
1755 1687
1688 .prepare_guest_switch = svm_prepare_guest_switch,
1756 .vcpu_load = svm_vcpu_load, 1689 .vcpu_load = svm_vcpu_load,
1757 .vcpu_put = svm_vcpu_put, 1690 .vcpu_put = svm_vcpu_put,
1758 .vcpu_decache = svm_vcpu_decache, 1691 .vcpu_decache = svm_vcpu_decache,
@@ -1786,10 +1719,13 @@ static struct kvm_x86_ops svm_x86_ops = {
1786 .inject_gp = svm_inject_gp, 1719 .inject_gp = svm_inject_gp,
1787 1720
1788 .run = svm_vcpu_run, 1721 .run = svm_vcpu_run,
1722 .handle_exit = handle_exit,
1789 .skip_emulated_instruction = skip_emulated_instruction, 1723 .skip_emulated_instruction = skip_emulated_instruction,
1790 .patch_hypercall = svm_patch_hypercall, 1724 .patch_hypercall = svm_patch_hypercall,
1791 .get_irq = svm_get_irq, 1725 .get_irq = svm_get_irq,
1792 .set_irq = svm_set_irq, 1726 .set_irq = svm_set_irq,
1727 .inject_pending_irq = svm_intr_assist,
1728 .inject_pending_vectors = do_interrupt_requests,
1793}; 1729};
1794 1730
1795static int __init svm_init(void) 1731static int __init svm_init(void)