author		Joerg Roedel <joerg.roedel@amd.com>	2009-08-07 05:49:35 -0400
committer	Avi Kivity <avi@redhat.com>	2009-09-10 01:33:24 -0400
commit		4c2161aed55c294c4c42622455f067a4b3077b85 (patch)
tree		adce92cc511e513fe1dd28a1e0f788172726e814 /arch/x86/kvm
parent		aad42c641cfcda4f87abc4f6588329b9b3cc3364 (diff)
KVM: SVM: consolidate nested_svm_exit_handled
When caching guest intercepts there is no need anymore for the
nested_svm_exit_handled_real function. So move its code into
nested_svm_exit_handled.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
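Note on the surviving MSR check: the helper kept by this patch, nested_svm_exit_handled_msr (now dispatched first from nested_svm_exit_handled), boils down to computing a byte/bit offset into the nested guest's MSR permission map and testing that bit. The standalone sketch below lifts just that offset calculation out of the diff so it can be compiled and inspected in isolation; msrpm_bit() and the EFER example are illustrative additions, not part of the patch, and the case-range syntax is a GCC extension, as in the kernel source.

/*
 * Minimal standalone sketch (not kernel code): reproduces the msrpm
 * offset math from nested_svm_exit_handled_msr in the diff below.
 * "param" mirrors exit_info_1 & 1 (0 = read intercept, 1 = write).
 */
#include <stdio.h>
#include <stdint.h>

static int msrpm_bit(uint32_t msr, uint32_t param,
		     uint32_t *byte, uint32_t *bit)
{
	uint32_t t0, t1;

	switch (msr) {
	case 0 ... 0x1fff:
		t0 = (msr * 2) % 8;
		t1 = msr / 8;
		break;
	case 0xc0000000 ... 0xc0001fff:
		t0 = (8192 + msr - 0xc0000000) * 2;
		t1 = t0 / 8;
		t0 %= 8;
		break;
	case 0xc0010000 ... 0xc0011fff:
		t0 = (16384 + msr - 0xc0010000) * 2;
		t1 = t0 / 8;
		t0 %= 8;
		break;
	default:
		return -1;	/* MSR outside the mapped ranges */
	}

	*byte = t1;
	*bit  = t0 + param;	/* the kernel code tests (1 << param) << t0 */
	return 0;
}

int main(void)
{
	uint32_t byte, bit;

	/* EFER (0xc0000080) write intercept, purely as an example value */
	if (!msrpm_bit(0xc0000080, 1, &byte, &bit))
		printf("EFER write -> msrpm byte %u, bit %u\n", byte, bit);
	return 0;
}

Built with gcc, the example prints the msrpm byte and bit that nested_svm_exit_handled_msr would test for an intercepted EFER write.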
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/svm.c	109
1 file changed, 49 insertions(+), 60 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4426c631057b..bdd73fd9a75c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1456,15 +1456,58 @@ static int nested_svm_do(struct vcpu_svm *svm,
 	return retval;
 }
 
-static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
-				       void *arg1,
-				       void *arg2,
-				       void *opaque)
+static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
+				       void *arg1, void *arg2,
+				       void *opaque)
+{
+	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
+	u8 *msrpm = (u8 *)arg2;
+	u32 t0, t1;
+	u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+	u32 param = svm->vmcb->control.exit_info_1 & 1;
+
+	if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
+		return 0;
+
+	switch (msr) {
+	case 0 ... 0x1fff:
+		t0 = (msr * 2) % 8;
+		t1 = msr / 8;
+		break;
+	case 0xc0000000 ... 0xc0001fff:
+		t0 = (8192 + msr - 0xc0000000) * 2;
+		t1 = (t0 / 8);
+		t0 %= 8;
+		break;
+	case 0xc0010000 ... 0xc0011fff:
+		t0 = (16384 + msr - 0xc0010000) * 2;
+		t1 = (t0 / 8);
+		t0 %= 8;
+		break;
+	default:
+		return 1;
+		break;
+	}
+	if (msrpm[t1] & ((1 << param) << t0))
+		return 1;
+
+	return 0;
+}
+
+static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
 {
-	bool kvm_overrides = *(bool *)opaque;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
-	if (kvm_overrides) {
+	switch (svm->vmcb->control.exit_code) {
+	case SVM_EXIT_MSR:
+		return nested_svm_do(svm, svm->nested.vmcb,
+				     svm->nested.vmcb_msrpm, NULL,
+				     nested_svm_exit_handled_msr);
+	default:
+		break;
+	}
+
+	if (kvm_override) {
 		switch (exit_code) {
 		case SVM_EXIT_INTR:
 		case SVM_EXIT_NMI:
@@ -1526,60 +1569,6 @@ static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
 	return 0;
 }
 
-static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
-				       void *arg1, void *arg2,
-				       void *opaque)
-{
-	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
-	u8 *msrpm = (u8 *)arg2;
-	u32 t0, t1;
-	u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
-	u32 param = svm->vmcb->control.exit_info_1 & 1;
-
-	if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
-		return 0;
-
-	switch(msr) {
-	case 0 ... 0x1fff:
-		t0 = (msr * 2) % 8;
-		t1 = msr / 8;
-		break;
-	case 0xc0000000 ... 0xc0001fff:
-		t0 = (8192 + msr - 0xc0000000) * 2;
-		t1 = (t0 / 8);
-		t0 %= 8;
-		break;
-	case 0xc0010000 ... 0xc0011fff:
-		t0 = (16384 + msr - 0xc0010000) * 2;
-		t1 = (t0 / 8);
-		t0 %= 8;
-		break;
-	default:
-		return 1;
-		break;
-	}
-	if (msrpm[t1] & ((1 << param) << t0))
-		return 1;
-
-	return 0;
-}
-
-static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
-{
-	bool k = kvm_override;
-
-	switch (svm->vmcb->control.exit_code) {
-	case SVM_EXIT_MSR:
-		return nested_svm_do(svm, svm->nested.vmcb,
-				     svm->nested.vmcb_msrpm, NULL,
-				     nested_svm_exit_handled_msr);
-	default: break;
-	}
-
-	return nested_svm_do(svm, svm->nested.vmcb, 0, &k,
-			     nested_svm_exit_handled_real);
-}
-
 static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
 {
 	struct vmcb_control_area *dst = &dst_vmcb->control;