path: root/arch/ia64/kvm/vcpu.c
Diffstat (limited to 'arch/ia64/kvm/vcpu.c')
 arch/ia64/kvm/vcpu.c | 44 ++++++--------------------------------------
 1 file changed, 6 insertions(+), 38 deletions(-)
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index ecd526b5532..d4d28050587 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu)
 	return;
 }
 
-
 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
 {
 	unsigned long psr;
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
 	return;
 }
 
-
-
 /*
  * In physical mode, insert tc/tr for region 0 and 4 uses
  * RID[0] and RID[4] which is for physical mode emulation.
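
The region-ID selection this comment describes can be pictured with a small helper. This is illustration only, not code from the patch, and the metaphysical_rr0/metaphysical_rr4 field names are an assumption based on the ia64 KVM host structures:

/*
 * Hypothetical helper: pick the emulation RID for a physical-mode
 * access. The region is the top three bits of the address; while the
 * guest runs in physical mode only regions 0 and 4 are used, each
 * backed by its own reserved RID.
 */
static unsigned long phys_mode_rid(struct kvm_vcpu *vcpu, unsigned long addr)
{
	return ((addr >> 61) == 4) ? vcpu->arch.metaphysical_rr4
				   : vcpu->arch.metaphysical_rr0;
}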
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs,
 	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
 }
 
-
 /*
  * The inverse of the above: given bspstore and the number of
  * registers, calculate ar.bsp.
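
The calculation this comment refers to has one wrinkle: the RSE backing store inserts a NaT-collection word after every 63 register slots, so stepping over n registers can cross extra words. A minimal sketch, mirroring the ia64_rse_slot_num()/ia64_rse_skip_regs() helpers from asm/rse.h (that these are the helpers used here is an assumption):

/* Slot number (0..63) of a backing-store address; slot 63 holds the
 * NaT-collection word. */
static inline unsigned long ia64_rse_slot_num(unsigned long *addr)
{
	return (((unsigned long)addr) >> 3) & 0x3f;
}

/* Advance addr by num_regs register slots, accounting for the
 * NaT-collection words crossed on the way. */
static inline unsigned long *ia64_rse_skip_regs(unsigned long *addr,
						long num_regs)
{
	long delta = ia64_rse_slot_num(addr) + num_regs;

	if (num_regs < 0)
		delta -= 62;
	return addr + num_regs + delta / 63;
}

Given those, ar.bsp for bspstore plus num_regs dirty registers is ia64_rse_skip_regs(bspstore, num_regs).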
@@ -811,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 {
 	struct kvm_vcpu *v;
+	struct kvm *kvm;
 	int i;
 	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
 	unsigned long vitv = VCPU(vcpu, itv);
 
+	kvm = (struct kvm *)KVM_VM_BASE;
+
 	if (vcpu->vcpu_id == 0) {
-		for (i = 0; i < KVM_MAX_VCPUS; i++) {
+		for (i = 0; i < kvm->arch.online_vcpus; i++) {
 			v = (struct kvm_vcpu *)((char *)vcpu +
 				sizeof(struct kvm_vcpu_data) * i);
 			VMX(v, itc_offset) = itc_offset;
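
Two details make this hunk work, both visible in the lines above: the VM control block sits at the fixed address KVM_VM_BASE, and the vcpu slots are packed back to back, sizeof(struct kvm_vcpu_data) apart, so vcpu 0 reaches every other vcpu by plain offset arithmetic. A sketch of that lookup, with a hypothetical helper name:

/*
 * Hypothetical helper (not in the patch): locate vcpu i relative to
 * vcpu 0. Valid only because ia64 KVM lays each vcpu's kvm_vcpu_data
 * out contiguously in the VM area, as the loop above relies on.
 */
static struct kvm_vcpu *vcpu_by_index(struct kvm_vcpu *vcpu0, int i)
{
	return (struct kvm_vcpu *)((char *)vcpu0 +
			sizeof(struct kvm_vcpu_data) * i);
}

Bounding the loop by kvm->arch.online_vcpus instead of KVM_MAX_VCPUS keeps vcpu 0 from writing an itc_offset into slots whose vcpus were never created.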
@@ -1039,8 +1038,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
 	return key;
 }
 
-
-
 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long thash, vadr;
@@ -1050,7 +1047,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
 }
 
-
 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tag, vadr;
@@ -1131,7 +1127,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
 	return IA64_NO_FAULT;
 }
 
-
 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1, r3;
@@ -1154,7 +1149,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
 }
 
-
 /************************************
  * Insert/Purge translation register/cache
  ************************************/
@@ -1385,7 +1379,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_itc(vcpu, r2);
 }
 
-
 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1;
@@ -1393,8 +1386,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	r1 = vcpu_get_itc(vcpu);
 	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
 }
+
 /**************************************************************************
- struct kvm_vcpu*protection key register access routines
+ struct kvm_vcpu protection key register access routines
 **************************************************************************/
 
 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
@@ -1407,20 +1401,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
 	ia64_set_pkr(reg, val);
 }
 
-
-unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
-{
-	union ia64_rr rr, rr1;
-
-	rr.val = vcpu_get_rr(vcpu, ifa);
-	rr1.val = 0;
-	rr1.ps = rr.ps;
-	rr1.rid = rr.rid;
-	return (rr1.val);
-}
-
-
-
 /********************************
  * Moves to privileged registers
  ********************************/
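
The deleted vcpu_get_itir_on_fault() composed a default ITIR for a fault by copying only the page-size (ps) and region-ID (rid) fields out of the region register covering ifa and zeroing the rest. That reads most easily with the bitfield view of a region register in mind; the sketch below follows the architected RR format, though the exact field names are an assumption, not something shown in this diff:

/* Region-register bit layout (architected Itanium format). */
union ia64_rr {
	struct {
		unsigned long ve        :  1;	/* VHPT walker enable */
		unsigned long reserved0 :  1;
		unsigned long ps        :  6;	/* log2 of the page size */
		unsigned long rid       : 24;	/* region identifier */
		unsigned long reserved1 : 32;
	};
	unsigned long val;
};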
@@ -1464,8 +1444,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
 	return (IA64_NO_FAULT);
 }
 
-
-
 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r2;
@@ -1510,8 +1488,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_pkr(vcpu, r3, r2);
 }
 
-
-
 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r1;
@@ -1557,7 +1533,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
 }
 
-
 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
 {
 	/* FIXME: This could get called as a result of a rsvd-reg fault */
@@ -1609,7 +1584,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tgt = inst.M33.r1;
@@ -1633,8 +1607,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
-
 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 {
 
@@ -1776,9 +1748,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu)
 	}
 }
 
-
-
-
 void vcpu_rfi(struct kvm_vcpu *vcpu)
 {
 	unsigned long ifs, psr;
@@ -1796,7 +1765,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu)
 	regs->cr_iip = VCPU(vcpu, iip);
 }
 
-
 /*
  VPSR can't keep track of below bits of guest PSR
  This function gets guest PSR