author     Xiantao Zhang <xiantao.zhang@intel.com>   2008-12-17 21:23:58 -0500
committer  Avi Kivity <avi@redhat.com>               2009-03-24 05:02:50 -0400
commit     22ccb14203d59a8bcf6f3fea76b3594d710569fa (patch)
tree       996a3c55514e342f5ed9451719e3620cf266aede /arch/ia64/kvm/vcpu.c
parent     989c0f0ed56468a4aa48711cef5acf122a40d1dd (diff)
KVM: ia64: Code cleanup
Remove some unnecessary blank lines to accord with the kernel's coding style.
Also remove vcpu_get_itir_on_fault, since nothing references it.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/ia64/kvm/vcpu.c')
-rw-r--r--  arch/ia64/kvm/vcpu.c  39
1 file changed, 2 insertions, 37 deletions
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index ecd526b55323..4d8be4c252fa 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu)
 	return;
 }
 
-
 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
 {
 	unsigned long psr;
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
 	return;
 }
 
-
-
 /*
  * In physical mode, insert tc/tr for region 0 and 4 uses
  * RID[0] and RID[4] which is for physical mode emulation.
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs,
 	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
 }
 
-
 /*
  * The inverse of the above: given bspstore and the number of
  * registers, calculate ar.bsp.
@@ -1039,8 +1035,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
 	return key;
 }
 
-
-
 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long thash, vadr;
@@ -1050,7 +1044,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
 }
 
-
 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tag, vadr;
@@ -1131,7 +1124,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
 	return IA64_NO_FAULT;
 }
 
-
 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1, r3;
@@ -1154,7 +1146,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
 }
 
-
 /************************************
  * Insert/Purge translation register/cache
  ************************************/
@@ -1385,7 +1376,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_itc(vcpu, r2);
 }
 
-
 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1;
@@ -1393,8 +1383,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	r1 = vcpu_get_itc(vcpu);
 	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
 }
+
 /**************************************************************************
-  struct kvm_vcpu*protection key register access routines
+  struct kvm_vcpu protection key register access routines
 **************************************************************************/
 
 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
@@ -1407,20 +1398,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
 	ia64_set_pkr(reg, val);
 }
 
-
-unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
-{
-	union ia64_rr rr, rr1;
-
-	rr.val = vcpu_get_rr(vcpu, ifa);
-	rr1.val = 0;
-	rr1.ps = rr.ps;
-	rr1.rid = rr.rid;
-	return (rr1.val);
-}
-
-
-
 /********************************
  * Moves to privileged registers
  ********************************/
@@ -1464,8 +1441,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
 	return (IA64_NO_FAULT);
 }
 
-
-
 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r2;
@@ -1510,8 +1485,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_pkr(vcpu, r3, r2);
 }
 
-
-
 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r1;
@@ -1557,7 +1530,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
 }
 
-
 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
 {
 	/* FIXME: This could get called as a result of a rsvd-reg fault */
@@ -1609,7 +1581,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tgt = inst.M33.r1;
@@ -1633,8 +1604,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
-
 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 {
 
@@ -1776,9 +1745,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu)
 	}
 }
 
-
-
-
 void vcpu_rfi(struct kvm_vcpu *vcpu)
 {
 	unsigned long ifs, psr;
@@ -1796,7 +1762,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu)
 	regs->cr_iip = VCPU(vcpu, iip);
 }
 
-
 /*
    VPSR can't keep track of below bits of guest PSR
    This function gets guest PSR