author     Xiantao Zhang <xiantao.zhang@intel.com>  2008-12-17 21:23:58 -0500
committer  Avi Kivity <avi@redhat.com>              2009-03-24 05:02:50 -0400
commit     22ccb14203d59a8bcf6f3fea76b3594d710569fa (patch)
tree       996a3c55514e342f5ed9451719e3620cf266aede /arch/ia64/kvm
parent     989c0f0ed56468a4aa48711cef5acf122a40d1dd (diff)
KVM: ia64: Code cleanup
Remove some unnecessary blank lines to accord with the kernel's coding style.
Also remove vcpu_get_itir_on_fault, since nothing references it.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
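For context, the dropped vcpu.c helper (a static twin with the same body remains in process.c, visible in the first hunk below) packed the page size and RID of the region register covering the faulting address into an ITIR-format value. Below is the deleted code reproduced from the hunk, with explanatory comments added for illustration:

    unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
    {
            union ia64_rr rr, rr1;

            rr.val = vcpu_get_rr(vcpu, ifa);  /* region register covering ifa */
            rr1.val = 0;
            rr1.ps = rr.ps;                   /* carry over the page size */
            rr1.rid = rr.rid;                 /* carry over the region ID */
            return (rr1.val);                 /* ITIR-shaped value; other fields zero */
    }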
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--  arch/ia64/kvm/process.c  15
-rw-r--r--  arch/ia64/kvm/vcpu.c     39
-rw-r--r--  arch/ia64/kvm/vtlb.c      5
3 files changed, 2 insertions(+), 57 deletions(-)
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 230eae482f3..f9c9504144f 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -167,7 +167,6 @@ static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
 	return (rr1.val);
 }
 
-
 /*
  * Set vIFA & vITIR & vIHA, when vPSR.ic =1
  * Parameter:
@@ -222,8 +221,6 @@ void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
 }
 
-
-
 /*
  * Data Nested TLB Fault
  * @ Data Nested TLB Vector
@@ -245,7 +242,6 @@ void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
 }
 
-
 /*
  * Data TLB Fault
  * @ Data TLB vector
@@ -265,8 +261,6 @@ static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	/* If vPSR.ic, IFA, ITIR, IHA*/
 	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
 	inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
-
-
 }
 
 /*
@@ -279,7 +273,6 @@ void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	_vhpt_fault(vcpu, vadr);
 }
 
-
 /*
  * VHPT Data Fault
  * @ VHPT Translation vector
@@ -290,8 +283,6 @@ void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	_vhpt_fault(vcpu, vadr);
 }
 
-
-
 /*
  * Deal with:
  *	General Exception vector
@@ -301,7 +292,6 @@ void _general_exception(struct kvm_vcpu *vcpu)
 	inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
 }
 
-
 /*
  * Illegal Operation Fault
  * @ General Exception Vector
@@ -419,19 +409,16 @@ static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
 }
 
-
 void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
 	__page_not_present(vcpu, vadr);
 }
 
-
 void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
 	__page_not_present(vcpu, vadr);
 }
 
-
 /* Deal with
  * Data access rights vector
  */
@@ -703,7 +690,6 @@ void vhpi_detection(struct kvm_vcpu *vcpu)
 	}
 }
 
-
 void leave_hypervisor_tail(void)
 {
 	struct kvm_vcpu *v = current_vcpu;
@@ -737,7 +723,6 @@ void leave_hypervisor_tail(void)
 	}
 }
 
-
 static inline void handle_lds(struct kvm_pt_regs *regs)
 {
 	regs->cr_ipsr |= IA64_PSR_ED;
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index ecd526b5532..4d8be4c252f 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu)
 	return;
 }
 
-
 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
 {
 	unsigned long psr;
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
 	return;
 }
 
-
-
 /*
  * In physical mode, insert tc/tr for region 0 and 4 uses
  * RID[0] and RID[4] which is for physical mode emulation.
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs,
 	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
 }
 
-
 /*
  * The inverse of the above: given bspstore and the number of
  * registers, calculate ar.bsp.
@@ -1039,8 +1035,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
 	return key;
 }
 
-
-
 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long thash, vadr;
@@ -1050,7 +1044,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
 }
 
-
 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tag, vadr;
@@ -1131,7 +1124,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
 	return IA64_NO_FAULT;
 }
 
-
 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1, r3;
@@ -1154,7 +1146,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
 }
 
-
 /************************************
  * Insert/Purge translation register/cache
  ************************************/
@@ -1385,7 +1376,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_itc(vcpu, r2);
 }
 
-
 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1;
@@ -1393,8 +1383,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	r1 = vcpu_get_itc(vcpu);
 	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
 }
+
 /**************************************************************************
- struct kvm_vcpu*protection key register access routines
+  struct kvm_vcpu protection key register access routines
 **************************************************************************/
 
 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
@@ -1407,20 +1398,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
 	ia64_set_pkr(reg, val);
 }
 
-
-unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
-{
-	union ia64_rr rr, rr1;
-
-	rr.val = vcpu_get_rr(vcpu, ifa);
-	rr1.val = 0;
-	rr1.ps = rr.ps;
-	rr1.rid = rr.rid;
-	return (rr1.val);
-}
-
-
-
 /********************************
  * Moves to privileged registers
  ********************************/
@@ -1464,8 +1441,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
 	return (IA64_NO_FAULT);
 }
 
-
-
 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r2;
@@ -1510,8 +1485,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_pkr(vcpu, r3, r2);
 }
 
-
-
 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r1;
@@ -1557,7 +1530,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
 }
 
-
 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
 {
 	/* FIXME: This could get called as a result of a rsvd-reg fault */
@@ -1609,7 +1581,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tgt = inst.M33.r1;
@@ -1633,8 +1604,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
-
 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 {
 
@@ -1776,9 +1745,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu)
 	}
 }
 
-
-
-
 void vcpu_rfi(struct kvm_vcpu *vcpu)
 {
 	unsigned long ifs, psr;
@@ -1796,7 +1762,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu)
 	regs->cr_iip = VCPU(vcpu, iip);
 }
 
-
 /*
    VPSR can't keep track of below bits of guest PSR
    This function gets guest PSR
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 6b6307a3bd5..ac94867f826 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -509,7 +509,6 @@ void thash_purge_all(struct kvm_vcpu *v)
 	local_flush_tlb_all();
 }
 
-
 /*
  * Lookup the hash table and its collision chain to find an entry
  * covering this address rid:va or the entry.
@@ -517,7 +516,6 @@ void thash_purge_all(struct kvm_vcpu *v)
  * INPUT:
  *  in: TLB format for both VHPT & TLB.
  */
-
 struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 {
 	struct thash_data *cch;
@@ -547,7 +545,6 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 	return NULL;
 }
 
-
 /*
  * Initialize internal control data before service.
  */
@@ -589,7 +586,6 @@ u64 kvm_gpa_to_mpa(u64 gpa)
 	return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
 }
 
-
 /*
  * Fetch guest bundle code.
  * INPUT:
@@ -631,7 +627,6 @@ int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
 	return IA64_NO_FAULT;
 }
 
-
 void kvm_init_vhpt(struct kvm_vcpu *v)
 {
 	v->arch.vhpt.num = VHPT_NUM_ENTRIES;