Diffstat (limited to 'arch/ia64/kvm/vtlb.c')

 arch/ia64/kvm/vtlb.c | 44 +++++++++++++++++---------------------------
 1 file changed, 17 insertions(+), 27 deletions(-)
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 6b6307a3bd55..38232b37668b 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
 	unsigned long ps, gpaddr;
 
 	ps = itir_ps(itir);
+	rr.val = ia64_get_rr(ifa);
 
 	gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
 					(ifa & ((1UL << ps) - 1));
 
-	rr.val = ia64_get_rr(ifa);
 	head = (struct thash_data *)ia64_thash(ifa);
 	head->etag = INVALID_TI_TAG;
 	ia64_mf();
@@ -412,16 +412,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 
 /*
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
  * Notes: Only TC entry can purge and insert.
- * 1 indicates this is MMIO
  */
-int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 						u64 ifa, int type)
 {
 	u64 ps;
 	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
-	int ret = 0;
 
 	ps = itir_ps(itir);
 	vrr.val = vcpu_get_rr(v, ifa);
@@ -441,25 +439,19 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
 
-	if (pte & VTLB_PTE_IO)
-		ret = 1;
-
 	vtlb_purge(v, ifa, ps);
 	vhpt_purge(v, ifa, ps);
 
-	if (ps == mrr.ps) {
-		if (!(pte&VTLB_PTE_IO)) {
-			vhpt_insert(phy_pte, itir, ifa, pte);
-		} else {
-			vtlb_insert(v, pte, itir, ifa);
-			vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		}
-	} else if (ps > mrr.ps) {
+	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
 		vtlb_insert(v, pte, itir, ifa);
 		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		if (!(pte&VTLB_PTE_IO))
-			vhpt_insert(phy_pte, itir, ifa, pte);
-	} else {
+	}
+	if (pte & VTLB_PTE_IO)
+		return;
+
+	if (ps >= mrr.ps)
+		vhpt_insert(phy_pte, itir, ifa, pte);
+	else {
 		u64 psr;
 		phy_pte &= ~PAGE_FLAGS_RV_MASK;
 		psr = ia64_clear_ic();
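The two hunks above flatten a three-way branch on page size into a straight-line sequence: entries the machine VHPT cannot hold one-for-one (guest page size differing from the host's, or MMIO) go into the software vTLB, MMIO entries stop there, and everything else lands in the VHPT, whole or split. Below is a minimal, compilable sketch of that flow; the stub helpers, the bit position chosen for VTLB_PTE_IO, and the split-insert stand-in for the patch's else branch are illustrative assumptions, not kernel code.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* hypothetical bit position; stands in for the kernel's VTLB_PTE_IO flag */
#define VTLB_PTE_IO	(1UL << 60)

/* no-op stand-ins for vtlb_insert(), vhpt_insert() and the split-insert
 * else branch of the patch (whose body is only partly visible above) */
static void vtlb_insert_stub(u64 pte) { (void)pte; puts("  -> vtlb_insert"); }
static void vhpt_insert_stub(u64 pte) { (void)pte; puts("  -> vhpt_insert"); }
static void vhpt_split_stub(u64 pte)  { (void)pte; puts("  -> split insert"); }

static void purge_and_insert(u64 pte, u64 ps, u64 host_ps)
{
	/* entries the machine VHPT cannot hold one-for-one (guest page
	 * size != host page size, or MMIO) go into the software vTLB */
	if (ps != host_ps || (pte & VTLB_PTE_IO))
		vtlb_insert_stub(pte);

	/* MMIO stops here; it must never reach the machine VHPT */
	if (pte & VTLB_PTE_IO)
		return;

	/* RAM: one VHPT entry when the guest page covers at least a host
	 * page, otherwise the split path (the else branch in the hunk) */
	if (ps >= host_ps)
		vhpt_insert_stub(pte);
	else
		vhpt_split_stub(pte);
}

int main(void)
{
	puts("RAM,  ps == host ps:"); purge_and_insert(0, 14, 14);
	puts("RAM,  ps >  host ps:"); purge_and_insert(0, 16, 14);
	puts("RAM,  ps <  host ps:"); purge_and_insert(0, 12, 14);
	puts("MMIO, ps == host ps:"); purge_and_insert(VTLB_PTE_IO, 14, 14);
	return 0;
}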
@@ -469,7 +461,6 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	if (!(pte&VTLB_PTE_IO))
 		mark_pages_dirty(v, pte, ps);
 
-	return ret;
 }
 
 /*
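A note on the signature change in the earlier hunk: once MMIO entries return early as sketched above, the `ret` flag only echoed information the caller already had in hand, since VTLB_PTE_IO is a bit of the `pte` argument the caller passed in. Dropping the "1 indicates this is MMIO" contract and returning void removes a redundant channel rather than a capability; a caller that still cares can test `pte & VTLB_PTE_IO` directly.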
@@ -509,7 +500,6 @@ void thash_purge_all(struct kvm_vcpu *v)
 	local_flush_tlb_all();
 }
 
-
 /*
  * Lookup the hash table and its collision chain to find an entry
  * covering this address rid:va or the entry.
@@ -517,7 +507,6 @@ void thash_purge_all(struct kvm_vcpu *v)
  * INPUT:
  *  in: TLB format for both VHPT & TLB.
  */
-
 struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 {
 	struct thash_data *cch;
@@ -547,7 +536,6 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 	return NULL;
 }
 
-
 /*
  * Initialize internal control data before service.
  */
@@ -573,6 +561,10 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
+
+	if (gpfn >= (KVM_P2M_SIZE >> 3))
+		panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
+
 	return *(base + gpfn);
 }
 
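The check added above guards the guest-physical-to-machine (P2M) lookup table: KVM_P2M_SIZE counts bytes and each slot is an 8-byte u64, so KVM_P2M_SIZE >> 3 is the number of valid gpfn indices, and any gpfn at or past it would read beyond the table. A small sketch of that arithmetic, using a hypothetical 8 MiB table size in place of the kernel constant:

#include <stdint.h>
#include <stdio.h>

#define P2M_SIZE	(8UL << 20)	/* hypothetical: an 8 MiB table */
#define P2M_ENTRIES	(P2M_SIZE >> 3)	/* one 8-byte u64 slot per gpfn */

static int gpfn_in_range(uint64_t gpfn)
{
	/* same comparison as the patch: the index must stay below size/8 */
	return gpfn < P2M_ENTRIES;
}

int main(void)
{
	printf("slots: %lu, valid gpfns: 0..%lu\n", P2M_ENTRIES, P2M_ENTRIES - 1);
	printf("gpfn 0xfffff  in range: %d\n", gpfn_in_range(0xfffffUL));
	printf("gpfn 0x100000 in range: %d\n", gpfn_in_range(0x100000UL));
	return 0;
}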
@@ -589,7 +581,6 @@ u64 kvm_gpa_to_mpa(u64 gpa)
 	return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
 }
 
-
 /*
  * Fetch guest bundle code.
  * INPUT:
@@ -631,7 +622,6 @@ int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
 	return IA64_NO_FAULT;
 }
 
-
 void kvm_init_vhpt(struct kvm_vcpu *v)
 {
 	v->arch.vhpt.num = VHPT_NUM_ENTRIES;