Diffstat (limited to 'arch/ia64/kvm/vtlb.c')
 -rw-r--r--  arch/ia64/kvm/vtlb.c | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index def4576d22b1..e22b93361e08 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -390,7 +390,7 @@ void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
 
 u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 {
-	u64 ps, ps_mask, paddr, maddr;
+	u64 ps, ps_mask, paddr, maddr, io_mask;
 	union pte_flags phy_pte;
 
 	ps = itir_ps(itir);
@@ -398,8 +398,9 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 	phy_pte.val = *pte;
 	paddr = *pte;
 	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
-	maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
-	if (maddr & GPFN_IO_MASK) {
+	maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
+	io_mask = maddr & GPFN_IO_MASK;
+	if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
 		*pte |= VTLB_PTE_IO;
 		return -1;
 	}
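These first two hunks change how translate_phy_pte() classifies a guest frame: before, any GPFN_* attribute bit in the p2m entry forced the VTLB_PTE_IO path; now an entry tagged GPFN_PHYS_MMIO is exempted and keeps a normal translation. Below is a minimal, runnable sketch of the two predicates; the GPFN_* constant values are assumed stand-ins modeled on the ia64 KVM headers, not taken from this diff.

/* Standalone sketch of the predicate change in translate_phy_pte().
 * The GPFN_* values below are assumptions modeled on the ia64 KVM
 * headers; only the shape of the test comes from the diff itself. */
#include <stdint.h>
#include <stdio.h>

#define GPFN_FRAME_BUFFER (1UL << 60)	/* assumed: VGA frame buffer tag */
#define GPFN_PHYS_MMIO    (7UL << 60)	/* assumed: physical MMIO tag */
#define GPFN_IO_MASK      (7UL << 60)	/* assumed: whole attribute field */

/* Old test: any attribute bit forces the VTLB_PTE_IO slow path. */
static int is_io_old(uint64_t maddr)
{
	return (maddr & GPFN_IO_MASK) != 0;
}

/* New test: physical MMIO is exempted and keeps the fast path. */
static int is_io_new(uint64_t maddr)
{
	uint64_t io_mask = maddr & GPFN_IO_MASK;

	return io_mask && (io_mask != GPFN_PHYS_MMIO);
}

int main(void)
{
	uint64_t mmio = GPFN_PHYS_MMIO | 0x4000;	/* physical MMIO page */
	uint64_t vga  = GPFN_FRAME_BUFFER | 0x8000;	/* emulated I/O page */

	printf("mmio old=%d new=%d\n", is_io_old(mmio), is_io_new(mmio));
	printf("vga  old=%d new=%d\n", is_io_old(vga), is_io_new(vga));
	return 0;	/* prints: mmio old=1 new=0, vga old=1 new=1 */
}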
@@ -418,7 +419,7 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		u64 ifa, int type)
 {
 	u64 ps;
-	u64 phy_pte;
+	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
 	int ret = 0;
 
@@ -426,13 +427,16 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	vrr.val = vcpu_get_rr(v, ifa);
 	mrr.val = ia64_get_rr(ifa);
 
+	index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+	io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
 	phy_pte = translate_phy_pte(&pte, itir, ifa);
 
 	/* Ensure WB attribute if pte is related to a normal mem page,
 	 * which is required by vga acceleration since qemu maps shared
 	 * vram buffer with WB.
 	 */
-	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
+		io_mask != GPFN_PHYS_MMIO) {
 		pte &= ~_PAGE_MA_MASK;
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
@@ -566,12 +570,19 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 	}
 }
 
-u64 kvm_lookup_mpa(u64 gpfn)
+u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
 	return *(base + gpfn);
 }
 
+u64 kvm_lookup_mpa(u64 gpfn)
+{
+	u64 maddr;
+	maddr = kvm_get_mpt_entry(gpfn);
+	return maddr & _PAGE_PPN_MASK;
+}
+
 u64 kvm_gpa_to_mpa(u64 gpa)
 {
 	u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
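The final hunk splits the lookup in two: kvm_get_mpt_entry() returns the raw p2m entry, attribute bits included, for callers such as translate_phy_pte() that need the GPFN_* tag, while kvm_lookup_mpa() now masks with _PAGE_PPN_MASK so address users like kvm_gpa_to_mpa() can no longer fold flag bits into a machine address. A minimal model of the split, with the table and constants assumed in place of KVM_P2M_BASE and the kernel's definitions:

/* Minimal model of the kvm_get_mpt_entry()/kvm_lookup_mpa() split.
 * The table and mask below are assumptions standing in for the
 * kernel's KVM_P2M_BASE region and _PAGE_PPN_MASK. */
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PPN_MASK	0x0003fffffffff000UL	/* assumed ppn bits */
#define GPFN_PHYS_MMIO	(7UL << 60)		/* assumed attr tag */
#define PAGE_SHIFT	14			/* assumed 16KB pages */

static uint64_t p2m[16];	/* stand-in for the KVM_P2M_BASE table */

/* Raw entry: machine ppn plus GPFN_* attribute bits in the top bits. */
static uint64_t kvm_get_mpt_entry(uint64_t gpfn)
{
	return p2m[gpfn];
}

/* Address-only view: attribute bits stripped before any arithmetic. */
static uint64_t kvm_lookup_mpa(uint64_t gpfn)
{
	return kvm_get_mpt_entry(gpfn) & _PAGE_PPN_MASK;
}

int main(void)
{
	p2m[3] = GPFN_PHYS_MMIO | (0xabcUL << PAGE_SHIFT);

	printf("raw entry: %#lx\n", (unsigned long)kvm_get_mpt_entry(3));
	printf("mpa only : %#lx\n", (unsigned long)kvm_lookup_mpa(3));
	return 0;	/* the second line has the 7UL<<60 tag stripped */
}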