author	Xiantao Zhang <xiantao.zhang@intel.com>	2008-09-28 04:39:46 -0400
committer	Avi Kivity <avi@redhat.com>	2008-10-15 08:25:38 -0400
commit	b010eb5103cfbe12ae6f08a4cdb3a748bf78c410 (patch)
tree	e2b812000bbb1b13edb52667e42a8d04d4ad02e5 /arch/ia64/kvm/vtlb.c
parent	1cbea809c400661eecb538e0dd0bc4f3660f0a35 (diff)
KVM: ia64: add directed mmio range support for kvm guests
Using VT-d, KVM guests can be assigned physical devices, so this patch introduces a new mmio type (directed mmio) to handle their mmio accesses.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
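For orientation, below is a minimal, self-contained sketch of the classification this patch adds: a P2M entry whose I/O bits are set but are not GPFN_PHYS_MMIO is treated as emulated MMIO (the access is bounced to the mmio handler), while GPFN_PHYS_MMIO marks directed MMIO belonging to an assigned device, which is mapped straight through like normal memory. The GPFN_* constant values and the sample entries are stand-ins chosen only for illustration; the real flags live in the ia64 KVM headers and the real lookup is kvm_get_mpt_entry() in the hunks below.

/*
 * Stand-alone sketch, NOT kernel code: the GPFN_* values and the sample
 * entries are made-up stand-ins; only the decision logic mirrors the patch.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define GPFN_IO_MASK	(0x3ULL << 60)	/* stand-in: "this gpfn is I/O" field    */
#define GPFN_LOW_MMIO	(0x1ULL << 60)	/* stand-in: MMIO range emulated by qemu */
#define GPFN_PHYS_MMIO	(0x2ULL << 60)	/* stand-in: directed MMIO (assigned HW) */

/* Mirror of the decision translate_phy_pte() makes after this patch. */
static const char *classify(uint64_t mpt_entry)
{
	uint64_t io_mask = mpt_entry & GPFN_IO_MASK;

	if (io_mask && io_mask != GPFN_PHYS_MMIO)
		return "emulated mmio  -> set VTLB_PTE_IO, exit to the mmio handler";
	if (io_mask == GPFN_PHYS_MMIO)
		return "directed mmio  -> insert a real mapping, guest hits the device";
	return "normal memory  -> insert a real mapping";
}

int main(void)
{
	uint64_t samples[] = {
		0x1000,				/* plain RAM page             */
		GPFN_LOW_MMIO  | 0x2000,	/* device emulated in qemu    */
		GPFN_PHYS_MMIO | 0x3000,	/* VT-d assigned device's BAR */
	};

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("mpt entry %#" PRIx64 ": %s\n", samples[i], classify(samples[i]));
	return 0;
}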
Diffstat (limited to 'arch/ia64/kvm/vtlb.c')
-rw-r--r--	arch/ia64/kvm/vtlb.c	23
1 file changed, 17 insertions, 6 deletions
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index def4576d22b1..e22b93361e08 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -390,7 +390,7 @@ void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
 
 u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 {
-	u64 ps, ps_mask, paddr, maddr;
+	u64 ps, ps_mask, paddr, maddr, io_mask;
 	union pte_flags phy_pte;
 
 	ps = itir_ps(itir);
@@ -398,8 +398,9 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 	phy_pte.val = *pte;
 	paddr = *pte;
 	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
-	maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
-	if (maddr & GPFN_IO_MASK) {
+	maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
+	io_mask = maddr & GPFN_IO_MASK;
+	if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
 		*pte |= VTLB_PTE_IO;
 		return -1;
 	}
@@ -418,7 +419,7 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 						u64 ifa, int type)
 {
 	u64 ps;
-	u64 phy_pte;
+	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
 	int ret = 0;
 
@@ -426,13 +427,16 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	vrr.val = vcpu_get_rr(v, ifa);
 	mrr.val = ia64_get_rr(ifa);
 
+	index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+	io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
 	phy_pte = translate_phy_pte(&pte, itir, ifa);
 
 	/* Ensure WB attribute if pte is related to a normal mem page,
 	 * which is required by vga acceleration since qemu maps shared
 	 * vram buffer with WB.
 	 */
-	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
+		io_mask != GPFN_PHYS_MMIO) {
 		pte &= ~_PAGE_MA_MASK;
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
@@ -566,12 +570,19 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 	}
 }
 
-u64 kvm_lookup_mpa(u64 gpfn)
+u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
 	return *(base + gpfn);
 }
 
+u64 kvm_lookup_mpa(u64 gpfn)
+{
+	u64 maddr;
+	maddr = kvm_get_mpt_entry(gpfn);
+	return maddr&_PAGE_PPN_MASK;
+}
+
 u64 kvm_gpa_to_mpa(u64 gpa)
 {
 	u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
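Note on the last hunk: the old kvm_lookup_mpa() body becomes kvm_get_mpt_entry(), which returns the raw P2M entry including the GPFN type bits, while the new kvm_lookup_mpa() wrapper masks the entry with _PAGE_PPN_MASK. Existing callers such as kvm_gpa_to_mpa() therefore keep receiving only the machine address bits, and translate_phy_pte()/thash_purge_and_insert() call kvm_get_mpt_entry() directly when they need the I/O type to detect directed mmio.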