author     Xiantao Zhang <xiantao.zhang@intel.com>   2008-09-28 04:39:46 -0400
committer  Avi Kivity <avi@redhat.com>               2008-10-15 08:25:38 -0400
commit     b010eb5103cfbe12ae6f08a4cdb3a748bf78c410
tree       e2b812000bbb1b13edb52667e42a8d04d4ad02e5
parent     1cbea809c400661eecb538e0dd0bc4f3660f0a35
KVM: ia64: add directed mmio range support for kvm guests
Using VT-d, kvm guests can be assigned physical devices, so this
patch introduces a new mmio type (directed mmio) to handle their
mmio accesses.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
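The mechanism at work: ia64 KVM keeps a flat P2M (physical-to-machine) table whose entries carry the machine address in the low bits and a GPFN_* type tag in bits 60..62. A directed-MMIO entry is the one type that is I/O from the guest's point of view but must not be emulated, because it is backed by a real device assigned through VT-d. A minimal sketch of that layout, using only the constants visible in this patch; the p2m_* helpers are invented for illustration and are not kernel functions:

```c
/*
 * Illustrative sketch of the P2M entry layout this patch relies on.
 * GPFN_IO_MASK / GPFN_PHYS_MMIO / GPFN_INV_MASK come from the patch;
 * the p2m_* helpers are hypothetical.
 */
#define GPFN_IO_MASK	(7UL << 60)	/* type tag: bits 60..62 */
#define GPFN_PHYS_MMIO	(7UL << 60)	/* directed (device) MMIO */
#define GPFN_INV_MASK	(1UL << 63)	/* entry invalid */

static inline void p2m_set(unsigned long *p2m, unsigned long gpfn,
			   unsigned long mpa, unsigned long tag)
{
	/* e.g. tag = GPFN_PHYS_MMIO, mpa = pfn << PAGE_SHIFT */
	p2m[gpfn] = tag | mpa;
}

static inline unsigned long p2m_type(const unsigned long *p2m,
				     unsigned long gpfn)
{
	if (p2m[gpfn] & GPFN_INV_MASK)
		return 0;		 /* invalid entry carries no type */
	return p2m[gpfn] & GPFN_IO_MASK; /* 0 for RAM, a GPFN_* tag for I/O */
}
```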
 arch/ia64/include/asm/kvm_host.h |  2 +-
 arch/ia64/kvm/kvm-ia64.c         |  4 ++--
 arch/ia64/kvm/vcpu.h             | 26 +++++++++++++-------------
 arch/ia64/kvm/vtlb.c             | 23 +++++++++++++++++------
 4 files changed, 33 insertions(+), 22 deletions(-)
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index da579a33db12..85db124d37f6 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -132,7 +132,7 @@
 #define GPFN_IOSAPIC		(4UL << 60)	/* IOSAPIC base */
 #define GPFN_LEGACY_IO		(5UL << 60)	/* Legacy I/O base */
 #define GPFN_GFW		(6UL << 60)	/* Guest Firmware */
-#define GPFN_HIGH_MMIO		(7UL << 60)	/* High MMIO range */
+#define GPFN_PHYS_MMIO		(7UL << 60)	/* Directed MMIO Range */
 
 #define GPFN_IO_MASK		(7UL << 60)	/* Guest pfn is I/O type */
 #define GPFN_INV_MASK		(1UL << 63)	/* Guest pfn is invalid */
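For orientation, the type field decodes as below. GPFN_PHYS_MMIO reuses the 7UL << 60 pattern that used to be GPFN_HIGH_MMIO, so GPFN_IO_MASK (also 7UL << 60) still covers every type value. The decode helper here is hypothetical, not kernel code:

```c
/* Hypothetical decode of the GPFN_* tags defined in the header above. */
static const char *gpfn_type_name(unsigned long pte)
{
	switch (pte & GPFN_IO_MASK) {
	case GPFN_IOSAPIC:	return "IOSAPIC";
	case GPFN_LEGACY_IO:	return "legacy I/O";
	case GPFN_GFW:		return "guest firmware";
	case GPFN_PHYS_MMIO:	return "directed MMIO";
	default:		return "RAM or other";
	}
}
```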
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 800a4f2e917e..3df82f3fe547 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1447,11 +1447,11 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		if (!kvm_is_mmio_pfn(pfn)) {
 			kvm_set_pmt_entry(kvm, base_gfn + i,
 				pfn << PAGE_SHIFT,
-				_PAGE_MA_WB);
+				_PAGE_AR_RWX | _PAGE_MA_WB);
 			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
 		} else {
 			kvm_set_pmt_entry(kvm, base_gfn + i,
-				GPFN_LOW_MMIO | (pfn << PAGE_SHIFT),
+				GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
 				_PAGE_MA_UC);
 			memslot->rmap[i] = 0;
 		}
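Memslot registration now distinguishes plain RAM from assigned-device MMIO at the point the P2M table is populated. Condensed into one hypothetical helper (the real code is inline in kvm_arch_set_memory_region(), as shown in the hunk above):

```c
/* Sketch only: mirrors the if/else above with the patch's constants.
 * RAM gets an explicit RWX write-back mapping; an assigned device's
 * pfn is tagged GPFN_PHYS_MMIO and mapped uncached. */
static void set_one_pmt_entry(struct kvm *kvm, u64 gfn, u64 pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		kvm_set_pmt_entry(kvm, gfn, pfn << PAGE_SHIFT,
				  _PAGE_AR_RWX | _PAGE_MA_WB);
	else
		kvm_set_pmt_entry(kvm, gfn,
				  GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
				  _PAGE_MA_UC);
}
```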
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index b0fcfb62c49e..341e3fee280c 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -313,21 +313,21 @@ static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
 	trp->rid = rid;
 }
 
-extern u64 kvm_lookup_mpa(u64 gpfn);
-extern u64 kvm_gpa_to_mpa(u64 gpa);
-
-/* Return I/O type if trye */
-#define __gpfn_is_io(gpfn)		\
-({					\
-	u64 pte, ret = 0;		\
-	pte = kvm_lookup_mpa(gpfn);	\
-	if (!(pte & GPFN_INV_MASK))	\
-		ret = pte & GPFN_IO_MASK;	\
-	ret;				\
-})
+extern u64 kvm_get_mpt_entry(u64 gpfn);
 
+/* Return I/ */
+static inline u64 __gpfn_is_io(u64 gpfn)
+{
+	u64 pte;
+	pte = kvm_get_mpt_entry(gpfn);
+	if (!(pte & GPFN_INV_MASK)) {
+		pte = pte & GPFN_IO_MASK;
+		if (pte != GPFN_PHYS_MMIO)
+			return pte;
+	}
+	return 0;
+}
 #endif
-
 #define IA64_NO_FAULT	0
 #define IA64_FAULT	1
 
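The behavioral change in __gpfn_is_io() is easy to miss: it still returns the I/O type for ranges that need software emulation, but now returns 0 for GPFN_PHYS_MMIO, so callers treat directed MMIO like ordinary memory and map it straight through. An illustrative caller, with a hypothetical name:

```c
/* Sketch, assuming the new __gpfn_is_io() semantics: non-zero means
 * "trap and emulate this access"; RAM and directed MMIO both return 0
 * and take the direct-mapped path. */
static int access_needs_emulation(u64 gpa)
{
	return __gpfn_is_io(gpa >> PAGE_SHIFT) != 0;
}
```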
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index def4576d22b1..e22b93361e08 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -390,7 +390,7 @@ void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
 
 u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 {
-	u64 ps, ps_mask, paddr, maddr;
+	u64 ps, ps_mask, paddr, maddr, io_mask;
 	union pte_flags phy_pte;
 
 	ps = itir_ps(itir);
@@ -398,8 +398,9 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 	phy_pte.val = *pte;
 	paddr = *pte;
 	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
-	maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
-	if (maddr & GPFN_IO_MASK) {
+	maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
+	io_mask = maddr & GPFN_IO_MASK;
+	if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
 		*pte |= VTLB_PTE_IO;
 		return -1;
 	}
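translate_phy_pte() used to fail (and flag VTLB_PTE_IO) for any I/O-typed page; with the change above, a directed-MMIO page falls through and receives a real translation. The predicate, isolated as a hypothetical helper:

```c
/* Sketch of the new test in translate_phy_pte(): only I/O types other
 * than GPFN_PHYS_MMIO are routed to software emulation. */
static int pte_needs_io_emulation(u64 mpt_entry)
{
	u64 io_mask = mpt_entry & GPFN_IO_MASK;

	return io_mask && io_mask != GPFN_PHYS_MMIO;
}
```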
@@ -418,7 +419,7 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		 u64 ifa, int type)
 {
 	u64 ps;
-	u64 phy_pte;
+	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
 	int ret = 0;
 
@@ -426,13 +427,16 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	vrr.val = vcpu_get_rr(v, ifa);
 	mrr.val = ia64_get_rr(ifa);
 
+	index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+	io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
 	phy_pte = translate_phy_pte(&pte, itir, ifa);
 
 	/* Ensure WB attribute if pte is related to a normal mem page,
 	 * which is required by vga acceleration since qemu maps shared
 	 * vram buffer with WB.
 	 */
-	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
+			io_mask != GPFN_PHYS_MMIO) {
 		pte &= ~_PAGE_MA_MASK;
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
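The hunk above also guards the write-back fix-up: qemu maps the shared VGA vram buffer write-back, so normal memory is forced to WB, but an assigned device's MMIO page must keep its uncached attribute or device accesses would silently become cacheable. The condition, restated as a hypothetical helper:

```c
/* Sketch: force write-back only for normal memory pages, mirroring the
 * three-part test in thash_purge_and_insert() above. */
static int should_force_wb(u64 pte, u64 io_mask)
{
	return !(pte & VTLB_PTE_IO) &&			/* not emulated I/O */
	       (pte & _PAGE_MA_MASK) != _PAGE_MA_NAT &&	/* not a NaT page   */
	       io_mask != GPFN_PHYS_MMIO;		/* not device MMIO  */
}
```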
@@ -566,12 +570,19 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 	}
 }
 
-u64 kvm_lookup_mpa(u64 gpfn)
+u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
 	return *(base + gpfn);
 }
 
+u64 kvm_lookup_mpa(u64 gpfn)
+{
+	u64 maddr;
+	maddr = kvm_get_mpt_entry(gpfn);
+	return maddr&_PAGE_PPN_MASK;
+}
+
 u64 kvm_gpa_to_mpa(u64 gpa)
 {
 	u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
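Finally, the lookup is split in two: kvm_get_mpt_entry() returns the raw P2M entry with its GPFN_* tag intact (what translate_phy_pte() and __gpfn_is_io() now want), while kvm_lookup_mpa() keeps the old contract of returning only the machine page-number bits. A sketch of how the pieces compose into a gpa-to-mpa translation; this is illustrative only, since the diff above truncates the real kvm_gpa_to_mpa() body:

```c
/* Sketch built on the split lookup; everything except the two kvm_*
 * functions is hypothetical. */
static u64 example_gpa_to_mpa(u64 gpa)
{
	u64 entry = kvm_get_mpt_entry(gpa >> PAGE_SHIFT); /* tag | mpa    */
	u64 mpa   = entry & _PAGE_PPN_MASK;               /* drop the tag */

	return mpa | (gpa & (PAGE_SIZE - 1));             /* keep offset  */
}
```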
