author     Xiantao Zhang <xiantao.zhang@intel.com>    2009-01-15 04:58:19 -0500
committer  Avi Kivity <avi@redhat.com>                2009-03-24 05:03:06 -0400
commit     27d146449ccaad16480888129bb32a000ee33717
tree       10cebed5b2e9baf491911a69bb5d01ecf5f0160f   /arch/ia64/kvm
parent     91b2ae773d3b168b763237fac33f75b13d891f20
KVM: ia64: vTLB change for enabling Windows 2008 boot
Simplify the hash vTLB logic, and export kvm_gpa_to_mpa.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--   arch/ia64/kvm/vcpu.h |  4
-rw-r--r--   arch/ia64/kvm/vtlb.c | 39
2 files changed, 19 insertions(+), 24 deletions(-)
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index b2f12a562bdf..042af92ced83 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -703,7 +703,7 @@ extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
 extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
 extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
 extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
-extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
+extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
 		u64 itir, u64 ifa, int type);
 extern void thash_purge_all(struct kvm_vcpu *v);
 extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
@@ -738,7 +738,7 @@ void kvm_init_vhpt(struct kvm_vcpu *v);
 void thash_init(struct thash_cb *hcb, u64 sz);
 
 void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
-
+u64 kvm_gpa_to_mpa(u64 gpa);
 extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
 		u64 arg4, u64 arg5, u64 arg6, u64 arg7);
 
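The header hunks above only retype thash_purge_and_insert() and export kvm_gpa_to_mpa(); the body of kvm_gpa_to_mpa() lives in vtlb.c and is not part of this patch. As a rough illustration only, a gpa-to-mpa translation built on the P2M table helper could look like the sketch below; the helper composition and masking are assumptions, not taken from this commit.

/* Illustrative sketch only -- not part of this patch. Assumes the existing
 * vtlb.c helper kvm_get_mpt_entry() and the usual ia64 PAGE_* definitions. */
u64 kvm_gpa_to_mpa(u64 gpa)
{
	/* Look up the machine page behind this guest page frame number. */
	u64 pte = kvm_get_mpt_entry(gpa >> PAGE_SHIFT);

	/* Page-aligned machine address plus the offset within the page. */
	return ((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT << PAGE_SHIFT) |
	       (gpa & (PAGE_SIZE - 1));
}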
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index ac94867f8267..38232b37668b 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
 	unsigned long ps, gpaddr;
 
 	ps = itir_ps(itir);
+	rr.val = ia64_get_rr(ifa);
 
 	gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
 					(ifa & ((1UL << ps) - 1));
 
-	rr.val = ia64_get_rr(ifa);
 	head = (struct thash_data *)ia64_thash(ifa);
 	head->etag = INVALID_TI_TAG;
 	ia64_mf();
@@ -412,16 +412,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 
 /*
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
  * Notes: Only TC entry can purge and insert.
- * 1 indicates this is MMIO
  */
-int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 						u64 ifa, int type)
 {
 	u64 ps;
 	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
-	int ret = 0;
 
 	ps = itir_ps(itir);
 	vrr.val = vcpu_get_rr(v, ifa);
@@ -441,25 +439,19 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
 
-	if (pte & VTLB_PTE_IO)
-		ret = 1;
-
 	vtlb_purge(v, ifa, ps);
 	vhpt_purge(v, ifa, ps);
 
-	if (ps == mrr.ps) {
-		if (!(pte&VTLB_PTE_IO)) {
-			vhpt_insert(phy_pte, itir, ifa, pte);
-		} else {
-			vtlb_insert(v, pte, itir, ifa);
-			vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		}
-	} else if (ps > mrr.ps) {
+	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
 		vtlb_insert(v, pte, itir, ifa);
 		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		if (!(pte&VTLB_PTE_IO))
-			vhpt_insert(phy_pte, itir, ifa, pte);
-	} else {
+	}
+	if (pte & VTLB_PTE_IO)
+		return;
+
+	if (ps >= mrr.ps)
+		vhpt_insert(phy_pte, itir, ifa, pte);
+	else {
 		u64 psr;
 		phy_pte &= ~PAGE_FLAGS_RV_MASK;
 		psr = ia64_clear_ic();
@@ -469,7 +461,6 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	if (!(pte&VTLB_PTE_IO))
 		mark_pages_dirty(v, pte, ps);
 
-	return ret;
 }
 
 /*
@@ -570,6 +561,10 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
+
+	if (gpfn >= (KVM_P2M_SIZE >> 3))
+		panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
+
 	return *(base + gpfn);
 }
 
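Taken together, the vtlb.c hunks turn thash_purge_and_insert() into a void function with a single purge-then-insert path: the entry is tracked in the software vTLB whenever the guest page size differs from the machine page size or the mapping is MMIO, MMIO mappings stop there, and everything else goes into the VHPT (or into a machine TC entry when the guest page is smaller than the machine page). A condensed outline of the new flow follows; the unchanged setup and the lines falling between the hunks are elided as comments.

/* Condensed outline of thash_purge_and_insert() after this patch; the
 * declarations and phy_pte setup shown earlier in the diff are elided. */
void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
						u64 ifa, int type)
{
	/* ... ps, vrr, mrr and phy_pte computed as in the hunks above ... */

	vtlb_purge(v, ifa, ps);
	vhpt_purge(v, ifa, ps);

	/* Track in the software vTLB when the guest and machine page sizes
	 * differ, or when the mapping is MMIO. */
	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
		vtlb_insert(v, pte, itir, ifa);
		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
	}

	/* MMIO never reaches the VHPT or the machine TLB. */
	if (pte & VTLB_PTE_IO)
		return;

	if (ps >= mrr.ps)
		vhpt_insert(phy_pte, itir, ifa, pte);
	else {
		u64 psr;
		phy_pte &= ~PAGE_FLAGS_RV_MASK;
		psr = ia64_clear_ic();
		/* ... machine TC insert between the hunks, unchanged ... */
	}

	if (!(pte & VTLB_PTE_IO))
		mark_pages_dirty(v, pte, ps);
}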