diff options
author | Marc Zyngier <marc.zyngier@arm.com> | 2013-01-28 10:27:00 -0500 |
---|---|---|
committer | Christoffer Dall <cdall@cs.columbia.edu> | 2013-03-06 18:48:45 -0500 |
commit | 48762767e1c150d58c250650f8202b7d4ad65ec4 (patch) | |
tree | 38ae7af86d7e84a6497121eda2991bedfb442f6a /arch/arm/kvm | |
parent | 06fe0b73ff17e5d777af1b26f3e227d79c0d6808 (diff) |
ARM: KVM: change kvm_tlb_flush_vmid to kvm_tlb_flush_vmid_ipa
v8 is capable of invalidating Stage-2 by IPA, but v7 is not.
Change kvm_tlb_flush_vmid() to take an IPA parameter, which is
then ignored by the v7 invalidation code (it nukes the whole TLB,
as it always did).
This allows v8 to implement a more optimized strategy.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r-- | arch/arm/kvm/interrupts.S | 9 |
-rw-r--r-- | arch/arm/kvm/mmu.c | 8 |
2 files changed, 10 insertions, 7 deletions
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index a8e0c2d85cb5..f7793df62f58 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S | |||
@@ -35,15 +35,18 @@ __kvm_hyp_code_start: | |||
35 | /******************************************************************** | 35 | /******************************************************************** |
36 | * Flush per-VMID TLBs | 36 | * Flush per-VMID TLBs |
37 | * | 37 | * |
38 | * void __kvm_tlb_flush_vmid(struct kvm *kvm); | 38 | * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); |
39 | * | 39 | * |
40 | * We rely on the hardware to broadcast the TLB invalidation to all CPUs | 40 | * We rely on the hardware to broadcast the TLB invalidation to all CPUs |
41 | * inside the inner-shareable domain (which is the case for all v7 | 41 | * inside the inner-shareable domain (which is the case for all v7 |
42 | * implementations). If we come across a non-IS SMP implementation, we'll | 42 | * implementations). If we come across a non-IS SMP implementation, we'll |
43 | * have to use an IPI based mechanism. Until then, we stick to the simple | 43 | * have to use an IPI based mechanism. Until then, we stick to the simple |
44 | * hardware assisted version. | 44 | * hardware assisted version. |
45 | * | ||
46 | * As v7 does not support flushing per IPA, just nuke the whole TLB | ||
47 | * instead, ignoring the ipa value. | ||
45 | */ | 48 | */ |
46 | ENTRY(__kvm_tlb_flush_vmid) | 49 | ENTRY(__kvm_tlb_flush_vmid_ipa) |
47 | push {r2, r3} | 50 | push {r2, r3} |
48 | 51 | ||
49 | add r0, r0, #KVM_VTTBR | 52 | add r0, r0, #KVM_VTTBR |
@@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid) | |||
60 | 63 | ||
61 | pop {r2, r3} | 64 | pop {r2, r3} |
62 | bx lr | 65 | bx lr |
63 | ENDPROC(__kvm_tlb_flush_vmid) | 66 | ENDPROC(__kvm_tlb_flush_vmid_ipa) |
64 | 67 | ||
65 | /******************************************************************** | 68 | /******************************************************************** |
66 | * Flush TLBs and instruction caches of all CPUs inside the inner-shareable | 69 | * Flush TLBs and instruction caches of all CPUs inside the inner-shareable |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 692f064fde0a..0bf2c8551f75 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -34,9 +34,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; | |||
34 | 34 | ||
35 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); | 35 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); |
36 | 36 | ||
37 | static void kvm_tlb_flush_vmid(struct kvm *kvm) | 37 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
38 | { | 38 | { |
39 | kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); | 39 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); |
40 | } | 40 | } |
41 | 41 | ||
42 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, | 42 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, |
@@ -449,7 +449,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |||
449 | old_pte = *pte; | 449 | old_pte = *pte; |
450 | kvm_set_pte(pte, *new_pte); | 450 | kvm_set_pte(pte, *new_pte); |
451 | if (pte_present(old_pte)) | 451 | if (pte_present(old_pte)) |
452 | kvm_tlb_flush_vmid(kvm); | 452 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
453 | else | 453 | else |
454 | get_page(virt_to_page(pte)); | 454 | get_page(virt_to_page(pte)); |
455 | 455 | ||
@@ -666,7 +666,7 @@ static void handle_hva_to_gpa(struct kvm *kvm, | |||
666 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) | 666 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) |
667 | { | 667 | { |
668 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); | 668 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); |
669 | kvm_tlb_flush_vmid(kvm); | 669 | kvm_tlb_flush_vmid_ipa(kvm, gpa); |
670 | } | 670 | } |
671 | 671 | ||
672 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 672 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) |