author     Marc Zyngier <marc.zyngier@arm.com>        2013-01-28 10:27:00 -0500
committer  Christoffer Dall <cdall@cs.columbia.edu>   2013-03-06 18:48:45 -0500
commit     48762767e1c150d58c250650f8202b7d4ad65ec4 (patch)
tree       38ae7af86d7e84a6497121eda2991bedfb442f6a
parent     06fe0b73ff17e5d777af1b26f3e227d79c0d6808 (diff)
ARM: KVM: change kvm_tlb_flush_vmid to kvm_tlb_flush_vmid_ipa
v8 is capable of invalidating Stage-2 by IPA, but v7 is not. Change kvm_tlb_flush_vmid() to take an IPA parameter; the v7 invalidation code ignores it and nukes the whole TLB, as it always did.

This allows v8 to implement a more optimized strategy.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
-rw-r--r--  arch/arm/include/asm/kvm_asm.h  |  2
-rw-r--r--  arch/arm/kvm/interrupts.S       |  9
-rw-r--r--  arch/arm/kvm/mmu.c              |  8
3 files changed, 11 insertions, 8 deletions
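In plain C, the shape of the change is this: every caller now passes the IPA of the mapping it just touched, and a backend that cannot flush by IPA simply discards the hint. A minimal, self-contained sketch of that pattern (the struct, names and printouts below are illustrative stand-ins, not kernel code):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
struct kvm { int vmid; };            /* stand-in for the kernel's struct kvm */

/* v7-style backend: cannot invalidate by IPA, so the hint is ignored
 * and every Stage-2 entry for this VMID is nuked, as before. */
static void tlb_flush_vmid_ipa_v7(struct kvm *kvm, phys_addr_t ipa)
{
	(void)ipa;
	printf("VMID %d: invalidate ALL Stage-2 TLB entries\n", kvm->vmid);
}

/* v8-style backend: same signature, but the hint is used to
 * invalidate only the entry covering this IPA. */
static void tlb_flush_vmid_ipa_v8(struct kvm *kvm, phys_addr_t ipa)
{
	printf("VMID %d: invalidate Stage-2 entry for IPA 0x%llx\n",
	       kvm->vmid, (unsigned long long)ipa);
}

int main(void)
{
	struct kvm vm = { .vmid = 1 };

	tlb_flush_vmid_ipa_v7(&vm, 0x80000000ULL);  /* hint discarded */
	tlb_flush_vmid_ipa_v8(&vm, 0x80000000ULL);  /* hint honoured  */
	return 0;
}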
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index e4956f4e23e1..18d50322a9e2 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 #endif
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index a8e0c2d85cb5..f7793df62f58 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -35,15 +35,18 @@ __kvm_hyp_code_start:
 /********************************************************************
  * Flush per-VMID TLBs
  *
- * void __kvm_tlb_flush_vmid(struct kvm *kvm);
+ * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
  *
  * We rely on the hardware to broadcast the TLB invalidation to all CPUs
  * inside the inner-shareable domain (which is the case for all v7
  * implementations). If we come across a non-IS SMP implementation, we'll
  * have to use an IPI based mechanism. Until then, we stick to the simple
  * hardware assisted version.
+ *
+ * As v7 does not support flushing per IPA, just nuke the whole TLB
+ * instead, ignoring the ipa value.
  */
-ENTRY(__kvm_tlb_flush_vmid)
+ENTRY(__kvm_tlb_flush_vmid_ipa)
 	push	{r2, r3}
 
 	add	r0, r0, #KVM_VTTBR
@@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid)
 
 	pop	{r2, r3}
 	bx	lr
-ENDPROC(__kvm_tlb_flush_vmid)
+ENDPROC(__kvm_tlb_flush_vmid_ipa)
 
 /********************************************************************
  * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
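For contrast with the v7 path above, ARMv8 does provide a by-IPA Stage-2 invalidation instruction, TLBI IPAS2E1IS, whose register operand encodes the IPA shifted right by 12. A rough AArch64-targeted sketch of how the new ipa argument could be used (illustrative only, not code from this patch; it assumes the caller already runs at EL2 with the guest's VTTBR_EL2 installed):

#include <stdint.h>

/* Invalidate the Stage-2 TLB entry covering 'ipa' across the
 * inner-shareable domain. The TLBI IPAS2E1IS operand holds the IPA
 * in units of 4K pages, hence the shift by 12. A production
 * implementation may also need to invalidate combined
 * stage-1+stage-2 TLB entries. */
static inline void stage2_flush_by_ipa(uint64_t ipa)
{
	asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa >> 12) : "memory");
	asm volatile("dsb ish" : : : "memory");  /* wait for the invalidation */
	asm volatile("isb" : : : "memory");      /* resynchronize this core   */
}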
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 692f064fde0a..0bf2c8551f75 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -34,9 +34,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 
 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
 
-static void kvm_tlb_flush_vmid(struct kvm *kvm)
+static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
+	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -449,7 +449,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	old_pte = *pte;
 	kvm_set_pte(pte, *new_pte);
 	if (pte_present(old_pte))
-		kvm_tlb_flush_vmid(kvm);
+		kvm_tlb_flush_vmid_ipa(kvm, addr);
 	else
 		get_page(virt_to_page(pte));
 
@@ -666,7 +666,7 @@ static void handle_hva_to_gpa(struct kvm *kvm,
 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
 	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
-	kvm_tlb_flush_vmid(kvm);
+	kvm_tlb_flush_vmid_ipa(kvm, gpa);
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
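One detail worth noting in the stage2_set_pte hunk: the flush stays conditional on pte_present(old_pte), because a TLB can only cache a translation that was valid at some point, so overwriting a not-present entry needs no invalidation. A tiny self-contained illustration of that rule (all types and helpers here are mock-ups, not the kernel's):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t pte_t;

#define PTE_PRESENT 0x1ULL               /* mock "valid" bit */

/* Install a new Stage-2 PTE; flush by IPA only if the slot previously
 * held a valid translation that could still be cached in a TLB. */
static void set_stage2_pte(pte_t *slot, pte_t new_pte, phys_addr_t ipa)
{
	pte_t old_pte = *slot;

	*slot = new_pte;
	if (old_pte & PTE_PRESENT)
		printf("flush Stage-2 TLB entry for IPA 0x%llx\n",
		       (unsigned long long)ipa);
	/* else: nothing to flush, the TLB never saw this mapping */
}

int main(void)
{
	pte_t slot = 0;

	set_stage2_pte(&slot, 0xdead0000ULL | PTE_PRESENT, 0x1000); /* no flush */
	set_stage2_pte(&slot, 0xbeef0000ULL | PTE_PRESENT, 0x1000); /* flushes  */
	return 0;
}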