about summary refs log tree commit diff stats
path: root/arch/arm/kvm
diff options
context:
space:
mode:
author    Mark Rutland <mark.rutland@arm.com>    2016-08-30 12:05:55 -0400
committer Christoffer Dall <christoffer.dall@linaro.org>    2016-09-08 06:53:00 -0400
commit    dcadda146f4fd25a732382747f306465d337cda6 (patch)
tree      a2db69fca0e9abe1bb48b3a9d84ddc0f4b7face3 /arch/arm/kvm
parent    55d7cad6a98bbd6f8a5a98b9943b6819d7dc1f22 (diff)
arm/kvm: excise redundant cache maintenance
When modifying Stage-2 page tables, we perform cache maintenance to
account for non-coherent page table walks. However, this is unnecessary,
as page table walks are guaranteed to be coherent in the presence of the
virtualization extensions.

Per ARM DDI 0406C.c, section B1.7 ("The Virtualization Extensions"), the
virtualization extensions mandate the multiprocessing extensions.

Per ARM DDI 0406C.c, section B3.10.1 ("General TLB maintenance
requirements"), as described in the sub-section titled "TLB maintenance
operations and the memory order model", this maintenance is not required
in the presence of the multiprocessing extensions.

Hence, we need not perform this cache maintenance when modifying Stage-2
entries. This patch removes the logic for performing the redundant
maintenance.

To ensure visibility and ordering of updates, a dsb(ishst) that was
otherwise implicit in the maintenance is folded into kvm_set_pmd() and
kvm_set_pte().

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: kvmarm@lists.cs.columbia.edu
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--  arch/arm/kvm/mmu.c | 2 --
1 file changed, 0 insertions(+), 2 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 29d0b23af2a9..344755dcc3b2 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -744,7 +744,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	if (!pgd)
 		return -ENOMEM;
 
-	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 	return 0;
 }
@@ -936,7 +935,6 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 		if (!cache)
 			return 0; /* ignore calls from kvm_set_spte_hva */
 		pte = mmu_memory_cache_alloc(cache);
-		kvm_clean_pte(pte);
 		pmd_populate_kernel(NULL, pmd, pte);
 		get_page(virt_to_page(pmd));
 	}