author	Suzuki K Poulose <suzuki.poulose@arm.com>	2016-03-22 14:33:45 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2016-04-21 08:58:07 -0400
commit	70fd19068573e449d47eb2daa69cf5db541ef4f5 (patch)
tree	cce9203c5fef16946530f2fd9f6104e97c0a34a9 /arch/arm/kvm
parent	66f877faf9cc23232f25b423758eaa167de1ad09 (diff)
kvm-arm: Use explicit stage2 helper routines
We have stage2 page table helpers for both arm and arm64. Switch to
the stage2 helpers for routines that only deal with the stage2 page
tables.

Cc: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
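For context: on 32-bit arm the stage2 tables share the host page table layout, so the stage2_* accessors can be defined as thin wrappers around the generic helpers, while arm64 can route the same names to a layout with a different number of levels. A minimal sketch of what the arm side of such a header might look like (the wrapper approach follows this series, but the macro list below is illustrative rather than the verbatim arch/arm/include/asm/stage2_pgtable.h):

/* Illustrative sketch: stage2 accessors as thin wrappers over the host helpers. */
#define stage2_pgd_none(pgd)		pgd_none(pgd)
#define stage2_pgd_present(pgd)		pgd_present(pgd)
#define stage2_pgd_index(addr)		pgd_index(addr)
#define stage2_pgd_populate(pgd, pud)	pgd_populate(NULL, pgd, pud)
#define stage2_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define stage2_pud_none(pud)		pud_none(pud)
#define stage2_pud_huge(pud)		pud_huge(pud)
#define stage2_pud_offset(pgd, addr)	pud_offset(pgd, addr)
#define stage2_pud_populate(pud, pmd)	pud_populate(NULL, pud, pmd)
#define stage2_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define stage2_pmd_offset(pud, addr)	pmd_offset(pud, addr)
#define stage2_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

Keeping the call sites on stage2_* names lets each architecture supply the right definition without touching the common walkers below.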
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--	arch/arm/kvm/mmu.c	48
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index d0c0ee92c378..f93f717b5d8b 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -319,9 +319,9 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
 	pmd_t *pmd;
 	phys_addr_t next;
 
-	pmd = pmd_offset(pud, addr);
+	pmd = stage2_pmd_offset(pud, addr);
 	do {
-		next = kvm_pmd_addr_end(addr, end);
+		next = stage2_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
 			if (pmd_thp_or_huge(*pmd))
 				kvm_flush_dcache_pmd(*pmd);
@@ -337,11 +337,11 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
 	pud_t *pud;
 	phys_addr_t next;
 
-	pud = pud_offset(pgd, addr);
+	pud = stage2_pud_offset(pgd, addr);
 	do {
-		next = kvm_pud_addr_end(addr, end);
-		if (!pud_none(*pud)) {
-			if (pud_huge(*pud))
+		next = stage2_pud_addr_end(addr, end);
+		if (!stage2_pud_none(*pud)) {
+			if (stage2_pud_huge(*pud))
 				kvm_flush_dcache_pud(*pud);
 			else
 				stage2_flush_pmds(kvm, pud, addr, next);
@@ -357,9 +357,9 @@ static void stage2_flush_memslot(struct kvm *kvm,
 	phys_addr_t next;
 	pgd_t *pgd;
 
-	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
-		next = kvm_pgd_addr_end(addr, end);
+		next = stage2_pgd_addr_end(addr, end);
 		stage2_flush_puds(kvm, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
@@ -807,16 +807,16 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pgd_t *pgd;
 	pud_t *pud;
 
-	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
-	if (WARN_ON(pgd_none(*pgd))) {
+	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+	if (WARN_ON(stage2_pgd_none(*pgd))) {
 		if (!cache)
 			return NULL;
 		pud = mmu_memory_cache_alloc(cache);
-		pgd_populate(NULL, pgd, pud);
+		stage2_pgd_populate(pgd, pud);
 		get_page(virt_to_page(pgd));
 	}
 
-	return pud_offset(pgd, addr);
+	return stage2_pud_offset(pgd, addr);
 }
 
 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -826,15 +826,15 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pmd_t *pmd;
 
 	pud = stage2_get_pud(kvm, cache, addr);
-	if (pud_none(*pud)) {
+	if (stage2_pud_none(*pud)) {
 		if (!cache)
 			return NULL;
 		pmd = mmu_memory_cache_alloc(cache);
-		pud_populate(NULL, pud, pmd);
+		stage2_pud_populate(pud, pmd);
 		get_page(virt_to_page(pud));
 	}
 
-	return pmd_offset(pud, addr);
+	return stage2_pmd_offset(pud, addr);
 }
 
 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
@@ -1042,10 +1042,10 @@ static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
 	pmd_t *pmd;
 	phys_addr_t next;
 
-	pmd = pmd_offset(pud, addr);
+	pmd = stage2_pmd_offset(pud, addr);
 
 	do {
-		next = kvm_pmd_addr_end(addr, end);
+		next = stage2_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
 			if (pmd_thp_or_huge(*pmd)) {
 				if (!kvm_s2pmd_readonly(pmd))
@@ -1070,12 +1070,12 @@ static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
 	pud_t *pud;
 	phys_addr_t next;
 
-	pud = pud_offset(pgd, addr);
+	pud = stage2_pud_offset(pgd, addr);
 	do {
-		next = kvm_pud_addr_end(addr, end);
-		if (!pud_none(*pud)) {
+		next = stage2_pud_addr_end(addr, end);
+		if (!stage2_pud_none(*pud)) {
 			/* TODO:PUD not supported, revisit later if supported */
-			BUG_ON(pud_huge(*pud));
+			BUG_ON(stage2_pud_huge(*pud));
 			stage2_wp_pmds(pud, addr, next);
 		}
 	} while (pud++, addr = next, addr != end);
@@ -1092,7 +1092,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 	pgd_t *pgd;
 	phys_addr_t next;
 
-	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
 		/*
 		 * Release kvm_mmu_lock periodically if the memory region is
@@ -1104,8 +1104,8 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
 			cond_resched_lock(&kvm->mmu_lock);
 
-		next = kvm_pgd_addr_end(addr, end);
-		if (pgd_present(*pgd))
+		next = stage2_pgd_addr_end(addr, end);
+		if (stage2_pgd_present(*pgd))
 			stage2_wp_puds(pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
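Every hunk above is an instance of the same range-walk idiom: take the offset into the current table level, clamp the step to the next entry boundary, and descend only if the entry is present. Condensed into a standalone sketch (stage2_walk_range is a hypothetical name for illustration, not a function added by this patch):

static void stage2_walk_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	pgd_t *pgd = kvm->arch.pgd + stage2_pgd_index(addr);
	phys_addr_t next;

	do {
		/* next = min(next pgd entry boundary, end of the range) */
		next = stage2_pgd_addr_end(addr, end);
		if (stage2_pgd_present(*pgd))
			stage2_wp_puds(pgd, addr, next);	/* descend one level */
	} while (pgd++, addr = next, addr != end);
}

Because the boundary arithmetic now goes through stage2_pgd_addr_end() rather than the host's helper, the same walker works even when the stage2 tables use a different geometry from the host's, which is exactly what the arm64 side of this series relies on.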