author		Suzuki K Poulose <suzuki.poulose@arm.com>	2016-03-01 07:00:39 -0500
committer	Christoffer Dall <christoffer.dall@linaro.org>	2016-04-21 08:57:05 -0400
commit		bbb3b6b35087539e75792b46e07b7ce5282d0979 (patch)
tree		e1cbcddd257fcca72bcfcae8981028159881663b /arch/arm/kvm
parent		0dbd3b18c63c81dec1a8c47667d89c54ade9b52a (diff)
kvm-arm: Replace kvm_pmd_huge with pmd_thp_or_huge
Both arm and arm64 now provide a helper, pmd_thp_or_huge(), to check
whether a given pmd represents a huge page. Use that instead of our
own custom check.
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--	arch/arm/kvm/mmu.c	17
1 file changed, 8 insertions, 9 deletions
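For context: pmd_thp_or_huge() is expected to collapse to the same check that the removed kvm_pmd_huge() macro open-coded, i.e. "is this pmd a HugeTLB or transparent huge page mapping". A minimal sketch of what the arch headers provide (the authoritative definitions live in the arm/arm64 pgtable headers added by the companion patches in this series, so treat the exact spelling below as an assumption rather than verbatim kernel code):

	/*
	 * Sketch only -- the real helper is defined by the arch pgtable
	 * headers (e.g. arch/arm/include/asm/pgtable-3level.h); this mirrors
	 * the removed KVM-private wrapper rather than quoting kernel code.
	 */
	#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

With the common helper available, the diff below simply deletes the KVM-private kvm_pmd_huge() definition and converts each call site.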
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 774d00b8066b..7837f0afa5a4 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -45,7 +45,6 @@ static phys_addr_t hyp_idmap_vector;
 
 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
 
-#define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x))
 #define kvm_pud_huge(_x) pud_huge(_x)
 
 #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
@@ -115,7 +114,7 @@ static bool kvm_is_device_pfn(unsigned long pfn)
  */
 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
 {
-	if (!kvm_pmd_huge(*pmd))
+	if (!pmd_thp_or_huge(*pmd))
 		return;
 
 	pmd_clear(pmd);
@@ -177,7 +176,7 @@ static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
 	pte_t *pte_table = pte_offset_kernel(pmd, 0);
-	VM_BUG_ON(kvm_pmd_huge(*pmd));
+	VM_BUG_ON(pmd_thp_or_huge(*pmd));
 	pmd_clear(pmd);
 	kvm_tlb_flush_vmid_ipa(kvm, addr);
 	pte_free_kernel(NULL, pte_table);
@@ -240,7 +239,7 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
 	do {
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd)) {
+			if (pmd_thp_or_huge(*pmd)) {
 				pmd_t old_pmd = *pmd;
 
 				pmd_clear(pmd);
@@ -326,7 +325,7 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
 	do {
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd))
+			if (pmd_thp_or_huge(*pmd))
 				kvm_flush_dcache_pmd(*pmd);
 			else
 				stage2_flush_ptes(kvm, pmd, addr, next);
@@ -1050,7 +1049,7 @@ static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
 	do {
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd)) {
+			if (pmd_thp_or_huge(*pmd)) {
 				if (!kvm_s2pmd_readonly(pmd))
 					kvm_set_s2pmd_readonly(pmd);
 			} else {
@@ -1331,7 +1330,7 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		goto out;
 
-	if (kvm_pmd_huge(*pmd)) {	/* THP, HugeTLB */
+	if (pmd_thp_or_huge(*pmd)) {	/* THP, HugeTLB */
 		*pmd = pmd_mkyoung(*pmd);
 		pfn = pmd_pfn(*pmd);
 		pfn_valid = true;
@@ -1555,7 +1554,7 @@ static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		return 0;
 
-	if (kvm_pmd_huge(*pmd)) {	/* THP, HugeTLB */
+	if (pmd_thp_or_huge(*pmd)) {	/* THP, HugeTLB */
 		if (pmd_young(*pmd)) {
 			*pmd = pmd_mkold(*pmd);
 			return 1;
@@ -1585,7 +1584,7 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		return 0;
 
-	if (kvm_pmd_huge(*pmd))	/* THP, HugeTLB */
+	if (pmd_thp_or_huge(*pmd))	/* THP, HugeTLB */
 		return pmd_young(*pmd);
 
 	pte = pte_offset_kernel(pmd, gpa);