author		Suzuki K Poulose <suzuki.poulose@arm.com>	2018-09-26 12:32:44 -0400
committer	Marc Zyngier <marc.zyngier@arm.com>		2018-10-01 08:50:30 -0400
commit		e55cac5bf2a9cc86b57a9533d6b9e5005bc19b5c (patch)
tree		b8bb0ba718f87a4485b270afdd4ea14f4a8fa1cf
parent		7665f3a8491b0ed3c6f65c0bc3a5424ea8f87731 (diff)
kvm: arm/arm64: Prepare for VM specific stage2 translations
Right now the stage2 page table for a VM is hard coded, assuming
an IPA of 40 bits. As we are about to add support for per VM IPA,
prepare the stage2 page table helpers to accept the kvm instance
to make the right decision for the VM. No functional changes.
Adds stage2_pgd_size(kvm) to replace S2_PGD_SIZE. Also, moves
some of the definitions in arm32 to align with the arm64. Also
drop the _AC() specifier for constants wherever possible.

Cc: Christoffer Dall <cdall@kernel.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
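The transformation is mechanical: each compile-time stage2 limit becomes a
helper taking the kvm instance, so a follow-up patch can make the IPA size a
per-VM property. As an illustrative sketch only (the struct kvm field below is
hypothetical; this patch keeps the shift fixed at KVM_PHYS_SHIFT = 40), this
is the arithmetic the new kvm_phys_size()/kvm_phys_mask() helpers encode:

#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct kvm, for illustration. */
struct kvm {
	unsigned int phys_shift;	/* per-VM IPA size, in bits */
};

static unsigned long long kvm_phys_size(const struct kvm *kvm)
{
	return 1ULL << kvm->phys_shift;		/* mirrors kvm_phys_size(kvm) */
}

int main(void)
{
	struct kvm vm = { .phys_shift = 40 };	/* today's fixed KVM_PHYS_SHIFT */

	printf("IPA space: %llu bytes\n", kvm_phys_size(&vm));	/* 1 TB */
	printf("IPA mask:  %#llx\n", kvm_phys_size(&vm) - 1ULL);	/* bits [39:0] */
	return 0;
}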
-rw-r--r--  arch/arm/include/asm/kvm_arm.h                    3
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h                   13
-rw-r--r--  arch/arm/include/asm/stage2_pgtable.h            54
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h                  7
-rw-r--r--  arch/arm64/include/asm/stage2_pgtable-nopmd.h    18
-rw-r--r--  arch/arm64/include/asm/stage2_pgtable-nopud.h    16
-rw-r--r--  arch/arm64/include/asm/stage2_pgtable.h          58
-rw-r--r--  virt/kvm/arm/arm.c                                2
-rw-r--r--  virt/kvm/arm/mmu.c                              119
-rw-r--r--  virt/kvm/arm/vgic/vgic-kvm-device.c               2
10 files changed, 158 insertions(+), 134 deletions(-)
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 3ab8b3781bfe..c3f1f9b304b7 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -133,8 +133,7 @@
  * space.
  */
 #define KVM_PHYS_SHIFT		(40)
-#define KVM_PHYS_SIZE		(_AC(1, ULL) << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK		(KVM_PHYS_SIZE - _AC(1, ULL))
+
 #define PTRS_PER_S2_PGD		(_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
 
 /* Virtualization Translation Control Register (VTCR) bits */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 265ea9cf7df7..12ae5fbbcf01 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -35,16 +35,12 @@
 	addr;							\
 })
 
-/*
- * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
- */
-#define KVM_MMU_CACHE_MIN_PAGES	2
-
 #ifndef __ASSEMBLY__
 
 #include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
+#include <asm/kvm_arm.h>
 #include <asm/kvm_hyp.h>
 #include <asm/pgalloc.h>
 #include <asm/stage2_pgtable.h>
@@ -52,6 +48,13 @@
 /* Ensure compatibility with arm64 */
 #define VA_BITS			32
 
+#define kvm_phys_shift(kvm)		KVM_PHYS_SHIFT
+#define kvm_phys_size(kvm)		(1ULL << kvm_phys_shift(kvm))
+#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - 1ULL)
+#define kvm_vttbr_baddr_mask(kvm)	VTTBR_BADDR_MASK
+
+#define stage2_pgd_size(kvm)		(PTRS_PER_S2_PGD * sizeof(pgd_t))
+
 int create_hyp_mappings(void *from, void *to, pgprot_t prot);
 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
 			   void __iomem **kaddr,
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index 460d616bb2d6..f6a7ea805232 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -19,43 +19,53 @@
 #ifndef __ARM_S2_PGTABLE_H_
 #define __ARM_S2_PGTABLE_H_
 
-#define stage2_pgd_none(pgd)			pgd_none(pgd)
-#define stage2_pgd_clear(pgd)			pgd_clear(pgd)
-#define stage2_pgd_present(pgd)			pgd_present(pgd)
-#define stage2_pgd_populate(pgd, pud)		pgd_populate(NULL, pgd, pud)
-#define stage2_pud_offset(pgd, address)		pud_offset(pgd, address)
-#define stage2_pud_free(pud)			pud_free(NULL, pud)
-
-#define stage2_pud_none(pud)			pud_none(pud)
-#define stage2_pud_clear(pud)			pud_clear(pud)
-#define stage2_pud_present(pud)			pud_present(pud)
-#define stage2_pud_populate(pud, pmd)		pud_populate(NULL, pud, pmd)
-#define stage2_pmd_offset(pud, address)		pmd_offset(pud, address)
-#define stage2_pmd_free(pmd)			pmd_free(NULL, pmd)
-
-#define stage2_pud_huge(pud)			pud_huge(pud)
+/*
+ * kvm_mmu_cache_min_pages() is the number of pages required
+ * to install a stage-2 translation. We pre-allocate the entry
+ * level table at VM creation. Since we have a 3 level page-table,
+ * we need only two pages to add a new mapping.
+ */
+#define kvm_mmu_cache_min_pages(kvm)	2
+
+#define stage2_pgd_none(kvm, pgd)		pgd_none(pgd)
+#define stage2_pgd_clear(kvm, pgd)		pgd_clear(pgd)
+#define stage2_pgd_present(kvm, pgd)		pgd_present(pgd)
+#define stage2_pgd_populate(kvm, pgd, pud)	pgd_populate(NULL, pgd, pud)
+#define stage2_pud_offset(kvm, pgd, address)	pud_offset(pgd, address)
+#define stage2_pud_free(kvm, pud)		pud_free(NULL, pud)
+
+#define stage2_pud_none(kvm, pud)		pud_none(pud)
+#define stage2_pud_clear(kvm, pud)		pud_clear(pud)
+#define stage2_pud_present(kvm, pud)		pud_present(pud)
+#define stage2_pud_populate(kvm, pud, pmd)	pud_populate(NULL, pud, pmd)
+#define stage2_pmd_offset(kvm, pud, address)	pmd_offset(pud, address)
+#define stage2_pmd_free(kvm, pmd)		pmd_free(NULL, pmd)
+
+#define stage2_pud_huge(kvm, pud)		pud_huge(pud)
 
 /* Open coded p*d_addr_end that can deal with 64bit addresses */
-static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline phys_addr_t
+stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 {
 	phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
 
 	return (boundary - 1 < end - 1) ? boundary : end;
 }
 
-#define stage2_pud_addr_end(addr, end)		(end)
+#define stage2_pud_addr_end(kvm, addr, end)	(end)
 
-static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline phys_addr_t
+stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 {
 	phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;
 
 	return (boundary - 1 < end - 1) ? boundary : end;
 }
 
-#define stage2_pgd_index(addr)			pgd_index(addr)
+#define stage2_pgd_index(kvm, addr)		pgd_index(addr)
 
-#define stage2_pte_table_empty(ptep)		kvm_page_empty(ptep)
-#define stage2_pmd_table_empty(pmdp)		kvm_page_empty(pmdp)
-#define stage2_pud_table_empty(pudp)		false
+#define stage2_pte_table_empty(kvm, ptep)	kvm_page_empty(ptep)
+#define stage2_pmd_table_empty(kvm, pmdp)	kvm_page_empty(pmdp)
+#define stage2_pud_table_empty(kvm, pudp)	false
 
 #endif	/* __ARM_S2_PGTABLE_H_ */
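The arm32 numbers in the new comment can be checked by hand: the entry level
indexes IPA bits [39:30], and installing a missing mapping needs at most a PMD
table page plus a PTE table page. A small sketch of that arithmetic (assuming
the 8-byte LPAE pgd_t):

#include <assert.h>

int main(void)
{
	const unsigned int kvm_phys_shift = 40;		/* KVM_PHYS_SHIFT */

	/* PTRS_PER_S2_PGD = 1ULL << (KVM_PHYS_SHIFT - 30) */
	unsigned int ptrs_per_s2_pgd = 1U << (kvm_phys_shift - 30);
	unsigned long pgd_bytes = ptrs_per_s2_pgd * 8UL;	/* sizeof(pgd_t) */

	assert(ptrs_per_s2_pgd == 1024);
	assert(pgd_bytes == 8192);	/* stage2_pgd_size(kvm): 8KB, pre-allocated */
	/* 3 levels, entry level pre-allocated => kvm_mmu_cache_min_pages(kvm) == 2 */
	return 0;
}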
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index d6fff7de5539..3a032066e52c 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -141,8 +141,11 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
  * We currently only support a 40bit IPA.
  */
 #define KVM_PHYS_SHIFT	(40)
-#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
+
+#define kvm_phys_shift(kvm)		KVM_PHYS_SHIFT
+#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
+#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
+#define kvm_vttbr_baddr_mask(kvm)	VTTBR_BADDR_MASK
 
 #include <asm/stage2_pgtable.h>
 
diff --git a/arch/arm64/include/asm/stage2_pgtable-nopmd.h b/arch/arm64/include/asm/stage2_pgtable-nopmd.h
index 2656a0fd05a6..0280dedbf75f 100644
--- a/arch/arm64/include/asm/stage2_pgtable-nopmd.h
+++ b/arch/arm64/include/asm/stage2_pgtable-nopmd.h
@@ -26,17 +26,17 @@
 #define S2_PMD_SIZE		(1UL << S2_PMD_SHIFT)
 #define S2_PMD_MASK		(~(S2_PMD_SIZE-1))
 
-#define stage2_pud_none(pud)			(0)
-#define stage2_pud_present(pud)			(1)
-#define stage2_pud_clear(pud)			do { } while (0)
-#define stage2_pud_populate(pud, pmd)		do { } while (0)
-#define stage2_pmd_offset(pud, address)		((pmd_t *)(pud))
+#define stage2_pud_none(kvm, pud)		(0)
+#define stage2_pud_present(kvm, pud)		(1)
+#define stage2_pud_clear(kvm, pud)		do { } while (0)
+#define stage2_pud_populate(kvm, pud, pmd)	do { } while (0)
+#define stage2_pmd_offset(kvm, pud, address)	((pmd_t *)(pud))
 
-#define stage2_pmd_free(pmd)			do { } while (0)
+#define stage2_pmd_free(kvm, pmd)		do { } while (0)
 
-#define stage2_pmd_addr_end(addr, end)		(end)
+#define stage2_pmd_addr_end(kvm, addr, end)	(end)
 
-#define stage2_pud_huge(pud)			(0)
-#define stage2_pmd_table_empty(pmdp)		(0)
+#define stage2_pud_huge(kvm, pud)		(0)
+#define stage2_pmd_table_empty(kvm, pmdp)	(0)
 
 #endif
diff --git a/arch/arm64/include/asm/stage2_pgtable-nopud.h b/arch/arm64/include/asm/stage2_pgtable-nopud.h
index 5ee87b54ebf3..cd6304e203be 100644
--- a/arch/arm64/include/asm/stage2_pgtable-nopud.h
+++ b/arch/arm64/include/asm/stage2_pgtable-nopud.h
@@ -24,16 +24,16 @@
 #define S2_PUD_SIZE		(_AC(1, UL) << S2_PUD_SHIFT)
 #define S2_PUD_MASK		(~(S2_PUD_SIZE-1))
 
-#define stage2_pgd_none(pgd)			(0)
-#define stage2_pgd_present(pgd)			(1)
-#define stage2_pgd_clear(pgd)			do { } while (0)
-#define stage2_pgd_populate(pgd, pud)		do { } while (0)
+#define stage2_pgd_none(kvm, pgd)		(0)
+#define stage2_pgd_present(kvm, pgd)		(1)
+#define stage2_pgd_clear(kvm, pgd)		do { } while (0)
+#define stage2_pgd_populate(kvm, pgd, pud)	do { } while (0)
 
-#define stage2_pud_offset(pgd, address)		((pud_t *)(pgd))
+#define stage2_pud_offset(kvm, pgd, address)	((pud_t *)(pgd))
 
-#define stage2_pud_free(x)			do { } while (0)
+#define stage2_pud_free(kvm, x)			do { } while (0)
 
-#define stage2_pud_addr_end(addr, end)		(end)
-#define stage2_pud_table_empty(pmdp)		(0)
+#define stage2_pud_addr_end(kvm, addr, end)	(end)
+#define stage2_pud_table_empty(kvm, pmdp)	(0)
 
 #endif
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
index 8b68099348e5..11891612be14 100644
--- a/arch/arm64/include/asm/stage2_pgtable.h
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -55,7 +55,7 @@
 
 /* S2_PGDIR_SHIFT is the size mapped by top-level stage2 entry */
 #define S2_PGDIR_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - STAGE2_PGTABLE_LEVELS)
-#define S2_PGDIR_SIZE		(_AC(1, UL) << S2_PGDIR_SHIFT)
+#define S2_PGDIR_SIZE		(1UL << S2_PGDIR_SHIFT)
 #define S2_PGDIR_MASK		(~(S2_PGDIR_SIZE - 1))
 
 /*
@@ -65,28 +65,30 @@
 #define PTRS_PER_S2_PGD		(1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT))
 
 /*
- * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
- * levels in addition to the PGD.
+ * kvm_mmu_cache_min_pages() is the number of pages required to install
+ * a stage-2 translation. We pre-allocate the entry level page table at
+ * the VM creation.
  */
-#define KVM_MMU_CACHE_MIN_PAGES	(STAGE2_PGTABLE_LEVELS - 1)
+#define kvm_mmu_cache_min_pages(kvm)	(STAGE2_PGTABLE_LEVELS - 1)
 
 
 #if STAGE2_PGTABLE_LEVELS > 3
 
 #define S2_PUD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
-#define S2_PUD_SIZE		(_AC(1, UL) << S2_PUD_SHIFT)
+#define S2_PUD_SIZE		(1UL << S2_PUD_SHIFT)
 #define S2_PUD_MASK		(~(S2_PUD_SIZE - 1))
 
-#define stage2_pgd_none(pgd)			pgd_none(pgd)
-#define stage2_pgd_clear(pgd)			pgd_clear(pgd)
-#define stage2_pgd_present(pgd)			pgd_present(pgd)
-#define stage2_pgd_populate(pgd, pud)		pgd_populate(NULL, pgd, pud)
-#define stage2_pud_offset(pgd, address)		pud_offset(pgd, address)
-#define stage2_pud_free(pud)			pud_free(NULL, pud)
+#define stage2_pgd_none(kvm, pgd)		pgd_none(pgd)
+#define stage2_pgd_clear(kvm, pgd)		pgd_clear(pgd)
+#define stage2_pgd_present(kvm, pgd)		pgd_present(pgd)
+#define stage2_pgd_populate(kvm, pgd, pud)	pgd_populate(NULL, pgd, pud)
+#define stage2_pud_offset(kvm, pgd, address)	pud_offset(pgd, address)
+#define stage2_pud_free(kvm, pud)		pud_free(NULL, pud)
 
-#define stage2_pud_table_empty(pudp)		kvm_page_empty(pudp)
+#define stage2_pud_table_empty(kvm, pudp)	kvm_page_empty(pudp)
 
-static inline phys_addr_t stage2_pud_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline phys_addr_t
+stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 {
 	phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;
 
@@ -99,20 +101,21 @@ static inline phys_addr_t stage2_pud_addr_end(phys_addr_t addr, phys_addr_t end)
 #if STAGE2_PGTABLE_LEVELS > 2
 
 #define S2_PMD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
-#define S2_PMD_SIZE		(_AC(1, UL) << S2_PMD_SHIFT)
+#define S2_PMD_SIZE		(1UL << S2_PMD_SHIFT)
 #define S2_PMD_MASK		(~(S2_PMD_SIZE - 1))
 
-#define stage2_pud_none(pud)			pud_none(pud)
-#define stage2_pud_clear(pud)			pud_clear(pud)
-#define stage2_pud_present(pud)			pud_present(pud)
-#define stage2_pud_populate(pud, pmd)		pud_populate(NULL, pud, pmd)
-#define stage2_pmd_offset(pud, address)		pmd_offset(pud, address)
-#define stage2_pmd_free(pmd)			pmd_free(NULL, pmd)
+#define stage2_pud_none(kvm, pud)		pud_none(pud)
+#define stage2_pud_clear(kvm, pud)		pud_clear(pud)
+#define stage2_pud_present(kvm, pud)		pud_present(pud)
+#define stage2_pud_populate(kvm, pud, pmd)	pud_populate(NULL, pud, pmd)
+#define stage2_pmd_offset(kvm, pud, address)	pmd_offset(pud, address)
+#define stage2_pmd_free(kvm, pmd)		pmd_free(NULL, pmd)
 
-#define stage2_pud_huge(pud)			pud_huge(pud)
-#define stage2_pmd_table_empty(pmdp)		kvm_page_empty(pmdp)
+#define stage2_pud_huge(kvm, pud)		pud_huge(pud)
+#define stage2_pmd_table_empty(kvm, pmdp)	kvm_page_empty(pmdp)
 
-static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline phys_addr_t
+stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 {
 	phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;
 
@@ -121,7 +124,7 @@ static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
 
 #endif	/* STAGE2_PGTABLE_LEVELS > 2 */
 
-#define stage2_pte_table_empty(ptep)		kvm_page_empty(ptep)
+#define stage2_pte_table_empty(kvm, ptep)	kvm_page_empty(ptep)
 
 #if STAGE2_PGTABLE_LEVELS == 2
 #include <asm/stage2_pgtable-nopmd.h>
@@ -129,10 +132,13 @@ static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
 #include <asm/stage2_pgtable-nopud.h>
 #endif
 
+#define stage2_pgd_size(kvm)	(PTRS_PER_S2_PGD * sizeof(pgd_t))
 
-#define stage2_pgd_index(addr)	(((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+#define stage2_pgd_index(kvm, addr) \
+	(((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
 
-static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline phys_addr_t
+stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 {
 	phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK;
 
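On arm64 the same quantities fall out of STAGE2_PGTABLE_LEVELS. A sketch under
stated assumptions (4K pages and the current 40-bit IPA, for which
STAGE2_PGTABLE_LEVELS works out to 3):

#include <assert.h>

int main(void)
{
	const unsigned int page_shift = 12;	/* assumed: 4K pages */
	const unsigned int levels = 3;		/* assumed: STAGE2_PGTABLE_LEVELS */
	const unsigned int kvm_phys_shift = 40;	/* KVM_PHYS_SHIFT */

	/* S2_PGDIR_SHIFT = ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - levels)
	 *                = (PAGE_SHIFT - 3) * levels + 3 */
	unsigned int s2_pgdir_shift = (page_shift - 3) * levels + 3;

	assert(s2_pgdir_shift == 30);
	/* PTRS_PER_S2_PGD = 1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT) */
	assert((1U << (kvm_phys_shift - s2_pgdir_shift)) == 1024);
	/* kvm_mmu_cache_min_pages(kvm) = STAGE2_PGTABLE_LEVELS - 1 */
	assert(levels - 1 == 2);
	return 0;
}

Under those assumptions stage2_pgd_size(kvm) is again 1024 * sizeof(pgd_t) =
8KB, matching the arm32 side.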
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 327d0fd28380..43e716bc3f08 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -545,7 +545,7 @@ static void update_vttbr(struct kvm *kvm)
 
 	/* update vttbr to be used with the new vmid */
 	pgd_phys = virt_to_phys(kvm->arch.pgd);
-	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
+	BUG_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm));
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
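For context on the BUG_ON: the vttbr packs the stage2 pgd base into the BADDR
field with the VMID above it. A simplified sketch, not the kernel's code
(VMID field at bit 48, per VTTBR_VMID_SHIFT):

#include <stdint.h>

/* Illustration: how update_vttbr() composes the register value.
 * baddr_mask stands for kvm_vttbr_baddr_mask(kvm); the BUG_ON above
 * asserts that pgd_phys has no bits outside that field. */
static uint64_t make_vttbr(uint64_t pgd_phys, uint64_t vmid, uint64_t baddr_mask)
{
	return (pgd_phys & baddr_mask) | (vmid << 48);
}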
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 4a285d760ce0..7e477b3cae5b 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -45,7 +45,6 @@ static phys_addr_t hyp_idmap_vector;
 
 static unsigned long io_map_base;
 
-#define S2_PGD_SIZE	(PTRS_PER_S2_PGD * sizeof(pgd_t))
 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
 
 #define KVM_S2PTE_FLAG_IS_IOMAP	(1UL << 0)
@@ -150,20 +149,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 
 static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
 {
-	pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
-	stage2_pgd_clear(pgd);
+	pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
+	stage2_pgd_clear(kvm, pgd);
 	kvm_tlb_flush_vmid_ipa(kvm, addr);
-	stage2_pud_free(pud_table);
+	stage2_pud_free(kvm, pud_table);
 	put_page(virt_to_page(pgd));
 }
 
 static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
-	pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
-	VM_BUG_ON(stage2_pud_huge(*pud));
-	stage2_pud_clear(pud);
+	pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
+	VM_BUG_ON(stage2_pud_huge(kvm, *pud));
+	stage2_pud_clear(kvm, pud);
 	kvm_tlb_flush_vmid_ipa(kvm, addr);
-	stage2_pmd_free(pmd_table);
+	stage2_pmd_free(kvm, pmd_table);
 	put_page(virt_to_page(pud));
 }
 
@@ -252,7 +251,7 @@ static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
-	if (stage2_pte_table_empty(start_pte))
+	if (stage2_pte_table_empty(kvm, start_pte))
 		clear_stage2_pmd_entry(kvm, pmd, start_addr);
 }
 
@@ -262,9 +261,9 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
 	phys_addr_t next, start_addr = addr;
 	pmd_t *pmd, *start_pmd;
 
-	start_pmd = pmd = stage2_pmd_offset(pud, addr);
+	start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
 	do {
-		next = stage2_pmd_addr_end(addr, end);
+		next = stage2_pmd_addr_end(kvm, addr, end);
 		if (!pmd_none(*pmd)) {
 			if (pmd_thp_or_huge(*pmd)) {
 				pmd_t old_pmd = *pmd;
@@ -281,7 +280,7 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
 		}
 	} while (pmd++, addr = next, addr != end);
 
-	if (stage2_pmd_table_empty(start_pmd))
+	if (stage2_pmd_table_empty(kvm, start_pmd))
 		clear_stage2_pud_entry(kvm, pud, start_addr);
 }
 
@@ -291,14 +290,14 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
 	phys_addr_t next, start_addr = addr;
 	pud_t *pud, *start_pud;
 
-	start_pud = pud = stage2_pud_offset(pgd, addr);
+	start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
 	do {
-		next = stage2_pud_addr_end(addr, end);
-		if (!stage2_pud_none(*pud)) {
-			if (stage2_pud_huge(*pud)) {
+		next = stage2_pud_addr_end(kvm, addr, end);
+		if (!stage2_pud_none(kvm, *pud)) {
+			if (stage2_pud_huge(kvm, *pud)) {
 				pud_t old_pud = *pud;
 
-				stage2_pud_clear(pud);
+				stage2_pud_clear(kvm, pud);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
 				kvm_flush_dcache_pud(old_pud);
 				put_page(virt_to_page(pud));
@@ -308,7 +307,7 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
 		}
 	} while (pud++, addr = next, addr != end);
 
-	if (stage2_pud_table_empty(start_pud))
+	if (stage2_pud_table_empty(kvm, start_pud))
 		clear_stage2_pgd_entry(kvm, pgd, start_addr);
 }
 
@@ -332,7 +331,7 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	assert_spin_locked(&kvm->mmu_lock);
 	WARN_ON(size & ~PAGE_MASK);
 
-	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
 	do {
 		/*
 		 * Make sure the page table is still active, as another thread
@@ -341,8 +340,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 		 */
 		if (!READ_ONCE(kvm->arch.pgd))
 			break;
-		next = stage2_pgd_addr_end(addr, end);
-		if (!stage2_pgd_none(*pgd))
+		next = stage2_pgd_addr_end(kvm, addr, end);
+		if (!stage2_pgd_none(kvm, *pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);
 		/*
 		 * If the range is too large, release the kvm->mmu_lock
@@ -371,9 +370,9 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
 	pmd_t *pmd;
 	phys_addr_t next;
 
-	pmd = stage2_pmd_offset(pud, addr);
+	pmd = stage2_pmd_offset(kvm, pud, addr);
 	do {
-		next = stage2_pmd_addr_end(addr, end);
+		next = stage2_pmd_addr_end(kvm, addr, end);
 		if (!pmd_none(*pmd)) {
 			if (pmd_thp_or_huge(*pmd))
 				kvm_flush_dcache_pmd(*pmd);
@@ -389,11 +388,11 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
 	pud_t *pud;
 	phys_addr_t next;
 
-	pud = stage2_pud_offset(pgd, addr);
+	pud = stage2_pud_offset(kvm, pgd, addr);
 	do {
-		next = stage2_pud_addr_end(addr, end);
-		if (!stage2_pud_none(*pud)) {
-			if (stage2_pud_huge(*pud))
+		next = stage2_pud_addr_end(kvm, addr, end);
+		if (!stage2_pud_none(kvm, *pud)) {
+			if (stage2_pud_huge(kvm, *pud))
 				kvm_flush_dcache_pud(*pud);
 			else
 				stage2_flush_pmds(kvm, pud, addr, next);
@@ -409,10 +408,10 @@ static void stage2_flush_memslot(struct kvm *kvm,
 	phys_addr_t next;
 	pgd_t *pgd;
 
-	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
 	do {
-		next = stage2_pgd_addr_end(addr, end);
-		if (!stage2_pgd_none(*pgd))
+		next = stage2_pgd_addr_end(kvm, addr, end);
+		if (!stage2_pgd_none(kvm, *pgd))
 			stage2_flush_puds(kvm, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
@@ -898,7 +897,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	}
 
 	/* Allocate the HW PGD, making sure that each page gets its own refcount */
-	pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
+	pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
 	if (!pgd)
 		return -ENOMEM;
 
@@ -987,7 +986,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 
 	spin_lock(&kvm->mmu_lock);
 	if (kvm->arch.pgd) {
-		unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+		unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
 		pgd = READ_ONCE(kvm->arch.pgd);
 		kvm->arch.pgd = NULL;
 	}
@@ -995,7 +994,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 
 	/* Free the HW pgd, one page at a time */
 	if (pgd)
-		free_pages_exact(pgd, S2_PGD_SIZE);
+		free_pages_exact(pgd, stage2_pgd_size(kvm));
 }
 
 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1004,16 +1003,16 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pgd_t *pgd;
 	pud_t *pud;
 
-	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
-	if (stage2_pgd_none(*pgd)) {
+	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
+	if (stage2_pgd_none(kvm, *pgd)) {
 		if (!cache)
 			return NULL;
 		pud = mmu_memory_cache_alloc(cache);
-		stage2_pgd_populate(pgd, pud);
+		stage2_pgd_populate(kvm, pgd, pud);
 		get_page(virt_to_page(pgd));
 	}
 
-	return stage2_pud_offset(pgd, addr);
+	return stage2_pud_offset(kvm, pgd, addr);
 }
 
 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1026,15 +1025,15 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	if (!pud)
 		return NULL;
 
-	if (stage2_pud_none(*pud)) {
+	if (stage2_pud_none(kvm, *pud)) {
 		if (!cache)
 			return NULL;
 		pmd = mmu_memory_cache_alloc(cache);
-		stage2_pud_populate(pud, pmd);
+		stage2_pud_populate(kvm, pud, pmd);
 		get_page(virt_to_page(pud));
 	}
 
-	return stage2_pmd_offset(pud, addr);
+	return stage2_pmd_offset(kvm, pud, addr);
 }
 
 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
@@ -1208,8 +1207,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 	if (writable)
 		pte = kvm_s2pte_mkwrite(pte);
 
-	ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
-				     KVM_NR_MEM_OBJS);
+	ret = mmu_topup_memory_cache(&cache,
+				     kvm_mmu_cache_min_pages(kvm),
+				     KVM_NR_MEM_OBJS);
 	if (ret)
 		goto out;
 	spin_lock(&kvm->mmu_lock);
@@ -1297,19 +1297,21 @@ static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
 
 /**
  * stage2_wp_pmds - write protect PUD range
+ * kvm:		kvm instance for the VM
  * @pud:	pointer to pud entry
  * @addr:	range start address
  * @end:	range end address
  */
-static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
+			   phys_addr_t addr, phys_addr_t end)
 {
 	pmd_t *pmd;
 	phys_addr_t next;
 
-	pmd = stage2_pmd_offset(pud, addr);
+	pmd = stage2_pmd_offset(kvm, pud, addr);
 
 	do {
-		next = stage2_pmd_addr_end(addr, end);
+		next = stage2_pmd_addr_end(kvm, addr, end);
 		if (!pmd_none(*pmd)) {
 			if (pmd_thp_or_huge(*pmd)) {
 				if (!kvm_s2pmd_readonly(pmd))
@@ -1329,18 +1331,19 @@ static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
  *
  * Process PUD entries, for a huge PUD we cause a panic.
  */
-static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
+			   phys_addr_t addr, phys_addr_t end)
 {
 	pud_t *pud;
 	phys_addr_t next;
 
-	pud = stage2_pud_offset(pgd, addr);
+	pud = stage2_pud_offset(kvm, pgd, addr);
 	do {
-		next = stage2_pud_addr_end(addr, end);
-		if (!stage2_pud_none(*pud)) {
+		next = stage2_pud_addr_end(kvm, addr, end);
+		if (!stage2_pud_none(kvm, *pud)) {
 			/* TODO:PUD not supported, revisit later if supported */
-			BUG_ON(stage2_pud_huge(*pud));
-			stage2_wp_pmds(pud, addr, next);
+			BUG_ON(stage2_pud_huge(kvm, *pud));
+			stage2_wp_pmds(kvm, pud, addr, next);
 		}
 	} while (pud++, addr = next, addr != end);
 }
@@ -1356,7 +1359,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 	pgd_t *pgd;
 	phys_addr_t next;
 
-	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
 	do {
 		/*
 		 * Release kvm_mmu_lock periodically if the memory region is
@@ -1370,9 +1373,9 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 		cond_resched_lock(&kvm->mmu_lock);
 		if (!READ_ONCE(kvm->arch.pgd))
 			break;
-		next = stage2_pgd_addr_end(addr, end);
-		if (stage2_pgd_present(*pgd))
-			stage2_wp_puds(pgd, addr, next);
+		next = stage2_pgd_addr_end(kvm, addr, end);
+		if (stage2_pgd_present(kvm, *pgd))
+			stage2_wp_puds(kvm, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
 
1378 1381
@@ -1521,7 +1524,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	up_read(&current->mm->mmap_sem);
 
 	/* We need minimum second+third level pages */
-	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
+	ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
 				     KVM_NR_MEM_OBJS);
 	if (ret)
 		return ret;
@@ -1764,7 +1767,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	}
 
 	/* Userspace should not be able to register out-of-bounds IPAs */
-	VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
+	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
 
 	if (fault_status == FSC_ACCESS) {
 		handle_access_fault(vcpu, fault_ipa);
@@ -2063,7 +2066,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	 * space addressable by the KVM guest IPA space.
 	 */
 	if (memslot->base_gfn + memslot->npages >=
-	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
+	    (kvm_phys_size(kvm) >> PAGE_SHIFT))
 		return -EFAULT;
 
 	down_read(&current->mm->mmap_sem);
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 6ada2432e37c..114dce9f4bf5 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -25,7 +25,7 @@
 int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
 		      phys_addr_t addr, phys_addr_t alignment)
 {
-	if (addr & ~KVM_PHYS_MASK)
+	if (addr & ~kvm_phys_mask(kvm))
 		return -E2BIG;
 
 	if (!IS_ALIGNED(addr, alignment))
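With the default 40-bit IPA, kvm_phys_mask(kvm) preserves the old behaviour of
KVM_PHYS_MASK. A quick illustrative check of the range test above:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* kvm_phys_mask(kvm) when kvm_phys_shift(kvm) == 40 */
	uint64_t mask = (1ULL << 40) - 1;

	assert((0x0800000000ULL & ~mask) == 0);		/* 32GB: accepted */
	assert((0x20000000000ULL & ~mask) != 0);	/* 2TB: would return -E2BIG */
	return 0;
}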