author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-18 17:32:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-18 17:32:31 -0400
commit	8a5de18239e418fe7b1f36504834689f754d8ccc (patch)
tree	8d05ae77da1d4a8512b6052e2ba23571543666c7 /arch/arm64
parent	857b50f5d0eed113428c864e927289d8f5f2b864 (diff)
parent	2df36a5dd6792870bef48f63bfca42055ea5b79c (diff)
Merge tag 'kvm-arm-for-3.18-take-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm
Pull second batch of changes for KVM/{arm,arm64} from Marc Zyngier:
 "The most obvious part is the sizeable set of MMU changes to support
  48bit VAs on arm64.

  Summary:
  - support for 48bit IPA and VA (EL2)
  - a number of fixes for devices mapped into guests
  - yet another VGIC fix for BE
  - a fix for CPU hotplug
  - a few compile fixes (disabled VGIC, strict mm checks)"

[ I'm pulling directly from Marc at the request of Paolo Bonzini, whose
  backpack was stolen at Düsseldorf airport and who will generate new keys
  and rebuild his web of trust.  - Linus ]

* tag 'kvm-arm-for-3.18-take-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm:
  arm/arm64: KVM: Fix BE accesses to GICv2 EISR and ELRSR regs
  arm: kvm: STRICT_MM_TYPECHECKS fix for user_mem_abort
  arm/arm64: KVM: Ensure memslots are within KVM_PHYS_SIZE
  arm64: KVM: Implement 48 VA support for KVM EL2 and Stage-2
  arm/arm64: KVM: map MMIO regions at creation time
  arm64: kvm: define PAGE_S2_DEVICE as read-only by default
  ARM: kvm: define PAGE_S2_DEVICE as read-only by default
  arm/arm64: KVM: add 'writable' parameter to kvm_phys_addr_ioremap
  arm/arm64: KVM: fix potential NULL dereference in user_mem_abort()
  arm/arm64: KVM: use __GFP_ZERO not memset() to get zeroed pages
  ARM: KVM: fix vgic-disabled build
  arm: kvm: fix CPU hotplug
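Two of the commits above interact: kvm_phys_addr_ioremap() gains a
'writable' parameter while PAGE_S2_DEVICE becomes read-only by default, so
guest-writable device mappings are now opt-in. A minimal sketch of a caller
(hypothetical values; only the prototype is taken from the kvm_mmu.h hunk
below, the rest is illustration):

	/* Map a hypothetical 64kB MMIO window into the guest read-only;
	 * the pte template used is the now read-only PAGE_S2_DEVICE. */
	ret = kvm_phys_addr_ioremap(kvm, guest_ipa, pa, SZ_64K, false);

	/* The same window, but guest-writable: write permission is added
	 * per-pte by the implementation rather than by the template. */
	ret = kvm_phys_addr_ioremap(kvm, guest_ipa, pa, SZ_64K, true);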
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h	| 127
-rw-r--r--	arch/arm64/include/asm/pgtable.h	|   2
-rw-r--r--	arch/arm64/kvm/vgic-v2-switch.S	|  12
3 files changed, 126 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index a030d163840b..0caf7a59f6a1 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -41,6 +41,18 @@
  */
 #define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
 
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
+ * levels in addition to the PGD and potentially the PUD which are
+ * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
+ * tables use one level of tables less than the kernel).
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define KVM_MMU_CACHE_MIN_PAGES	1
+#else
+#define KVM_MMU_CACHE_MIN_PAGES	2
+#endif
+
 #ifdef __ASSEMBLY__
 
 /*
@@ -53,6 +65,7 @@
 
 #else
 
+#include <asm/pgalloc.h>
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
 
@@ -65,10 +78,6 @@
 #define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
 #define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
 
-/* Make sure we get the right size, and thus the right alignment */
-#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
-#define S2_PGD_ORDER	get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
-
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_boot_hyp_pgd(void);
@@ -77,7 +86,7 @@ void free_hyp_pgds(void);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size);
+			  phys_addr_t pa, unsigned long size, bool writable);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
@@ -93,6 +102,7 @@ void kvm_clear_hyp_idmap(void);
 #define kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)
 
 static inline void kvm_clean_pgd(pgd_t *pgd) {}
+static inline void kvm_clean_pmd(pmd_t *pmd) {}
 static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
 static inline void kvm_clean_pte(pte_t *pte) {}
 static inline void kvm_clean_pte_entry(pte_t *pte) {}
@@ -111,19 +121,116 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 #define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
 #define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)
 
+/*
+ * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can
+ * address the entire IPA input range with a single pgd entry, so only one
+ * is needed.  Note that in this case, the pgd is actually not used by
+ * the MMU for Stage-2 translations, but is merely a fake pgd used as a data
+ * structure for the kernel pgtable macros to work.
+ */
+#if PGDIR_SHIFT > KVM_PHYS_SHIFT
+#define PTRS_PER_S2_PGD_SHIFT	0
+#else
+#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
+#endif
+#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
+#define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+
+/*
+ * If we are concatenating first level stage-2 page tables, we would have
+ * at most 16 pointers in the fake PGD, because that's what the
+ * architecture allows.  In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS)
+ * represents the first level for the host, and we add 1 to go to the next
+ * level (which uses concatenation) for the stage-2 tables.
+ */
+#if PTRS_PER_S2_PGD <= 16
+#define KVM_PREALLOC_LEVEL	(4 - CONFIG_ARM64_PGTABLE_LEVELS + 1)
+#else
+#define KVM_PREALLOC_LEVEL	(0)
+#endif
+
+/**
+ * kvm_prealloc_hwpgd - allocate initial table for VTTBR
+ * @kvm:	The KVM struct pointer for the VM.
+ * @pgd:	The kernel pseudo pgd
+ *
+ * When the kernel uses more levels of page tables than the guest, we allocate
+ * a fake PGD and pre-populate it to point to the next-level page table, which
+ * will be the real initial page table pointed to by the VTTBR.
+ *
+ * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
+ * the kernel will use a folded pud.  When KVM_PREALLOC_LEVEL==1, we
+ * allocate 2 consecutive PUD pages.
+ */
+static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+{
+	unsigned int i;
+	unsigned long hwpgd;
+
+	if (KVM_PREALLOC_LEVEL == 0)
+		return 0;
+
+	hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
+	if (!hwpgd)
+		return -ENOMEM;
+
+	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+		if (KVM_PREALLOC_LEVEL == 1)
+			pgd_populate(NULL, pgd + i,
+				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
+		else if (KVM_PREALLOC_LEVEL == 2)
+			pud_populate(NULL, pud_offset(pgd, 0) + i,
+				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+	}
+
+	return 0;
+}
+
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
+{
+	pgd_t *pgd = kvm->arch.pgd;
+	pud_t *pud;
+
+	if (KVM_PREALLOC_LEVEL == 0)
+		return pgd;
+
+	pud = pud_offset(pgd, 0);
+	if (KVM_PREALLOC_LEVEL == 1)
+		return pud;
+
+	BUG_ON(KVM_PREALLOC_LEVEL != 2);
+	return pmd_offset(pud, 0);
+}
+
+static inline void kvm_free_hwpgd(struct kvm *kvm)
+{
+	if (KVM_PREALLOC_LEVEL > 0) {
+		unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
+		free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
+	}
+}
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
 	return page_count(ptr_page) == 1;
 }
 
-#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
-#ifndef CONFIG_ARM64_64K_PAGES
-#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#else
-#define kvm_pmd_table_empty(pmdp) (0)
-#endif
-#define kvm_pud_table_empty(pudp) (0)
+#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define kvm_pmd_table_empty(kvm, pmdp) (0)
+#else
+#define kvm_pmd_table_empty(kvm, pmdp) \
+	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
+#endif
+
+#ifdef __PAGETABLE_PUD_FOLDED
+#define kvm_pud_table_empty(kvm, pudp) (0)
+#else
+#define kvm_pud_table_empty(kvm, pudp) \
+	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
+#endif
 
 
 struct kvm;
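To make the KVM_PREALLOC_LEVEL selection in the hunk above concrete, here is
the arithmetic for three common configurations (worked examples assuming
KVM_PHYS_SHIFT == 40; these numbers are mine, not part of the patch):

	/* 4K pages, 39-bit VA, 3 levels: PGDIR_SHIFT == 30, so
	 * PTRS_PER_S2_PGD == 1 << (40 - 30) == 1024 > 16
	 * -> KVM_PREALLOC_LEVEL == 0: the pgd is real, nothing pre-allocated.
	 *
	 * 4K pages, 48-bit VA, 4 levels: PGDIR_SHIFT == 39, so
	 * PTRS_PER_S2_PGD == 2 <= 16
	 * -> KVM_PREALLOC_LEVEL == 4 - 4 + 1 == 1: kvm_prealloc_hwpgd()
	 *    allocates two consecutive PUD pages, i.e. the concatenated
	 *    stage-2 first level the architecture allows.
	 *
	 * 64K pages, 48-bit VA, 3 levels: PGDIR_SHIFT == 42 > KVM_PHYS_SHIFT,
	 * so PTRS_PER_S2_PGD == 1
	 * -> KVM_PREALLOC_LEVEL == 4 - 3 + 1 == 2: a single PMD page is
	 *    pre-allocated and the pud level is folded.
	 */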
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index cefd3e825612..41a43bf26492 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -79,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
 #define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
+#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
 #define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
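The one-bit change above flips the device-mapping default from RDWR to
RDONLY; write permission now has to be granted explicitly per mapping. A
minimal sketch of the intended usage (kvm_set_s2pte_writable() is an
existing helper in kvm_mmu.h; its exact body is assumed here, not shown in
this diff):

	pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); /* read-only, XN device pte */

	if (writable)				/* opt-in, per mapping */
		kvm_set_s2pte_writable(&pte);	/* ORs in PTE_S2_RDWR */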
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
index ae211772f991..f002fe1c3700 100644
--- a/arch/arm64/kvm/vgic-v2-switch.S
+++ b/arch/arm64/kvm/vgic-v2-switch.S
@@ -67,10 +67,14 @@ CPU_BE( rev w11, w11 )
 	str	w4, [x3, #VGIC_V2_CPU_HCR]
 	str	w5, [x3, #VGIC_V2_CPU_VMCR]
 	str	w6, [x3, #VGIC_V2_CPU_MISR]
-	str	w7, [x3, #VGIC_V2_CPU_EISR]
-	str	w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
-	str	w9, [x3, #VGIC_V2_CPU_ELRSR]
-	str	w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+CPU_LE(	str	w7, [x3, #VGIC_V2_CPU_EISR] )
+CPU_LE(	str	w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_LE(	str	w9, [x3, #VGIC_V2_CPU_ELRSR] )
+CPU_LE(	str	w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE(	str	w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_BE(	str	w8, [x3, #VGIC_V2_CPU_EISR] )
+CPU_BE(	str	w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE(	str	w10, [x3, #VGIC_V2_CPU_ELRSR] )
 	str	w11, [x3, #VGIC_V2_CPU_APR]
 
 	/* Clear GICH_HCR */
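For context on the CPU_LE()/CPU_BE() pairs above: the vgic code stores the
two 32-bit EISR (and ELRSR) words but reads each pair back as a single
64-bit bitmap, so the byte offset of the low word depends on endianness. A
standalone C illustration of the aliasing (not code from this patch):

	#include <stdint.h>

	union gich_pair {
		uint64_t whole;		/* what the C code reads back */
		uint32_t half[2];	/* what the save sequence stores */
	};

	/* On a little-endian CPU, half[0] aliases bits 0..31 of 'whole';
	 * on a big-endian CPU it aliases bits 32..63.  Storing EISR0 (the
	 * low word) at offset 0 is therefore only correct on LE; on BE it
	 * must go to offset 4, which is what the CPU_BE() variants do. */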