author     Marc Zyngier <marc.zyngier@arm.com>       2013-04-12 14:12:01 -0400
committer  Christoffer Dall <cdall@cs.columbia.edu>  2013-04-29 01:23:07 -0400
commit     6060df84cbe0d36b8e1415b68e3f67b77f27052a (patch)
tree       4be162a9c983c9223fac27dc4824dd3666443245 /arch/arm/kvm
parent     372b7c1bc80510225ca91cba75bc0850a6e16c39 (diff)
ARM: KVM: simplify HYP mapping population
The way we populate HYP mappings is a bit convoluted, to say the least. Passing a pointer around to keep track of the current PFN is quite odd, and we end up having two different PTE accessors for no good reason.

Simplify the whole thing by unifying the two PTE accessors, passing a pgprot_t around, and moving the various validity checks to the upper layers.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@cs.columbia.edu>
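Condensed from the hunks below, the resulting entry points end up looking roughly like this; only the validity check and the page protection differ between the kernel-memory and I/O paths (a sketch distilled from the patch, not additional code):

/* Condensed from the patch below: one walker, two thin wrappers. */
int create_hyp_mappings(void *from, void *to)
{
	/* kernel lowmem only */
	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd,
				     KERN_TO_HYP((unsigned long)from),
				     KERN_TO_HYP((unsigned long)to),
				     __phys_to_pfn(virt_to_phys(from)),
				     PAGE_HYP);
}

int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	/* ioremap/vmalloc range only */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd,
				     KERN_TO_HYP((unsigned long)from),
				     KERN_TO_HYP((unsigned long)to),
				     __phys_to_pfn(phys_addr),
				     PAGE_HYP_DEVICE);
}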
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--  arch/arm/kvm/mmu.c  102
1 file changed, 42 insertions(+), 60 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 2f12e4056408..30033219df84 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -125,54 +125,34 @@ void free_hyp_pmds(void)
 }
 
 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
-				    unsigned long end)
+				    unsigned long end, unsigned long pfn,
+				    pgprot_t prot)
 {
 	pte_t *pte;
 	unsigned long addr;
-	struct page *page;
 
-	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
-
-		pte = pte_offset_kernel(pmd, hyp_addr);
-		BUG_ON(!virt_addr_valid(addr));
-		page = virt_to_page(addr);
-		kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
-	}
-}
-
-static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
-				       unsigned long end,
-				       unsigned long *pfn_base)
-{
-	pte_t *pte;
-	unsigned long addr;
-
-	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
-
-		pte = pte_offset_kernel(pmd, hyp_addr);
-		BUG_ON(pfn_valid(*pfn_base));
-		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
-		(*pfn_base)++;
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		pte = pte_offset_kernel(pmd, addr);
+		kvm_set_pte(pte, pfn_pte(pfn, prot));
+		pfn++;
 	}
 }
 
 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
-				   unsigned long end, unsigned long *pfn_base)
+				   unsigned long end, unsigned long pfn,
+				   pgprot_t prot)
 {
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long addr, next;
 
 	for (addr = start; addr < end; addr = next) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
-		pmd = pmd_offset(pud, hyp_addr);
+		pmd = pmd_offset(pud, addr);
 
 		BUG_ON(pmd_sect(*pmd));
 
 		if (pmd_none(*pmd)) {
-			pte = pte_alloc_one_kernel(NULL, hyp_addr);
+			pte = pte_alloc_one_kernel(NULL, addr);
 			if (!pte) {
 				kvm_err("Cannot allocate Hyp pte\n");
 				return -ENOMEM;
@@ -182,25 +162,17 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
 
 		next = pmd_addr_end(addr, end);
 
-		/*
-		 * If pfn_base is NULL, we map kernel pages into HYP with the
-		 * virtual address. Otherwise, this is considered an I/O
-		 * mapping and we map the physical region starting at
-		 * *pfn_base to [start, end[.
-		 */
-		if (!pfn_base)
-			create_hyp_pte_mappings(pmd, addr, next);
-		else
-			create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
+		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
+		pfn += (next - addr) >> PAGE_SHIFT;
 	}
 
 	return 0;
 }
 
-static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
+static int __create_hyp_mappings(pgd_t *pgdp,
+				 unsigned long start, unsigned long end,
+				 unsigned long pfn, pgprot_t prot)
 {
-	unsigned long start = (unsigned long)from;
-	unsigned long end = (unsigned long)to;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
@@ -209,21 +181,14 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
 
 	if (start >= end)
 		return -EINVAL;
-	/* Check for a valid kernel memory mapping */
-	if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1)))
-		return -EINVAL;
-	/* Check for a valid kernel IO mapping */
-	if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)))
-		return -EINVAL;
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
-	for (addr = start; addr < end; addr = next) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
-		pgd = hyp_pgd + pgd_index(hyp_addr);
-		pud = pud_offset(pgd, hyp_addr);
+	for (addr = start & PAGE_MASK; addr < end; addr = next) {
+		pgd = pgdp + pgd_index(addr);
+		pud = pud_offset(pgd, addr);
 
 		if (pud_none_or_clear_bad(pud)) {
-			pmd = pmd_alloc_one(NULL, hyp_addr);
+			pmd = pmd_alloc_one(NULL, addr);
 			if (!pmd) {
 				kvm_err("Cannot allocate Hyp pmd\n");
 				err = -ENOMEM;
@@ -233,9 +198,10 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
 		}
 
 		next = pgd_addr_end(addr, end);
-		err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
+		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
 		if (err)
 			goto out;
+		pfn += (next - addr) >> PAGE_SHIFT;
 	}
 out:
 	mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -255,22 +221,38 @@ out:
  */
 int create_hyp_mappings(void *from, void *to)
 {
-	return __create_hyp_mappings(from, to, NULL);
+	unsigned long phys_addr = virt_to_phys(from);
+	unsigned long start = KERN_TO_HYP((unsigned long)from);
+	unsigned long end = KERN_TO_HYP((unsigned long)to);
+
+	/* Check for a valid kernel memory mapping */
+	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
+		return -EINVAL;
+
+	return __create_hyp_mappings(hyp_pgd, start, end,
+				     __phys_to_pfn(phys_addr), PAGE_HYP);
 }
 
 /**
  * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
  * @from: The kernel start VA of the range
  * @to: The kernel end VA of the range (exclusive)
- * @addr: The physical start address which gets mapped
+ * @phys_addr: The physical start address which gets mapped
  *
  * The resulting HYP VA is the same as the kernel VA, modulo
  * HYP_PAGE_OFFSET.
  */
-int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 {
-	unsigned long pfn = __phys_to_pfn(addr);
-	return __create_hyp_mappings(from, to, &pfn);
+	unsigned long start = KERN_TO_HYP((unsigned long)from);
+	unsigned long end = KERN_TO_HYP((unsigned long)to);
+
+	/* Check for a valid kernel IO mapping */
+	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
+		return -EINVAL;
+
+	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
 /**
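For context, a hypothetical caller, sketched only for illustration (the names below are invented and do not appear in this patch), would now map a regular kernel buffer and a device MMIO window through the same pair of helpers, with the validity checks handled inside the wrappers:

/*
 * Hypothetical usage sketch -- function and variable names are invented
 * for illustration and are not part of this patch.
 */
static int map_regions_into_hyp(void *kbuf, size_t kbuf_len,
				void __iomem *mmio_va, phys_addr_t mmio_pa,
				size_t mmio_len)
{
	int err;

	/* Lowmem pages: pfn derived from the kernel VA, mapped with PAGE_HYP. */
	err = create_hyp_mappings(kbuf, kbuf + kbuf_len);
	if (err)
		return err;

	/* ioremap'd MMIO window: backed by mmio_pa, mapped with PAGE_HYP_DEVICE. */
	return create_hyp_io_mappings((void *)mmio_va,
				      (void *)mmio_va + mmio_len,
				      mmio_pa);
}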