author     Christoffer Dall <c.dall@virtualopensystems.com>  2013-01-20 18:28:07 -0500
committer  Christoffer Dall <c.dall@virtualopensystems.com>  2013-01-23 13:29:11 -0500
commit     d5d8184d35c990b1324d9b30bcd0e4e8aa08f56d
tree       7559fbc98d2b347f89f926b39715d3bf3cfbb719 /arch
parent     342cd0ab0e6ca3fe7c88a78890352748b8e894a9
KVM: ARM: Memory virtualization setup
This commit introduces the framework for guest memory management through the
use of 2nd stage translation. Each VM has a pointer to a level-1 table (the
pgd field in struct kvm_arch) which is used for the 2nd stage translations.
Entries are added when handling guest faults (later patch) and the table itself
can be allocated and freed through the following functions implemented in
arch/arm/kvm/mmu.c:
 - kvm_alloc_stage2_pgd(struct kvm *kvm);
 - kvm_free_stage2_pgd(struct kvm *kvm);

Each entry in the TLBs and caches is tagged with a VMID identifier in addition
to ASIDs. The VMIDs are assigned consecutively to VMs in the order that the VMs
are executed, and the caches and TLBs are invalidated when the VMID space is
exhausted, allowing for more than 255 simultaneously running guests.

The 2nd stage pgd is allocated in kvm_arch_init_vm() and the table is freed in
kvm_arch_destroy_vm(); both functions are called from the main KVM code. We
pre-allocate page table memory so that we can synchronize using a spinlock and
be called under rcu_read_lock from the MMU notifiers. We steal the
mmu_memory_cache implementation from x86 and adapt it for our specific usage.

We support MMU notifiers (thanks to Marc Zyngier) through kvm_unmap_hva and
kvm_set_spte_hva.

Finally, define kvm_phys_addr_ioremap() to map a device at a guest IPA, which
is used by the VGIC support to map the virtual CPU interface registers to the
guest. This support is added by Marc Zyngier.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
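
The pre-allocation scheme mentioned above is easiest to see as a caller-side
pattern: top up a kvm_mmu_memory_cache while sleeping is still allowed, then
take kvm->mmu_lock and let stage2_set_pte() draw any missing page-table pages
from that cache. The sketch below only illustrates that pattern using the names
introduced by this patch; the wrapper function itself is hypothetical and not
part of the patch (mmu_topup_memory_cache, stage2_set_pte and
mmu_free_memory_cache are static to arch/arm/kvm/mmu.c), and it mirrors the
kvm_phys_addr_ioremap() loop in the diff below.

/*
 * Illustrative sketch only, not part of this patch: map one page at a guest
 * IPA without allocating page-table memory under kvm->mmu_lock.
 */
static int example_stage2_map_page(struct kvm *kvm, phys_addr_t ipa, pte_t pte)
{
	struct kvm_mmu_memory_cache cache = { 0, };
	int ret;

	/* May sleep: pre-fill the cache with enough pages for a pmd and a pte table. */
	ret = mmu_topup_memory_cache(&cache, 2, 2);
	if (ret)
		return ret;

	/* Atomic section: missing table levels are taken from the cache. */
	spin_lock(&kvm->mmu_lock);
	ret = stage2_set_pte(kvm, &cache, ipa, &pte, false);
	spin_unlock(&kvm->mmu_lock);

	/* Give back whatever was not consumed. */
	mmu_free_memory_cache(&cache);
	return ret;
}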
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/kvm_asm.h   |   2
-rw-r--r--  arch/arm/include/asm/kvm_host.h  |  18
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h   |   9
-rw-r--r--  arch/arm/kvm/Kconfig             |   1
-rw-r--r--  arch/arm/kvm/arm.c               |  37
-rw-r--r--  arch/arm/kvm/interrupts.S        |   7
-rw-r--r--  arch/arm/kvm/mmu.c               | 370
-rw-r--r--  arch/arm/kvm/trace.h             |  46
8 files changed, 488 insertions(+), 2 deletions(-)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 81324e2eb3f9..f6652f6c5d84 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -57,6 +57,7 @@
 #define ARM_EXCEPTION_HVC	7
 
 #ifndef __ASSEMBLY__
+struct kvm;
 struct kvm_vcpu;
 
 extern char __kvm_hyp_init[];
@@ -71,6 +72,7 @@ extern char __kvm_hyp_code_start[];
 extern char __kvm_hyp_code_end[];
 
 extern void __kvm_flush_vm_context(void);
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 #endif
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 067ef2898c26..3636c7ea4eb2 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -112,4 +112,22 @@ struct kvm_one_reg;
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 u64 kvm_call_hyp(void *hypfn, ...);
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+struct kvm;
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_unmap_hva_range(struct kvm *kvm,
+			unsigned long start, unsigned long end);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+/* We do not have shadow page tables, hence the empty hooks */
+static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return 0;
+}
+
+static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return 0;
+}
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index e8679b317b0f..499e7b0925ff 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -23,6 +23,15 @@ int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_hyp_pmds(void);
 
+int kvm_alloc_stage2_pgd(struct kvm *kvm);
+void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+			  phys_addr_t pa, unsigned long size);
+
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+
 phys_addr_t kvm_mmu_get_httbr(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 4a01b6fbf380..05227cb57a7b 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -36,6 +36,7 @@ config KVM_ARM_HOST
 	bool "KVM host support for ARM cpus."
 	depends on KVM
 	depends on MMU
+	select MMU_NOTIFIER
 	---help---
 	  Provides host support for ARM processors.
 
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2c6b780e78a7..d810afb6cb84 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -81,12 +81,33 @@ void kvm_arch_sync_events(struct kvm *kvm)
 {
 }
 
+/**
+ * kvm_arch_init_vm - initializes a VM data structure
+ * @kvm:	pointer to the KVM struct
+ */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+	int ret = 0;
+
 	if (type)
 		return -EINVAL;
 
-	return 0;
+	ret = kvm_alloc_stage2_pgd(kvm);
+	if (ret)
+		goto out_fail_alloc;
+
+	ret = create_hyp_mappings(kvm, kvm + 1);
+	if (ret)
+		goto out_free_stage2_pgd;
+
+	/* Mark the initial VMID generation invalid */
+	kvm->arch.vmid_gen = 0;
+
+	return ret;
+out_free_stage2_pgd:
+	kvm_free_stage2_pgd(kvm);
+out_fail_alloc:
+	return ret;
 }
 
 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
@@ -104,10 +125,16 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 	return 0;
 }
 
+/**
+ * kvm_arch_destroy_vm - destroy the VM data structure
+ * @kvm:	pointer to the KVM struct
+ */
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	int i;
 
+	kvm_free_stage2_pgd(kvm);
+
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -196,7 +223,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_vcpu;
 
+	err = create_hyp_mappings(vcpu, vcpu + 1);
+	if (err)
+		goto vcpu_uninit;
+
 	return vcpu;
+vcpu_uninit:
+	kvm_vcpu_uninit(vcpu);
 free_vcpu:
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 out:
@@ -210,6 +243,8 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+	kvm_mmu_free_memory_caches(vcpu);
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index d10a8075409a..f701aff31e44 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -32,6 +32,13 @@ __kvm_hyp_code_start:
 /********************************************************************
  * Flush per-VMID TLBs
  */
+ENTRY(__kvm_tlb_flush_vmid)
+	bx	lr
+ENDPROC(__kvm_tlb_flush_vmid)
+
+/********************************************************************
+ * Flush TLBs and instruction caches of current CPU for all VMIDs
+ */
 ENTRY(__kvm_flush_vm_context)
 	bx	lr
 ENDPROC(__kvm_flush_vm_context)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 4decdb618019..4347d68f052f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -23,12 +23,21 @@
 #include <asm/pgalloc.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_asm.h>
 #include <asm/mach/map.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
 
 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 
 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
 
+static void kvm_tlb_flush_vmid(struct kvm *kvm)
+{
+	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
+}
+
 static void kvm_set_pte(pte_t *pte, pte_t new_pte)
 {
 	pte_val(*pte) = new_pte;
@@ -39,6 +48,38 @@ static void kvm_set_pte(pte_t *pte, pte_t new_pte)
 	flush_pmd_entry(pte);
 }
 
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+				  int min, int max)
+{
+	void *page;
+
+	BUG_ON(max > KVM_NR_MEM_OBJS);
+	if (cache->nobjs >= min)
+		return 0;
+	while (cache->nobjs < max) {
+		page = (void *)__get_free_page(PGALLOC_GFP);
+		if (!page)
+			return -ENOMEM;
+		cache->objects[cache->nobjs++] = page;
+	}
+	return 0;
+}
+
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+	while (mc->nobjs)
+		free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+{
+	void *p;
+
+	BUG_ON(!mc || !mc->nobjs);
+	p = mc->objects[--mc->nobjs];
+	return p;
+}
+
 static void free_ptes(pmd_t *pmd, unsigned long addr)
 {
 	pte_t *pte;
@@ -217,11 +258,333 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
 	return __create_hyp_mappings(from, to, &pfn);
 }
 
+/**
+ * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
+ * @kvm:	The KVM struct pointer for the VM.
+ *
+ * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
+ * support either full 40-bit input addresses or limited to 32-bit input
+ * addresses). Clears the allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_stage2_pgd(struct kvm *kvm)
+{
+	pgd_t *pgd;
+
+	if (kvm->arch.pgd != NULL) {
+		kvm_err("kvm_arch already initialized?\n");
+		return -EINVAL;
+	}
+
+	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
+	if (!pgd)
+		return -ENOMEM;
+
+	/* stage-2 pgd must be aligned to its size */
+	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
+
+	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+	kvm->arch.pgd = pgd;
+
+	return 0;
+}
+
+static void clear_pud_entry(pud_t *pud)
+{
+	pmd_t *pmd_table = pmd_offset(pud, 0);
+	pud_clear(pud);
+	pmd_free(NULL, pmd_table);
+	put_page(virt_to_page(pud));
+}
+
+static void clear_pmd_entry(pmd_t *pmd)
+{
+	pte_t *pte_table = pte_offset_kernel(pmd, 0);
+	pmd_clear(pmd);
+	pte_free_kernel(NULL, pte_table);
+	put_page(virt_to_page(pmd));
+}
+
+static bool pmd_empty(pmd_t *pmd)
+{
+	struct page *pmd_page = virt_to_page(pmd);
+	return page_count(pmd_page) == 1;
+}
+
+static void clear_pte_entry(pte_t *pte)
+{
+	if (pte_present(*pte)) {
+		kvm_set_pte(pte, __pte(0));
+		put_page(virt_to_page(pte));
+	}
+}
+
+static bool pte_empty(pte_t *pte)
+{
+	struct page *pte_page = virt_to_page(pte);
+	return page_count(pte_page) == 1;
+}
+
+/**
+ * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * @kvm:   The VM pointer
+ * @start: The intermediate physical base address of the range to unmap
+ * @size:  The size of the area to unmap
+ *
+ * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
+ * be called while holding mmu_lock (unless for freeing the stage2 pgd before
+ * destroying the VM), otherwise another faulting VCPU may come in and mess
+ * with things behind our backs.
+ */
+static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	phys_addr_t addr = start, end = start + size;
+	u64 range;
+
+	while (addr < end) {
+		pgd = kvm->arch.pgd + pgd_index(addr);
+		pud = pud_offset(pgd, addr);
+		if (pud_none(*pud)) {
+			addr += PUD_SIZE;
+			continue;
+		}
+
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(*pmd)) {
+			addr += PMD_SIZE;
+			continue;
+		}
+
+		pte = pte_offset_kernel(pmd, addr);
+		clear_pte_entry(pte);
+		range = PAGE_SIZE;
+
+		/* If we emptied the pte, walk back up the ladder */
+		if (pte_empty(pte)) {
+			clear_pmd_entry(pmd);
+			range = PMD_SIZE;
+			if (pmd_empty(pmd)) {
+				clear_pud_entry(pud);
+				range = PUD_SIZE;
+			}
+		}
+
+		addr += range;
+	}
+}
+
+/**
+ * kvm_free_stage2_pgd - free all stage-2 tables
+ * @kvm:	The KVM struct pointer for the VM.
+ *
+ * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
+ * underlying level-2 and level-3 tables before freeing the actual level-1 table
+ * and setting the struct pointer to NULL.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * destroyed, which can only be done once.
+ */
+void kvm_free_stage2_pgd(struct kvm *kvm)
+{
+	if (kvm->arch.pgd == NULL)
+		return;
+
+	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+	kvm->arch.pgd = NULL;
+}
+
+
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte, old_pte;
+
+	/* Create 2nd stage page table mapping - Level 1 */
+	pgd = kvm->arch.pgd + pgd_index(addr);
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		if (!cache)
+			return 0; /* ignore calls from kvm_set_spte_hva */
+		pmd = mmu_memory_cache_alloc(cache);
+		pud_populate(NULL, pud, pmd);
+		pmd += pmd_index(addr);
+		get_page(virt_to_page(pud));
+	} else
+		pmd = pmd_offset(pud, addr);
+
+	/* Create 2nd stage page table mapping - Level 2 */
+	if (pmd_none(*pmd)) {
+		if (!cache)
+			return 0; /* ignore calls from kvm_set_spte_hva */
+		pte = mmu_memory_cache_alloc(cache);
+		clean_pte_table(pte);
+		pmd_populate_kernel(NULL, pmd, pte);
+		pte += pte_index(addr);
+		get_page(virt_to_page(pmd));
+	} else
+		pte = pte_offset_kernel(pmd, addr);
+
+	if (iomap && pte_present(*pte))
+		return -EFAULT;
+
+	/* Create 2nd stage page table mapping - Level 3 */
+	old_pte = *pte;
+	kvm_set_pte(pte, *new_pte);
+	if (pte_present(old_pte))
+		kvm_tlb_flush_vmid(kvm);
+	else
+		get_page(virt_to_page(pte));
+
+	return 0;
+}
+
+/**
+ * kvm_phys_addr_ioremap - map a device range to guest IPA
+ *
+ * @kvm:	The KVM pointer
+ * @guest_ipa:	The IPA at which to insert the mapping
+ * @pa:		The physical address of the device
+ * @size:	The size of the mapping
+ */
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+			  phys_addr_t pa, unsigned long size)
+{
+	phys_addr_t addr, end;
+	int ret = 0;
+	unsigned long pfn;
+	struct kvm_mmu_memory_cache cache = { 0, };
+
+	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
+	pfn = __phys_to_pfn(pa);
+
+	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
+		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
+
+		ret = mmu_topup_memory_cache(&cache, 2, 2);
+		if (ret)
+			goto out;
+		spin_lock(&kvm->mmu_lock);
+		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
+		spin_unlock(&kvm->mmu_lock);
+		if (ret)
+			goto out;
+
+		pfn++;
+	}
+
+out:
+	mmu_free_memory_cache(&cache);
+	return ret;
+}
+
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	return -EINVAL;
 }
 
+static void handle_hva_to_gpa(struct kvm *kvm,
+			      unsigned long start,
+			      unsigned long end,
+			      void (*handler)(struct kvm *kvm,
+					      gpa_t gpa, void *data),
+			      void *data)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+
+	slots = kvm_memslots(kvm);
+
+	/* we only care about the pages that the guest sees */
+	kvm_for_each_memslot(memslot, slots) {
+		unsigned long hva_start, hva_end;
+		gfn_t gfn, gfn_end;
+
+		hva_start = max(start, memslot->userspace_addr);
+		hva_end = min(end, memslot->userspace_addr +
+					(memslot->npages << PAGE_SHIFT));
+		if (hva_start >= hva_end)
+			continue;
+
+		/*
+		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+		 */
+		gfn = hva_to_gfn_memslot(hva_start, memslot);
+		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+		for (; gfn < gfn_end; ++gfn) {
+			gpa_t gpa = gfn << PAGE_SHIFT;
+			handler(kvm, gpa, data);
+		}
+	}
+}
+
+static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+	kvm_tlb_flush_vmid(kvm);
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+	unsigned long end = hva + PAGE_SIZE;
+
+	if (!kvm->arch.pgd)
+		return 0;
+
+	trace_kvm_unmap_hva(hva);
+	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
+	return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm,
+			unsigned long start, unsigned long end)
+{
+	if (!kvm->arch.pgd)
+		return 0;
+
+	trace_kvm_unmap_hva_range(start, end);
+	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+	return 0;
+}
+
+static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+	pte_t *pte = (pte_t *)data;
+
+	stage2_set_pte(kvm, NULL, gpa, pte, false);
+}
+
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	unsigned long end = hva + PAGE_SIZE;
+	pte_t stage2_pte;
+
+	if (!kvm->arch.pgd)
+		return;
+
+	trace_kvm_set_spte_hva(hva);
+	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+}
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
 phys_addr_t kvm_mmu_get_httbr(void)
 {
 	VM_BUG_ON(!virt_addr_valid(hyp_pgd));
@@ -230,7 +593,12 @@ phys_addr_t kvm_mmu_get_httbr(void)
 
 int kvm_mmu_init(void)
 {
-	return hyp_pgd ? 0 : -ENOMEM;
+	if (!hyp_pgd) {
+		kvm_err("Hyp mode PGD not allocated\n");
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 /**
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index f8869c19c0a3..862b2cc12fbe 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -39,7 +39,53 @@ TRACE_EVENT(kvm_exit,
 	TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
 );
 
+TRACE_EVENT(kvm_unmap_hva,
+	TP_PROTO(unsigned long hva),
+	TP_ARGS(hva),
 
+	TP_STRUCT__entry(
+		__field(	unsigned long,	hva		)
+	),
+
+	TP_fast_assign(
+		__entry->hva		= hva;
+	),
+
+	TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
+);
+
+TRACE_EVENT(kvm_unmap_hva_range,
+	TP_PROTO(unsigned long start, unsigned long end),
+	TP_ARGS(start, end),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	start		)
+		__field(	unsigned long,	end		)
+	),
+
+	TP_fast_assign(
+		__entry->start		= start;
+		__entry->end		= end;
+	),
+
+	TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
+		  __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_set_spte_hva,
+	TP_PROTO(unsigned long hva),
+	TP_ARGS(hva),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	hva		)
+	),
+
+	TP_fast_assign(
+		__entry->hva		= hva;
+	),
+
+	TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
+);
 
 #endif /* _TRACE_KVM_H */
 