author		Marc Zyngier <marc.zyngier@arm.com>	2012-10-15 06:27:37 -0400
committer	Christoffer Dall <cdall@cs.columbia.edu>	2013-03-06 18:48:44 -0500
commit		c62ee2b22798d7a5a8eb5f799b5183ef993ca7e4 (patch)
tree		9491eda93c012710ac7e01fd181c3819952651f4
parent		c088f8f0088decf0ce172a6bb4a6f5742cc54c15 (diff)

ARM: KVM: abstract most MMU operations

Move low level MMU-related operations to kvm_mmu.h. This makes the MMU
code reusable by the arm64 port.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
-rw-r--r--	arch/arm/include/asm/kvm_mmu.h	| 58
-rw-r--r--	arch/arm/kvm/mmu.c		| 58
2 files changed, 70 insertions(+), 46 deletions(-)
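The pattern this patch introduces is worth spelling out before the diff: arch-specific details (cache maintenance, stage-2 permission bits) move behind static inline kvm_* helpers in the per-architecture header, so the shared mmu.c never touches primitives such as clean_dcache_area() or L_PTE_S2_RDWR directly, and an arm64 port can later supply its own header with the same helper names. The following is a minimal standalone sketch of that shape only; the types, bit values and helper bodies here are hypothetical stand-ins, not the kernel's real definitions.

/* sketch.c - illustrative only; hypothetical types and helpers.
 * Mimics the pattern of this patch: the "arch header" hides bit layout
 * and cache maintenance behind static inline kvm_* helpers, and the
 * "generic" code only ever calls those helpers. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t pte_t;                     /* stand-in page table entry */

#define L_PTE_S2_RDWR	(3ULL << 6)         /* made-up stage-2 R/W bits  */

/* --- would live in the per-architecture kvm_mmu.h -------------------- */
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	*pte |= L_PTE_S2_RDWR;              /* arch-specific bit layout   */
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	*pte = new_pte;                     /* a real port would also clean
					       the cache line holding the pte */
}

/* --- "generic" stage-2 code: no arch details leak in here ------------ */
static void map_one_page(pte_t *slot, uint64_t pfn, int writable)
{
	pte_t pte = pfn << 12;              /* descriptor built from pfn   */

	if (writable)
		kvm_set_s2pte_writable(&pte);
	kvm_set_pte(slot, pte);
}

int main(void)
{
	pte_t slot = 0;

	map_one_page(&slot, 0x1234, 1);
	printf("pte = 0x%llx\n", (unsigned long long)slot);
	return 0;
}

The point of this shape is that map_one_page() compiles unchanged against any header providing the two helpers, which is what lets the stage-2 fault handling code be reused by another architecture.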
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b34874..ac784937cc0f 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,9 @@
 #ifndef __ARM_KVM_MMU_H__
 #define __ARM_KVM_MMU_H__
 
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_hyp_pmds(void);
@@ -36,6 +39,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+	pte_val(*pte) = new_pte;
+	/*
+	 * flush_pmd_entry just takes a void pointer and cleans the necessary
+	 * cache entries, so we can reuse the function for ptes.
+	 */
+	flush_pmd_entry(pte);
+}
+
 static inline bool kvm_is_write_fault(unsigned long hsr)
 {
 	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
@@ -47,4 +60,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
 	return true;
 }
 
+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+	clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+	clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+	pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+	/*
+	 * If we are going to insert an instruction page and the icache is
+	 * either VIPT or PIPT, there is a potential problem where the host
+	 * (or another VM) may have used the same page as this guest, and we
+	 * read incorrect data from the icache. If we're using a PIPT cache,
+	 * we can invalidate just that page, but if we are using a VIPT cache
+	 * we need to invalidate the entire icache - damn shame - as written
+	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+	 *
+	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
+	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 */
+	if (icache_is_pipt()) {
+		unsigned long hva = gfn_to_hva(kvm, gfn);
+		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+	} else if (!icache_is_vivt_asid_tagged()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+	}
+}
+
 #endif	/* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 8e9047a4b3b7..6b4ea185956e 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -28,8 +28,6 @@
 #include <asm/kvm_mmio.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
-#include <asm/mach/map.h>
-#include <trace/events/kvm.h>
 
 #include "trace.h"
 
@@ -42,16 +40,6 @@ static void kvm_tlb_flush_vmid(struct kvm *kvm)
 	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 }
 
-static void kvm_set_pte(pte_t *pte, pte_t new_pte)
-{
-	pte_val(*pte) = new_pte;
-	/*
-	 * flush_pmd_entry just takes a void pointer and cleans the necessary
-	 * cache entries, so we can reuse the function for ptes.
-	 */
-	flush_pmd_entry(pte);
-}
-
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  int min, int max)
 {
@@ -290,7 +278,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
 
 	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
-	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 
 	return 0;
@@ -422,22 +410,22 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 			return 0; /* ignore calls from kvm_set_spte_hva */
 		pmd = mmu_memory_cache_alloc(cache);
 		pud_populate(NULL, pud, pmd);
-		pmd += pmd_index(addr);
 		get_page(virt_to_page(pud));
-	} else
-		pmd = pmd_offset(pud, addr);
+	}
+
+	pmd = pmd_offset(pud, addr);
 
 	/* Create 2nd stage page table mapping - Level 2 */
 	if (pmd_none(*pmd)) {
 		if (!cache)
 			return 0; /* ignore calls from kvm_set_spte_hva */
 		pte = mmu_memory_cache_alloc(cache);
-		clean_pte_table(pte);
+		kvm_clean_pte(pte);
 		pmd_populate_kernel(NULL, pmd, pte);
-		pte += pte_index(addr);
 		get_page(virt_to_page(pmd));
-	} else
-		pte = pte_offset_kernel(pmd, addr);
+	}
+
+	pte = pte_offset_kernel(pmd, addr);
 
 	if (iomap && pte_present(*pte))
 		return -EFAULT;
@@ -473,7 +461,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 	pfn = __phys_to_pfn(pa);
 
 	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
+		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+		kvm_set_s2pte_writable(&pte);
 
 		ret = mmu_topup_memory_cache(&cache, 2, 2);
 		if (ret)
@@ -492,29 +481,6 @@ out:
 	return ret;
 }
 
-static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
-{
-	/*
-	 * If we are going to insert an instruction page and the icache is
-	 * either VIPT or PIPT, there is a potential problem where the host
-	 * (or another VM) may have used the same page as this guest, and we
-	 * read incorrect data from the icache. If we're using a PIPT cache,
-	 * we can invalidate just that page, but if we are using a VIPT cache
-	 * we need to invalidate the entire icache - damn shame - as written
-	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
-	 *
-	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
-	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
-	 */
-	if (icache_is_pipt()) {
-		unsigned long hva = gfn_to_hva(kvm, gfn);
-		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
-	} else if (!icache_is_vivt_asid_tagged()) {
-		/* any kind of VIPT cache */
-		__flush_icache_all();
-	}
-}
-
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  gfn_t gfn, struct kvm_memory_slot *memslot,
 			  unsigned long fault_status)
@@ -560,7 +526,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
 	if (writable) {
-		pte_val(new_pte) |= L_PTE_S2_RDWR;
+		kvm_set_s2pte_writable(&new_pte);
 		kvm_set_pfn_dirty(pfn);
 	}
 	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
@@ -774,7 +740,7 @@ void kvm_clear_hyp_idmap(void)
 		pmd = pmd_offset(pud, addr);
 
 		pud_clear(pud);
-		clean_pmd_entry(pmd);
+		kvm_clean_pmd_entry(pmd);
 		pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
 	} while (pgd++, addr = next, addr < end);
 }