author		Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 12:13:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 12:13:19 -0400
commit		8546dc1d4b671480961c3eaf4c0c102ae6848340 (patch)
tree		c646079fb48811b22b742deb6bd2e907f9e6c3d4 /arch/arm/include/asm/kvm_mmu.h
parent		9992ba72327fa0d8bdc9fb624e80f5cce338a711 (diff)
parent		33b9f582c5c1db515412cc7efff28f7d1779321f (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King:
"The major items included in here are:
- MCPM, multi-cluster power management, part of the infrastructure
  required for ARM's big.LITTLE support.
- A rework of the ARM KVM code to allow re-use by ARM64.
- Error handling cleanups of the IS_ERR_OR_NULL() madness and fixes
  of that stuff for arch/arm.
- Preparatory patches for Cortex-M3 support from Uwe Kleine-König.
There is also a set of three patches in here from Hugh/Catalin to
address freeing of inappropriate page tables on LPAE. You already
have these from akpm, but they were already part of my tree at the
time he sent them, so unfortunately they'll end up with duplicate
commits"
* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (77 commits)
ARM: EXYNOS: remove unnecessary use of IS_ERR_VALUE()
ARM: IMX: remove unnecessary use of IS_ERR_VALUE()
ARM: OMAP: use consistent error checking
ARM: cleanup: OMAP hwmod error checking
ARM: 7709/1: mcpm: Add explicit AFLAGS to support v6/v7 multiplatform kernels
ARM: 7700/2: Make cpu_init() notrace
ARM: 7702/1: Set the page table freeing ceiling to TASK_SIZE
ARM: 7701/1: mm: Allow arch code to control the user page table ceiling
ARM: 7703/1: Disable preemption in broadcast_tlb*_a15_erratum()
ARM: mcpm: provide an interface to set the SMP ops at run time
ARM: mcpm: generic SMP secondary bringup and hotplug support
ARM: mcpm_head.S: vlock-based first man election
ARM: mcpm: Add baremetal voting mutexes
ARM: mcpm: introduce helpers for platform coherency exit/setup
ARM: mcpm: introduce the CPU/cluster power API
ARM: multi-cluster PM: secondary kernel entry code
ARM: cacheflush: add synchronization helpers for mixed cache state accesses
ARM: cpu hotplug: remove majority of cache flushing from platforms
ARM: smp: flush L1 cache in cpu_die()
ARM: tegra: remove tegra specific cpu_disable()
...
Diffstat (limited to 'arch/arm/include/asm/kvm_mmu.h')
-rw-r--r--	arch/arm/include/asm/kvm_mmu.h	67
1 file changed, 67 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b34874..970f3b5fa109 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@
 #ifndef __ARM_KVM_MMU_H__
 #define __ARM_KVM_MMU_H__
 
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/idmap.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK	(~0UL)
+#define HYP_PAGE_OFFSET		PAGE_OFFSET
+#define KERN_TO_HYP(kva)	(kva)
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_hyp_pmds(void);
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+	pte_val(*pte) = new_pte;
+	/*
+	 * flush_pmd_entry just takes a void pointer and cleans the necessary
+	 * cache entries, so we can reuse the function for ptes.
+	 */
+	flush_pmd_entry(pte);
+}
+
 static inline bool kvm_is_write_fault(unsigned long hsr)
 {
 	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
 	return true;
 }
 
+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+	clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+	clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+	pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+	/*
+	 * If we are going to insert an instruction page and the icache is
+	 * either VIPT or PIPT, there is a potential problem where the host
+	 * (or another VM) may have used the same page as this guest, and we
+	 * read incorrect data from the icache.  If we're using a PIPT cache,
+	 * we can invalidate just that page, but if we are using a VIPT cache
+	 * we need to invalidate the entire icache - damn shame - as written
+	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+	 *
+	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
+	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 */
+	if (icache_is_pipt()) {
+		unsigned long hva = gfn_to_hva(kvm, gfn);
+		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+	} else if (!icache_is_vivt_asid_tagged()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+	}
+}
+
 #endif	/* __ARM_KVM_MMU_H__ */
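
As an illustrative sketch only (not part of this commit), the helpers added above might be combined by a stage-2 fault path roughly as follows; the function name and calling context are assumptions for demonstration, not the actual KVM/ARM code.

/*
 * Hypothetical caller, for illustration only: install a guest page at
 * stage 2 using the helpers introduced in this header.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>

static void example_map_guest_page(struct kvm *kvm, gfn_t gfn,
				   pte_t *ptep, pte_t new_pte, bool writable)
{
	if (writable)
		kvm_set_s2pte_writable(&new_pte);	/* OR in L_PTE_S2_RDWR */

	/* Avoid the guest fetching stale instructions for this page. */
	coherent_icache_guest_page(kvm, gfn);

	/* Write the PTE and clean it to the point of coherency. */
	kvm_set_pte(ptep, new_pte);
}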