author | Russell King <rmk+kernel@arm.linux.org.uk> | 2013-03-14 20:14:28 -0400
---|---|---
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2013-03-14 20:14:28 -0400
commit | 0098fc39e6d575f940487f09f303787efbc7a373 (patch) |
tree | 051e6aab9bc675f680659e03f07c51e09c2e8dcf |
parent | 73a09d212ec65b7068a283e6034fa05649d3d075 (diff) |
parent | f42798c6898bf1e536673e798d263e492355162f (diff) |
Merge branch 'kvm-arm-fixes' of git://github.com/columbia/linux-kvm-arm into devel-stable
-rw-r--r-- | arch/arm/include/asm/kvm_arm.h | 4
-rw-r--r-- | arch/arm/include/asm/kvm_asm.h | 2
-rw-r--r-- | arch/arm/include/asm/kvm_emulate.h | 107
-rw-r--r-- | arch/arm/include/asm/kvm_host.h | 42
-rw-r--r-- | arch/arm/include/asm/kvm_mmu.h | 67
-rw-r--r-- | arch/arm/include/asm/kvm_vgic.h | 1
-rw-r--r-- | arch/arm/include/uapi/asm/kvm.h | 12
-rw-r--r-- | arch/arm/kernel/asm-offsets.c | 8
-rw-r--r-- | arch/arm/kvm/Makefile | 2
-rw-r--r-- | arch/arm/kvm/arm.c | 194
-rw-r--r-- | arch/arm/kvm/coproc.c | 28
-rw-r--r-- | arch/arm/kvm/coproc.h | 4
-rw-r--r-- | arch/arm/kvm/emulate.c | 75
-rw-r--r-- | arch/arm/kvm/guest.c | 17
-rw-r--r-- | arch/arm/kvm/handle_exit.c | 164
-rw-r--r-- | arch/arm/kvm/interrupts.S | 13
-rw-r--r-- | arch/arm/kvm/mmio.c | 46
-rw-r--r-- | arch/arm/kvm/mmu.c | 184
-rw-r--r-- | arch/arm/kvm/vgic.c | 2
19 files changed, 585 insertions, 387 deletions
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 7c3d813e15df..124623e5ef14 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -211,4 +211,8 @@ | |||
211 | 211 | ||
212 | #define HSR_HVC_IMM_MASK ((1UL << 16) - 1) | 212 | #define HSR_HVC_IMM_MASK ((1UL << 16) - 1) |
213 | 213 | ||
214 | #define HSR_DABT_S1PTW (1U << 7) | ||
215 | #define HSR_DABT_CM (1U << 8) | ||
216 | #define HSR_DABT_EA (1U << 9) | ||
217 | |||
214 | #endif /* __ARM_KVM_ARM_H__ */ | 218 | #endif /* __ARM_KVM_ARM_H__ */ |
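
The three new HSR_DABT_* bits feed the kvm_vcpu_dabt_*() accessors added to kvm_emulate.h below. A standalone sketch (not part of the patch; the syndrome value is made up) of decoding them:

```c
#include <stdio.h>

#define HSR_DABT_S1PTW (1U << 7) /* abort on a stage-1 page-table walk */
#define HSR_DABT_CM    (1U << 8) /* abort on a cache maintenance op */
#define HSR_DABT_EA    (1U << 9) /* external abort */

int main(void)
{
	unsigned int hsr = 0x90000180;	/* hypothetical data-abort syndrome */

	printf("S1PTW=%d CM=%d EA=%d\n",
	       !!(hsr & HSR_DABT_S1PTW),
	       !!(hsr & HSR_DABT_CM),
	       !!(hsr & HSR_DABT_EA));
	return 0;
}
```
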
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index e4956f4e23e1..18d50322a9e2 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[]; | |||
75 | extern void __kvm_tlb_flush_vmid(struct kvm *kvm); | 75 | extern void __kvm_tlb_flush_vmid(struct kvm *kvm); |
76 | 76 | ||
77 | extern void __kvm_flush_vm_context(void); | 77 | extern void __kvm_flush_vm_context(void); |
78 | extern void __kvm_tlb_flush_vmid(struct kvm *kvm); | 78 | extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); |
79 | 79 | ||
80 | extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); | 80 | extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); |
81 | #endif | 81 | #endif |
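
The renamed symbol gains an IPA argument so callers can eventually flush a single stage-2 entry; the host-side wrapper in mmu.c (further down in this diff) simply marshals it through kvm_call_hyp():

```c
/* From arch/arm/kvm/mmu.c below: the ipa is accepted now, but the
 * ARMv7 implementation still flushes the whole per-VMID TLB. */
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
```
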
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index fd611996bfb5..82b4babead2c 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@ | |||
22 | #include <linux/kvm_host.h> | 22 | #include <linux/kvm_host.h> |
23 | #include <asm/kvm_asm.h> | 23 | #include <asm/kvm_asm.h> |
24 | #include <asm/kvm_mmio.h> | 24 | #include <asm/kvm_mmio.h> |
25 | #include <asm/kvm_arm.h> | ||
25 | 26 | ||
26 | u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); | 27 | unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); |
27 | u32 *vcpu_spsr(struct kvm_vcpu *vcpu); | 28 | unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); |
28 | 29 | ||
29 | int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); | 30 | bool kvm_condition_valid(struct kvm_vcpu *vcpu); |
30 | void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); | 31 | void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); |
31 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); | 32 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); |
32 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); | 33 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); |
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) | |||
37 | return 1; | 38 | return 1; |
38 | } | 39 | } |
39 | 40 | ||
40 | static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu) | 41 | static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu) |
41 | { | 42 | { |
42 | return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc; | 43 | return &vcpu->arch.regs.usr_regs.ARM_pc; |
43 | } | 44 | } |
44 | 45 | ||
45 | static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu) | 46 | static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu) |
46 | { | 47 | { |
47 | return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr; | 48 | return &vcpu->arch.regs.usr_regs.ARM_cpsr; |
48 | } | 49 | } |
49 | 50 | ||
50 | static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) | 51 | static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) |
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg) | |||
69 | return reg == 15; | 70 | return reg == 15; |
70 | } | 71 | } |
71 | 72 | ||
73 | static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu) | ||
74 | { | ||
75 | return vcpu->arch.fault.hsr; | ||
76 | } | ||
77 | |||
78 | static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu) | ||
79 | { | ||
80 | return vcpu->arch.fault.hxfar; | ||
81 | } | ||
82 | |||
83 | static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu) | ||
84 | { | ||
85 | return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8; | ||
86 | } | ||
87 | |||
88 | static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu) | ||
89 | { | ||
90 | return vcpu->arch.fault.hyp_pc; | ||
91 | } | ||
92 | |||
93 | static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu) | ||
94 | { | ||
95 | return kvm_vcpu_get_hsr(vcpu) & HSR_ISV; | ||
96 | } | ||
97 | |||
98 | static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu) | ||
99 | { | ||
100 | return kvm_vcpu_get_hsr(vcpu) & HSR_WNR; | ||
101 | } | ||
102 | |||
103 | static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu) | ||
104 | { | ||
105 | return kvm_vcpu_get_hsr(vcpu) & HSR_SSE; | ||
106 | } | ||
107 | |||
108 | static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) | ||
109 | { | ||
110 | return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; | ||
111 | } | ||
112 | |||
113 | static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) | ||
114 | { | ||
115 | return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA; | ||
116 | } | ||
117 | |||
118 | static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) | ||
119 | { | ||
120 | return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; | ||
121 | } | ||
122 | |||
123 | /* Get Access Size from a data abort */ | ||
124 | static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) | ||
125 | { | ||
126 | switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) { | ||
127 | case 0: | ||
128 | return 1; | ||
129 | case 1: | ||
130 | return 2; | ||
131 | case 2: | ||
132 | return 4; | ||
133 | default: | ||
134 | kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); | ||
135 | return -EFAULT; | ||
136 | } | ||
137 | } | ||
138 | |||
139 | /* This one is not specific to Data Abort */ | ||
140 | static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu) | ||
141 | { | ||
142 | return kvm_vcpu_get_hsr(vcpu) & HSR_IL; | ||
143 | } | ||
144 | |||
145 | static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) | ||
146 | { | ||
147 | return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; | ||
148 | } | ||
149 | |||
150 | static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu) | ||
151 | { | ||
152 | return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT; | ||
153 | } | ||
154 | |||
155 | static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) | ||
156 | { | ||
157 | return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; | ||
158 | } | ||
159 | |||
160 | static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) | ||
161 | { | ||
162 | return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK; | ||
163 | } | ||
164 | |||
72 | #endif /* __ARM_KVM_EMULATE_H__ */ | 165 | #endif /* __ARM_KVM_EMULATE_H__ */ |
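
Of these accessors, kvm_vcpu_get_fault_ipa() is the least obvious: HPFAR[31:4] holds bits [39:12] of the faulting intermediate physical address, so masking and shifting left by 8 rebuilds the page-aligned 40-bit IPA. A standalone worked example (the HPFAR value is hypothetical, and the HPFAR_MASK value is assumed from asm/kvm_arm.h):

```c
#include <stdio.h>

typedef unsigned long long phys_addr_t;

#define HPFAR_MASK (~0xfUL)	/* assumed value, from asm/kvm_arm.h */

int main(void)
{
	unsigned long hpfar = 0x00123450;	/* hypothetical HPFAR */
	phys_addr_t ipa = ((phys_addr_t)(hpfar & HPFAR_MASK)) << 8;

	/* 0x123450 << 8 == 0x12345000: IPA bits [39:12], page aligned */
	printf("fault IPA = 0x%010llx\n", ipa);
	return 0;
}
```
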
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d1736a53b12d..0c4e643d939e 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache { | |||
80 | void *objects[KVM_NR_MEM_OBJS]; | 80 | void *objects[KVM_NR_MEM_OBJS]; |
81 | }; | 81 | }; |
82 | 82 | ||
83 | struct kvm_vcpu_fault_info { | ||
84 | u32 hsr; /* Hyp Syndrome Register */ | ||
85 | u32 hxfar; /* Hyp Data/Inst. Fault Address Register */ | ||
86 | u32 hpfar; /* Hyp IPA Fault Address Register */ | ||
87 | u32 hyp_pc; /* PC when exception was taken from Hyp mode */ | ||
88 | }; | ||
89 | |||
90 | typedef struct vfp_hard_struct kvm_kernel_vfp_t; | ||
91 | |||
83 | struct kvm_vcpu_arch { | 92 | struct kvm_vcpu_arch { |
84 | struct kvm_regs regs; | 93 | struct kvm_regs regs; |
85 | 94 | ||
@@ -93,13 +102,11 @@ struct kvm_vcpu_arch { | |||
93 | u32 midr; | 102 | u32 midr; |
94 | 103 | ||
95 | /* Exception Information */ | 104 | /* Exception Information */ |
96 | u32 hsr; /* Hyp Syndrome Register */ | 105 | struct kvm_vcpu_fault_info fault; |
97 | u32 hxfar; /* Hyp Data/Inst Fault Address Register */ | ||
98 | u32 hpfar; /* Hyp IPA Fault Address Register */ | ||
99 | 106 | ||
100 | /* Floating point registers (VFP and Advanced SIMD/NEON) */ | 107 | /* Floating point registers (VFP and Advanced SIMD/NEON) */ |
101 | struct vfp_hard_struct vfp_guest; | 108 | kvm_kernel_vfp_t vfp_guest; |
102 | struct vfp_hard_struct *vfp_host; | 109 | kvm_kernel_vfp_t *vfp_host; |
103 | 110 | ||
104 | /* VGIC state */ | 111 | /* VGIC state */ |
105 | struct vgic_cpu vgic_cpu; | 112 | struct vgic_cpu vgic_cpu; |
@@ -122,9 +129,6 @@ struct kvm_vcpu_arch { | |||
122 | /* Interrupt related fields */ | 129 | /* Interrupt related fields */ |
123 | u32 irq_lines; /* IRQ and FIQ levels */ | 130 | u32 irq_lines; /* IRQ and FIQ levels */ |
124 | 131 | ||
125 | /* Hyp exception information */ | ||
126 | u32 hyp_pc; /* PC when exception was taken from Hyp mode */ | ||
127 | |||
128 | /* Cache some mmu pages needed inside spinlock regions */ | 132 | /* Cache some mmu pages needed inside spinlock regions */ |
129 | struct kvm_mmu_memory_cache mmu_page_cache; | 133 | struct kvm_mmu_memory_cache mmu_page_cache; |
130 | 134 | ||
@@ -181,4 +185,26 @@ struct kvm_one_reg; | |||
181 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | 185 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); |
182 | int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | 186 | int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); |
183 | 187 | ||
188 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
189 | int exception_index); | ||
190 | |||
191 | static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr, | ||
192 | unsigned long hyp_stack_ptr, | ||
193 | unsigned long vector_ptr) | ||
194 | { | ||
195 | unsigned long pgd_low, pgd_high; | ||
196 | |||
197 | pgd_low = (pgd_ptr & ((1ULL << 32) - 1)); | ||
198 | pgd_high = (pgd_ptr >> 32ULL); | ||
199 | |||
200 | /* | ||
201 | * Call initialization code, and switch to the full blown | ||
202 | * HYP code. The init code doesn't need to preserve these registers as | ||
203 | * r1-r3 and r12 are already callee save according to the AAPCS. | ||
204 | * Note that we slightly misuse the prototype by casting the pgd_low to | ||
205 | * a void *. | ||
206 | */ | ||
207 | kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr); | ||
208 | } | ||
209 | |||
184 | #endif /* __ARM_KVM_HOST_H__ */ | 210 | #endif /* __ARM_KVM_HOST_H__ */ |
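
A worked example of the low/high split that __cpu_init_hyp_mode() performs (the HTTBR value is hypothetical): the two halves travel in r0/r1 of the AAPCS call, which is why pgd_low is cast to a void * in place of the usual function-pointer argument.

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical 40-bit LPAE address for the HYP pgd. */
	unsigned long long pgd_ptr = 0x0000008040004000ULL;

	unsigned long pgd_low  = pgd_ptr & ((1ULL << 32) - 1);
	unsigned long pgd_high = pgd_ptr >> 32;

	printf("r0 (pgd_low)  = 0x%08lx\n", pgd_low);	/* 0x40004000 */
	printf("r1 (pgd_high) = 0x%08lx\n", pgd_high);	/* 0x00000080 */
	return 0;
}
```
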
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b34874..970f3b5fa109 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@ | |||
19 | #ifndef __ARM_KVM_MMU_H__ | 19 | #ifndef __ARM_KVM_MMU_H__ |
20 | #define __ARM_KVM_MMU_H__ | 20 | #define __ARM_KVM_MMU_H__ |
21 | 21 | ||
22 | #include <asm/cacheflush.h> | ||
23 | #include <asm/pgalloc.h> | ||
24 | #include <asm/idmap.h> | ||
25 | |||
26 | /* | ||
27 | * We directly use the kernel VA for the HYP, as we can directly share | ||
28 | * the mapping (HTTBR "covers" TTBR1). | ||
29 | */ | ||
30 | #define HYP_PAGE_OFFSET_MASK (~0UL) | ||
31 | #define HYP_PAGE_OFFSET PAGE_OFFSET | ||
32 | #define KERN_TO_HYP(kva) (kva) | ||
33 | |||
22 | int create_hyp_mappings(void *from, void *to); | 34 | int create_hyp_mappings(void *from, void *to); |
23 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t); | 35 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t); |
24 | void free_hyp_pmds(void); | 36 | void free_hyp_pmds(void); |
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void); | |||
36 | int kvm_mmu_init(void); | 48 | int kvm_mmu_init(void); |
37 | void kvm_clear_hyp_idmap(void); | 49 | void kvm_clear_hyp_idmap(void); |
38 | 50 | ||
51 | static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) | ||
52 | { | ||
53 | pte_val(*pte) = new_pte; | ||
54 | /* | ||
55 | * flush_pmd_entry just takes a void pointer and cleans the necessary | ||
56 | * cache entries, so we can reuse the function for ptes. | ||
57 | */ | ||
58 | flush_pmd_entry(pte); | ||
59 | } | ||
60 | |||
39 | static inline bool kvm_is_write_fault(unsigned long hsr) | 61 | static inline bool kvm_is_write_fault(unsigned long hsr) |
40 | { | 62 | { |
41 | unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; | 63 | unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; |
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr) | |||
47 | return true; | 69 | return true; |
48 | } | 70 | } |
49 | 71 | ||
72 | static inline void kvm_clean_pgd(pgd_t *pgd) | ||
73 | { | ||
74 | clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); | ||
75 | } | ||
76 | |||
77 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) | ||
78 | { | ||
79 | clean_pmd_entry(pmd); | ||
80 | } | ||
81 | |||
82 | static inline void kvm_clean_pte(pte_t *pte) | ||
83 | { | ||
84 | clean_pte_table(pte); | ||
85 | } | ||
86 | |||
87 | static inline void kvm_set_s2pte_writable(pte_t *pte) | ||
88 | { | ||
89 | pte_val(*pte) |= L_PTE_S2_RDWR; | ||
90 | } | ||
91 | |||
92 | struct kvm; | ||
93 | |||
94 | static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) | ||
95 | { | ||
96 | /* | ||
97 | * If we are going to insert an instruction page and the icache is | ||
98 | * either VIPT or PIPT, there is a potential problem where the host | ||
99 | * (or another VM) may have used the same page as this guest, and we | ||
100 | * read incorrect data from the icache. If we're using a PIPT cache, | ||
101 | * we can invalidate just that page, but if we are using a VIPT cache | ||
102 | * we need to invalidate the entire icache - damn shame - as written | ||
103 | * in the ARM ARM (DDI 0406C.b - Page B3-1393). | ||
104 | * | ||
105 | * VIVT caches are tagged using both the ASID and the VMID and don't | ||
106 | * need any kind of flushing (DDI 0406C.b - Page B3-1392). | ||
107 | */ | ||
108 | if (icache_is_pipt()) { | ||
109 | unsigned long hva = gfn_to_hva(kvm, gfn); | ||
110 | __cpuc_coherent_user_range(hva, hva + PAGE_SIZE); | ||
111 | } else if (!icache_is_vivt_asid_tagged()) { | ||
112 | /* any kind of VIPT cache */ | ||
113 | __flush_icache_all(); | ||
114 | } | ||
115 | } | ||
116 | |||
50 | #endif /* __ARM_KVM_MMU_H__ */ | 117 | #endif /* __ARM_KVM_MMU_H__ */ |
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index ab97207d9cd3..343744e4809c 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -21,7 +21,6 @@ | |||
21 | 21 | ||
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/kvm.h> | 23 | #include <linux/kvm.h> |
24 | #include <linux/kvm_host.h> | ||
25 | #include <linux/irqreturn.h> | 24 | #include <linux/irqreturn.h> |
26 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
27 | #include <linux/types.h> | 26 | #include <linux/types.h> |
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 023bfeb367bf..c1ee007523d7 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -53,12 +53,12 @@ | |||
53 | #define KVM_ARM_FIQ_spsr fiq_regs[7] | 53 | #define KVM_ARM_FIQ_spsr fiq_regs[7] |
54 | 54 | ||
55 | struct kvm_regs { | 55 | struct kvm_regs { |
56 | struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */ | 56 | struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */ |
57 | __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ | 57 | unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ |
58 | __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ | 58 | unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ |
59 | __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */ | 59 | unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */ |
60 | __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ | 60 | unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ |
61 | __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ | 61 | unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ |
62 | }; | 62 | }; |
63 | 63 | ||
64 | /* Supported Processor Types */ | 64 | /* Supported Processor Types */ |
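
The __u32 to unsigned long switch in this UAPI struct is layout-neutral because the header is only ever built for 32-bit ARM, where unsigned long is 32 bits wide; a compile-time check of that assumption:

```c
#include <stdint.h>

/* Holds on any ILP32 ARM target (the only place this header is used),
 * so struct kvm_regs keeps exactly the same layout. */
_Static_assert(sizeof(unsigned long) == sizeof(uint32_t),
	       "kvm_regs layout assumes a 32-bit unsigned long");
```
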
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 5ce738b43508..3c09d8c7798b 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -165,10 +165,10 @@ int main(void) | |||
165 | DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); | 165 | DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); |
166 | DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); | 166 | DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); |
167 | DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); | 167 | DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); |
168 | DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr)); | 168 | DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr)); |
169 | DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar)); | 169 | DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar)); |
170 | DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar)); | 170 | DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar)); |
171 | DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc)); | 171 | DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); |
172 | #ifdef CONFIG_KVM_ARM_VGIC | 172 | #ifdef CONFIG_KVM_ARM_VGIC |
173 | DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); | 173 | DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); |
174 | DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); | 174 | DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); |
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index fc96ce6f2357..8dc5e76cb789 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -17,7 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) | |||
17 | kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | 17 | kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) |
18 | 18 | ||
19 | obj-y += kvm-arm.o init.o interrupts.o | 19 | obj-y += kvm-arm.o init.o interrupts.o |
20 | obj-y += arm.o guest.o mmu.o emulate.o reset.o | 20 | obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o |
21 | obj-y += coproc.o coproc_a15.o mmio.o psci.o | 21 | obj-y += coproc.o coproc_a15.o mmio.o psci.o |
22 | obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o | 22 | obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o |
23 | obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o | 23 | obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o |
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5a936988eb24..c10a45fa73f7 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -30,11 +30,9 @@ | |||
30 | #define CREATE_TRACE_POINTS | 30 | #define CREATE_TRACE_POINTS |
31 | #include "trace.h" | 31 | #include "trace.h" |
32 | 32 | ||
33 | #include <asm/unified.h> | ||
34 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
35 | #include <asm/ptrace.h> | 34 | #include <asm/ptrace.h> |
36 | #include <asm/mman.h> | 35 | #include <asm/mman.h> |
37 | #include <asm/cputype.h> | ||
38 | #include <asm/tlbflush.h> | 36 | #include <asm/tlbflush.h> |
39 | #include <asm/cacheflush.h> | 37 | #include <asm/cacheflush.h> |
40 | #include <asm/virt.h> | 38 | #include <asm/virt.h> |
@@ -44,14 +42,13 @@ | |||
44 | #include <asm/kvm_emulate.h> | 42 | #include <asm/kvm_emulate.h> |
45 | #include <asm/kvm_coproc.h> | 43 | #include <asm/kvm_coproc.h> |
46 | #include <asm/kvm_psci.h> | 44 | #include <asm/kvm_psci.h> |
47 | #include <asm/opcodes.h> | ||
48 | 45 | ||
49 | #ifdef REQUIRES_VIRT | 46 | #ifdef REQUIRES_VIRT |
50 | __asm__(".arch_extension virt"); | 47 | __asm__(".arch_extension virt"); |
51 | #endif | 48 | #endif |
52 | 49 | ||
53 | static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); | 50 | static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); |
54 | static struct vfp_hard_struct __percpu *kvm_host_vfp_state; | 51 | static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state; |
55 | static unsigned long hyp_default_vectors; | 52 | static unsigned long hyp_default_vectors; |
56 | 53 | ||
57 | /* Per-CPU variable containing the currently running vcpu. */ | 54 | /* Per-CPU variable containing the currently running vcpu. */ |
@@ -303,22 +300,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | |||
303 | return 0; | 300 | return 0; |
304 | } | 301 | } |
305 | 302 | ||
306 | int __attribute_const__ kvm_target_cpu(void) | ||
307 | { | ||
308 | unsigned long implementor = read_cpuid_implementor(); | ||
309 | unsigned long part_number = read_cpuid_part_number(); | ||
310 | |||
311 | if (implementor != ARM_CPU_IMP_ARM) | ||
312 | return -EINVAL; | ||
313 | |||
314 | switch (part_number) { | ||
315 | case ARM_CPU_PART_CORTEX_A15: | ||
316 | return KVM_ARM_TARGET_CORTEX_A15; | ||
317 | default: | ||
318 | return -EINVAL; | ||
319 | } | ||
320 | } | ||
321 | |||
322 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | 303 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) |
323 | { | 304 | { |
324 | int ret; | 305 | int ret; |
@@ -481,163 +462,6 @@ static void update_vttbr(struct kvm *kvm) | |||
481 | spin_unlock(&kvm_vmid_lock); | 462 | spin_unlock(&kvm_vmid_lock); |
482 | } | 463 | } |
483 | 464 | ||
484 | static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
485 | { | ||
486 | /* SVC called from Hyp mode should never get here */ | ||
487 | kvm_debug("SVC called from Hyp mode shouldn't go here\n"); | ||
488 | BUG(); | ||
489 | return -EINVAL; /* Squash warning */ | ||
490 | } | ||
491 | |||
492 | static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
493 | { | ||
494 | trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), | ||
495 | vcpu->arch.hsr & HSR_HVC_IMM_MASK); | ||
496 | |||
497 | if (kvm_psci_call(vcpu)) | ||
498 | return 1; | ||
499 | |||
500 | kvm_inject_undefined(vcpu); | ||
501 | return 1; | ||
502 | } | ||
503 | |||
504 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
505 | { | ||
506 | if (kvm_psci_call(vcpu)) | ||
507 | return 1; | ||
508 | |||
509 | kvm_inject_undefined(vcpu); | ||
510 | return 1; | ||
511 | } | ||
512 | |||
513 | static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
514 | { | ||
515 | /* The hypervisor should never cause aborts */ | ||
516 | kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", | ||
517 | vcpu->arch.hxfar, vcpu->arch.hsr); | ||
518 | return -EFAULT; | ||
519 | } | ||
520 | |||
521 | static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
522 | { | ||
523 | /* This is either an error in the ws. code or an external abort */ | ||
524 | kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", | ||
525 | vcpu->arch.hxfar, vcpu->arch.hsr); | ||
526 | return -EFAULT; | ||
527 | } | ||
528 | |||
529 | typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); | ||
530 | static exit_handle_fn arm_exit_handlers[] = { | ||
531 | [HSR_EC_WFI] = kvm_handle_wfi, | ||
532 | [HSR_EC_CP15_32] = kvm_handle_cp15_32, | ||
533 | [HSR_EC_CP15_64] = kvm_handle_cp15_64, | ||
534 | [HSR_EC_CP14_MR] = kvm_handle_cp14_access, | ||
535 | [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, | ||
536 | [HSR_EC_CP14_64] = kvm_handle_cp14_access, | ||
537 | [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, | ||
538 | [HSR_EC_CP10_ID] = kvm_handle_cp10_id, | ||
539 | [HSR_EC_SVC_HYP] = handle_svc_hyp, | ||
540 | [HSR_EC_HVC] = handle_hvc, | ||
541 | [HSR_EC_SMC] = handle_smc, | ||
542 | [HSR_EC_IABT] = kvm_handle_guest_abort, | ||
543 | [HSR_EC_IABT_HYP] = handle_pabt_hyp, | ||
544 | [HSR_EC_DABT] = kvm_handle_guest_abort, | ||
545 | [HSR_EC_DABT_HYP] = handle_dabt_hyp, | ||
546 | }; | ||
547 | |||
548 | /* | ||
549 | * A conditional instruction is allowed to trap, even though it | ||
550 | * wouldn't be executed. So let's re-implement the hardware, in | ||
551 | * software! | ||
552 | */ | ||
553 | static bool kvm_condition_valid(struct kvm_vcpu *vcpu) | ||
554 | { | ||
555 | unsigned long cpsr, cond, insn; | ||
556 | |||
557 | /* | ||
558 | * Exception Code 0 can only happen if we set HCR.TGE to 1, to | ||
559 | * catch undefined instructions, and then we won't get past | ||
560 | * the arm_exit_handlers test anyway. | ||
561 | */ | ||
562 | BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0); | ||
563 | |||
564 | /* Top two bits non-zero? Unconditional. */ | ||
565 | if (vcpu->arch.hsr >> 30) | ||
566 | return true; | ||
567 | |||
568 | cpsr = *vcpu_cpsr(vcpu); | ||
569 | |||
570 | /* Is condition field valid? */ | ||
571 | if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT) | ||
572 | cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT; | ||
573 | else { | ||
574 | /* This can happen in Thumb mode: examine IT state. */ | ||
575 | unsigned long it; | ||
576 | |||
577 | it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); | ||
578 | |||
579 | /* it == 0 => unconditional. */ | ||
580 | if (it == 0) | ||
581 | return true; | ||
582 | |||
583 | /* The cond for this insn works out as the top 4 bits. */ | ||
584 | cond = (it >> 4); | ||
585 | } | ||
586 | |||
587 | /* Shift makes it look like an ARM-mode instruction */ | ||
588 | insn = cond << 28; | ||
589 | return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL; | ||
590 | } | ||
591 | |||
592 | /* | ||
593 | * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on | ||
594 | * proper exit to QEMU. | ||
595 | */ | ||
596 | static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
597 | int exception_index) | ||
598 | { | ||
599 | unsigned long hsr_ec; | ||
600 | |||
601 | switch (exception_index) { | ||
602 | case ARM_EXCEPTION_IRQ: | ||
603 | return 1; | ||
604 | case ARM_EXCEPTION_UNDEFINED: | ||
605 | kvm_err("Undefined exception in Hyp mode at: %#08x\n", | ||
606 | vcpu->arch.hyp_pc); | ||
607 | BUG(); | ||
608 | panic("KVM: Hypervisor undefined exception!\n"); | ||
609 | case ARM_EXCEPTION_DATA_ABORT: | ||
610 | case ARM_EXCEPTION_PREF_ABORT: | ||
611 | case ARM_EXCEPTION_HVC: | ||
612 | hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT; | ||
613 | |||
614 | if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) | ||
615 | || !arm_exit_handlers[hsr_ec]) { | ||
616 | kvm_err("Unkown exception class: %#08lx, " | ||
617 | "hsr: %#08x\n", hsr_ec, | ||
618 | (unsigned int)vcpu->arch.hsr); | ||
619 | BUG(); | ||
620 | } | ||
621 | |||
622 | /* | ||
623 | * See ARM ARM B1.14.1: "Hyp traps on instructions | ||
624 | * that fail their condition code check" | ||
625 | */ | ||
626 | if (!kvm_condition_valid(vcpu)) { | ||
627 | bool is_wide = vcpu->arch.hsr & HSR_IL; | ||
628 | kvm_skip_instr(vcpu, is_wide); | ||
629 | return 1; | ||
630 | } | ||
631 | |||
632 | return arm_exit_handlers[hsr_ec](vcpu, run); | ||
633 | default: | ||
634 | kvm_pr_unimpl("Unsupported exception type: %d", | ||
635 | exception_index); | ||
636 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
637 | return 0; | ||
638 | } | ||
639 | } | ||
640 | |||
641 | static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) | 465 | static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) |
642 | { | 466 | { |
643 | if (likely(vcpu->arch.has_run_once)) | 467 | if (likely(vcpu->arch.has_run_once)) |
@@ -972,7 +796,6 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
972 | static void cpu_init_hyp_mode(void *vector) | 796 | static void cpu_init_hyp_mode(void *vector) |
973 | { | 797 | { |
974 | unsigned long long pgd_ptr; | 798 | unsigned long long pgd_ptr; |
975 | unsigned long pgd_low, pgd_high; | ||
976 | unsigned long hyp_stack_ptr; | 799 | unsigned long hyp_stack_ptr; |
977 | unsigned long stack_page; | 800 | unsigned long stack_page; |
978 | unsigned long vector_ptr; | 801 | unsigned long vector_ptr; |
@@ -981,20 +804,11 @@ static void cpu_init_hyp_mode(void *vector) | |||
981 | __hyp_set_vectors((unsigned long)vector); | 804 | __hyp_set_vectors((unsigned long)vector); |
982 | 805 | ||
983 | pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); | 806 | pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); |
984 | pgd_low = (pgd_ptr & ((1ULL << 32) - 1)); | ||
985 | pgd_high = (pgd_ptr >> 32ULL); | ||
986 | stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); | 807 | stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); |
987 | hyp_stack_ptr = stack_page + PAGE_SIZE; | 808 | hyp_stack_ptr = stack_page + PAGE_SIZE; |
988 | vector_ptr = (unsigned long)__kvm_hyp_vector; | 809 | vector_ptr = (unsigned long)__kvm_hyp_vector; |
989 | 810 | ||
990 | /* | 811 | __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr); |
991 | * Call initialization code, and switch to the full blown | ||
992 | * HYP code. The init code doesn't need to preserve these registers as | ||
993 | * r1-r3 and r12 are already callee save according to the AAPCS. | ||
994 | * Note that we slightly misuse the prototype by casting the pgd_low to | ||
995 | * a void *. | ||
996 | */ | ||
997 | kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr); | ||
998 | } | 812 | } |
999 | 813 | ||
1000 | /** | 814 | /** |
@@ -1077,7 +891,7 @@ static int init_hyp_mode(void) | |||
1077 | /* | 891 | /* |
1078 | * Map the host VFP structures | 892 | * Map the host VFP structures |
1079 | */ | 893 | */ |
1080 | kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct); | 894 | kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t); |
1081 | if (!kvm_host_vfp_state) { | 895 | if (!kvm_host_vfp_state) { |
1082 | err = -ENOMEM; | 896 | err = -ENOMEM; |
1083 | kvm_err("Cannot allocate host VFP state\n"); | 897 | kvm_err("Cannot allocate host VFP state\n"); |
@@ -1085,7 +899,7 @@ static int init_hyp_mode(void) | |||
1085 | } | 899 | } |
1086 | 900 | ||
1087 | for_each_possible_cpu(cpu) { | 901 | for_each_possible_cpu(cpu) { |
1088 | struct vfp_hard_struct *vfp; | 902 | kvm_kernel_vfp_t *vfp; |
1089 | 903 | ||
1090 | vfp = per_cpu_ptr(kvm_host_vfp_state, cpu); | 904 | vfp = per_cpu_ptr(kvm_host_vfp_state, cpu); |
1091 | err = create_hyp_mappings(vfp, vfp + 1); | 905 | err = create_hyp_mappings(vfp, vfp + 1); |
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4ea9a982269c..94eee8befc83 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -76,7 +76,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, | |||
76 | const struct coproc_params *p, | 76 | const struct coproc_params *p, |
77 | const struct coproc_reg *r) | 77 | const struct coproc_reg *r) |
78 | { | 78 | { |
79 | u32 val; | 79 | unsigned long val; |
80 | int cpu; | 80 | int cpu; |
81 | 81 | ||
82 | cpu = get_cpu(); | 82 | cpu = get_cpu(); |
@@ -293,12 +293,12 @@ static int emulate_cp15(struct kvm_vcpu *vcpu, | |||
293 | 293 | ||
294 | if (likely(r->access(vcpu, params, r))) { | 294 | if (likely(r->access(vcpu, params, r))) { |
295 | /* Skip instruction, since it was emulated */ | 295 | /* Skip instruction, since it was emulated */ |
296 | kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); | 296 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
297 | return 1; | 297 | return 1; |
298 | } | 298 | } |
299 | /* If access function fails, it should complain. */ | 299 | /* If access function fails, it should complain. */ |
300 | } else { | 300 | } else { |
301 | kvm_err("Unsupported guest CP15 access at: %08x\n", | 301 | kvm_err("Unsupported guest CP15 access at: %08lx\n", |
302 | *vcpu_pc(vcpu)); | 302 | *vcpu_pc(vcpu)); |
303 | print_cp_instr(params); | 303 | print_cp_instr(params); |
304 | } | 304 | } |
@@ -315,14 +315,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
315 | { | 315 | { |
316 | struct coproc_params params; | 316 | struct coproc_params params; |
317 | 317 | ||
318 | params.CRm = (vcpu->arch.hsr >> 1) & 0xf; | 318 | params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; |
319 | params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; | 319 | params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; |
320 | params.is_write = ((vcpu->arch.hsr & 1) == 0); | 320 | params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); |
321 | params.is_64bit = true; | 321 | params.is_64bit = true; |
322 | 322 | ||
323 | params.Op1 = (vcpu->arch.hsr >> 16) & 0xf; | 323 | params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf; |
324 | params.Op2 = 0; | 324 | params.Op2 = 0; |
325 | params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf; | 325 | params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; |
326 | params.CRn = 0; | 326 | params.CRn = 0; |
327 | 327 | ||
328 | return emulate_cp15(vcpu, &params); | 328 | return emulate_cp15(vcpu, &params);
@@ -347,14 +347,14 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
347 | { | 347 | { |
348 | struct coproc_params params; | 348 | struct coproc_params params; |
349 | 349 | ||
350 | params.CRm = (vcpu->arch.hsr >> 1) & 0xf; | 350 | params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; |
351 | params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; | 351 | params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; |
352 | params.is_write = ((vcpu->arch.hsr & 1) == 0); | 352 | params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); |
353 | params.is_64bit = false; | 353 | params.is_64bit = false; |
354 | 354 | ||
355 | params.CRn = (vcpu->arch.hsr >> 10) & 0xf; | 355 | params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; |
356 | params.Op1 = (vcpu->arch.hsr >> 14) & 0x7; | 356 | params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7; |
357 | params.Op2 = (vcpu->arch.hsr >> 17) & 0x7; | 357 | params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7; |
358 | params.Rt2 = 0; | 358 | params.Rt2 = 0; |
359 | 359 | ||
360 | return emulate_cp15(vcpu, &params); | 360 | return emulate_cp15(vcpu, &params);
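
A standalone worked decode of the CP15 field extraction used in kvm_handle_cp15_32() above (the HSR value is invented):

```c
#include <stdio.h>

int main(void)
{
	unsigned int hsr = 0x03a00047;	/* invented CP15-32 syndrome */

	/* Field positions copied from kvm_handle_cp15_32() above. */
	unsigned int is_write = ((hsr & 1) == 0);
	unsigned int CRm = (hsr >> 1) & 0xf;
	unsigned int Rt1 = (hsr >> 5) & 0xf;
	unsigned int CRn = (hsr >> 10) & 0xf;
	unsigned int Op1 = (hsr >> 14) & 0x7;
	unsigned int Op2 = (hsr >> 17) & 0x7;

	printf("p15, %u, r%u, c%u, c%u, %u (%s)\n",
	       Op1, Rt1, CRn, CRm, Op2, is_write ? "write" : "read");
	return 0;
}
```
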
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 992adfafa2ff..b7301d3e4799 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -84,7 +84,7 @@ static inline bool read_zero(struct kvm_vcpu *vcpu, | |||
84 | static inline bool write_to_read_only(struct kvm_vcpu *vcpu, | 84 | static inline bool write_to_read_only(struct kvm_vcpu *vcpu, |
85 | const struct coproc_params *params) | 85 | const struct coproc_params *params) |
86 | { | 86 | { |
87 | kvm_debug("CP15 write to read-only register at: %08x\n", | 87 | kvm_debug("CP15 write to read-only register at: %08lx\n", |
88 | *vcpu_pc(vcpu)); | 88 | *vcpu_pc(vcpu)); |
89 | print_cp_instr(params); | 89 | print_cp_instr(params); |
90 | return false; | 90 | return false; |
@@ -93,7 +93,7 @@ static inline bool write_to_read_only(struct kvm_vcpu *vcpu, | |||
93 | static inline bool read_from_write_only(struct kvm_vcpu *vcpu, | 93 | static inline bool read_from_write_only(struct kvm_vcpu *vcpu, |
94 | const struct coproc_params *params) | 94 | const struct coproc_params *params) |
95 | { | 95 | { |
96 | kvm_debug("CP15 read to write-only register at: %08x\n", | 96 | kvm_debug("CP15 read to write-only register at: %08lx\n", |
97 | *vcpu_pc(vcpu)); | 97 | *vcpu_pc(vcpu)); |
98 | print_cp_instr(params); | 98 | print_cp_instr(params); |
99 | return false; | 99 | return false; |
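
The %08x to %08lx changes in this file (and in coproc.c above) follow vcpu_pc() now returning unsigned long; the format specifier has to track the argument type:

```c
#include <stdio.h>

int main(void)
{
	unsigned long pc = 0x80123456UL;	/* hypothetical guest PC */

	/* %08lx matches unsigned long; keeping %08x would be undefined
	 * behaviour on any target where long is wider than int. */
	printf("CP15 write to read-only register at: %08lx\n", pc);
	return 0;
}
```
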
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index d61450ac6665..bdede9e7da51 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
21 | #include <asm/kvm_arm.h> | 21 | #include <asm/kvm_arm.h> |
22 | #include <asm/kvm_emulate.h> | 22 | #include <asm/kvm_emulate.h> |
23 | #include <asm/opcodes.h> | ||
23 | #include <trace/events/kvm.h> | 24 | #include <trace/events/kvm.h> |
24 | 25 | ||
25 | #include "trace.h" | 26 | #include "trace.h" |
@@ -109,10 +110,10 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = { | |||
109 | * Return a pointer to the register number valid in the current mode of | 110 | * Return a pointer to the register number valid in the current mode of |
110 | * the virtual CPU. | 111 | * the virtual CPU. |
111 | */ | 112 | */ |
112 | u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) | 113 | unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) |
113 | { | 114 | { |
114 | u32 *reg_array = (u32 *)&vcpu->arch.regs; | 115 | unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs; |
115 | u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; | 116 | unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; |
116 | 117 | ||
117 | switch (mode) { | 118 | switch (mode) { |
118 | case USR_MODE...SVC_MODE: | 119 | case USR_MODE...SVC_MODE: |
@@ -141,9 +142,9 @@ u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) | |||
141 | /* | 142 | /* |
142 | * Return the SPSR for the current mode of the virtual CPU. | 143 | * Return the SPSR for the current mode of the virtual CPU. |
143 | */ | 144 | */ |
144 | u32 *vcpu_spsr(struct kvm_vcpu *vcpu) | 145 | unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu) |
145 | { | 146 | { |
146 | u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; | 147 | unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; |
147 | switch (mode) { | 148 | switch (mode) { |
148 | case SVC_MODE: | 149 | case SVC_MODE: |
149 | return &vcpu->arch.regs.KVM_ARM_SVC_spsr; | 150 | return &vcpu->arch.regs.KVM_ARM_SVC_spsr; |
@@ -160,20 +161,48 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu) | |||
160 | } | 161 | } |
161 | } | 162 | } |
162 | 163 | ||
163 | /** | 164 | /* |
164 | * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest | 165 | * A conditional instruction is allowed to trap, even though it |
165 | * @vcpu: the vcpu pointer | 166 | * wouldn't be executed. So let's re-implement the hardware, in |
166 | * @run: the kvm_run structure pointer | 167 | * software! |
167 | * | ||
168 | * Simply sets the wait_for_interrupts flag on the vcpu structure, which will | ||
169 | * halt execution of world-switches and schedule other host processes until | ||
170 | * there is an incoming IRQ or FIQ to the VM. | ||
171 | */ | 168 | */ |
172 | int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) | 169 | bool kvm_condition_valid(struct kvm_vcpu *vcpu) |
173 | { | 170 | { |
174 | trace_kvm_wfi(*vcpu_pc(vcpu)); | 171 | unsigned long cpsr, cond, insn; |
175 | kvm_vcpu_block(vcpu); | 172 | |
176 | return 1; | 173 | /* |
174 | * Exception Code 0 can only happen if we set HCR.TGE to 1, to | ||
175 | * catch undefined instructions, and then we won't get past | ||
176 | * the arm_exit_handlers test anyway. | ||
177 | */ | ||
178 | BUG_ON(!kvm_vcpu_trap_get_class(vcpu)); | ||
179 | |||
180 | /* Top two bits non-zero? Unconditional. */ | ||
181 | if (kvm_vcpu_get_hsr(vcpu) >> 30) | ||
182 | return true; | ||
183 | |||
184 | cpsr = *vcpu_cpsr(vcpu); | ||
185 | |||
186 | /* Is condition field valid? */ | ||
187 | if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT) | ||
188 | cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT; | ||
189 | else { | ||
190 | /* This can happen in Thumb mode: examine IT state. */ | ||
191 | unsigned long it; | ||
192 | |||
193 | it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); | ||
194 | |||
195 | /* it == 0 => unconditional. */ | ||
196 | if (it == 0) | ||
197 | return true; | ||
198 | |||
199 | /* The cond for this insn works out as the top 4 bits. */ | ||
200 | cond = (it >> 4); | ||
201 | } | ||
202 | |||
203 | /* Shift makes it look like an ARM-mode instruction */ | ||
204 | insn = cond << 28; | ||
205 | return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL; | ||
177 | } | 206 | } |
178 | 207 | ||
179 | /** | 208 | /** |
@@ -257,9 +286,9 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu) | |||
257 | */ | 286 | */ |
258 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) | 287 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) |
259 | { | 288 | { |
260 | u32 new_lr_value; | 289 | unsigned long new_lr_value; |
261 | u32 new_spsr_value; | 290 | unsigned long new_spsr_value; |
262 | u32 cpsr = *vcpu_cpsr(vcpu); | 291 | unsigned long cpsr = *vcpu_cpsr(vcpu); |
263 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; | 292 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; |
264 | bool is_thumb = (cpsr & PSR_T_BIT); | 293 | bool is_thumb = (cpsr & PSR_T_BIT); |
265 | u32 vect_offset = 4; | 294 | u32 vect_offset = 4; |
@@ -291,9 +320,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) | |||
291 | */ | 320 | */ |
292 | static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) | 321 | static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) |
293 | { | 322 | { |
294 | u32 new_lr_value; | 323 | unsigned long new_lr_value; |
295 | u32 new_spsr_value; | 324 | unsigned long new_spsr_value; |
296 | u32 cpsr = *vcpu_cpsr(vcpu); | 325 | unsigned long cpsr = *vcpu_cpsr(vcpu); |
297 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; | 326 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; |
298 | bool is_thumb = (cpsr & PSR_T_BIT); | 327 | bool is_thumb = (cpsr & PSR_T_BIT); |
299 | u32 vect_offset; | 328 | u32 vect_offset; |
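
A standalone worked example of the Thumb IT-state extraction in kvm_condition_valid() above: IT[7:2] live in CPSR bits [15:10] and IT[1:0] in bits [26:25], which is what the shift-and-mask pair reassembles (the CPSR value is invented):

```c
#include <stdio.h>

int main(void)
{
	unsigned long cpsr = 0x0600b830;	/* invented Thumb-mode CPSR */

	unsigned long it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);

	if (it == 0)
		printf("unconditional\n");
	else
		printf("cond = %#lx\n", it >> 4);	/* prints cond = 0xb */
	return 0;
}
```
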
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 2339d9609d36..152d03612181 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
25 | #include <asm/cputype.h> | ||
25 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
26 | #include <asm/kvm.h> | 27 | #include <asm/kvm.h> |
27 | #include <asm/kvm_asm.h> | 28 | #include <asm/kvm_asm.h> |
@@ -180,6 +181,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
180 | return -EINVAL; | 181 | return -EINVAL; |
181 | } | 182 | } |
182 | 183 | ||
184 | int __attribute_const__ kvm_target_cpu(void) | ||
185 | { | ||
186 | unsigned long implementor = read_cpuid_implementor(); | ||
187 | unsigned long part_number = read_cpuid_part_number(); | ||
188 | |||
189 | if (implementor != ARM_CPU_IMP_ARM) | ||
190 | return -EINVAL; | ||
191 | |||
192 | switch (part_number) { | ||
193 | case ARM_CPU_PART_CORTEX_A15: | ||
194 | return KVM_ARM_TARGET_CORTEX_A15; | ||
195 | default: | ||
196 | return -EINVAL; | ||
197 | } | ||
198 | } | ||
199 | |||
183 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, | 200 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, |
184 | const struct kvm_vcpu_init *init) | 201 | const struct kvm_vcpu_init *init) |
185 | { | 202 | { |
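
kvm_target_cpu()'s return value is what userspace must hand back through KVM_ARM_VCPU_INIT; a hedged userspace sketch (vcpu_fd is assumed to be an already-open vcpu file descriptor):

```c
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int init_vcpu(int vcpu_fd)
{
	struct kvm_vcpu_init init;

	memset(&init, 0, sizeof(init));
	init.target = KVM_ARM_TARGET_CORTEX_A15; /* the only target accepted above */

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}
```
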
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
new file mode 100644
index 000000000000..26ad17310a1e
--- /dev/null
+++ b/arch/arm/kvm/handle_exit.c
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kvm.h> | ||
20 | #include <linux/kvm_host.h> | ||
21 | #include <asm/kvm_emulate.h> | ||
22 | #include <asm/kvm_coproc.h> | ||
23 | #include <asm/kvm_mmu.h> | ||
24 | #include <asm/kvm_psci.h> | ||
25 | #include <trace/events/kvm.h> | ||
26 | |||
27 | #include "trace.h" | ||
28 | |||
29 | #include "trace.h" | ||
30 | |||
31 | typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); | ||
32 | |||
33 | static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
34 | { | ||
35 | /* SVC called from Hyp mode should never get here */ | ||
36 | kvm_debug("SVC called from Hyp mode shouldn't go here\n"); | ||
37 | BUG(); | ||
38 | return -EINVAL; /* Squash warning */ | ||
39 | } | ||
40 | |||
41 | static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
42 | { | ||
43 | trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), | ||
44 | kvm_vcpu_hvc_get_imm(vcpu)); | ||
45 | |||
46 | if (kvm_psci_call(vcpu)) | ||
47 | return 1; | ||
48 | |||
49 | kvm_inject_undefined(vcpu); | ||
50 | return 1; | ||
51 | } | ||
52 | |||
53 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
54 | { | ||
55 | if (kvm_psci_call(vcpu)) | ||
56 | return 1; | ||
57 | |||
58 | kvm_inject_undefined(vcpu); | ||
59 | return 1; | ||
60 | } | ||
61 | |||
62 | static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
63 | { | ||
64 | /* The hypervisor should never cause aborts */ | ||
65 | kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n", | ||
66 | kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu)); | ||
67 | return -EFAULT; | ||
68 | } | ||
69 | |||
70 | static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
71 | { | ||
72 | /* This is either an error in the ws. code or an external abort */ | ||
73 | kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n", | ||
74 | kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu)); | ||
75 | return -EFAULT; | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest | ||
80 | * @vcpu: the vcpu pointer | ||
81 | * @run: the kvm_run structure pointer | ||
82 | * | ||
83 | * Simply sets the wait_for_interrupts flag on the vcpu structure, which will | ||
84 | * halt execution of world-switches and schedule other host processes until | ||
85 | * there is an incoming IRQ or FIQ to the VM. | ||
86 | */ | ||
87 | static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
88 | { | ||
89 | trace_kvm_wfi(*vcpu_pc(vcpu)); | ||
90 | kvm_vcpu_block(vcpu); | ||
91 | return 1; | ||
92 | } | ||
93 | |||
94 | static exit_handle_fn arm_exit_handlers[] = { | ||
95 | [HSR_EC_WFI] = kvm_handle_wfi, | ||
96 | [HSR_EC_CP15_32] = kvm_handle_cp15_32, | ||
97 | [HSR_EC_CP15_64] = kvm_handle_cp15_64, | ||
98 | [HSR_EC_CP14_MR] = kvm_handle_cp14_access, | ||
99 | [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, | ||
100 | [HSR_EC_CP14_64] = kvm_handle_cp14_access, | ||
101 | [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, | ||
102 | [HSR_EC_CP10_ID] = kvm_handle_cp10_id, | ||
103 | [HSR_EC_SVC_HYP] = handle_svc_hyp, | ||
104 | [HSR_EC_HVC] = handle_hvc, | ||
105 | [HSR_EC_SMC] = handle_smc, | ||
106 | [HSR_EC_IABT] = kvm_handle_guest_abort, | ||
107 | [HSR_EC_IABT_HYP] = handle_pabt_hyp, | ||
108 | [HSR_EC_DABT] = kvm_handle_guest_abort, | ||
109 | [HSR_EC_DABT_HYP] = handle_dabt_hyp, | ||
110 | }; | ||
111 | |||
112 | static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) | ||
113 | { | ||
114 | u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); | ||
115 | |||
116 | if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || | ||
117 | !arm_exit_handlers[hsr_ec]) { | ||
118 | kvm_err("Unkown exception class: hsr: %#08x\n", | ||
119 | (unsigned int)kvm_vcpu_get_hsr(vcpu)); | ||
120 | BUG(); | ||
121 | } | ||
122 | |||
123 | return arm_exit_handlers[hsr_ec]; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on | ||
128 | * proper exit to userspace. | ||
129 | */ | ||
130 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
131 | int exception_index) | ||
132 | { | ||
133 | exit_handle_fn exit_handler; | ||
134 | |||
135 | switch (exception_index) { | ||
136 | case ARM_EXCEPTION_IRQ: | ||
137 | return 1; | ||
138 | case ARM_EXCEPTION_UNDEFINED: | ||
139 | kvm_err("Undefined exception in Hyp mode at: %#08lx\n", | ||
140 | kvm_vcpu_get_hyp_pc(vcpu)); | ||
141 | BUG(); | ||
142 | panic("KVM: Hypervisor undefined exception!\n"); | ||
143 | case ARM_EXCEPTION_DATA_ABORT: | ||
144 | case ARM_EXCEPTION_PREF_ABORT: | ||
145 | case ARM_EXCEPTION_HVC: | ||
146 | /* | ||
147 | * See ARM ARM B1.14.1: "Hyp traps on instructions | ||
148 | * that fail their condition code check" | ||
149 | */ | ||
150 | if (!kvm_condition_valid(vcpu)) { | ||
151 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
152 | return 1; | ||
153 | } | ||
154 | |||
155 | exit_handler = kvm_get_exit_handler(vcpu); | ||
156 | |||
157 | return exit_handler(vcpu, run); | ||
158 | default: | ||
159 | kvm_pr_unimpl("Unsupported exception type: %d", | ||
160 | exception_index); | ||
161 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
162 | return 0; | ||
163 | } | ||
164 | } | ||
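
A hedged sketch of the caller side of the contract documented above (the real loop lives in kvm_arch_vcpu_ioctl_run() in arm.c):

```c
/* Sketch only: consume handle_exit()'s three-way return value. */
static int run_once(struct kvm_vcpu *vcpu, struct kvm_run *run,
		    int exception_index)
{
	int ret = handle_exit(vcpu, run, exception_index);

	if (ret < 0)
		return ret;	/* host error */
	if (ret == 0)
		return 0;	/* run->exit_reason is set: go to userspace */
	return 1;		/* > 0: re-enter the guest */
}
```
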
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 8ca87ab0919d..f7793df62f58 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -35,15 +35,18 @@ __kvm_hyp_code_start: | |||
35 | /******************************************************************** | 35 | /******************************************************************** |
36 | * Flush per-VMID TLBs | 36 | * Flush per-VMID TLBs |
37 | * | 37 | * |
38 | * void __kvm_tlb_flush_vmid(struct kvm *kvm); | 38 | * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); |
39 | * | 39 | * |
40 | * We rely on the hardware to broadcast the TLB invalidation to all CPUs | 40 | * We rely on the hardware to broadcast the TLB invalidation to all CPUs |
41 | * inside the inner-shareable domain (which is the case for all v7 | 41 | * inside the inner-shareable domain (which is the case for all v7 |
42 | * implementations). If we come across a non-IS SMP implementation, we'll | 42 | * implementations). If we come across a non-IS SMP implementation, we'll |
43 | * have to use an IPI based mechanism. Until then, we stick to the simple | 43 | * have to use an IPI based mechanism. Until then, we stick to the simple |
44 | * hardware assisted version. | 44 | * hardware assisted version. |
45 | * | ||
46 | * As v7 does not support flushing per IPA, just nuke the whole TLB | ||
47 | * instead, ignoring the ipa value. | ||
45 | */ | 48 | */ |
46 | ENTRY(__kvm_tlb_flush_vmid) | 49 | ENTRY(__kvm_tlb_flush_vmid_ipa) |
47 | push {r2, r3} | 50 | push {r2, r3} |
48 | 51 | ||
49 | add r0, r0, #KVM_VTTBR | 52 | add r0, r0, #KVM_VTTBR |
@@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid) | |||
60 | 63 | ||
61 | pop {r2, r3} | 64 | pop {r2, r3} |
62 | bx lr | 65 | bx lr |
63 | ENDPROC(__kvm_tlb_flush_vmid) | 66 | ENDPROC(__kvm_tlb_flush_vmid_ipa) |
64 | 67 | ||
65 | /******************************************************************** | 68 | /******************************************************************** |
66 | * Flush TLBs and instruction caches of all CPUs inside the inner-shareable | 69 | * Flush TLBs and instruction caches of all CPUs inside the inner-shareable |
@@ -235,9 +238,9 @@ ENTRY(kvm_call_hyp) | |||
235 | * instruction is issued since all traps are disabled when running the host | 238 | * instruction is issued since all traps are disabled when running the host |
236 | * kernel as per the Hyp-mode initialization at boot time. | 239 | * kernel as per the Hyp-mode initialization at boot time. |
237 | * | 240 | * |
238 | * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc | 241 | * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc |
239 | * below) when the HVC instruction is called from SVC mode (i.e. a guest or the | 242 | * below) when the HVC instruction is called from SVC mode (i.e. a guest or the |
240 | * host kernel) and they cause a trap to the vector page + offset 0xc when HVC | 243 | * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC |
241 | * instructions are called from within Hyp-mode. | 244 | * instructions are called from within Hyp-mode. |
242 | * | 245 | * |
243 | * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): | 246 | * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): |
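
The corrected offsets match the ARMv7 Hyp-mode vector table layout; summarized for reference (an annotation, not part of the patch):

```c
/* ARMv7-A Hyp-mode vector table offsets (ARM ARM, B1.8). */
enum hyp_vector_offset {
	HYP_VEC_UNDEF = 0x04,	/* Undefined Instruction from Hyp */
	HYP_VEC_HVC   = 0x08,	/* HVC issued while already in Hyp */
	HYP_VEC_PABT  = 0x0c,	/* Prefetch Abort from Hyp */
	HYP_VEC_DABT  = 0x10,	/* Data Abort from Hyp */
	HYP_VEC_TRAP  = 0x14,	/* Hyp entry: traps, incl. HVC from SVC */
	HYP_VEC_IRQ   = 0x18,
	HYP_VEC_FIQ   = 0x1c,
};
```
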
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 98a870ff1a5c..72a12f2171b2 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -33,16 +33,16 @@ | |||
33 | */ | 33 | */ |
34 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) | 34 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) |
35 | { | 35 | { |
36 | __u32 *dest; | 36 | unsigned long *dest; |
37 | unsigned int len; | 37 | unsigned int len; |
38 | int mask; | 38 | int mask; |
39 | 39 | ||
40 | if (!run->mmio.is_write) { | 40 | if (!run->mmio.is_write) { |
41 | dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); | 41 | dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); |
42 | memset(dest, 0, sizeof(int)); | 42 | *dest = 0; |
43 | 43 | ||
44 | len = run->mmio.len; | 44 | len = run->mmio.len; |
45 | if (len > 4) | 45 | if (len > sizeof(unsigned long)) |
46 | return -EINVAL; | 46 | return -EINVAL; |
47 | 47 | ||
48 | memcpy(dest, run->mmio.data, len); | 48 | memcpy(dest, run->mmio.data, len); |
@@ -50,7 +50,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
50 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, | 50 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, |
51 | *((u64 *)run->mmio.data)); | 51 | *((u64 *)run->mmio.data)); |
52 | 52 | ||
53 | if (vcpu->arch.mmio_decode.sign_extend && len < 4) { | 53 | if (vcpu->arch.mmio_decode.sign_extend && |
54 | len < sizeof(unsigned long)) { | ||
54 | mask = 1U << ((len * 8) - 1); | 55 | mask = 1U << ((len * 8) - 1); |
55 | *dest = (*dest ^ mask) - mask; | 56 | *dest = (*dest ^ mask) - mask; |
56 | } | 57 | } |
@@ -65,40 +66,29 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
65 | unsigned long rt, len; | 66 | unsigned long rt, len; |
66 | bool is_write, sign_extend; | 67 | bool is_write, sign_extend; |
67 | 68 | ||
68 | if ((vcpu->arch.hsr >> 8) & 1) { | 69 | if (kvm_vcpu_dabt_isextabt(vcpu)) { |
69 | /* cache operation on I/O addr, tell guest unsupported */ | 70 | /* cache operation on I/O addr, tell guest unsupported */ |
70 | kvm_inject_dabt(vcpu, vcpu->arch.hxfar); | 71 | kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
71 | return 1; | 72 | return 1; |
72 | } | 73 | } |
73 | 74 | ||
74 | if ((vcpu->arch.hsr >> 7) & 1) { | 75 | if (kvm_vcpu_dabt_iss1tw(vcpu)) { |
75 | /* page table accesses IO mem: tell guest to fix its TTBR */ | 76 | /* page table accesses IO mem: tell guest to fix its TTBR */ |
76 | kvm_inject_dabt(vcpu, vcpu->arch.hxfar); | 77 | kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
77 | return 1; | 78 | return 1; |
78 | } | 79 | } |
79 | 80 | ||
80 | switch ((vcpu->arch.hsr >> 22) & 0x3) { | 81 | len = kvm_vcpu_dabt_get_as(vcpu); |
81 | case 0: | 82 | if (unlikely(len < 0)) |
82 | len = 1; | 83 | return len; |
83 | break; | ||
84 | case 1: | ||
85 | len = 2; | ||
86 | break; | ||
87 | case 2: | ||
88 | len = 4; | ||
89 | break; | ||
90 | default: | ||
91 | kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); | ||
92 | return -EFAULT; | ||
93 | } | ||
94 | 84 | ||
95 | is_write = vcpu->arch.hsr & HSR_WNR; | 85 | is_write = kvm_vcpu_dabt_iswrite(vcpu); |
96 | sign_extend = vcpu->arch.hsr & HSR_SSE; | 86 | sign_extend = kvm_vcpu_dabt_issext(vcpu); |
97 | rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT; | 87 | rt = kvm_vcpu_dabt_get_rd(vcpu); |
98 | 88 | ||
99 | if (kvm_vcpu_reg_is_pc(vcpu, rt)) { | 89 | if (kvm_vcpu_reg_is_pc(vcpu, rt)) { |
100 | /* IO memory trying to read/write pc */ | 90 | /* IO memory trying to read/write pc */ |
101 | kvm_inject_pabt(vcpu, vcpu->arch.hxfar); | 91 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
102 | return 1; | 92 | return 1; |
103 | } | 93 | } |
104 | 94 | ||
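
The open-coded switch on HSR bits [23:22] (the SAS field) is folded into the new kvm_vcpu_dabt_get_as() accessor; a sketch consistent with the removed logic (the in-tree inline may differ in detail):

    /* Sketch: decode the access size (SAS) field of a data-abort HSR.
     * Returns the width in bytes, or -EFAULT for the reserved encoding,
     * mirroring the switch this hunk removes. */
    static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
    {
            switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
            case 0: return 1;
            case 1: return 2;
            case 2: return 4;
            default:
                    return -EFAULT;         /* SAS 0b11 is reserved */
            }
    }

One wrinkle worth noting: decode_hsr declares len as unsigned long, so the unlikely(len < 0) test can never be true as written; a negative return would instead show up as a huge length and have to be caught further downstream.
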
@@ -112,7 +102,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
112 | * The MMIO instruction is emulated and should not be re-executed | 102 | * The MMIO instruction is emulated and should not be re-executed |
113 | * in the guest. | 103 | * in the guest. |
114 | */ | 104 | */ |
115 | kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); | 105 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
116 | return 0; | 106 | return 0; |
117 | } | 107 | } |
118 | 108 | ||
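
kvm_skip_instr must advance the PC by either 2 or 4 bytes depending on whether the trapped instruction was a 16-bit Thumb encoding; HSR.IL (bit 25) records this, and the new kvm_vcpu_trap_il_is32bit() accessor simply names the (hsr >> 25) & 1 test it replaces. A sketch:

    /* Sketch: HSR.IL is set when the trapped instruction was 32 bits
     * wide, so the emulated skip advances the PC by 4 rather than 2. */
    static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
    {
            return !!(kvm_vcpu_get_hsr(vcpu) & (1U << 25));
    }
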
@@ -130,7 +120,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
130 | * space do its magic. | 120 | * space do its magic. |
131 | */ | 121 | */ |
132 | 122 | ||
133 | if (vcpu->arch.hsr & HSR_ISV) { | 123 | if (kvm_vcpu_dabt_isvalid(vcpu)) { |
134 | ret = decode_hsr(vcpu, fault_ipa, &mmio); | 124 | ret = decode_hsr(vcpu, fault_ipa, &mmio); |
135 | if (ret) | 125 | if (ret) |
136 | return ret; | 126 | return ret; |
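
HSR.ISV gates the whole decode: only when the syndrome is valid does the HSR carry the access size, register number, and direction that decode_hsr consumes; with ISV clear the hypervisor would have to fetch and disassemble the faulting instruction itself. The accessor is presumably a one-line test of the bit the old code checked directly:

    /* Sketch: true when the HSR carries a valid instruction syndrome. */
    static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
    {
            return !!(kvm_vcpu_get_hsr(vcpu) & HSR_ISV);
    }
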
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 99e07c7dd745..2f12e4056408 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <trace/events/kvm.h> | 22 | #include <trace/events/kvm.h> |
23 | #include <asm/idmap.h> | ||
24 | #include <asm/pgalloc.h> | 23 | #include <asm/pgalloc.h> |
25 | #include <asm/cacheflush.h> | 24 | #include <asm/cacheflush.h> |
26 | #include <asm/kvm_arm.h> | 25 | #include <asm/kvm_arm.h> |
@@ -28,8 +27,6 @@ | |||
28 | #include <asm/kvm_mmio.h> | 27 | #include <asm/kvm_mmio.h> |
29 | #include <asm/kvm_asm.h> | 28 | #include <asm/kvm_asm.h> |
30 | #include <asm/kvm_emulate.h> | 29 | #include <asm/kvm_emulate.h> |
31 | #include <asm/mach/map.h> | ||
32 | #include <trace/events/kvm.h> | ||
33 | 30 | ||
34 | #include "trace.h" | 31 | #include "trace.h" |
35 | 32 | ||
@@ -37,19 +34,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; | |||
37 | 34 | ||
38 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); | 35 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); |
39 | 36 | ||
40 | static void kvm_tlb_flush_vmid(struct kvm *kvm) | 37 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
41 | { | 38 | { |
42 | kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); | 39 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); |
43 | } | ||
44 | |||
45 | static void kvm_set_pte(pte_t *pte, pte_t new_pte) | ||
46 | { | ||
47 | pte_val(*pte) = new_pte; | ||
48 | /* | ||
49 | * flush_pmd_entry just takes a void pointer and cleans the necessary | ||
50 | * cache entries, so we can reuse the function for ptes. | ||
51 | */ | ||
52 | flush_pmd_entry(pte); | ||
53 | } | 40 | } |
54 | 41 | ||
55 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, | 42 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, |
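
Two things happen in this hunk. First, the TLB flush becomes IPA-scoped: __kvm_tlb_flush_vmid_ipa lets a single stage-2 pte update invalidate only the affected guest page instead of every entry tagged with the VMID. Second, kvm_set_pte vanishes from mmu.c; the series pushes such low-level helpers behind kvm_* wrappers (see kvm_clean_pgd, kvm_clean_pte, and kvm_clean_pmd_entry below), presumably as inlines in asm/kvm_mmu.h. A sketch of the relocated helper, assuming it keeps the removed body:

    /* Sketch: install a pte and clean it to the point of coherency so
     * the stage-2 walker sees it; flush_pmd_entry takes a void pointer
     * and cleans the necessary cache lines, so it works for ptes too. */
    static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
    {
            pte_val(*pte) = new_pte;
            flush_pmd_entry(pte);
    }
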
@@ -98,33 +85,42 @@ static void free_ptes(pmd_t *pmd, unsigned long addr) | |||
98 | } | 85 | } |
99 | } | 86 | } |
100 | 87 | ||
88 | static void free_hyp_pgd_entry(unsigned long addr) | ||
89 | { | ||
90 | pgd_t *pgd; | ||
91 | pud_t *pud; | ||
92 | pmd_t *pmd; | ||
93 | unsigned long hyp_addr = KERN_TO_HYP(addr); | ||
94 | |||
95 | pgd = hyp_pgd + pgd_index(hyp_addr); | ||
96 | pud = pud_offset(pgd, hyp_addr); | ||
97 | |||
98 | if (pud_none(*pud)) | ||
99 | return; | ||
100 | BUG_ON(pud_bad(*pud)); | ||
101 | |||
102 | pmd = pmd_offset(pud, hyp_addr); | ||
103 | free_ptes(pmd, addr); | ||
104 | pmd_free(NULL, pmd); | ||
105 | pud_clear(pud); | ||
106 | } | ||
107 | |||
101 | /** | 108 | /** |
102 | * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables | 109 | * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables |
103 | * | 110 | * |
104 | * Assumes this is a page table used strictly in Hyp-mode and therefore contains | 111 | * Assumes this is a page table used strictly in Hyp-mode and therefore contains |
105 | * only mappings in the kernel memory area, which is above PAGE_OFFSET. | 112 | * either mappings in the kernel memory area (above PAGE_OFFSET), or |
113 | * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END). | ||
106 | */ | 114 | */ |
107 | void free_hyp_pmds(void) | 115 | void free_hyp_pmds(void) |
108 | { | 116 | { |
109 | pgd_t *pgd; | ||
110 | pud_t *pud; | ||
111 | pmd_t *pmd; | ||
112 | unsigned long addr; | 117 | unsigned long addr; |
113 | 118 | ||
114 | mutex_lock(&kvm_hyp_pgd_mutex); | 119 | mutex_lock(&kvm_hyp_pgd_mutex); |
115 | for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) { | 120 | for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) |
116 | pgd = hyp_pgd + pgd_index(addr); | 121 | free_hyp_pgd_entry(addr); |
117 | pud = pud_offset(pgd, addr); | 122 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) |
118 | 123 | free_hyp_pgd_entry(addr); | |
119 | if (pud_none(*pud)) | ||
120 | continue; | ||
121 | BUG_ON(pud_bad(*pud)); | ||
122 | |||
123 | pmd = pmd_offset(pud, addr); | ||
124 | free_ptes(pmd, addr); | ||
125 | pmd_free(NULL, pmd); | ||
126 | pud_clear(pud); | ||
127 | } | ||
128 | mutex_unlock(&kvm_hyp_pgd_mutex); | 124 | mutex_unlock(&kvm_hyp_pgd_mutex); |
129 | } | 125 | } |
130 | 126 | ||
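
With device mappings now allowed in Hyp (see create_hyp_io_mappings below), teardown can no longer assume everything lives above PAGE_OFFSET: free_hyp_pmds walks the kernel linear map and the vmalloc area as two separate ranges, bounded by virt_addr_valid() and is_vmalloc_addr() respectively, with the per-entry work factored into free_hyp_pgd_entry(). The loop bounds amount to this predicate (hypothetical helper, shown only to make the two windows explicit):

    /* Sketch: an address is a candidate for Hyp teardown iff it lies in
     * one of the two windows Hyp may map -- kernel lowmem or vmalloc. */
    static inline bool hyp_addr_is_mappable(unsigned long addr)
    {
            return virt_addr_valid(addr) || is_vmalloc_addr((void *)addr);
    }
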
@@ -136,7 +132,9 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, | |||
136 | struct page *page; | 132 | struct page *page; |
137 | 133 | ||
138 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { | 134 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { |
139 | pte = pte_offset_kernel(pmd, addr); | 135 | unsigned long hyp_addr = KERN_TO_HYP(addr); |
136 | |||
137 | pte = pte_offset_kernel(pmd, hyp_addr); | ||
140 | BUG_ON(!virt_addr_valid(addr)); | 138 | BUG_ON(!virt_addr_valid(addr)); |
141 | page = virt_to_page(addr); | 139 | page = virt_to_page(addr); |
142 | kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); | 140 | kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); |
@@ -151,7 +149,9 @@ static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start, | |||
151 | unsigned long addr; | 149 | unsigned long addr; |
152 | 150 | ||
153 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { | 151 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { |
154 | pte = pte_offset_kernel(pmd, addr); | 152 | unsigned long hyp_addr = KERN_TO_HYP(addr); |
153 | |||
154 | pte = pte_offset_kernel(pmd, hyp_addr); | ||
155 | BUG_ON(pfn_valid(*pfn_base)); | 155 | BUG_ON(pfn_valid(*pfn_base)); |
156 | kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); | 156 | kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); |
157 | (*pfn_base)++; | 157 | (*pfn_base)++; |
@@ -166,12 +166,13 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, | |||
166 | unsigned long addr, next; | 166 | unsigned long addr, next; |
167 | 167 | ||
168 | for (addr = start; addr < end; addr = next) { | 168 | for (addr = start; addr < end; addr = next) { |
169 | pmd = pmd_offset(pud, addr); | 169 | unsigned long hyp_addr = KERN_TO_HYP(addr); |
170 | pmd = pmd_offset(pud, hyp_addr); | ||
170 | 171 | ||
171 | BUG_ON(pmd_sect(*pmd)); | 172 | BUG_ON(pmd_sect(*pmd)); |
172 | 173 | ||
173 | if (pmd_none(*pmd)) { | 174 | if (pmd_none(*pmd)) { |
174 | pte = pte_alloc_one_kernel(NULL, addr); | 175 | pte = pte_alloc_one_kernel(NULL, hyp_addr); |
175 | if (!pte) { | 176 | if (!pte) { |
176 | kvm_err("Cannot allocate Hyp pte\n"); | 177 | kvm_err("Cannot allocate Hyp pte\n"); |
177 | return -ENOMEM; | 178 | return -ENOMEM; |
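
Every table walk in these hunks now translates the kernel VA with KERN_TO_HYP before indexing the Hyp tables, while page lookups (virt_to_page and friends) keep using the kernel VA. On 32-bit ARM the Hyp window can simply mirror the kernel's, making the translation an identity; the indirection exists so a port with a distinct Hyp VA window can mask or offset instead. A sketch under that mirroring assumption:

    /* Sketch (assumption: Hyp mirrors the kernel VA space, as on ARMv7).
     * A port with a dedicated window would mask into HYP_PAGE_OFFSET. */
    #define HYP_PAGE_OFFSET_MASK    (~0UL)
    #define KERN_TO_HYP(kva)        ((unsigned long)(kva) & HYP_PAGE_OFFSET_MASK)
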
@@ -206,17 +207,23 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base) | |||
206 | unsigned long addr, next; | 207 | unsigned long addr, next; |
207 | int err = 0; | 208 | int err = 0; |
208 | 209 | ||
209 | BUG_ON(start > end); | 210 | if (start >= end) |
210 | if (start < PAGE_OFFSET) | 211 | return -EINVAL; |
212 | /* Check for a valid kernel memory mapping */ | ||
213 | if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1))) | ||
214 | return -EINVAL; | ||
215 | /* Check for a valid kernel IO mapping */ | ||
216 | if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))) | ||
211 | return -EINVAL; | 217 | return -EINVAL; |
212 | 218 | ||
213 | mutex_lock(&kvm_hyp_pgd_mutex); | 219 | mutex_lock(&kvm_hyp_pgd_mutex); |
214 | for (addr = start; addr < end; addr = next) { | 220 | for (addr = start; addr < end; addr = next) { |
215 | pgd = hyp_pgd + pgd_index(addr); | 221 | unsigned long hyp_addr = KERN_TO_HYP(addr); |
216 | pud = pud_offset(pgd, addr); | 222 | pgd = hyp_pgd + pgd_index(hyp_addr); |
223 | pud = pud_offset(pgd, hyp_addr); | ||
217 | 224 | ||
218 | if (pud_none_or_clear_bad(pud)) { | 225 | if (pud_none_or_clear_bad(pud)) { |
219 | pmd = pmd_alloc_one(NULL, addr); | 226 | pmd = pmd_alloc_one(NULL, hyp_addr); |
220 | if (!pmd) { | 227 | if (!pmd) { |
221 | kvm_err("Cannot allocate Hyp pmd\n"); | 228 | kvm_err("Cannot allocate Hyp pmd\n"); |
222 | err = -ENOMEM; | 229 | err = -ENOMEM; |
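
The hard BUG_ON(start > end) gives way to graceful -EINVAL returns, and pfn_base now doubles as a mode selector: NULL means the range must be ordinary kernel memory (virt_addr_valid), non-NULL means it must be a vmalloc/ioremap range (is_vmalloc_addr). A hedged sketch of the two call shapes (variable names are illustrative, not from the patch):

    /* Sketch: the two modes the new validity checks distinguish. */
    unsigned long pfn = __phys_to_pfn(dev_phys);    /* dev_phys: assumed */
    int err;

    /* RAM mirror: pfn_base == NULL, VAs must be in the linear map. */
    err = __create_hyp_mappings(kern_obj, kern_obj + size, NULL);

    /* Device map: pfn_base != NULL, VAs must come from ioremap/vmalloc. */
    err = __create_hyp_mappings(io_va, io_va + size, &pfn);
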
@@ -236,12 +243,13 @@ out: | |||
236 | } | 243 | } |
237 | 244 | ||
238 | /** | 245 | /** |
239 | * create_hyp_mappings - map a kernel virtual address range in Hyp mode | 246 | * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode |
240 | * @from: The virtual kernel start address of the range | 247 | * @from: The virtual kernel start address of the range |
241 | * @to: The virtual kernel end address of the range (exclusive) | 248 | * @to: The virtual kernel end address of the range (exclusive) |
242 | * | 249 | * |
243 | * The same virtual address as the kernel virtual address is also used in | 250 | * The same virtual address as the kernel virtual address is also used |
244 | * Hyp-mode mapping to the same underlying physical pages. | 251 | * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying |
252 | * physical pages. | ||
245 | * | 253 | * |
246 | * Note: Wrapping around zero in the "to" address is not supported. | 254 | * Note: Wrapping around zero in the "to" address is not supported. |
247 | */ | 255 | */ |
@@ -251,10 +259,13 @@ int create_hyp_mappings(void *from, void *to) | |||
251 | } | 259 | } |
252 | 260 | ||
253 | /** | 261 | /** |
254 | * create_hyp_io_mappings - map a physical IO range in Hyp mode | 262 | * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode |
255 | * @from: The virtual HYP start address of the range | 263 | * @from: The kernel start VA of the range |
256 | * @to: The virtual HYP end address of the range (exclusive) | 264 | * @to: The kernel end VA of the range (exclusive) |
257 | * @addr: The physical start address which gets mapped | 265 | * @addr: The physical start address which gets mapped |
266 | * | ||
267 | * The resulting HYP VA is the same as the kernel VA, modulo | ||
268 | * HYP_PAGE_OFFSET. | ||
258 | */ | 269 | */ |
259 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr) | 270 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr) |
260 | { | 271 | { |
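
The interface change matters more than the wording: callers now hand create_hyp_io_mappings the kernel VAs they got back from ioremap(), and the Hyp VA falls out via KERN_TO_HYP, instead of the caller having to compute a Hyp address itself. An illustrative caller (names assumed; the VGIC control interface is one real user of this path):

    /* Sketch: map a device's control registers into Hyp mode. */
    void __iomem *vctrl = ioremap(vctrl_phys, SZ_8K);   /* vctrl_phys: assumed */
    if (!vctrl)
            return -ENOMEM;
    err = create_hyp_io_mappings((void *)vctrl,
                                 (void *)vctrl + SZ_8K, vctrl_phys);
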
@@ -290,7 +301,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) | |||
290 | VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); | 301 | VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); |
291 | 302 | ||
292 | memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); | 303 | memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); |
293 | clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); | 304 | kvm_clean_pgd(pgd); |
294 | kvm->arch.pgd = pgd; | 305 | kvm->arch.pgd = pgd; |
295 | 306 | ||
296 | return 0; | 307 | return 0; |
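
Same abstraction move as in stage2_set_pte below: the raw clean_dcache_area() call is renamed behind kvm_clean_pgd(), keeping mmu.c free of ARMv7-specific cache maintenance. Assuming the wrapper keeps the call it replaces:

    /* Sketch: clean the freshly zeroed stage-2 pgd so the stage-2
     * walker reads the zeroed entries rather than stale cache lines. */
    static inline void kvm_clean_pgd(pgd_t *pgd)
    {
            clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
    }
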
@@ -422,22 +433,22 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |||
422 | return 0; /* ignore calls from kvm_set_spte_hva */ | 433 | return 0; /* ignore calls from kvm_set_spte_hva */ |
423 | pmd = mmu_memory_cache_alloc(cache); | 434 | pmd = mmu_memory_cache_alloc(cache); |
424 | pud_populate(NULL, pud, pmd); | 435 | pud_populate(NULL, pud, pmd); |
425 | pmd += pmd_index(addr); | ||
426 | get_page(virt_to_page(pud)); | 436 | get_page(virt_to_page(pud)); |
427 | } else | 437 | } |
428 | pmd = pmd_offset(pud, addr); | 438 | |
439 | pmd = pmd_offset(pud, addr); | ||
429 | 440 | ||
430 | /* Create 2nd stage page table mapping - Level 2 */ | 441 | /* Create 2nd stage page table mapping - Level 2 */ |
431 | if (pmd_none(*pmd)) { | 442 | if (pmd_none(*pmd)) { |
432 | if (!cache) | 443 | if (!cache) |
433 | return 0; /* ignore calls from kvm_set_spte_hva */ | 444 | return 0; /* ignore calls from kvm_set_spte_hva */ |
434 | pte = mmu_memory_cache_alloc(cache); | 445 | pte = mmu_memory_cache_alloc(cache); |
435 | clean_pte_table(pte); | 446 | kvm_clean_pte(pte); |
436 | pmd_populate_kernel(NULL, pmd, pte); | 447 | pmd_populate_kernel(NULL, pmd, pte); |
437 | pte += pte_index(addr); | ||
438 | get_page(virt_to_page(pmd)); | 448 | get_page(virt_to_page(pmd)); |
439 | } else | 449 | } |
440 | pte = pte_offset_kernel(pmd, addr); | 450 | |
451 | pte = pte_offset_kernel(pmd, addr); | ||
441 | 452 | ||
442 | if (iomap && pte_present(*pte)) | 453 | if (iomap && pte_present(*pte)) |
443 | return -EFAULT; | 454 | return -EFAULT; |
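
The restructuring here is subtle but behavior-preserving: the old allocation path advanced the fresh table by hand (pmd += pmd_index(addr), pte += pte_index(addr)) while the other path used pmd_offset()/pte_offset_kernel(), leaving two differently derived pointers. After pud_populate()/pmd_populate_kernel() the offset lookup finds the same entry either way, so both paths now converge on one canonical lookup:

    /* Sketch of the unified walk-and-fill shape this hunk converges on. */
    if (pmd_none(*pmd)) {
            pte = mmu_memory_cache_alloc(cache);    /* preallocated, won't sleep */
            kvm_clean_pte(pte);
            pmd_populate_kernel(NULL, pmd, pte);
            get_page(virt_to_page(pmd));            /* pte page pins its pmd */
    }
    pte = pte_offset_kernel(pmd, addr);             /* one lookup, both paths */
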
@@ -446,7 +457,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |||
446 | old_pte = *pte; | 457 | old_pte = *pte; |
447 | kvm_set_pte(pte, *new_pte); | 458 | kvm_set_pte(pte, *new_pte); |
448 | if (pte_present(old_pte)) | 459 | if (pte_present(old_pte)) |
449 | kvm_tlb_flush_vmid(kvm); | 460 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
450 | else | 461 | else |
451 | get_page(virt_to_page(pte)); | 462 | get_page(virt_to_page(pte)); |
452 | 463 | ||
@@ -473,7 +484,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | |||
473 | pfn = __phys_to_pfn(pa); | 484 | pfn = __phys_to_pfn(pa); |
474 | 485 | ||
475 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { | 486 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { |
476 | pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR); | 487 | pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); |
488 | kvm_set_s2pte_writable(&pte); | ||
477 | 489 | ||
478 | ret = mmu_topup_memory_cache(&cache, 2, 2); | 490 | ret = mmu_topup_memory_cache(&cache, 2, 2); |
479 | if (ret) | 491 | if (ret) |
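
The explicit L_PTE_S2_RDWR OR moves behind kvm_set_s2pte_writable(), the same helper user_mem_abort switches to below, so stage-2 permission encoding lives in one place. Assuming it keeps the bit the old code set directly:

    /* Sketch: mark a stage-2 pte writable. */
    static inline void kvm_set_s2pte_writable(pte_t *pte)
    {
            pte_val(*pte) |= L_PTE_S2_RDWR;
    }
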
@@ -492,29 +504,6 @@ out: | |||
492 | return ret; | 504 | return ret; |
493 | } | 505 | } |
494 | 506 | ||
495 | static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) | ||
496 | { | ||
497 | /* | ||
498 | * If we are going to insert an instruction page and the icache is | ||
499 | * either VIPT or PIPT, there is a potential problem where the host | ||
500 | * (or another VM) may have used the same page as this guest, and we | ||
501 | * read incorrect data from the icache. If we're using a PIPT cache, | ||
502 | * we can invalidate just that page, but if we are using a VIPT cache | ||
503 | * we need to invalidate the entire icache - damn shame - as written | ||
504 | * in the ARM ARM (DDI 0406C.b - Page B3-1393). | ||
505 | * | ||
506 | * VIVT caches are tagged using both the ASID and the VMID and doesn't | ||
507 | * need any kind of flushing (DDI 0406C.b - Page B3-1392). | ||
508 | */ | ||
509 | if (icache_is_pipt()) { | ||
510 | unsigned long hva = gfn_to_hva(kvm, gfn); | ||
511 | __cpuc_coherent_user_range(hva, hva + PAGE_SIZE); | ||
512 | } else if (!icache_is_vivt_asid_tagged()) { | ||
513 | /* any kind of VIPT cache */ | ||
514 | __flush_icache_all(); | ||
515 | } | ||
516 | } | ||
517 | |||
518 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | 507 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
519 | gfn_t gfn, struct kvm_memory_slot *memslot, | 508 | gfn_t gfn, struct kvm_memory_slot *memslot, |
520 | unsigned long fault_status) | 509 | unsigned long fault_status) |
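
coherent_icache_guest_page() leaves mmu.c entirely, but its caller in user_mem_abort remains, so it has presumably been relocated rather than dropped (most plausibly as an inline in asm/kvm_mmu.h, alongside the other helpers this series moves there). For reference, a sketch assuming the removed body survives unchanged; the VIPT/PIPT reasoning from the ARM ARM (DDI 0406C.b, B3-1392/1393) still applies:

    /* Sketch: keep the icache coherent when mapping a page a guest may
     * execute. A PIPT icache can invalidate just this page; any VIPT
     * icache forces a full invalidate; VIVT ASID-tagged needs nothing. */
    static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
    {
            if (icache_is_pipt()) {
                    unsigned long hva = gfn_to_hva(kvm, gfn);
                    __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
            } else if (!icache_is_vivt_asid_tagged()) {
                    __flush_icache_all();
            }
    }
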
@@ -526,7 +515,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
526 | unsigned long mmu_seq; | 515 | unsigned long mmu_seq; |
527 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; | 516 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
528 | 517 | ||
529 | write_fault = kvm_is_write_fault(vcpu->arch.hsr); | 518 | write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu)); |
530 | if (fault_status == FSC_PERM && !write_fault) { | 519 | if (fault_status == FSC_PERM && !write_fault) { |
531 | kvm_err("Unexpected L2 read permission error\n"); | 520 | kvm_err("Unexpected L2 read permission error\n"); |
532 | return -EFAULT; | 521 | return -EFAULT; |
@@ -560,7 +549,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
560 | if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) | 549 | if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) |
561 | goto out_unlock; | 550 | goto out_unlock; |
562 | if (writable) { | 551 | if (writable) { |
563 | pte_val(new_pte) |= L_PTE_S2_RDWR; | 552 | kvm_set_s2pte_writable(&new_pte); |
564 | kvm_set_pfn_dirty(pfn); | 553 | kvm_set_pfn_dirty(pfn); |
565 | } | 554 | } |
566 | stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); | 555 | stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); |
@@ -585,7 +574,6 @@ out_unlock: | |||
585 | */ | 574 | */ |
586 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | 575 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) |
587 | { | 576 | { |
588 | unsigned long hsr_ec; | ||
589 | unsigned long fault_status; | 577 | unsigned long fault_status; |
590 | phys_addr_t fault_ipa; | 578 | phys_addr_t fault_ipa; |
591 | struct kvm_memory_slot *memslot; | 579 | struct kvm_memory_slot *memslot; |
@@ -593,18 +581,17 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
593 | gfn_t gfn; | 581 | gfn_t gfn; |
594 | int ret, idx; | 582 | int ret, idx; |
595 | 583 | ||
596 | hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT; | 584 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
597 | is_iabt = (hsr_ec == HSR_EC_IABT); | 585 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); |
598 | fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8; | ||
599 | 586 | ||
600 | trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr, | 587 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), |
601 | vcpu->arch.hxfar, fault_ipa); | 588 | kvm_vcpu_get_hfar(vcpu), fault_ipa); |
602 | 589 | ||
603 | /* Check the stage-2 fault is trans. fault or write fault */ | 590 | /* Check the stage-2 fault is trans. fault or write fault */ |
604 | fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE); | 591 | fault_status = kvm_vcpu_trap_get_fault(vcpu); |
605 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { | 592 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { |
606 | kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n", | 593 | kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n", |
607 | hsr_ec, fault_status); | 594 | kvm_vcpu_trap_get_class(vcpu), fault_status); |
608 | return -EFAULT; | 595 | return -EFAULT; |
609 | } | 596 | } |
610 | 597 | ||
@@ -614,7 +601,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
614 | if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { | 601 | if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { |
615 | if (is_iabt) { | 602 | if (is_iabt) { |
616 | /* Prefetch Abort on I/O address */ | 603 | /* Prefetch Abort on I/O address */ |
617 | kvm_inject_pabt(vcpu, vcpu->arch.hxfar); | 604 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
618 | ret = 1; | 605 | ret = 1; |
619 | goto out_unlock; | 606 | goto out_unlock; |
620 | } | 607 | } |
@@ -626,8 +613,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
626 | goto out_unlock; | 613 | goto out_unlock; |
627 | } | 614 | } |
628 | 615 | ||
629 | /* Adjust page offset */ | 616 | /* |
630 | fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK; | 617 | * The IPA is reported as [MAX:12], so we need to |
618 | * complement it with the bottom 12 bits from the | ||
619 | * faulting VA. This is always 12 bits, irrespective | ||
620 | * of the page size. | ||
621 | */ | ||
622 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); | ||
631 | ret = io_mem_abort(vcpu, run, fault_ipa); | 623 | ret = io_mem_abort(vcpu, run, fault_ipa); |
632 | goto out_unlock; | 624 | goto out_unlock; |
633 | } | 625 | } |
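
The two fault-address hunks are the same calculation split across abstractions: HPFAR reports the IPA page frame (bits [39:12], hence the <<8 after masking), and the low 12 bits always come from the faulting VA in HDFAR/HIFAR regardless of page size. Pulled together as one hypothetical helper:

    /* Sketch: rebuild the full faulting IPA from the two fault registers.
     * The helper name is illustrative; the accessors split this in two. */
    static inline phys_addr_t full_fault_ipa(u32 hpfar, u32 hxfar)
    {
            phys_addr_t ipa = ((phys_addr_t)hpfar & HPFAR_MASK) << 8;
            return ipa | (hxfar & ((1 << 12) - 1));
    }
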
@@ -682,7 +674,7 @@ static void handle_hva_to_gpa(struct kvm *kvm, | |||
682 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) | 674 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) |
683 | { | 675 | { |
684 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); | 676 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); |
685 | kvm_tlb_flush_vmid(kvm); | 677 | kvm_tlb_flush_vmid_ipa(kvm, gpa); |
686 | } | 678 | } |
687 | 679 | ||
688 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 680 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) |
@@ -776,7 +768,7 @@ void kvm_clear_hyp_idmap(void) | |||
776 | pmd = pmd_offset(pud, addr); | 768 | pmd = pmd_offset(pud, addr); |
777 | 769 | ||
778 | pud_clear(pud); | 770 | pud_clear(pud); |
779 | clean_pmd_entry(pmd); | 771 | kvm_clean_pmd_entry(pmd); |
780 | pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK)); | 772 | pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK)); |
781 | } while (pgd++, addr = next, addr < end); | 773 | } while (pgd++, addr = next, addr < end); |
782 | } | 774 | } |
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c index c9a17316e9fe..161d5c15f0f0 100644 --- a/arch/arm/kvm/vgic.c +++ b/arch/arm/kvm/vgic.c | |||
@@ -1484,7 +1484,7 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr) | |||
1484 | if (addr & ~KVM_PHYS_MASK) | 1484 | if (addr & ~KVM_PHYS_MASK) |
1485 | return -E2BIG; | 1485 | return -E2BIG; |
1486 | 1486 | ||
1487 | if (addr & ~PAGE_MASK) | 1487 | if (addr & (SZ_4K - 1)) |
1488 | return -EINVAL; | 1488 | return -EINVAL; |
1489 | 1489 | ||
1490 | mutex_lock(&kvm->lock); | 1490 | mutex_lock(&kvm->lock); |
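
Replacing ~PAGE_MASK with SZ_4K - 1 decouples the alignment check from the host page size: the GIC regions are architecturally 4KiB-aligned, and on a kernel built with larger pages the old test would have rejected perfectly valid base addresses. Illustration (helper name hypothetical):

    /* Sketch: the alignment the GIC actually requires, independent of
     * the host PAGE_SIZE. 0x2c001000 passes here, but would fail an
     * (addr & ~PAGE_MASK) test on a hypothetical 64K-page kernel. */
    static inline bool vgic_base_ok(u64 addr)
    {
            return (addr & (SZ_4K - 1)) == 0;
    }
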