author     Marc Zyngier <marc.zyngier@arm.com>             2016-09-06 04:28:43 -0400
committer  Christoffer Dall <christoffer.dall@linaro.org>  2016-09-08 06:53:00 -0400
commit     3aedd5c49e63c0d31a53b00ab906d48f53abb68b (patch)
tree       276d564323cc9a23398f157be5dd0cf0aed07f6e
parent     427d7cacf97220844aa39146e11365655bbff8bd (diff)
arm: KVM: Use common AArch32 conditional execution code
Add the bit of glue and const-ification that is required to use
the code inherited from the arm64 port, and move over to it.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
 arch/arm/include/asm/kvm_emulate.h | 34
 arch/arm/kvm/Makefile              |  1
 arch/arm/kvm/emulate.c             | 97
 virt/kvm/arm/aarch32.c             |  5
 4 files changed, 33 insertions(+), 104 deletions(-)
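
The shape of the change, distilled from the hunks below: the arm-specific
implementations of kvm_condition_valid() and kvm_skip_instr() are deleted from
emulate.c, the shared virt/kvm/arm/aarch32.c object is added to the Makefile,
and the historical arm entry points survive as trivial inline wrappers around
the common 32-bit helpers. A minimal sketch of that wrapper pattern (the real
declarations are in the kvm_emulate.h hunk):

        /* provided by the shared virt/kvm/arm/aarch32.c */
        bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);

        /* arch/arm keeps its historical name as a zero-cost wrapper */
        static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
        {
                return kvm_condition_valid32(vcpu);
        }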
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index ee5328fc4b06..448d63cdcc3d 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -40,18 +40,28 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 	*vcpu_reg(vcpu, reg_num) = val;
 }
 
-bool kvm_condition_valid(struct kvm_vcpu *vcpu);
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+	return kvm_condition_valid32(vcpu);
+}
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+	kvm_skip_instr32(vcpu, is_wide_instr);
+}
+
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
-static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.hcr;
 }
@@ -61,7 +71,7 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
 	vcpu->arch.hcr = hcr;
 }
 
-static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
 	return 1;
 }
@@ -71,9 +81,9 @@ static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
 }
 
-static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
+	return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -93,11 +103,21 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
 	return cpsr_mode > USR_MODE;;
 }
 
-static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.hsr;
 }
 
+static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	if (hsr & HSR_CV)
+		return (hsr & HSR_COND) >> HSR_COND_SHIFT;
+
+	return -1;
+}
+
 static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.hxfar;
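
The new kvm_vcpu_get_condition() helper is what lets the shared code stay
architecture-neutral: it returns the condition field captured in the HSR when
HSR.CV is set, and -1 otherwise so the caller can fall back to the Thumb IT
state. A standalone worked example, assuming the usual ARMv7 HSR layout with
CV at bit 24 and COND at bits 23:20 (the real positions come from
HSR_CV/HSR_COND/HSR_COND_SHIFT in kvm_arm.h):

        #include <stdint.h>

        /* illustration only, not kernel code */
        static int example_get_condition(uint32_t hsr)
        {
                if (hsr & (1u << 24))                   /* HSR_CV */
                        return (hsr >> 20) & 0xf;       /* HSR_COND */
                return -1;
        }

        /* example_get_condition((1u << 24) | (0xbu << 20)) == 11, i.e. the
         * trapped instruction was conditional on LT; with CV clear the
         * result is -1 and the IT state decides instead. */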
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 10d77a66cad5..339ec88a15a6 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
+obj-y += $(KVM)/arm/aarch32.o
 
 obj-y += $(KVM)/arm/vgic/vgic.o
 obj-y += $(KVM)/arm/vgic/vgic-init.o
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index eda9ddd03e7c..ff9acd1b027b 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -161,103 +161,6 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
 	}
 }
 
-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed. So let's re-implement the hardware, in
- * software!
- */
-bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
-	unsigned long cpsr, cond, insn;
-
-	/*
-	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
-	 * catch undefined instructions, and then we won't get past
-	 * the arm_exit_handlers test anyway.
-	 */
-	BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
-
-	/* Top two bits non-zero? Unconditional. */
-	if (kvm_vcpu_get_hsr(vcpu) >> 30)
-		return true;
-
-	cpsr = *vcpu_cpsr(vcpu);
-
-	/* Is condition field valid? */
-	if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
-		cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
-	else {
-		/* This can happen in Thumb mode: examine IT state. */
-		unsigned long it;
-
-		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
-		/* it == 0 => unconditional. */
-		if (it == 0)
-			return true;
-
-		/* The cond for this insn works out as the top 4 bits. */
-		cond = (it >> 4);
-	}
-
-	/* Shift makes it look like an ARM-mode instruction */
-	insn = cond << 28;
-	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/**
- * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
- * @vcpu:	The VCPU pointer
- *
- * When exceptions occur while instructions are executed in Thumb IF-THEN
- * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
- * to do this little bit of work manually. The fields map like this:
- *
- * IT[7:0] -> CPSR[26:25],CPSR[15:10]
- */
-static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
-{
-	unsigned long itbits, cond;
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	bool is_arm = !(cpsr & PSR_T_BIT);
-
-	if (is_arm || !(cpsr & PSR_IT_MASK))
-		return;
-
-	cond = (cpsr & 0xe000) >> 13;
-	itbits = (cpsr & 0x1c00) >> (10 - 2);
-	itbits |= (cpsr & (0x3 << 25)) >> 25;
-
-	/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
-	if ((itbits & 0x7) == 0)
-		itbits = cond = 0;
-	else
-		itbits = (itbits << 1) & 0x1f;
-
-	cpsr &= ~PSR_IT_MASK;
-	cpsr |= cond << 13;
-	cpsr |= (itbits & 0x1c) << (10 - 2);
-	cpsr |= (itbits & 0x3) << 25;
-	*vcpu_cpsr(vcpu) = cpsr;
-}
-
-/**
- * kvm_skip_instr - skip a trapped instruction and proceed to the next
- * @vcpu: The vcpu pointer
- */
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
-	bool is_thumb;
-
-	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
-	if (is_thumb && !is_wide_instr)
-		*vcpu_pc(vcpu) += 2;
-	else
-		*vcpu_pc(vcpu) += 4;
-	kvm_adjust_itstate(vcpu);
-}
-
-
 /******************************************************************************
  * Inject exceptions into the guest
  */
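
The deleted kvm_adjust_itstate() is a software rendition of the architectural
ITAdvance() pseudocode (ARM DDI 0406C): once the IT bits scattered across
CPSR[26:25] and CPSR[15:10] are gathered into a 3-bit cond and a 5-bit mask,
advancing the IT block is just a shift. A standalone worked example of that
core step, using a hypothetical mid-block state:

        #include <stdio.h>

        /* illustration of the ITAdvance step, not kernel code */
        int main(void)
        {
                unsigned int cond = 0x1, itbits = 0x0c; /* hypothetical state */

                if ((itbits & 0x7) == 0)        /* mask exhausted: IT block ends */
                        itbits = cond = 0;
                else                            /* one insn retired: shift mask */
                        itbits = (itbits << 1) & 0x1f;

                printf("%#x %#x\n", cond, itbits);      /* prints 0x1 0x18 */
                return 0;
        }

One more advance from 0x18 sees (0x18 & 0x7) == 0 and clears the whole state,
leaving the IT block.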
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index cb02e562acb8..ba4417b640c7 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -24,6 +24,11 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_emulate.h>
 
+#ifndef CONFIG_ARM64
+#define COMPAT_PSR_T_BIT	PSR_T_BIT
+#define COMPAT_PSR_IT_MASK	PSR_IT_MASK
+#endif
+
 /*
  * stolen from arch/arm/kernel/opcodes.c
  *
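
The #ifndef CONFIG_ARM64 shim is the last piece of glue: the shared helpers
were written for arm64, where a 32-bit guest's PSR bits go by the COMPAT_PSR_*
names, and on 32-bit ARM those names now simply alias the native PSR
definitions. Assuming the arm64 file mirrors the code just deleted from
emulate.c (its full contents are not shown in this patch), a line like the
following compiles unchanged on both architectures:

        /* sketch: Thumb check as the shared code would spell it */
        is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);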