diff options
author | Marc Zyngier <marc.zyngier@arm.com> | 2013-02-06 06:29:35 -0500 |
---|---|---|
committer | Marc Zyngier <marc.zyngier@arm.com> | 2013-06-12 11:42:18 -0400 |
commit | e82e030556e42e823e174e0c3bd97988d1a09d1f (patch) | |
tree | 4c394a68bfe1a1f4164f120fcf7b09f289bd7da2 /arch/arm64 | |
parent | b4afad06c19e3489767532f86ff453a1d1e28b8c (diff) |
arm64: KVM: 32bit guest fault injection
Add fault injection capability for 32bit guests.
Reviewed-by: Christopher Covington <cov@codeaurora.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/kvm/inject_fault.c | 79 |
1 file changed, 78 insertions, 1 deletion
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 54f656271266..81a02a8762b0 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Fault injection for 64bit guests. | 2 | * Fault injection for both 32 and 64bit guests. |
3 | * | 3 | * |
4 | * Copyright (C) 2012,2013 - ARM Ltd | 4 | * Copyright (C) 2012,2013 - ARM Ltd |
5 | * Author: Marc Zyngier <marc.zyngier@arm.com> | 5 | * Author: Marc Zyngier <marc.zyngier@arm.com> |
@@ -29,6 +29,74 @@ | |||
29 | PSR_I_BIT | PSR_D_BIT) | 29 | PSR_I_BIT | PSR_D_BIT) |
30 | #define EL1_EXCEPT_SYNC_OFFSET 0x200 | 30 | #define EL1_EXCEPT_SYNC_OFFSET 0x200 |
31 | 31 | ||
32 | static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) | ||
33 | { | ||
34 | unsigned long cpsr; | ||
35 | unsigned long new_spsr_value = *vcpu_cpsr(vcpu); | ||
36 | bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT); | ||
37 | u32 return_offset = (is_thumb) ? 4 : 0; | ||
38 | u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); | ||
39 | |||
40 | cpsr = mode | COMPAT_PSR_I_BIT; | ||
41 | |||
42 | if (sctlr & (1 << 30)) | ||
43 | cpsr |= COMPAT_PSR_T_BIT; | ||
44 | if (sctlr & (1 << 25)) | ||
45 | cpsr |= COMPAT_PSR_E_BIT; | ||
46 | |||
47 | *vcpu_cpsr(vcpu) = cpsr; | ||
48 | |||
49 | /* Note: These now point to the banked copies */ | ||
50 | *vcpu_spsr(vcpu) = new_spsr_value; | ||
51 | *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; | ||
52 | |||
53 | /* Branch to exception vector */ | ||
54 | if (sctlr & (1 << 13)) | ||
55 | vect_offset += 0xffff0000; | ||
56 | else /* always have security exceptions */ | ||
57 | vect_offset += vcpu_cp15(vcpu, c12_VBAR); | ||
58 | |||
59 | *vcpu_pc(vcpu) = vect_offset; | ||
60 | } | ||
61 | |||
62 | static void inject_undef32(struct kvm_vcpu *vcpu) | ||
63 | { | ||
64 | prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4); | ||
65 | } | ||
66 | |||
67 | /* | ||
68 | * Modelled after TakeDataAbortException() and TakePrefetchAbortException | ||
69 | * pseudocode. | ||
70 | */ | ||
71 | static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, | ||
72 | unsigned long addr) | ||
73 | { | ||
74 | u32 vect_offset; | ||
75 | u32 *far, *fsr; | ||
76 | bool is_lpae; | ||
77 | |||
78 | if (is_pabt) { | ||
79 | vect_offset = 12; | ||
80 | far = &vcpu_cp15(vcpu, c6_IFAR); | ||
81 | fsr = &vcpu_cp15(vcpu, c5_IFSR); | ||
82 | } else { /* !iabt */ | ||
83 | vect_offset = 16; | ||
84 | far = &vcpu_cp15(vcpu, c6_DFAR); | ||
85 | fsr = &vcpu_cp15(vcpu, c5_DFSR); | ||
86 | } | ||
87 | |||
88 | prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset); | ||
89 | |||
90 | *far = addr; | ||
91 | |||
92 | /* Give the guest an IMPLEMENTATION DEFINED exception */ | ||
93 | is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); | ||
94 | if (is_lpae) | ||
95 | *fsr = 1 << 9 | 0x34; | ||
96 | else | ||
97 | *fsr = 0x14; | ||
98 | } | ||
99 | |||
32 | static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) | 100 | static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) |
33 | { | 101 | { |
34 | unsigned long cpsr = *vcpu_cpsr(vcpu); | 102 | unsigned long cpsr = *vcpu_cpsr(vcpu); |
@@ -98,6 +166,9 @@ static void inject_undef64(struct kvm_vcpu *vcpu) | |||
98 | */ | 166 | */ |
99 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) | 167 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) |
100 | { | 168 | { |
169 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | ||
170 | inject_abt32(vcpu, false, addr); | ||
171 | |||
101 | inject_abt64(vcpu, false, addr); | 172 | inject_abt64(vcpu, false, addr); |
102 | } | 173 | } |
103 | 174 | ||
@@ -111,6 +182,9 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) | |||
111 | */ | 182 | */ |
112 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) | 183 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) |
113 | { | 184 | { |
185 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | ||
186 | inject_abt32(vcpu, true, addr); | ||
187 | |||
114 | inject_abt64(vcpu, true, addr); | 188 | inject_abt64(vcpu, true, addr); |
115 | } | 189 | } |
116 | 190 | ||
@@ -122,5 +196,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) | |||
122 | */ | 196 | */ |
123 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) | 197 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) |
124 | { | 198 | { |
199 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | ||
200 | inject_undef32(vcpu); | ||
201 | |||
125 | inject_undef64(vcpu); | 202 | inject_undef64(vcpu); |
126 | } | 203 | } |