Diffstat (limited to 'arch/arm64')
 arch/arm64/include/asm/kvm_emulate.h |  3 +++
 arch/arm64/kvm/inject_fault.c        | 74 ++------------------------------
 2 files changed, 6 insertions(+), 71 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index e5df3fce0008..bf61da0ef82b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_undef32(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
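With these declarations in the shared header, the 32-bit injection helpers become callable from any arm64 KVM code, not just inject_fault.c. A hypothetical call site (the function name below is illustrative, not part of this commit), assuming emulation code that must fail an access from an AArch32 guest:

	/* Hypothetical caller: fail an emulated access for an AArch32 guest. */
	static void fail_aarch32_access(struct kvm_vcpu *vcpu, unsigned long addr)
	{
		/* Raise a data abort in the guest at the faulting address. */
		kvm_inject_dabt32(vcpu, addr);
	}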
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index da6a8cfa54a0..8ecbcb40e317 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -33,74 +33,6 @@
 #define LOWER_EL_AArch64_VECTOR	0x400
 #define LOWER_EL_AArch32_VECTOR	0x600
 
-static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
-{
-	unsigned long cpsr;
-	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
-	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
-	u32 return_offset = (is_thumb) ? 4 : 0;
-	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
-
-	cpsr = mode | COMPAT_PSR_I_BIT;
-
-	if (sctlr & (1 << 30))
-		cpsr |= COMPAT_PSR_T_BIT;
-	if (sctlr & (1 << 25))
-		cpsr |= COMPAT_PSR_E_BIT;
-
-	*vcpu_cpsr(vcpu) = cpsr;
-
-	/* Note: These now point to the banked copies */
-	*vcpu_spsr(vcpu) = new_spsr_value;
-	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
-
-	/* Branch to exception vector */
-	if (sctlr & (1 << 13))
-		vect_offset += 0xffff0000;
-	else /* always have security exceptions */
-		vect_offset += vcpu_cp15(vcpu, c12_VBAR);
-
-	*vcpu_pc(vcpu) = vect_offset;
-}
-
-static void inject_undef32(struct kvm_vcpu *vcpu)
-{
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
-}
-
-/*
- * Modelled after TakeDataAbortException() and TakePrefetchAbortException
- * pseudocode.
- */
-static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
-			 unsigned long addr)
-{
-	u32 vect_offset;
-	u32 *far, *fsr;
-	bool is_lpae;
-
-	if (is_pabt) {
-		vect_offset = 12;
-		far = &vcpu_cp15(vcpu, c6_IFAR);
-		fsr = &vcpu_cp15(vcpu, c5_IFSR);
-	} else { /* !iabt */
-		vect_offset = 16;
-		far = &vcpu_cp15(vcpu, c6_DFAR);
-		fsr = &vcpu_cp15(vcpu, c5_DFSR);
-	}
-
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
-
-	*far = addr;
-
-	/* Give the guest an IMPLEMENTATION DEFINED exception */
-	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-	if (is_lpae)
-		*fsr = 1 << 9 | 0x34;
-	else
-		*fsr = 0x14;
-}
-
 enum exception_type {
 	except_type_sync = 0,
 	except_type_irq = 0x80,
@@ -197,7 +129,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
-		inject_abt32(vcpu, false, addr);
+		kvm_inject_dabt32(vcpu, addr);
 	else
 		inject_abt64(vcpu, false, addr);
 }
@@ -213,7 +145,7 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
-		inject_abt32(vcpu, true, addr);
+		kvm_inject_pabt32(vcpu, addr);
 	else
 		inject_abt64(vcpu, true, addr);
 }
@@ -227,7 +159,7 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
-		inject_undef32(vcpu);
+		kvm_inject_undef32(vcpu);
 	else
 		inject_undef64(vcpu);
 }
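The 68 lines removed above are a relocation, not a deletion: the diffstat is limited to arch/arm64, and the new kvm_inject_*32() declarations point at a home for the AArch32 logic outside this view (presumably a file shared with the 32-bit port, such as virt/kvm/arm/aarch32.c). A minimal sketch of the renamed entry points, reconstructed from the removed bodies on the assumption that prepare_fault32() and inject_abt32() move over unchanged:

	/* Assumed shared-file wrappers; bodies follow the removed static helpers. */
	void kvm_inject_undef32(struct kvm_vcpu *vcpu)
	{
		prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
	}

	void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
	{
		inject_abt32(vcpu, false, addr);	/* DFAR/DFSR, vector offset 16 */
	}

	void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
	{
		inject_abt32(vcpu, true, addr);		/* IFAR/IFSR, vector offset 12 */
	}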
