diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-16 16:00:24 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-16 16:00:24 -0500 |
commit | 974aa5630b318938273d7efe7a2cf031c7b927db (patch) | |
tree | b79803c07b9c16d87058ce69f80ebe173cdfd838 /arch/arm64/kvm/inject_fault.c | |
parent | 441692aafc1731087bbaf657a8b6059d95c2a6df (diff) | |
parent | a6014f1ab7088dc02b58991cfb6b32a34afdbf12 (diff) |
Merge tag 'kvm-4.15-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Radim Krčmář:
"First batch of KVM changes for 4.15
Common:
- Python 3 support in kvm_stat
- Accounting of slabs to kmemcg
ARM:
- Optimized arch timer handling for KVM/ARM
- Improvements to the VGIC ITS code and introduction of an ITS reset
ioctl
- Unification of the 32-bit fault injection logic
- More exact external abort matching logic
PPC:
- Support for running hashed page table (HPT) MMU mode on a host that
is using the radix MMU mode; single threaded mode on POWER 9 is
added as a pre-requisite
- Resolution of merge conflicts with the last second 4.14 HPT fixes
- Fixes and cleanups
s390:
- Some initial preparation patches for exitless interrupts and crypto
- New capability for AIS migration
- Fixes
x86:
- Improved emulation of LAPIC timer mode changes, MCi_STATUS MSRs,
and after-reset state
- Refined dependencies for VMX features
- Fixes for nested SMI injection
- A lot of cleanups"
* tag 'kvm-4.15-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (89 commits)
KVM: s390: provide a capability for AIS state migration
KVM: s390: clear_io_irq() requests are not expected for adapter interrupts
KVM: s390: abstract conversion between isc and enum irq_types
KVM: s390: vsie: use common code functions for pinning
KVM: s390: SIE considerations for AP Queue virtualization
KVM: s390: document memory ordering for kvm_s390_vcpu_wakeup
KVM: PPC: Book3S HV: Cosmetic post-merge cleanups
KVM: arm/arm64: fix the incompatible matching for external abort
KVM: arm/arm64: Unify 32bit fault injection
KVM: arm/arm64: vgic-its: Implement KVM_DEV_ARM_ITS_CTRL_RESET
KVM: arm/arm64: Document KVM_DEV_ARM_ITS_CTRL_RESET
KVM: arm/arm64: vgic-its: Free caches when GITS_BASER Valid bit is cleared
KVM: arm/arm64: vgic-its: New helper functions to free the caches
KVM: arm/arm64: vgic-its: Remove kvm_its_unmap_device
arm/arm64: KVM: Load the timer state when enabling the timer
KVM: arm/arm64: Rework kvm_timer_should_fire
KVM: arm/arm64: Get rid of kvm_timer_flush_hwstate
KVM: arm/arm64: Avoid phys timer emulation in vcpu entry/exit
KVM: arm/arm64: Move phys_timer_emulate function
KVM: arm/arm64: Use kvm_arm_timer_set/get_reg for guest register traps
...
Diffstat (limited to 'arch/arm64/kvm/inject_fault.c')
-rw-r--r-- | arch/arm64/kvm/inject_fault.c | 88 |
1 file changed, 3 insertions, 85 deletions
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 3556715a774e..8ecbcb40e317 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c | |||
@@ -33,88 +33,6 @@ | |||
33 | #define LOWER_EL_AArch64_VECTOR 0x400 | 33 | #define LOWER_EL_AArch64_VECTOR 0x400 |
34 | #define LOWER_EL_AArch32_VECTOR 0x600 | 34 | #define LOWER_EL_AArch32_VECTOR 0x600 |
35 | 35 | ||
36 | /* | ||
37 | * Table taken from ARMv8 ARM DDI0487B-B, table G1-10. | ||
38 | */ | ||
39 | static const u8 return_offsets[8][2] = { | ||
40 | [0] = { 0, 0 }, /* Reset, unused */ | ||
41 | [1] = { 4, 2 }, /* Undefined */ | ||
42 | [2] = { 0, 0 }, /* SVC, unused */ | ||
43 | [3] = { 4, 4 }, /* Prefetch abort */ | ||
44 | [4] = { 8, 8 }, /* Data abort */ | ||
45 | [5] = { 0, 0 }, /* HVC, unused */ | ||
46 | [6] = { 4, 4 }, /* IRQ, unused */ | ||
47 | [7] = { 4, 4 }, /* FIQ, unused */ | ||
48 | }; | ||
49 | |||
50 | static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) | ||
51 | { | ||
52 | unsigned long cpsr; | ||
53 | unsigned long new_spsr_value = *vcpu_cpsr(vcpu); | ||
54 | bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT); | ||
55 | u32 return_offset = return_offsets[vect_offset >> 2][is_thumb]; | ||
56 | u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); | ||
57 | |||
58 | cpsr = mode | COMPAT_PSR_I_BIT; | ||
59 | |||
60 | if (sctlr & (1 << 30)) | ||
61 | cpsr |= COMPAT_PSR_T_BIT; | ||
62 | if (sctlr & (1 << 25)) | ||
63 | cpsr |= COMPAT_PSR_E_BIT; | ||
64 | |||
65 | *vcpu_cpsr(vcpu) = cpsr; | ||
66 | |||
67 | /* Note: These now point to the banked copies */ | ||
68 | *vcpu_spsr(vcpu) = new_spsr_value; | ||
69 | *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; | ||
70 | |||
71 | /* Branch to exception vector */ | ||
72 | if (sctlr & (1 << 13)) | ||
73 | vect_offset += 0xffff0000; | ||
74 | else /* always have security exceptions */ | ||
75 | vect_offset += vcpu_cp15(vcpu, c12_VBAR); | ||
76 | |||
77 | *vcpu_pc(vcpu) = vect_offset; | ||
78 | } | ||
79 | |||
80 | static void inject_undef32(struct kvm_vcpu *vcpu) | ||
81 | { | ||
82 | prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4); | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * Modelled after TakeDataAbortException() and TakePrefetchAbortException | ||
87 | * pseudocode. | ||
88 | */ | ||
89 | static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, | ||
90 | unsigned long addr) | ||
91 | { | ||
92 | u32 vect_offset; | ||
93 | u32 *far, *fsr; | ||
94 | bool is_lpae; | ||
95 | |||
96 | if (is_pabt) { | ||
97 | vect_offset = 12; | ||
98 | far = &vcpu_cp15(vcpu, c6_IFAR); | ||
99 | fsr = &vcpu_cp15(vcpu, c5_IFSR); | ||
100 | } else { /* !iabt */ | ||
101 | vect_offset = 16; | ||
102 | far = &vcpu_cp15(vcpu, c6_DFAR); | ||
103 | fsr = &vcpu_cp15(vcpu, c5_DFSR); | ||
104 | } | ||
105 | |||
106 | prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset); | ||
107 | |||
108 | *far = addr; | ||
109 | |||
110 | /* Give the guest an IMPLEMENTATION DEFINED exception */ | ||
111 | is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); | ||
112 | if (is_lpae) | ||
113 | *fsr = 1 << 9 | 0x34; | ||
114 | else | ||
115 | *fsr = 0x14; | ||
116 | } | ||
117 | |||
118 | enum exception_type { | 36 | enum exception_type { |
119 | except_type_sync = 0, | 37 | except_type_sync = 0, |
120 | except_type_irq = 0x80, | 38 | except_type_irq = 0x80, |
@@ -211,7 +129,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu) | |||
211 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) | 129 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) |
212 | { | 130 | { |
213 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | 131 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) |
214 | inject_abt32(vcpu, false, addr); | 132 | kvm_inject_dabt32(vcpu, addr); |
215 | else | 133 | else |
216 | inject_abt64(vcpu, false, addr); | 134 | inject_abt64(vcpu, false, addr); |
217 | } | 135 | } |
@@ -227,7 +145,7 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) | |||
227 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) | 145 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) |
228 | { | 146 | { |
229 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | 147 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) |
230 | inject_abt32(vcpu, true, addr); | 148 | kvm_inject_pabt32(vcpu, addr); |
231 | else | 149 | else |
232 | inject_abt64(vcpu, true, addr); | 150 | inject_abt64(vcpu, true, addr); |
233 | } | 151 | } |
@@ -241,7 +159,7 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) | |||
241 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) | 159 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) |
242 | { | 160 | { |
243 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | 161 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) |
244 | inject_undef32(vcpu); | 162 | kvm_inject_undef32(vcpu); |
245 | else | 163 | else |
246 | inject_undef64(vcpu); | 164 | inject_undef64(vcpu); |
247 | } | 165 | } |