Diffstat (limited to 'arch/arm')
 arch/arm/include/asm/kvm_mmio.h | 22 ----------------------
 arch/arm/kvm/mmio.c             | 64 +++++++++++++-----------
 2 files changed, 37 insertions(+), 49 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
index 3f83db2f6cf0..d8e90c8cb5fa 100644
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -28,28 +28,6 @@ struct kvm_decode {
         bool sign_extend;
 };
 
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
-        phys_addr_t phys_addr;
-        u8 data[8];
-        u32 len;
-        bool is_write;
-        void *private;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
-                                    struct kvm_exit_mmio *mmio)
-{
-        run->mmio.phys_addr = mmio->phys_addr;
-        run->mmio.len = mmio->len;
-        run->mmio.is_write = mmio->is_write;
-        memcpy(run->mmio.data, mmio->data, mmio->len);
-        run->exit_reason = KVM_EXIT_MMIO;
-}
-
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                  phys_addr_t fault_ipa);
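Context for the deletion above: kvm_prepare_mmio() existed only to copy a struct kvm_exit_mmio into the anonymous mmio member of struct kvm_run, which io_mem_abort() now fills directly. For reference, that anonymous member is declared roughly as follows in the KVM_EXIT_MMIO section of include/uapi/linux/kvm.h of this era (field comments added here for illustration):

        /* KVM_EXIT_MMIO */
        struct {
                __u64 phys_addr;   /* guest physical address of the access */
                __u8  data[8];     /* bytes read, or bytes to be written */
                __u32 len;         /* access size in bytes: 1, 2, 4 or 8 */
                __u8  is_write;
        } mmio;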
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 5d3bfc0eb3f0..974b1c606d04 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -121,12 +121,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
         return 0;
 }
 
-static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-                      struct kvm_exit_mmio *mmio)
+static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
 {
         unsigned long rt;
-        int len;
-        bool is_write, sign_extend;
+        int access_size;
+        bool sign_extend;
 
         if (kvm_vcpu_dabt_isextabt(vcpu)) {
                 /* cache operation on I/O addr, tell guest unsupported */
@@ -140,17 +139,15 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 return 1;
         }
 
-        len = kvm_vcpu_dabt_get_as(vcpu);
-        if (unlikely(len < 0))
-                return len;
+        access_size = kvm_vcpu_dabt_get_as(vcpu);
+        if (unlikely(access_size < 0))
+                return access_size;
 
-        is_write = kvm_vcpu_dabt_iswrite(vcpu);
+        *is_write = kvm_vcpu_dabt_iswrite(vcpu);
         sign_extend = kvm_vcpu_dabt_issext(vcpu);
         rt = kvm_vcpu_dabt_get_rd(vcpu);
 
-        mmio->is_write = is_write;
-        mmio->phys_addr = fault_ipa;
-        mmio->len = len;
+        *len = access_size;
         vcpu->arch.mmio_decode.sign_extend = sign_extend;
         vcpu->arch.mmio_decode.rt = rt;
 
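Note how decode_hsr() now splits its results: is_write and len travel back through out-parameters because the caller needs them immediately for the bus access, while rt and sign_extend are stashed in vcpu->arch.mmio_decode, since they are only consumed later by kvm_handle_mmio_return() when the read data is written back into the guest register. A sketch of that decode state, assuming it matches the struct kvm_decode whose tail is visible as context at the top of the kvm_mmio.h hunk above:

        struct kvm_decode {
                unsigned long rt;  /* guest register to load into / store from */
                bool sign_extend;  /* sign-extend a narrow read? */
        };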
@@ -165,20 +162,20 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                  phys_addr_t fault_ipa)
 {
-        struct kvm_exit_mmio mmio;
         unsigned long data;
         unsigned long rt;
         int ret;
+        bool is_write;
+        int len;
+        u8 data_buf[8];
 
         /*
-         * Prepare MMIO operation. First stash it in a private
-         * structure that we can use for in-kernel emulation. If the
-         * kernel can't handle it, copy it into run->mmio and let user
-         * space do its magic.
+         * Prepare MMIO operation. First decode the syndrome data we get
+         * from the CPU. Then try if some in-kernel emulation feels
+         * responsible, otherwise let user space do its magic.
          */
-
         if (kvm_vcpu_dabt_isvalid(vcpu)) {
-                ret = decode_hsr(vcpu, fault_ipa, &mmio);
+                ret = decode_hsr(vcpu, &is_write, &len);
                 if (ret)
                         return ret;
         } else {
@@ -188,21 +185,34 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
         rt = vcpu->arch.mmio_decode.rt;
 
-        if (mmio.is_write) {
-                data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
-                                               mmio.len);
+        if (is_write) {
+                data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+
+                trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+                mmio_write_buf(data_buf, len, data);
 
-                trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
-                               fault_ipa, data);
-                mmio_write_buf(mmio.data, mmio.len, data);
+                ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+                                       data_buf);
         } else {
-                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
+                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
                                fault_ipa, 0);
+
+                ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+                                      data_buf);
         }
 
-        if (vgic_handle_mmio(vcpu, run, &mmio))
+        /* Now prepare kvm_run for the potential return to userland. */
+        run->mmio.is_write = is_write;
+        run->mmio.phys_addr = fault_ipa;
+        run->mmio.len = len;
+        memcpy(run->mmio.data, data_buf, len);
+
+        if (!ret) {
+                /* We handled the access successfully in the kernel. */
+                kvm_handle_mmio_return(vcpu, run);
                 return 1;
+        }
 
-        kvm_prepare_mmio(run, &mmio);
+        run->exit_reason = KVM_EXIT_MMIO;
         return 0;
 }
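The net effect of the last hunk is that the VGIC-specific vgic_handle_mmio() hook is replaced by the generic KVM MMIO bus, so any in-kernel device registered on KVM_MMIO_BUS can claim the access. For context, a sketch of the bus accessors as declared in include/linux/kvm_host.h around this kernel version: they return 0 when a registered device handled the access and a negative error (e.g. -EOPNOTSUPP) otherwise, which is why the !ret test above means "handled in the kernel" and lets the guest register be updated immediately via kvm_handle_mmio_return().

        int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                             gpa_t addr, int len, const void *val);
        int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                            gpa_t addr, int len, void *val);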