diff options
author | Paolo Bonzini <pbonzini@redhat.com> | 2013-11-11 06:05:20 -0500 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2013-11-11 06:05:20 -0500 |
commit | ede582224231e64e41af0f89117a302580a2da2e (patch) | |
tree | 50c28a6d8b9b7c3ae2f6822bd548b84853e7ec09 /arch | |
parent | 6da8ae556c11bb3b832dcc0bd077c2d563f5583f (diff) | |
parent | ce94fe93d566bf381c6ecbd45010d36c5f04d692 (diff) |
Merge tag 'kvm-arm64/for-3.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into kvm-next
A handful of fixes for KVM/arm64:
- A couple of basic fixes for running BE guests on a LE host
- A performance improvement for overcommitted VMs (same as the equivalent
patch for ARM)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Conflicts:
arch/arm/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_emulate.h
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/include/asm/kvm_emulate.h | 46 | ||||
-rw-r--r-- | arch/arm/kvm/mmio.c | 86 | ||||
-rw-r--r-- | arch/arm/kvm/psci.c | 4 | ||||
-rw-r--r-- | arch/arm64/include/asm/kvm_arm.h | 8 | ||||
-rw-r--r-- | arch/arm64/include/asm/kvm_emulate.h | 56 | ||||
-rw-r--r-- | arch/arm64/kvm/Kconfig | 1 | ||||
-rw-r--r-- | arch/arm64/kvm/handle_exit.c | 18 |
7 files changed, 201 insertions(+), 18 deletions(-)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 708e4d8a647f..0fa90c962ac8 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h | |||
@@ -162,4 +162,50 @@ static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) | |||
162 | return vcpu->arch.cp15[c0_MPIDR]; | 162 | return vcpu->arch.cp15[c0_MPIDR]; |
163 | } | 163 | } |
164 | 164 | ||
165 | static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) | ||
166 | { | ||
167 | *vcpu_cpsr(vcpu) |= PSR_E_BIT; | ||
168 | } | ||
169 | |||
170 | static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) | ||
171 | { | ||
172 | return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT); | ||
173 | } | ||
174 | |||
175 | static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, | ||
176 | unsigned long data, | ||
177 | unsigned int len) | ||
178 | { | ||
179 | if (kvm_vcpu_is_be(vcpu)) { | ||
180 | switch (len) { | ||
181 | case 1: | ||
182 | return data & 0xff; | ||
183 | case 2: | ||
184 | return be16_to_cpu(data & 0xffff); | ||
185 | default: | ||
186 | return be32_to_cpu(data); | ||
187 | } | ||
188 | } | ||
189 | |||
190 | return data; /* Leave LE untouched */ | ||
191 | } | ||
192 | |||
193 | static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, | ||
194 | unsigned long data, | ||
195 | unsigned int len) | ||
196 | { | ||
197 | if (kvm_vcpu_is_be(vcpu)) { | ||
198 | switch (len) { | ||
199 | case 1: | ||
200 | return data & 0xff; | ||
201 | case 2: | ||
202 | return cpu_to_be16(data & 0xffff); | ||
203 | default: | ||
204 | return cpu_to_be32(data); | ||
205 | } | ||
206 | } | ||
207 | |||
208 | return data; /* Leave LE untouched */ | ||
209 | } | ||
210 | |||
165 | #endif /* __ARM_KVM_EMULATE_H__ */ | 211 | #endif /* __ARM_KVM_EMULATE_H__ */ |
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index 0c25d9487d53..4cb5a93182e9 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c | |||
@@ -23,6 +23,68 @@ | |||
23 | 23 | ||
24 | #include "trace.h" | 24 | #include "trace.h" |
25 | 25 | ||
26 | static void mmio_write_buf(char *buf, unsigned int len, unsigned long data) | ||
27 | { | ||
28 | void *datap = NULL; | ||
29 | union { | ||
30 | u8 byte; | ||
31 | u16 hword; | ||
32 | u32 word; | ||
33 | u64 dword; | ||
34 | } tmp; | ||
35 | |||
36 | switch (len) { | ||
37 | case 1: | ||
38 | tmp.byte = data; | ||
39 | datap = &tmp.byte; | ||
40 | break; | ||
41 | case 2: | ||
42 | tmp.hword = data; | ||
43 | datap = &tmp.hword; | ||
44 | break; | ||
45 | case 4: | ||
46 | tmp.word = data; | ||
47 | datap = &tmp.word; | ||
48 | break; | ||
49 | case 8: | ||
50 | tmp.dword = data; | ||
51 | datap = &tmp.dword; | ||
52 | break; | ||
53 | } | ||
54 | |||
55 | memcpy(buf, datap, len); | ||
56 | } | ||
57 | |||
58 | static unsigned long mmio_read_buf(char *buf, unsigned int len) | ||
59 | { | ||
60 | unsigned long data = 0; | ||
61 | union { | ||
62 | u16 hword; | ||
63 | u32 word; | ||
64 | u64 dword; | ||
65 | } tmp; | ||
66 | |||
67 | switch (len) { | ||
68 | case 1: | ||
69 | data = buf[0]; | ||
70 | break; | ||
71 | case 2: | ||
72 | memcpy(&tmp.hword, buf, len); | ||
73 | data = tmp.hword; | ||
74 | break; | ||
75 | case 4: | ||
76 | memcpy(&tmp.word, buf, len); | ||
77 | data = tmp.word; | ||
78 | break; | ||
79 | case 8: | ||
80 | memcpy(&tmp.dword, buf, len); | ||
81 | data = tmp.dword; | ||
82 | break; | ||
83 | } | ||
84 | |||
85 | return data; | ||
86 | } | ||
87 | |||
26 | /** | 88 | /** |
27 | * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation | 89 | * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation |
28 | * @vcpu: The VCPU pointer | 90 | * @vcpu: The VCPU pointer |
@@ -33,28 +95,27 @@ | |||
33 | */ | 95 | */ |
34 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) | 96 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) |
35 | { | 97 | { |
36 | unsigned long *dest; | 98 | unsigned long data; |
37 | unsigned int len; | 99 | unsigned int len; |
38 | int mask; | 100 | int mask; |
39 | 101 | ||
40 | if (!run->mmio.is_write) { | 102 | if (!run->mmio.is_write) { |
41 | dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); | ||
42 | *dest = 0; | ||
43 | |||
44 | len = run->mmio.len; | 103 | len = run->mmio.len; |
45 | if (len > sizeof(unsigned long)) | 104 | if (len > sizeof(unsigned long)) |
46 | return -EINVAL; | 105 | return -EINVAL; |
47 | 106 | ||
48 | memcpy(dest, run->mmio.data, len); | 107 | data = mmio_read_buf(run->mmio.data, len); |
49 | |||
50 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, | ||
51 | *((u64 *)run->mmio.data)); | ||
52 | 108 | ||
53 | if (vcpu->arch.mmio_decode.sign_extend && | 109 | if (vcpu->arch.mmio_decode.sign_extend && |
54 | len < sizeof(unsigned long)) { | 110 | len < sizeof(unsigned long)) { |
55 | mask = 1U << ((len * 8) - 1); | 111 | mask = 1U << ((len * 8) - 1); |
56 | *dest = (*dest ^ mask) - mask; | 112 | data = (data ^ mask) - mask; |
57 | } | 113 | } |
114 | |||
115 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, | ||
116 | data); | ||
117 | data = vcpu_data_host_to_guest(vcpu, data, len); | ||
118 | *vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data; | ||
58 | } | 119 | } |
59 | 120 | ||
60 | return 0; | 121 | return 0; |
@@ -105,6 +166,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
105 | phys_addr_t fault_ipa) | 166 | phys_addr_t fault_ipa) |
106 | { | 167 | { |
107 | struct kvm_exit_mmio mmio; | 168 | struct kvm_exit_mmio mmio; |
169 | unsigned long data; | ||
108 | unsigned long rt; | 170 | unsigned long rt; |
109 | int ret; | 171 | int ret; |
110 | 172 | ||
@@ -125,13 +187,15 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
125 | } | 187 | } |
126 | 188 | ||
127 | rt = vcpu->arch.mmio_decode.rt; | 189 | rt = vcpu->arch.mmio_decode.rt; |
190 | data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len); | ||
191 | |||
128 | trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE : | 192 | trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE : |
129 | KVM_TRACE_MMIO_READ_UNSATISFIED, | 193 | KVM_TRACE_MMIO_READ_UNSATISFIED, |
130 | mmio.len, fault_ipa, | 194 | mmio.len, fault_ipa, |
131 | (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0); | 195 | (mmio.is_write) ? data : 0); |
132 | 196 | ||
133 | if (mmio.is_write) | 197 | if (mmio.is_write) |
134 | memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len); | 198 | mmio_write_buf(mmio.data, mmio.len, data); |
135 | 199 | ||
136 | if (vgic_handle_mmio(vcpu, run, &mmio)) | 200 | if (vgic_handle_mmio(vcpu, run, &mmio)) |
137 | return 1; | 201 | return 1; |
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c index 311263124acf..0881bf169fbc 100644 --- a/arch/arm/kvm/psci.c +++ b/arch/arm/kvm/psci.c | |||
@@ -71,6 +71,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
71 | vcpu_set_thumb(vcpu); | 71 | vcpu_set_thumb(vcpu); |
72 | } | 72 | } |
73 | 73 | ||
74 | /* Propagate caller endianness */ | ||
75 | if (kvm_vcpu_is_be(source_vcpu)) | ||
76 | kvm_vcpu_set_be(vcpu); | ||
77 | |||
74 | *vcpu_pc(vcpu) = target_pc; | 78 | *vcpu_pc(vcpu) = target_pc; |
75 | vcpu->arch.pause = false; | 79 | vcpu->arch.pause = false; |
76 | smp_mb(); /* Make sure the above is visible */ | 80 | smp_mb(); /* Make sure the above is visible */ |
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index a5f28e2720c7..c98ef4771c73 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h | |||
@@ -63,6 +63,7 @@ | |||
63 | * TAC: Trap ACTLR | 63 | * TAC: Trap ACTLR |
64 | * TSC: Trap SMC | 64 | * TSC: Trap SMC |
65 | * TSW: Trap cache operations by set/way | 65 | * TSW: Trap cache operations by set/way |
66 | * TWE: Trap WFE | ||
66 | * TWI: Trap WFI | 67 | * TWI: Trap WFI |
67 | * TIDCP: Trap L2CTLR/L2ECTLR | 68 | * TIDCP: Trap L2CTLR/L2ECTLR |
68 | * BSU_IS: Upgrade barriers to the inner shareable domain | 69 | * BSU_IS: Upgrade barriers to the inner shareable domain |
@@ -72,8 +73,9 @@ | |||
72 | * FMO: Override CPSR.F and enable signaling with VF | 73 | * FMO: Override CPSR.F and enable signaling with VF |
73 | * SWIO: Turn set/way invalidates into set/way clean+invalidate | 74 | * SWIO: Turn set/way invalidates into set/way clean+invalidate |
74 | */ | 75 | */ |
75 | #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \ | 76 | #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ |
76 | HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ | 77 | HCR_BSU_IS | HCR_FB | HCR_TAC | \ |
78 | HCR_AMO | HCR_IMO | HCR_FMO | \ | ||
77 | HCR_SWIO | HCR_TIDCP | HCR_RW) | 79 | HCR_SWIO | HCR_TIDCP | HCR_RW) |
78 | #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) | 80 | #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) |
79 | 81 | ||
@@ -242,4 +244,6 @@ | |||
242 | 244 | ||
243 | #define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10 | 245 | #define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10 |
244 | 246 | ||
247 | #define ESR_EL2_EC_WFI_ISS_WFE (1 << 0) | ||
248 | |||
245 | #endif /* __ARM64_KVM_ARM_H__ */ | 249 | #endif /* __ARM64_KVM_ARM_H__ */ |
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 6df93cdc652b..dd8ecfc3f995 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
@@ -182,4 +182,60 @@ static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) | |||
182 | return vcpu_sys_reg(vcpu, MPIDR_EL1); | 182 | return vcpu_sys_reg(vcpu, MPIDR_EL1); |
183 | } | 183 | } |
184 | 184 | ||
185 | static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) | ||
186 | { | ||
187 | if (vcpu_mode_is_32bit(vcpu)) | ||
188 | *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT; | ||
189 | else | ||
190 | vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25); | ||
191 | } | ||
192 | |||
193 | static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) | ||
194 | { | ||
195 | if (vcpu_mode_is_32bit(vcpu)) | ||
196 | return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT); | ||
197 | |||
198 | return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); | ||
199 | } | ||
200 | |||
201 | static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, | ||
202 | unsigned long data, | ||
203 | unsigned int len) | ||
204 | { | ||
205 | if (kvm_vcpu_is_be(vcpu)) { | ||
206 | switch (len) { | ||
207 | case 1: | ||
208 | return data & 0xff; | ||
209 | case 2: | ||
210 | return be16_to_cpu(data & 0xffff); | ||
211 | case 4: | ||
212 | return be32_to_cpu(data & 0xffffffff); | ||
213 | default: | ||
214 | return be64_to_cpu(data); | ||
215 | } | ||
216 | } | ||
217 | |||
218 | return data; /* Leave LE untouched */ | ||
219 | } | ||
220 | |||
221 | static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, | ||
222 | unsigned long data, | ||
223 | unsigned int len) | ||
224 | { | ||
225 | if (kvm_vcpu_is_be(vcpu)) { | ||
226 | switch (len) { | ||
227 | case 1: | ||
228 | return data & 0xff; | ||
229 | case 2: | ||
230 | return cpu_to_be16(data & 0xffff); | ||
231 | case 4: | ||
232 | return cpu_to_be32(data & 0xffffffff); | ||
233 | default: | ||
234 | return cpu_to_be64(data); | ||
235 | } | ||
236 | } | ||
237 | |||
238 | return data; /* Leave LE untouched */ | ||
239 | } | ||
240 | |||
185 | #endif /* __ARM64_KVM_EMULATE_H__ */ | 241 | #endif /* __ARM64_KVM_EMULATE_H__ */ |
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 21e90820bd23..4480ab339a00 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig | |||
@@ -21,6 +21,7 @@ config KVM | |||
21 | select MMU_NOTIFIER | 21 | select MMU_NOTIFIER |
22 | select PREEMPT_NOTIFIERS | 22 | select PREEMPT_NOTIFIERS |
23 | select ANON_INODES | 23 | select ANON_INODES |
24 | select HAVE_KVM_CPU_RELAX_INTERCEPT | ||
24 | select KVM_MMIO | 25 | select KVM_MMIO |
25 | select KVM_ARM_HOST | 26 | select KVM_ARM_HOST |
26 | select KVM_ARM_VGIC | 27 | select KVM_ARM_VGIC |
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 9beaca033437..8da56067c304 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c | |||
@@ -47,21 +47,29 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
47 | } | 47 | } |
48 | 48 | ||
49 | /** | 49 | /** |
50 | * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest | 50 | * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event |
51 | * instruction executed by a guest | ||
52 | * | ||
51 | * @vcpu: the vcpu pointer | 53 | * @vcpu: the vcpu pointer |
52 | * | 54 | * |
53 | * Simply call kvm_vcpu_block(), which will halt execution of | 55 | * WFE: Yield the CPU and come back to this vcpu when the scheduler |
56 | * decides to. | ||
57 | * WFI: Simply call kvm_vcpu_block(), which will halt execution of | ||
54 | * world-switches and schedule other host processes until there is an | 58 | * world-switches and schedule other host processes until there is an |
55 | * incoming IRQ or FIQ to the VM. | 59 | * incoming IRQ or FIQ to the VM. |
56 | */ | 60 | */ |
57 | static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) | 61 | static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) |
58 | { | 62 | { |
59 | kvm_vcpu_block(vcpu); | 63 | if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE) |
64 | kvm_vcpu_on_spin(vcpu); | ||
65 | else | ||
66 | kvm_vcpu_block(vcpu); | ||
67 | |||
60 | return 1; | 68 | return 1; |
61 | } | 69 | } |
62 | 70 | ||
63 | static exit_handle_fn arm_exit_handlers[] = { | 71 | static exit_handle_fn arm_exit_handlers[] = { |
64 | [ESR_EL2_EC_WFI] = kvm_handle_wfi, | 72 | [ESR_EL2_EC_WFI] = kvm_handle_wfx, |
65 | [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32, | 73 | [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32, |
66 | [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64, | 74 | [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64, |
67 | [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access, | 75 | [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access, |