author	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-04 11:47:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-04 11:47:12 -0400
commit	b05d59dfceaea72565b1648af929b037b0f96d7f (patch)
tree	bbe92714be468ed8783bce6ac2c305c0aedf8eb5 /arch/arm/kvm
parent	daf342af2f7856fd2f5c66b9fb39a8f24986ca53 (diff)
parent	820b3fcdeb80d30410f4427d2cbf9161c35fdeef (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm into next

Pull KVM updates from Paolo Bonzini:
 "At over 200 commits, covering almost all supported architectures, this
  was a pretty active cycle for KVM.  Changes include:

   - a lot of s390 changes: optimizations, support for migration, GDB
     support and more

   - ARM changes are pretty small: support for the PSCI 0.2 hypercall
     interface on both the guest and the host (the latter acked by
     Catalin)

   - initial POWER8 and little-endian host support

   - support for running u-boot on embedded POWER targets

   - pretty large changes to MIPS too, completing the userspace
     interface and improving the handling of virtualized timer hardware

   - for x86, a larger set of changes is scheduled for 3.17.  Still, we
     have a few emulator bugfixes and support for running nested
     fully-virtualized Xen guests (para-virtualized Xen guests have
     always worked).  And some optimizations too.

  The only missing architecture here is ia64.  It's not a coincidence
  that support for KVM on ia64 is scheduled for removal in 3.17"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (203 commits)
  KVM: add missing cleanup_srcu_struct
  KVM: PPC: Book3S PR: Rework SLB switching code
  KVM: PPC: Book3S PR: Use SLB entry 0
  KVM: PPC: Book3S HV: Fix machine check delivery to guest
  KVM: PPC: Book3S HV: Work around POWER8 performance monitor bugs
  KVM: PPC: Book3S HV: Make sure we don't miss dirty pages
  KVM: PPC: Book3S HV: Fix dirty map for hugepages
  KVM: PPC: Book3S HV: Put huge-page HPTEs in rmap chain for base address
  KVM: PPC: Book3S HV: Fix check for running inside guest in global_invalidates()
  KVM: PPC: Book3S: Move KVM_REG_PPC_WORT to an unused register number
  KVM: PPC: Book3S: Add ONE_REG register names that were missed
  KVM: PPC: Add CAP to indicate hcall fixes
  KVM: PPC: MPIC: Reset IRQ source private members
  KVM: PPC: Graciously fail broken LE hypercalls
  PPC: ePAPR: Fix hypercall on LE guest
  KVM: PPC: BOOK3S: Remove open coded make_dsisr in alignment handler
  KVM: PPC: BOOK3S: Always use the saved DAR value
  PPC: KVM: Make NX bit available with magic page
  KVM: PPC: Disable NX for old magic page using guests
  KVM: PPC: BOOK3S: HV: Add mixed page-size support for guest
  ...
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--	arch/arm/kvm/arm.c	1
-rw-r--r--	arch/arm/kvm/handle_exit.c	10
-rw-r--r--	arch/arm/kvm/psci.c	235
3 files changed, 224 insertions(+), 22 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index f0e50a0f3a65..3c82b37c0f9e 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -197,6 +197,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_ARM_PSCI:
+	case KVM_CAP_ARM_PSCI_0_2:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
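
The new capability case above is what userspace uses to discover PSCI 0.2 support before relying on it. A minimal probe might look like the following sketch; the KVM_CHECK_EXTENSION ioctl and the constants are the standard KVM userspace API, while the skeletal error handling is the example's own.

/* Hedged sketch: probe KVM_CAP_ARM_PSCI_0_2 from userspace.
 * KVM_CHECK_EXTENSION reports the r value computed in the switch
 * above (1 if supported, 0 otherwise). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PSCI_0_2) > 0)
		printf("PSCI 0.2 interface available\n");
	else
		printf("PSCI 0.2 interface not available\n");
	return 0;
}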
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 0de91fc6de0f..4c979d466cc1 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -38,14 +38,18 @@ static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
+	int ret;
+
 	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
 		      kvm_vcpu_hvc_get_imm(vcpu));
 
-	if (kvm_psci_call(vcpu))
+	ret = kvm_psci_call(vcpu);
+	if (ret < 0) {
+		kvm_inject_undefined(vcpu);
 		return 1;
+	}
 
-	kvm_inject_undefined(vcpu);
-	return 1;
+	return ret;
 }
 
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
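
With this change kvm_psci_call() is tri-state: a negative value means an unrecognized function (an undefined-instruction exception is injected into the guest), 0 means the call was handled but the vcpu thread must drop to userspace, and a positive value means handled, resume the guest. The 0 case surfaces as KVM_EXIT_SYSTEM_EVENT; a VMM run loop might consume it roughly as below (the vmm_* helpers are hypothetical, the exit codes are from this series).

/* Hypothetical VMM-side handling of the ret == 0 path: after KVM_RUN
 * returns, the mmap'ed kvm_run structure carries the event filled in
 * by kvm_prepare_system_event() further down this diff. */
switch (run->exit_reason) {
case KVM_EXIT_SYSTEM_EVENT:
	if (run->system_event.type == KVM_SYSTEM_EVENT_SHUTDOWN)
		vmm_shutdown();		/* guest called PSCI SYSTEM_OFF */
	else if (run->system_event.type == KVM_SYSTEM_EVENT_RESET)
		vmm_reset();		/* guest called PSCI SYSTEM_RESET */
	break;
/* ... other exit reasons ... */
}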
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 448f60e8d23c..09cf37737ee2 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -27,6 +27,36 @@
  * as described in ARM document number ARM DEN 0022A.
  */
 
+#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
+
+static unsigned long psci_affinity_mask(unsigned long affinity_level)
+{
+	if (affinity_level <= 3)
+		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);
+
+	return 0;
+}
+
+static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * NOTE: For simplicity, we make VCPU suspend emulation the
+	 * same as WFI (Wait-for-interrupt) emulation.
+	 *
+	 * This means for KVM the wakeup events are interrupts and
+	 * this is consistent with the intended use of StateID as
+	 * described in section 5.4.1 of PSCI v0.2 (ARM DEN 0022A).
+	 *
+	 * Further, we also treat power-down requests the same as
+	 * stand-by requests, as per section 5.4.2 clause 3 of the
+	 * PSCI v0.2 specification (ARM DEN 0022A). This means all
+	 * suspend states for KVM will preserve the register state.
+	 */
+	kvm_vcpu_block(vcpu);
+
+	return PSCI_RET_SUCCESS;
+}
+
 static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.pause = true;
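
The AFFINITY_MASK arithmetic above is easier to see with concrete numbers. Below is a standalone sketch with the 32-bit ARM values of the two constants inlined as assumptions (MPIDR_LEVEL_BITS is 8 and MPIDR_HWID_BITMASK is 0xffffff in arch/arm).

#include <stdio.h>

/* Worked example of psci_affinity_mask(), constants inlined as
 * assumptions (32-bit ARM values). */
#define MPIDR_LEVEL_BITS	8
#define MPIDR_HWID_BITMASK	0xffffffUL
#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}

int main(void)
{
	unsigned long level;

	/* Prints 0xffffff (level 0), 0xffff00 (level 1, Aff0 cleared),
	 * 0xff0000 (level 2) and 0x000000 (level 3: arm32's hwid mask
	 * only covers three affinity fields, so the mask is empty and
	 * the caller reports INVALID_PARAMS). */
	for (level = 0; level <= 3; level++)
		printf("level %lu -> mask 0x%06lx\n",
		       level, psci_affinity_mask(level));
	return 0;
}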
@@ -38,6 +68,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	struct kvm_vcpu *vcpu = NULL, *tmp;
 	wait_queue_head_t *wq;
 	unsigned long cpu_id;
+	unsigned long context_id;
 	unsigned long mpidr;
 	phys_addr_t target_pc;
 	int i;
@@ -58,10 +89,17 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 * Make sure the caller requested a valid CPU and that the CPU is
 	 * turned off.
 	 */
-	if (!vcpu || !vcpu->arch.pause)
-		return KVM_PSCI_RET_INVAL;
+	if (!vcpu)
+		return PSCI_RET_INVALID_PARAMS;
+	if (!vcpu->arch.pause) {
+		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
+			return PSCI_RET_ALREADY_ON;
+		else
+			return PSCI_RET_INVALID_PARAMS;
+	}
 
 	target_pc = *vcpu_reg(source_vcpu, 2);
+	context_id = *vcpu_reg(source_vcpu, 3);
 
 	kvm_reset_vcpu(vcpu);
 
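
For reference, the register convention consumed above (r1 = target MPIDR, r2 = entry point, r3 = context id, result returned in r0) is what the guest sets up before its hvc. A minimal guest-side sketch, assuming the PSCI 0.2 function IDs from uapi/linux/psci.h added by this series (the in-tree guest code goes through its own invoke_psci_fn wrappers instead):

#include <linux/psci.h>

/* Guest-side sketch of PSCI 0.2 CPU_ON, matching the vcpu_reg()
 * accesses above; ARM-only, needs hvc support to assemble. */
static long psci_cpu_on(unsigned long target_mpidr,
			unsigned long entry_point,
			unsigned long context_id)
{
	register unsigned long r0 asm("r0") = PSCI_0_2_FN_CPU_ON;
	register unsigned long r1 asm("r1") = target_mpidr;
	register unsigned long r2 asm("r2") = entry_point;
	register unsigned long r3 asm("r3") = context_id;

	asm volatile("hvc #0"
		     : "+r" (r0)
		     : "r" (r1), "r" (r2), "r" (r3)
		     : "memory");

	return r0;	/* PSCI_RET_SUCCESS, PSCI_RET_ALREADY_ON, ... */
}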
@@ -76,26 +114,160 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 		kvm_vcpu_set_be(vcpu);
 
 	*vcpu_pc(vcpu) = target_pc;
+	/*
+	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
+	 * the general purpose registers are undefined upon CPU_ON.
+	 */
+	*vcpu_reg(vcpu, 0) = context_id;
 	vcpu->arch.pause = false;
 	smp_mb();		/* Make sure the above is visible */
 
 	wq = kvm_arch_vcpu_wq(vcpu);
 	wake_up_interruptible(wq);
 
-	return KVM_PSCI_RET_SUCCESS;
+	return PSCI_RET_SUCCESS;
 }
 
-/**
- * kvm_psci_call - handle PSCI call if r0 value is in range
- * @vcpu: Pointer to the VCPU struct
- *
- * Handle PSCI calls from guests through traps from HVC instructions.
- * The calling convention is similar to SMC calls to the secure world where
- * the function number is placed in r0 and this function returns true if the
- * function number specified in r0 is withing the PSCI range, and false
- * otherwise.
- */
-bool kvm_psci_call(struct kvm_vcpu *vcpu)
+static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
+{
+	int i;
+	unsigned long mpidr;
+	unsigned long target_affinity;
+	unsigned long target_affinity_mask;
+	unsigned long lowest_affinity_level;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_vcpu *tmp;
+
+	target_affinity = *vcpu_reg(vcpu, 1);
+	lowest_affinity_level = *vcpu_reg(vcpu, 2);
+
+	/* Determine target affinity mask */
+	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
+	if (!target_affinity_mask)
+		return PSCI_RET_INVALID_PARAMS;
+
+	/* Ignore other bits of target affinity */
+	target_affinity &= target_affinity_mask;
+
+	/*
+	 * If one or more VCPUs matching target affinity are
+	 * running, report ON; otherwise report OFF.
+	 */
+	kvm_for_each_vcpu(i, tmp, kvm) {
+		mpidr = kvm_vcpu_get_mpidr(tmp);
+		if (((mpidr & target_affinity_mask) == target_affinity) &&
+		    !tmp->arch.pause) {
+			return PSCI_0_2_AFFINITY_LEVEL_ON;
+		}
+	}
+
+	return PSCI_0_2_AFFINITY_LEVEL_OFF;
+}
+
+static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
+{
+	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
+	vcpu->run->system_event.type = type;
+	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
+static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
+{
+	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
+}
+
+static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
+{
+	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
+}
+
+int kvm_psci_version(struct kvm_vcpu *vcpu)
+{
+	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+		return KVM_ARM_PSCI_0_2;
+
+	return KVM_ARM_PSCI_0_1;
+}
+
+static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+{
+	int ret = 1;
+	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long val;
+
+	switch (psci_fn) {
+	case PSCI_0_2_FN_PSCI_VERSION:
+		/*
+		 * Bits[31:16] = Major Version = 0
+		 * Bits[15:0] = Minor Version = 2
+		 */
+		val = 2;
+		break;
+	case PSCI_0_2_FN_CPU_SUSPEND:
+	case PSCI_0_2_FN64_CPU_SUSPEND:
+		val = kvm_psci_vcpu_suspend(vcpu);
+		break;
+	case PSCI_0_2_FN_CPU_OFF:
+		kvm_psci_vcpu_off(vcpu);
+		val = PSCI_RET_SUCCESS;
+		break;
+	case PSCI_0_2_FN_CPU_ON:
+	case PSCI_0_2_FN64_CPU_ON:
+		val = kvm_psci_vcpu_on(vcpu);
+		break;
+	case PSCI_0_2_FN_AFFINITY_INFO:
+	case PSCI_0_2_FN64_AFFINITY_INFO:
+		val = kvm_psci_vcpu_affinity_info(vcpu);
+		break;
+	case PSCI_0_2_FN_MIGRATE:
+	case PSCI_0_2_FN64_MIGRATE:
+		val = PSCI_RET_NOT_SUPPORTED;
+		break;
+	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+		/*
+		 * The Trusted OS is MP and hence does not require
+		 * migration, or
+		 * the Trusted OS is not present.
+		 */
+		val = PSCI_0_2_TOS_MP;
+		break;
+	case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
+	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
+		val = PSCI_RET_NOT_SUPPORTED;
+		break;
+	case PSCI_0_2_FN_SYSTEM_OFF:
+		kvm_psci_system_off(vcpu);
+		/*
+		 * We shouldn't be going back to the guest VCPU after
+		 * receiving a SYSTEM_OFF request.
+		 *
+		 * If user space accidentally/deliberately resumes the
+		 * guest VCPU after a SYSTEM_OFF request, then the
+		 * guest VCPU should see an internal failure from the
+		 * PSCI return value. To achieve this, we preload r0
+		 * (or x0) with PSCI return value INTERNAL_FAILURE.
+		 */
+		val = PSCI_RET_INTERNAL_FAILURE;
+		ret = 0;
+		break;
+	case PSCI_0_2_FN_SYSTEM_RESET:
+		kvm_psci_system_reset(vcpu);
+		/*
+		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
+		 * with PSCI return value INTERNAL_FAILURE.
+		 */
+		val = PSCI_RET_INTERNAL_FAILURE;
+		ret = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*vcpu_reg(vcpu, 0) = val;
+	return ret;
+}
+
+static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
 	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
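
The version encoding handled first in the switch above packs the major number in bits [31:16] and the minor number in bits [15:0], so the returned value 2 decodes as v0.2. A tiny decoding sketch ('ver' would come from a guest-side PSCI_VERSION call, e.g. via the hypothetical hvc wrapper sketched earlier):

#include <stdio.h>

/* Sketch: decode the PSCI_VERSION value (val = 2 above). */
static void print_psci_version(unsigned long ver)
{
	unsigned int major = (ver >> 16) & 0xffff;	/* 0 for v0.2 */
	unsigned int minor = ver & 0xffff;		/* 2 for v0.2 */

	printf("PSCI v%u.%u\n", major, minor);
}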
@@ -103,20 +275,45 @@ bool kvm_psci_call(struct kvm_vcpu *vcpu)
 	switch (psci_fn) {
 	case KVM_PSCI_FN_CPU_OFF:
 		kvm_psci_vcpu_off(vcpu);
-		val = KVM_PSCI_RET_SUCCESS;
+		val = PSCI_RET_SUCCESS;
 		break;
 	case KVM_PSCI_FN_CPU_ON:
 		val = kvm_psci_vcpu_on(vcpu);
 		break;
 	case KVM_PSCI_FN_CPU_SUSPEND:
 	case KVM_PSCI_FN_MIGRATE:
-		val = KVM_PSCI_RET_NI;
+		val = PSCI_RET_NOT_SUPPORTED;
 		break;
-
 	default:
-		return false;
+		return -EINVAL;
 	}
 
 	*vcpu_reg(vcpu, 0) = val;
-	return true;
+	return 1;
+}
+
+/**
+ * kvm_psci_call - handle PSCI call if r0 value is in range
+ * @vcpu: Pointer to the VCPU struct
+ *
+ * Handle PSCI calls from guests through traps from HVC instructions.
+ * The calling convention is similar to SMC calls to the secure world
+ * where the function number is placed in r0.
+ *
+ * This function returns: > 0 (success), 0 (success but exit to user
+ * space), and < 0 (errors)
+ *
+ * Errors:
+ * -EINVAL: Unrecognized PSCI function
+ */
+int kvm_psci_call(struct kvm_vcpu *vcpu)
+{
+	switch (kvm_psci_version(vcpu)) {
+	case KVM_ARM_PSCI_0_2:
+		return kvm_psci_0_2_call(vcpu);
+	case KVM_ARM_PSCI_0_1:
+		return kvm_psci_0_1_call(vcpu);
+	default:
+		return -EINVAL;
+	};
 }
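
kvm_psci_version() keys this dispatch on a per-vcpu feature bit that userspace sets at vcpu initialization. A hedged sketch of the opt-in (vm_fd and vcpu_fd are assumed to be already-opened KVM descriptors):

/* Hypothetical userspace sketch: opt a vcpu into PSCI 0.2 so that
 * kvm_psci_call() above routes to kvm_psci_0_2_call(). */
struct kvm_vcpu_init init;

ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);	/* fills init.target */
init.features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);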