author	James Hogan <james.hogan@imgtec.com>	2016-07-08 06:53:23 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2016-08-01 12:42:22 -0400
commit	e41637d85846b5b4b6ef5232a22b7e74c03f1be6 (patch)
tree	239feed77d5f70d19c6c45d7687b105a2e74f49f
parent	28cc5bd568745a58bb06291ac336d06b66c66dff (diff)
MIPS: KVM: Make entry code MIPS64 friendly
The MIPS KVM entry code (originally kvm_locore.S, later locore.S, and
now entry.c) has never quite been right when built for 64-bit, using
32-bit instructions when 64-bit instructions were needed for handling
64-bit registers and pointers. Fix several cases of this now.

The changes roughly fall into the following categories.

- COP0 scratch registers contain guest register values and the VCPU
  pointer, and are themselves full width. Similarly CP0_EPC and
  CP0_BadVAddr registers are full width (even though technically we
  don't support 64-bit guest address spaces with trap & emulate KVM).
  Use MFC0/MTC0 for accessing them.

- Handling of stack pointers and the VCPU pointer must match the
  pointer size of the kernel ABI (always o32 or n64), so use ADDIU.

- The CPU number in thread_info, and the guest_{user,kernel}_asid
  arrays in kvm_vcpu_arch are all 32 bit integers, so use lw (instead
  of LW) to load them.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
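[Editor's note: the uppercase UASM_i_* emitters this patch switches to are thin wrappers in arch/mips/include/asm/uasm.h that pick the instruction width at build time from CONFIG_64BIT, while the lowercase uasm_i_* calls hardwire one encoding. A paraphrased sketch of the pattern follows; the exact macro list and bodies in uasm.h may differ slightly by kernel version.

#ifdef CONFIG_64BIT
/* 64-bit kernel: emit doubleword / 64-bit COP0 forms */
# define UASM_i_ADDIU(buf, rs, rt, val)	uasm_i_daddiu(buf, rs, rt, val)
# define UASM_i_LW(buf, rs, rt, off)	uasm_i_ld(buf, rs, rt, off)
# define UASM_i_SW(buf, rs, rt, off)	uasm_i_sd(buf, rs, rt, off)
# define UASM_i_MFC0(buf, rt, rd...)	uasm_i_dmfc0(buf, rt, rd)
# define UASM_i_MTC0(buf, rt, rd...)	uasm_i_dmtc0(buf, rt, rd)
#else
/* 32-bit kernel: emit the plain 32-bit forms */
# define UASM_i_ADDIU(buf, rs, rt, val)	uasm_i_addiu(buf, rs, rt, val)
# define UASM_i_LW(buf, rs, rt, off)	uasm_i_lw(buf, rs, rt, off)
# define UASM_i_SW(buf, rs, rt, off)	uasm_i_sw(buf, rs, rt, off)
# define UASM_i_MFC0(buf, rt, rd...)	uasm_i_mfc0(buf, rt, rd)
# define UASM_i_MTC0(buf, rt, rd...)	uasm_i_mtc0(buf, rt, rd)
#endif

So each lowercase-to-uppercase conversion below is a no-op on MIPS32 kernels but emits daddiu/ld/sd/dmfc0/dmtc0 on MIPS64; conversely, the two loads switched to lowercase uasm_i_lw stay 32-bit on purpose, since they read int-sized fields.]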
-rw-r--r--	arch/mips/kvm/entry.c	48
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 75ba7c2ecb3d..f4556d0279c6 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -120,12 +120,12 @@ static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
 					unsigned int frame)
 {
 	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
-	uasm_i_mfc0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
 	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
 
 	/* Save the temp scratch register value in cp0_cause of stack frame */
 	if (scratch_tmp[0] == 31) {
-		uasm_i_mfc0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
+		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
 		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
 	}
 }
@@ -138,11 +138,11 @@ static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
 	 * kvm_mips_build_save_scratch().
 	 */
 	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
-	uasm_i_mtc0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
 
 	if (scratch_tmp[0] == 31) {
 		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
-		uasm_i_mtc0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
+		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
 	}
 }
 
@@ -171,7 +171,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
 	 */
 
 	/* k0/k1 not being used in host kernel context */
-	uasm_i_addiu(&p, K1, SP, -(int)sizeof(struct pt_regs));
+	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
 	for (i = 16; i < 32; ++i) {
 		if (i == 24)
 			i = 28;
@@ -186,10 +186,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
 	kvm_mips_build_save_scratch(&p, V1, K1);
 
 	/* VCPU scratch register has pointer to vcpu */
-	uasm_i_mtc0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/* Offset into vcpu->arch */
-	uasm_i_addiu(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
+	UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
 
 	/*
 	 * Save the host stack to VCPU, used for exception processing
@@ -252,7 +252,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
 
 	/* Set Guest EPC */
 	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
-	uasm_i_mtc0(&p, T0, C0_EPC);
+	UASM_i_MTC0(&p, T0, C0_EPC);
 
 	/* Set the ASID for the Guest Kernel */
 	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
@@ -261,20 +261,20 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
 	uasm_i_xori(&p, T0, T0, KSU_USER);
 	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
-	uasm_i_addiu(&p, T1, K1,
+	UASM_i_ADDIU(&p, T1, K1,
 		     offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
 	/* else user */
-	uasm_i_addiu(&p, T1, K1,
+	UASM_i_ADDIU(&p, T1, K1,
 		     offsetof(struct kvm_vcpu_arch, guest_user_asid));
 	uasm_l_kernel_asid(&l, p);
 
 	/* t1: contains the base of the ASID array, need to get the cpu id */
 	/* smp_processor_id */
-	UASM_i_LW(&p, T2, offsetof(struct thread_info, cpu), GP);
+	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
 	/* x4 */
 	uasm_i_sll(&p, T2, T2, 2);
 	UASM_i_ADDU(&p, T3, T1, T2);
-	UASM_i_LW(&p, K0, 0, T3);
+	uasm_i_lw(&p, K0, 0, T3);
 #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
 	/* x sizeof(struct cpuinfo_mips)/4 */
 	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
@@ -344,11 +344,11 @@ void *kvm_mips_build_exception(void *addr, void *handler)
 	memset(relocs, 0, sizeof(relocs));
 
 	/* Save guest k1 into scratch register */
-	uasm_i_mtc0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
 
 	/* Get the VCPU pointer from the VCPU scratch register */
-	uasm_i_mfc0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
-	uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
 
 	/* Save guest k0 into VCPU structure */
 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
@@ -415,13 +415,13 @@ void *kvm_mips_build_exit(void *addr)
 
 	/* Finally save guest k1 to VCPU */
 	uasm_i_ehb(&p);
-	uasm_i_mfc0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
+	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
 	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
 
 	/* Now that context has been saved, we can use other registers */
 
 	/* Restore vcpu */
-	uasm_i_mfc0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
 	uasm_i_move(&p, S1, A1);
 
 	/* Restore run (vcpu->run) */
@@ -433,10 +433,10 @@ void *kvm_mips_build_exit(void *addr)
 	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
 	 * the exception
 	 */
-	uasm_i_mfc0(&p, K0, C0_EPC);
+	UASM_i_MFC0(&p, K0, C0_EPC);
 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
 
-	uasm_i_mfc0(&p, K0, C0_BADVADDR);
+	UASM_i_MFC0(&p, K0, C0_BADVADDR);
 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
 		  K1);
 
@@ -506,7 +506,7 @@ void *kvm_mips_build_exit(void *addr)
 	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
 
 	/* Saved host state */
-	uasm_i_addiu(&p, SP, SP, -(int)sizeof(struct pt_regs));
+	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
 
 	/*
 	 * XXXKYMA do we need to load the host ASID, maybe not because the
@@ -529,7 +529,7 @@ void *kvm_mips_build_exit(void *addr)
 	 */
 	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
 	uasm_i_jalr(&p, RA, T9);
-	uasm_i_addiu(&p, SP, SP, -CALLFRAME_SIZ);
+	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
 
 	uasm_resolve_relocs(relocs, labels);
 
@@ -569,7 +569,7 @@ static void *kvm_mips_build_ret_from_exit(void *addr)
 	 */
 
 	uasm_i_move(&p, K1, S1);
-	uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
 
 	/*
 	 * Check return value, should tell us if we are returning to the
@@ -603,7 +603,7 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
 	u32 *p = addr;
 
 	/* Put the saved pointer to vcpu (s1) back into the scratch register */
-	uasm_i_mtc0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/* Load up the Guest EBASE to minimize the window where BEV is set */
 	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
@@ -645,7 +645,7 @@ static void *kvm_mips_build_ret_to_host(void *addr)
 
 	/* EBASE is already pointing to Linux */
 	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
-	uasm_i_addiu(&p, K1, K1, -(int)sizeof(struct pt_regs));
+	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
 
 	/*
 	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
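[Editor's note: a word on the two loads that move the other way, from UASM_i_LW to uasm_i_lw. On a 64-bit kernel UASM_i_LW expands to ld, an 8-byte load, so using it on a 4-byte field also reads whatever sits in the adjacent 4 bytes. A minimal sketch of the distinction, using call sites from this patch; the comments describe the usual MIPS64 load semantics:

	/* thread_info::cpu is a 32-bit int: lw loads exactly 32 bits and
	 * sign-extends into the 64-bit register, which is correct here. */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);

	/* kvm_vcpu_arch::pc is a register-width field: UASM_i_LW emits
	 * lw on MIPS32 kernels and ld on MIPS64 kernels. */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
]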