about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	David Daney <david.daney@cavium.com>	2013-08-01 16:22:35 -0400
committer	Gleb Natapov <gleb@redhat.com>	2013-08-26 05:30:49 -0400
commitea69f28ddfcccbd40f941e68045ddfd7bb6cdc00 (patch)
treea7ad6b750545ae4cdf6bd3aee7da8656ff5f8198
parentbb48c2fc6429314fa607106ccb901552484c6663 (diff)
mips/kvm: Make kvm_locore.S 64-bit buildable/safe.
We need to use more of the Macros in asm.h to allow kvm_locore.S to build in a 64-bit kernel. For 32-bit there is no change in the generated object code.

Signed-off-by: David Daney <david.daney@cavium.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
-rw-r--r--	arch/mips/kvm/kvm_locore.S	| 54
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
index ace372b0e3d1..bbace092ad0a 100644
--- a/arch/mips/kvm/kvm_locore.S
+++ b/arch/mips/kvm/kvm_locore.S
@@ -60,7 +60,7 @@
60 60
61FEXPORT(__kvm_mips_vcpu_run) 61FEXPORT(__kvm_mips_vcpu_run)
62 /* k0/k1 not being used in host kernel context */ 62 /* k0/k1 not being used in host kernel context */
63 addiu k1, sp, -PT_SIZE 63 INT_ADDIU k1, sp, -PT_SIZE
64 LONG_S $0, PT_R0(k1) 64 LONG_S $0, PT_R0(k1)
65 LONG_S $1, PT_R1(k1) 65 LONG_S $1, PT_R1(k1)
66 LONG_S $2, PT_R2(k1) 66 LONG_S $2, PT_R2(k1)
@@ -121,7 +121,7 @@ FEXPORT(__kvm_mips_vcpu_run)
121 mtc0 a1, CP0_DDATA_LO 121 mtc0 a1, CP0_DDATA_LO
122 122
123 /* Offset into vcpu->arch */ 123 /* Offset into vcpu->arch */
124 addiu k1, a1, VCPU_HOST_ARCH 124 INT_ADDIU k1, a1, VCPU_HOST_ARCH
125 125
126 /* 126 /*
127 * Save the host stack to VCPU, used for exception processing 127 * Save the host stack to VCPU, used for exception processing
@@ -159,16 +159,16 @@ FEXPORT(__kvm_mips_vcpu_run)
159 159
160FEXPORT(__kvm_mips_load_asid) 160FEXPORT(__kvm_mips_load_asid)
161 /* Set the ASID for the Guest Kernel */ 161 /* Set the ASID for the Guest Kernel */
162 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 162 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
163 /* addresses shift to 0x80000000 */ 163 /* addresses shift to 0x80000000 */
164 bltz t0, 1f /* If kernel */ 164 bltz t0, 1f /* If kernel */
165 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 165 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
166 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ 166 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
1671: 1671:
168 /* t1: contains the base of the ASID array, need to get the cpu id */ 168 /* t1: contains the base of the ASID array, need to get the cpu id */
169 LONG_L t2, TI_CPU($28) /* smp_processor_id */ 169 LONG_L t2, TI_CPU($28) /* smp_processor_id */
170 sll t2, t2, 2 /* x4 */ 170 INT_SLL t2, t2, 2 /* x4 */
171 addu t3, t1, t2 171 REG_ADDU t3, t1, t2
172 LONG_L k0, (t3) 172 LONG_L k0, (t3)
173 andi k0, k0, 0xff 173 andi k0, k0, 0xff
174 mtc0 k0, CP0_ENTRYHI 174 mtc0 k0, CP0_ENTRYHI
@@ -236,10 +236,10 @@ VECTOR(MIPSX(exception), unknown)
236 ehb #02: 236 ehb #02:
237 237
238 mfc0 k0, CP0_EBASE #02: Get EBASE 238 mfc0 k0, CP0_EBASE #02: Get EBASE
239 srl k0, k0, 10 #03: Get rid of CPUNum 239 INT_SRL k0, k0, 10 #03: Get rid of CPUNum
240 sll k0, k0, 10 #04 240 INT_SLL k0, k0, 10 #04
241 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 241 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
242 addiu k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000 242 INT_ADDIU k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
243 j k0 #07: jump to the function 243 j k0 #07: jump to the function
244 nop #08: branch delay slot 244 nop #08: branch delay slot
245VECTOR_END(MIPSX(exceptionEnd)) 245VECTOR_END(MIPSX(exceptionEnd))
@@ -253,7 +253,7 @@ VECTOR_END(MIPSX(exceptionEnd))
253NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) 253NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
254 /* Get the VCPU pointer from DDTATA_LO */ 254 /* Get the VCPU pointer from DDTATA_LO */
255 mfc0 k1, CP0_DDATA_LO 255 mfc0 k1, CP0_DDATA_LO
256 addiu k1, k1, VCPU_HOST_ARCH 256 INT_ADDIU k1, k1, VCPU_HOST_ARCH
257 257
258 /* Start saving Guest context to VCPU */ 258 /* Start saving Guest context to VCPU */
259 LONG_S $0, VCPU_R0(k1) 259 LONG_S $0, VCPU_R0(k1)
@@ -304,7 +304,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
304 LONG_S t0, VCPU_R26(k1) 304 LONG_S t0, VCPU_R26(k1)
305 305
306 /* Get GUEST k1 and save it in VCPU */ 306 /* Get GUEST k1 and save it in VCPU */
307 la t1, ~0x2ff 307 PTR_LI t1, ~0x2ff
308 mfc0 t0, CP0_EBASE 308 mfc0 t0, CP0_EBASE
309 and t0, t0, t1 309 and t0, t0, t1
310 LONG_L t0, 0x3000(t0) 310 LONG_L t0, 0x3000(t0)
@@ -367,7 +367,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
367 LONG_L sp, VCPU_HOST_STACK(k1) 367 LONG_L sp, VCPU_HOST_STACK(k1)
368 368
369 /* Saved host state */ 369 /* Saved host state */
370 addiu sp, sp, -PT_SIZE 370 INT_ADDIU sp, sp, -PT_SIZE
371 371
372 /* XXXKYMA do we need to load the host ASID, maybe not because the 372 /* XXXKYMA do we need to load the host ASID, maybe not because the
373 * kernel entries are marked GLOBAL, need to verify 373 * kernel entries are marked GLOBAL, need to verify
@@ -378,7 +378,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
378 mtc0 k0, CP0_DDATA_LO 378 mtc0 k0, CP0_DDATA_LO
379 379
380 /* Restore RDHWR access */ 380 /* Restore RDHWR access */
381 la k0, 0x2000000F 381 PTR_LI k0, 0x2000000F
382 mtc0 k0, CP0_HWRENA 382 mtc0 k0, CP0_HWRENA
383 383
384 /* Jump to handler */ 384 /* Jump to handler */
@@ -386,9 +386,9 @@ FEXPORT(__kvm_mips_jump_to_handler)
386 /* XXXKYMA: not sure if this is safe, how large is the stack?? 386 /* XXXKYMA: not sure if this is safe, how large is the stack??
387 * Now jump to the kvm_mips_handle_exit() to see if we can deal 387 * Now jump to the kvm_mips_handle_exit() to see if we can deal
388 * with this in the kernel */ 388 * with this in the kernel */
389 la t9, kvm_mips_handle_exit 389 PTR_LA t9, kvm_mips_handle_exit
390 jalr.hb t9 390 jalr.hb t9
391 addiu sp, sp, -CALLFRAME_SIZ /* BD Slot */ 391 INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */
392 392
393 /* Return from handler Make sure interrupts are disabled */ 393 /* Return from handler Make sure interrupts are disabled */
394 di 394 di
@@ -400,7 +400,7 @@ FEXPORT(__kvm_mips_jump_to_handler)
400 */ 400 */
401 401
402 move k1, s1 402 move k1, s1
403 addiu k1, k1, VCPU_HOST_ARCH 403 INT_ADDIU k1, k1, VCPU_HOST_ARCH
404 404
405 /* Check return value, should tell us if we are returning to the 405 /* Check return value, should tell us if we are returning to the
406 * host (handle I/O etc)or resuming the guest 406 * host (handle I/O etc)or resuming the guest
@@ -438,16 +438,16 @@ __kvm_mips_return_to_guest:
438 mtc0 t0, CP0_EPC 438 mtc0 t0, CP0_EPC
439 439
440 /* Set the ASID for the Guest Kernel */ 440 /* Set the ASID for the Guest Kernel */
441 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 441 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
442 /* addresses shift to 0x80000000 */ 442 /* addresses shift to 0x80000000 */
443 bltz t0, 1f /* If kernel */ 443 bltz t0, 1f /* If kernel */
444 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 444 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
445 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ 445 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
4461: 4461:
447 /* t1: contains the base of the ASID array, need to get the cpu id */ 447 /* t1: contains the base of the ASID array, need to get the cpu id */
448 LONG_L t2, TI_CPU($28) /* smp_processor_id */ 448 LONG_L t2, TI_CPU($28) /* smp_processor_id */
449 sll t2, t2, 2 /* x4 */ 449 INT_SLL t2, t2, 2 /* x4 */
450 addu t3, t1, t2 450 REG_ADDU t3, t1, t2
451 LONG_L k0, (t3) 451 LONG_L k0, (t3)
452 andi k0, k0, 0xff 452 andi k0, k0, 0xff
453 mtc0 k0,CP0_ENTRYHI 453 mtc0 k0,CP0_ENTRYHI
@@ -505,7 +505,7 @@ FEXPORT(__kvm_mips_skip_guest_restore)
505__kvm_mips_return_to_host: 505__kvm_mips_return_to_host:
506 /* EBASE is already pointing to Linux */ 506 /* EBASE is already pointing to Linux */
507 LONG_L k1, VCPU_HOST_STACK(k1) 507 LONG_L k1, VCPU_HOST_STACK(k1)
508 addiu k1,k1, -PT_SIZE 508 INT_ADDIU k1,k1, -PT_SIZE
509 509
510 /* Restore host DDATA_LO */ 510 /* Restore host DDATA_LO */
511 LONG_L k0, PT_HOST_USERLOCAL(k1) 511 LONG_L k0, PT_HOST_USERLOCAL(k1)
@@ -523,7 +523,7 @@ __kvm_mips_return_to_host:
523 523
524 /* r2/v0 is the return code, shift it down by 2 (arithmetic) 524 /* r2/v0 is the return code, shift it down by 2 (arithmetic)
525 * to recover the err code */ 525 * to recover the err code */
526 sra k0, v0, 2 526 INT_SRA k0, v0, 2
527 move $2, k0 527 move $2, k0
528 528
529 LONG_L $3, PT_R3(k1) 529 LONG_L $3, PT_R3(k1)
@@ -563,7 +563,7 @@ __kvm_mips_return_to_host:
563 mtlo k0 563 mtlo k0
564 564
565 /* Restore RDHWR access */ 565 /* Restore RDHWR access */
566 la k0, 0x2000000F 566 PTR_LI k0, 0x2000000F
567 mtc0 k0, CP0_HWRENA 567 mtc0 k0, CP0_HWRENA
568 568
569 569
@@ -627,13 +627,13 @@ LEAF(MIPSX(SyncICache))
627 .set mips32r2 627 .set mips32r2
628 beq a1, zero, 20f 628 beq a1, zero, 20f
629 nop 629 nop
630 addu a1, a0, a1 630 REG_ADDU a1, a0, a1
631 rdhwr v0, HW_SYNCI_Step 631 rdhwr v0, HW_SYNCI_Step
632 beq v0, zero, 20f 632 beq v0, zero, 20f
633 nop 633 nop
63410: 63410:
635 synci 0(a0) 635 synci 0(a0)
636 addu a0, a0, v0 636 REG_ADDU a0, a0, v0
637 sltu v1, a0, a1 637 sltu v1, a0, a1
638 bne v1, zero, 10b 638 bne v1, zero, 10b
639 nop 639 nop