author	Alexander Graf <agraf@suse.de>	2010-01-04 16:19:25 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2010-03-01 10:35:46 -0500
commit	97c4cfbe890a4ad82dde8660008d42b7b05dc488 (patch)
tree	9efc0daf1ec644291a10e8e6c91ff212f91732e1 /arch/powerpc
parent	b480f780f071a068810ccd0e49c1daa210bfbeab (diff)
KVM: PPC: Enable lightweight exits again
The PowerPC C ABI defines that registers r14-r31 need to be preserved across
function calls. Since our exit handler is written in C, we can make use of that
and don't need to reload r14-r31 on every entry/exit cycle.

This technique is also used in the BookE code and is called "lightweight exits"
there. To follow the tradition, it's called the same in Book3S.

So far this optimization was disabled, because the code didn't do what it was
expected to do and simply failed to work.

This patch fixes and enables lightweight exits again.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
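To make the flow concrete, here is a minimal, self-contained C model of the loop
this patch restores. It is an illustration only, not kernel code: load_nvgprs(),
enter_guest() and the loop driver are invented for the sketch, while the RESUME_*
values mirror the kernel's meaning, where the _NV flag says the guest's
non-volatile registers were modified and must be reloaded before re-entry.

	/* Illustration only -- not kernel code. Models the lightweight/heavyweight
	 * exit loop: non-volatile guest GPRs are loaded once up front, and only
	 * reloaded when the C exit handler reports RESUME_GUEST_NV. */
	#include <stdio.h>

	enum resume { RESUME_GUEST, RESUME_GUEST_NV, RESUME_HOST };

	static void load_nvgprs(void) { puts("reload guest r14-r31 (heavyweight path)"); }
	static void enter_guest(void) { puts("enter guest (lightweight path)"); }

	/* Stand-in for the C exit handler; pretends the second exit came from
	 * instruction emulation, which writes guest GPRs and so returns _NV. */
	static enum resume handle_exit(int exit_nr)
	{
		if (exit_nr == 0)
			return RESUME_GUEST;    /* r14-r31 untouched: skip the reload */
		if (exit_nr == 1)
			return RESUME_GUEST_NV; /* guest GPRs were changed: reload them */
		return RESUME_HOST;             /* leave the loop, go back to the host */
	}

	int main(void)
	{
		load_nvgprs();                  /* done once on the entry path */
		for (int exit_nr = 0; ; exit_nr++) {
			enter_guest();
			enum resume r = handle_exit(exit_nr);
			if (r == RESUME_GUEST_NV)
				load_nvgprs();  /* like kvm_loop_heavyweight */
			else if (r != RESUME_GUEST)
				break;          /* like kvm_exit_loop */
		}
		return 0;
	}

Because the C ABI guarantees that r14-r31 survive the call into the handler, the
plain RESUME_GUEST case can jump straight back into the guest without touching them.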
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kvm/book3s.c	4
-rw-r--r--	arch/powerpc/kvm/book3s_64_interrupts.S	106
2 files changed, 57 insertions(+), 53 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 492dcc198dd3..fd2a4d531582 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -539,8 +539,6 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = kvmppc_emulate_mmio(run, vcpu);
 		if ( r == RESUME_HOST_NV )
 			r = RESUME_HOST;
-		if ( r == RESUME_GUEST_NV )
-			r = RESUME_GUEST;
 	}
 
 	return r;
@@ -645,7 +643,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		er = kvmppc_emulate_instruction(run, vcpu);
 		switch (er) {
 		case EMULATE_DONE:
-			r = RESUME_GUEST;
+			r = RESUME_GUEST_NV;
 			break;
 		case EMULATE_FAIL:
 			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
index 7b55d8094c8b..d95d0d967d56 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -40,6 +40,26 @@
 	mtmsrd	r0,1
 .endm
 
+#define VCPU_LOAD_NVGPRS(vcpu) \
+	ld	r14, VCPU_GPR(r14)(vcpu); \
+	ld	r15, VCPU_GPR(r15)(vcpu); \
+	ld	r16, VCPU_GPR(r16)(vcpu); \
+	ld	r17, VCPU_GPR(r17)(vcpu); \
+	ld	r18, VCPU_GPR(r18)(vcpu); \
+	ld	r19, VCPU_GPR(r19)(vcpu); \
+	ld	r20, VCPU_GPR(r20)(vcpu); \
+	ld	r21, VCPU_GPR(r21)(vcpu); \
+	ld	r22, VCPU_GPR(r22)(vcpu); \
+	ld	r23, VCPU_GPR(r23)(vcpu); \
+	ld	r24, VCPU_GPR(r24)(vcpu); \
+	ld	r25, VCPU_GPR(r25)(vcpu); \
+	ld	r26, VCPU_GPR(r26)(vcpu); \
+	ld	r27, VCPU_GPR(r27)(vcpu); \
+	ld	r28, VCPU_GPR(r28)(vcpu); \
+	ld	r29, VCPU_GPR(r29)(vcpu); \
+	ld	r30, VCPU_GPR(r30)(vcpu); \
+	ld	r31, VCPU_GPR(r31)(vcpu); \
+
 /*****************************************************************************
  *                                                                           *
  *     Guest entry / exit code that is in kernel module memory (highmem)     *
@@ -67,12 +87,16 @@ kvm_start_entry:
 	SAVE_NVGPRS(r1)
 
 	/* Save LR */
-	mflr	r14
-	std	r14, _LINK(r1)
+	std	r0, _LINK(r1)
+
+	/* Load non-volatile guest state from the vcpu */
+	VCPU_LOAD_NVGPRS(r4)
 
-/* XXX optimize non-volatile loading away */
 kvm_start_lightweight:
 
+	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
+	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
+
 	DISABLE_INTERRUPTS
 
 	/* Save R1/R2 in the PACA */
@@ -81,29 +105,6 @@ kvm_start_lightweight:
 	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
 	std	r3, PACASAVEDMSR(r13)
 
-	/* Load non-volatile guest state from the vcpu */
-	ld	r14, VCPU_GPR(r14)(r4)
-	ld	r15, VCPU_GPR(r15)(r4)
-	ld	r16, VCPU_GPR(r16)(r4)
-	ld	r17, VCPU_GPR(r17)(r4)
-	ld	r18, VCPU_GPR(r18)(r4)
-	ld	r19, VCPU_GPR(r19)(r4)
-	ld	r20, VCPU_GPR(r20)(r4)
-	ld	r21, VCPU_GPR(r21)(r4)
-	ld	r22, VCPU_GPR(r22)(r4)
-	ld	r23, VCPU_GPR(r23)(r4)
-	ld	r24, VCPU_GPR(r24)(r4)
-	ld	r25, VCPU_GPR(r25)(r4)
-	ld	r26, VCPU_GPR(r26)(r4)
-	ld	r27, VCPU_GPR(r27)(r4)
-	ld	r28, VCPU_GPR(r28)(r4)
-	ld	r29, VCPU_GPR(r29)(r4)
-	ld	r30, VCPU_GPR(r30)(r4)
-	ld	r31, VCPU_GPR(r31)(r4)
-
-	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
-	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
-
 	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
 	mtsrr0	r3
 
@@ -247,7 +248,6 @@ kvmppc_handler_highmem:
 
 no_dcbz32_off:
 
-	/* XXX maybe skip on lightweight? */
 	std	r14, VCPU_GPR(r14)(r12)
 	std	r15, VCPU_GPR(r15)(r12)
 	std	r16, VCPU_GPR(r16)(r12)
@@ -267,9 +267,6 @@ no_dcbz32_off:
 	std	r30, VCPU_GPR(r30)(r12)
 	std	r31, VCPU_GPR(r31)(r12)
 
-	/* Restore non-volatile host registers (r14 - r31) */
-	REST_NVGPRS(r1)
-
 	/* Save guest PC (R10) */
 	std	r10, VCPU_PC(r12)
 
@@ -351,42 +348,51 @@ kvm_return_point:
 
 	/* Jump back to lightweight entry if we're supposed to */
 	/* go back into the guest */
+
+	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
 	mr	r5, r3
+
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
 	bl	KVMPPC_HANDLE_EXIT
 
-#if 0 /* XXX get lightweight exits back */
+	/* If RESUME_GUEST, get back in the loop */
 	cmpwi	r3, RESUME_GUEST
-	bne	kvm_exit_heavyweight
+	beq	kvm_loop_lightweight
 
-	/* put VCPU and KVM_RUN back into place and roll again! */
-	REST_2GPRS(3, r1)
-	b	kvm_start_lightweight
+	cmpwi	r3, RESUME_GUEST_NV
+	beq	kvm_loop_heavyweight
 
-kvm_exit_heavyweight:
-	/* Restore non-volatile host registers */
-	ld	r14, _LINK(r1)
-	mtlr	r14
-	REST_NVGPRS(r1)
+kvm_exit_loop:
 
-	addi	r1, r1, SWITCH_FRAME_SIZE
-#else
 	ld	r4, _LINK(r1)
 	mtlr	r4
 
-	cmpwi	r3, RESUME_GUEST
-	bne	kvm_exit_heavyweight
+	/* Restore non-volatile host registers (r14 - r31) */
+	REST_NVGPRS(r1)
+
+	addi	r1, r1, SWITCH_FRAME_SIZE
+	blr
+
+kvm_loop_heavyweight:
 
+	ld	r4, _LINK(r1)
+	std	r4, (16 + SWITCH_FRAME_SIZE)(r1)
+
+	/* Load vcpu and cpu_run */
 	REST_2GPRS(3, r1)
 
-	addi	r1, r1, SWITCH_FRAME_SIZE
+	/* Load non-volatile guest state from the vcpu */
+	VCPU_LOAD_NVGPRS(r4)
 
-	b	kvm_start_entry
+	/* Jump back into the beginning of this function */
+	b	kvm_start_lightweight
 
-kvm_exit_heavyweight:
+kvm_loop_lightweight:
 
-	addi	r1, r1, SWITCH_FRAME_SIZE
-#endif
+	/* We'll need the vcpu pointer */
+	REST_GPR(4, r1)
+
+	/* Jump back into the beginning of this function */
+	b	kvm_start_lightweight
 
-	blr