-rw-r--r--   arch/powerpc/kvm/book3s.c                 |   4
-rw-r--r--   arch/powerpc/kvm/book3s_64_interrupts.S   | 106
2 files changed, 57 insertions(+), 53 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 492dcc198dd..fd2a4d53158 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -539,8 +539,6 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = kvmppc_emulate_mmio(run, vcpu);
 		if ( r == RESUME_HOST_NV )
 			r = RESUME_HOST;
-		if ( r == RESUME_GUEST_NV )
-			r = RESUME_GUEST;
 	}
 
 	return r;
@@ -645,7 +643,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		er = kvmppc_emulate_instruction(run, vcpu);
 		switch (er) {
 		case EMULATE_DONE:
-			r = RESUME_GUEST;
+			r = RESUME_GUEST_NV;
 			break;
 		case EMULATE_FAIL:
 			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
index 7b55d8094c8..d95d0d967d5 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -40,6 +40,26 @@
 	mtmsrd	r0,1
 .endm
 
+#define VCPU_LOAD_NVGPRS(vcpu) \
+	ld	r14, VCPU_GPR(r14)(vcpu); \
+	ld	r15, VCPU_GPR(r15)(vcpu); \
+	ld	r16, VCPU_GPR(r16)(vcpu); \
+	ld	r17, VCPU_GPR(r17)(vcpu); \
+	ld	r18, VCPU_GPR(r18)(vcpu); \
+	ld	r19, VCPU_GPR(r19)(vcpu); \
+	ld	r20, VCPU_GPR(r20)(vcpu); \
+	ld	r21, VCPU_GPR(r21)(vcpu); \
+	ld	r22, VCPU_GPR(r22)(vcpu); \
+	ld	r23, VCPU_GPR(r23)(vcpu); \
+	ld	r24, VCPU_GPR(r24)(vcpu); \
+	ld	r25, VCPU_GPR(r25)(vcpu); \
+	ld	r26, VCPU_GPR(r26)(vcpu); \
+	ld	r27, VCPU_GPR(r27)(vcpu); \
+	ld	r28, VCPU_GPR(r28)(vcpu); \
+	ld	r29, VCPU_GPR(r29)(vcpu); \
+	ld	r30, VCPU_GPR(r30)(vcpu); \
+	ld	r31, VCPU_GPR(r31)(vcpu); \
+
 /*****************************************************************************
  *                                                                           *
  *     Guest entry / exit code that is in kernel module memory (highmem)    *
@@ -67,12 +87,16 @@ kvm_start_entry:
 	SAVE_NVGPRS(r1)
 
 	/* Save LR */
-	mflr	r14
-	std	r14, _LINK(r1)
+	std	r0, _LINK(r1)
+
+	/* Load non-volatile guest state from the vcpu */
+	VCPU_LOAD_NVGPRS(r4)
 
-/* XXX optimize non-volatile loading away */
 kvm_start_lightweight:
 
+	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
+	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
+
 	DISABLE_INTERRUPTS
 
 	/* Save R1/R2 in the PACA */
@@ -81,29 +105,6 @@ kvm_start_lightweight:
 	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
 	std	r3, PACASAVEDMSR(r13)
 
-	/* Load non-volatile guest state from the vcpu */
-	ld	r14, VCPU_GPR(r14)(r4)
-	ld	r15, VCPU_GPR(r15)(r4)
-	ld	r16, VCPU_GPR(r16)(r4)
-	ld	r17, VCPU_GPR(r17)(r4)
-	ld	r18, VCPU_GPR(r18)(r4)
-	ld	r19, VCPU_GPR(r19)(r4)
-	ld	r20, VCPU_GPR(r20)(r4)
-	ld	r21, VCPU_GPR(r21)(r4)
-	ld	r22, VCPU_GPR(r22)(r4)
-	ld	r23, VCPU_GPR(r23)(r4)
-	ld	r24, VCPU_GPR(r24)(r4)
-	ld	r25, VCPU_GPR(r25)(r4)
-	ld	r26, VCPU_GPR(r26)(r4)
-	ld	r27, VCPU_GPR(r27)(r4)
-	ld	r28, VCPU_GPR(r28)(r4)
-	ld	r29, VCPU_GPR(r29)(r4)
-	ld	r30, VCPU_GPR(r30)(r4)
-	ld	r31, VCPU_GPR(r31)(r4)
-
-	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
-	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
-
 	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
 	mtsrr0	r3
 
@@ -247,7 +248,6 @@ kvmppc_handler_highmem:
 
 no_dcbz32_off:
 
-	/* XXX maybe skip on lightweight? */
 	std	r14, VCPU_GPR(r14)(r12)
 	std	r15, VCPU_GPR(r15)(r12)
 	std	r16, VCPU_GPR(r16)(r12)
@@ -267,9 +267,6 @@ no_dcbz32_off:
 	std	r30, VCPU_GPR(r30)(r12)
 	std	r31, VCPU_GPR(r31)(r12)
 
-	/* Restore non-volatile host registers (r14 - r31) */
-	REST_NVGPRS(r1)
-
 	/* Save guest PC (R10) */
 	std	r10, VCPU_PC(r12)
 
@@ -351,42 +348,51 @@ kvm_return_point:
 
 	/* Jump back to lightweight entry if we're supposed to */
 	/* go back into the guest */
+
+	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
 	mr	r5, r3
+
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
 	bl	KVMPPC_HANDLE_EXIT
 
-#if 0 /* XXX get lightweight exits back */
+	/* If RESUME_GUEST, get back in the loop */
 	cmpwi	r3, RESUME_GUEST
-	bne	kvm_exit_heavyweight
+	beq	kvm_loop_lightweight
 
-	/* put VCPU and KVM_RUN back into place and roll again! */
-	REST_2GPRS(3, r1)
-	b	kvm_start_lightweight
+	cmpwi	r3, RESUME_GUEST_NV
+	beq	kvm_loop_heavyweight
 
-kvm_exit_heavyweight:
-	/* Restore non-volatile host registers */
-	ld	r14, _LINK(r1)
-	mtlr	r14
-	REST_NVGPRS(r1)
+kvm_exit_loop:
 
-	addi	r1, r1, SWITCH_FRAME_SIZE
-#else
 	ld	r4, _LINK(r1)
 	mtlr	r4
 
-	cmpwi	r3, RESUME_GUEST
-	bne	kvm_exit_heavyweight
+	/* Restore non-volatile host registers (r14 - r31) */
+	REST_NVGPRS(r1)
+
+	addi	r1, r1, SWITCH_FRAME_SIZE
+	blr
+
+kvm_loop_heavyweight:
 
+	ld	r4, _LINK(r1)
+	std	r4, (16 + SWITCH_FRAME_SIZE)(r1)
+
+	/* Load vcpu and cpu_run */
 	REST_2GPRS(3, r1)
 
-	addi	r1, r1, SWITCH_FRAME_SIZE
+	/* Load non-volatile guest state from the vcpu */
+	VCPU_LOAD_NVGPRS(r4)
 
-	b	kvm_start_entry
+	/* Jump back into the beginning of this function */
+	b	kvm_start_lightweight
 
-kvm_exit_heavyweight:
+kvm_loop_lightweight:
 
-	addi	r1, r1, SWITCH_FRAME_SIZE
-#endif
+	/* We'll need the vcpu pointer */
+	REST_GPR(4, r1)
+
+	/* Jump back into the beginning of this function */
+	b	kvm_start_lightweight
 
-	blr