Diffstat (limited to 'arch/powerpc/kvm/book3s.c')
-rw-r--r--    arch/powerpc/kvm/book3s.c    309
1 file changed, 262 insertions, 47 deletions
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e294bd9b8c6..9a271f0929c7 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -33,12 +33,9 @@
 
 /* #define EXIT_DEBUG */
 /* #define EXIT_DEBUG_SIMPLE */
+/* #define DEBUG_EXT */
 
-/* Without AGGRESSIVE_DEC we only fire off a DEC interrupt when DEC turns 0.
- * When set, we retrigger a DEC interrupt after that if DEC <= 0.
- * PPC32 Linux runs faster without AGGRESSIVE_DEC, PPC64 Linux requires it. */
-
-/* #define AGGRESSIVE_DEC */
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
         { "exits", VCPU_STAT(sum_exits) },
@@ -72,16 +69,24 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
         memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
+        memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+               sizeof(get_paca()->shadow_vcpu));
         get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
         memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
+        memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+               sizeof(get_paca()->shadow_vcpu));
         to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+
+        kvmppc_giveup_ext(vcpu, MSR_FP);
+        kvmppc_giveup_ext(vcpu, MSR_VEC);
+        kvmppc_giveup_ext(vcpu, MSR_VSX);
 }
 
-#if defined(AGGRESSIVE_DEC) || defined(EXIT_DEBUG)
+#if defined(EXIT_DEBUG)
 static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 {
         u64 jd = mftb() - vcpu->arch.dec_jiffies;
@@ -89,6 +94,23 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 }
 #endif
 
+static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
+{
+        vcpu->arch.shadow_msr = vcpu->arch.msr;
+        /* Guest MSR values */
+        vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
+                                 MSR_BE | MSR_DE;
+        /* Process MSR values */
+        vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
+                                 MSR_EE;
+        /* External providers the guest reserved */
+        vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+        /* 64-bit Process MSR values */
+#ifdef CONFIG_PPC_BOOK3S_64
+        vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+#endif
+}
+
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
         ulong old_msr = vcpu->arch.msr;
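The new kvmppc_recalc_shadow_msr() replaces the single blanket mask that kvmppc_set_msr() used to apply (next hunk): bits the guest legitimately controls (FE0/FE1/SF/SE/BE/DE) pass straight through, bits the shadow context must always run with (ME/RI/IR/DR/PR/EE) are forced on, and the FP/Altivec/VSX bits are only passed through while the guest currently owns that facility. A standalone sketch of the same masking idea, where GUEST_VISIBLE, HOST_FORCED and the bit positions are illustrative stand-ins for the real MSR_* constants in asm/reg.h:

    #include <stdio.h>

    /* Illustrative stand-ins; the real values come from asm/reg.h. */
    #define MSR_FP (1UL << 13)
    #define MSR_SE (1UL << 10)
    #define MSR_EE (1UL << 15)
    #define MSR_PR (1UL << 14)

    #define GUEST_VISIBLE MSR_SE              /* bits the guest controls    */
    #define HOST_FORCED   (MSR_EE | MSR_PR)   /* bits the shadow always has */

    static unsigned long recalc_shadow_msr(unsigned long guest_msr,
                                           unsigned long guest_owned_ext)
    {
            unsigned long shadow = guest_msr & GUEST_VISIBLE; /* pass-through */

            shadow |= HOST_FORCED;                 /* host-mandated bits      */
            shadow |= guest_msr & guest_owned_ext; /* FP etc. only if owned   */
            return shadow;
    }

    int main(void)
    {
            /* Guest sets MSR_FP but does not own the FPU yet: bit stays hidden. */
            printf("%#lx\n", recalc_shadow_msr(MSR_FP | MSR_SE, 0));
            /* Once ownership is granted (see kvmppc_handle_ext below), it shows. */
            printf("%#lx\n", recalc_shadow_msr(MSR_FP | MSR_SE, MSR_FP));
            return 0;
    }

The guest_owned_ext coupling is what makes the lazy facility switching later in this diff work: a facility bit only becomes visible to hardware after kvmppc_handle_ext() has granted ownership.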
@@ -96,12 +118,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 #ifdef EXIT_DEBUG
         printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
+
         msr &= to_book3s(vcpu)->msr_mask;
         vcpu->arch.msr = msr;
-        vcpu->arch.shadow_msr = msr | MSR_USER32;
-        vcpu->arch.shadow_msr &= ( MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
-                                   MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
-                                   MSR_FE1);
+        kvmppc_recalc_shadow_msr(vcpu);
 
         if (msr & (MSR_WE|MSR_POW)) {
                 if (!vcpu->arch.pending_exceptions) {
@@ -125,11 +145,10 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
         vcpu->arch.mmu.reset_msr(vcpu);
 }
 
-void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+static int kvmppc_book3s_vec2irqprio(unsigned int vec)
 {
         unsigned int prio;
 
-        vcpu->stat.queue_intr++;
         switch (vec) {
         case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
         case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
@@ -149,15 +168,31 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
         default: prio = BOOK3S_IRQPRIO_MAX; break;
         }
 
-        set_bit(prio, &vcpu->arch.pending_exceptions);
+        return prio;
+}
+
+static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+                                          unsigned int vec)
+{
+        clear_bit(kvmppc_book3s_vec2irqprio(vec),
+                  &vcpu->arch.pending_exceptions);
+}
+
+void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+{
+        vcpu->stat.queue_intr++;
+
+        set_bit(kvmppc_book3s_vec2irqprio(vec),
+                &vcpu->arch.pending_exceptions);
 #ifdef EXIT_DEBUG
         printk(KERN_INFO "Queueing interrupt %x\n", vec);
 #endif
 }
 
 
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
+        to_book3s(vcpu)->prog_flags = flags;
         kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
 }
 
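Factoring the vector-to-priority table out into kvmppc_book3s_vec2irqprio() lets the new dequeue path share it with the existing queue path; pending exceptions stay a bitmap with one bit per priority. A minimal user-space model of that pattern (0x100/0x200/0x900 match the Book3S vectors above, the priority names are an illustrative subset of the real BOOK3S_IRQPRIO_* enum):

    #include <stdio.h>

    enum { IRQPRIO_SYSTEM_RESET, IRQPRIO_MACHINE_CHECK,
           IRQPRIO_DECREMENTER, IRQPRIO_MAX };

    static int vec2irqprio(unsigned int vec)
    {
            switch (vec) {
            case 0x100: return IRQPRIO_SYSTEM_RESET;
            case 0x200: return IRQPRIO_MACHINE_CHECK;
            case 0x900: return IRQPRIO_DECREMENTER;
            default:    return IRQPRIO_MAX;
            }
    }

    static unsigned long pending;  /* like vcpu->arch.pending_exceptions */

    /* Queue and dequeue share the one mapping table. */
    static void queue_irqprio(unsigned int vec)   { pending |=  1UL << vec2irqprio(vec); }
    static void dequeue_irqprio(unsigned int vec) { pending &= ~(1UL << vec2irqprio(vec)); }

    int main(void)
    {
            queue_irqprio(0x900);                      /* decrementer fires  */
            printf("after queue:   %#lx\n", pending);
            dequeue_irqprio(0x900);                    /* guest reloads DEC  */
            printf("after dequeue: %#lx\n", pending);
            return 0;
    }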
@@ -171,6 +206,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
         return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
 }
 
+void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
+{
+        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
+}
+
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                 struct kvm_interrupt *irq)
 {
@@ -181,6 +221,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 {
         int deliver = 1;
         int vec = 0;
+        ulong flags = 0ULL;
 
         switch (priority) {
         case BOOK3S_IRQPRIO_DECREMENTER:
@@ -214,6 +255,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
                 break;
         case BOOK3S_IRQPRIO_PROGRAM:
                 vec = BOOK3S_INTERRUPT_PROGRAM;
+                flags = to_book3s(vcpu)->prog_flags;
                 break;
         case BOOK3S_IRQPRIO_VSX:
                 vec = BOOK3S_INTERRUPT_VSX;
@@ -244,7 +286,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 #endif
 
         if (deliver)
-                kvmppc_inject_interrupt(vcpu, vec, 0ULL);
+                kvmppc_inject_interrupt(vcpu, vec, flags);
 
         return deliver;
 }
@@ -254,21 +296,15 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
         unsigned long *pending = &vcpu->arch.pending_exceptions;
         unsigned int priority;
 
-        /* XXX be more clever here - no need to mftb() on every entry */
-        /* Issue DEC again if it's still active */
-#ifdef AGGRESSIVE_DEC
-        if (vcpu->arch.msr & MSR_EE)
-                if (kvmppc_get_dec(vcpu) & 0x80000000)
-                        kvmppc_core_queue_dec(vcpu);
-#endif
-
 #ifdef EXIT_DEBUG
         if (vcpu->arch.pending_exceptions)
                 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
 #endif
         priority = __ffs(*pending);
         while (priority <= (sizeof(unsigned int) * 8)) {
-                if (kvmppc_book3s_irqprio_deliver(vcpu, priority)) {
+                if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
+                    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
+                        /* DEC interrupts get cleared by mtdec */
                         clear_bit(priority, &vcpu->arch.pending_exceptions);
                         break;
                 }
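The delivery-loop change is subtle: a delivered decrementer interrupt no longer clears its own pending bit. The guest acknowledges a DEC by reloading the decrementer register, which now funnels into kvmppc_core_dequeue_dec() above. A compact model of the new loop semantics, assuming a simplified three-priority setup (the linear scan stands in for the kernel's __ffs/find_next_bit walk):

    #include <stdio.h>

    enum { PRIO_MACHINE_CHECK, PRIO_DECREMENTER, PRIO_EXTERNAL, NR_PRIO };

    static int inject(unsigned int prio) { printf("inject prio %u\n", prio); return 1; }

    /* A delivered DEC stays pending: the guest clears it itself by
     * reloading the decrementer (mtdec -> dequeue path). */
    static void deliver_pending(unsigned long *pending)
    {
            unsigned int prio;

            for (prio = 0; prio < NR_PRIO; prio++) {
                    if (!(*pending & (1UL << prio)))
                            continue;
                    if (inject(prio) && prio != PRIO_DECREMENTER)
                            *pending &= ~(1UL << prio); /* consumed on delivery */
                    break;  /* one interrupt per entry, like the original loop */
            }
    }

    int main(void)
    {
            unsigned long pending = 1UL << PRIO_DECREMENTER;

            deliver_pending(&pending);                 /* injected...        */
            printf("still pending: %#lx\n", pending);  /* ...but bit survives */
            return 0;
    }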
@@ -503,14 +539,14 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 /* Page not found in guest PTE entries */
                 vcpu->arch.dear = vcpu->arch.fault_dear;
                 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
-                vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+                vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
                 kvmppc_book3s_queue_irqprio(vcpu, vec);
         } else if (page_found == -EPERM) {
                 /* Storage protection */
                 vcpu->arch.dear = vcpu->arch.fault_dear;
                 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
                 to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-                vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+                vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
                 kvmppc_book3s_queue_irqprio(vcpu, vec);
         } else if (page_found == -EINVAL) {
                 /* Page not found in guest SLB */
@@ -532,13 +568,122 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 r = kvmppc_emulate_mmio(run, vcpu);
                 if ( r == RESUME_HOST_NV )
                         r = RESUME_HOST;
-                if ( r == RESUME_GUEST_NV )
-                        r = RESUME_GUEST;
         }
 
         return r;
 }
 
+static inline int get_fpr_index(int i)
+{
+#ifdef CONFIG_VSX
+        i *= 2;
+#endif
+        return i;
+}
+
+/* Give up external provider (FPU, Altivec, VSX) */
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+{
+        struct thread_struct *t = &current->thread;
+        u64 *vcpu_fpr = vcpu->arch.fpr;
+        u64 *vcpu_vsx = vcpu->arch.vsr;
+        u64 *thread_fpr = (u64*)t->fpr;
+        int i;
+
+        if (!(vcpu->arch.guest_owned_ext & msr))
+                return;
+
+#ifdef DEBUG_EXT
+        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
+#endif
+
+        switch (msr) {
+        case MSR_FP:
+                giveup_fpu(current);
+                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+                        vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
+
+                vcpu->arch.fpscr = t->fpscr.val;
+                break;
+        case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+                giveup_altivec(current);
+                memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
+                vcpu->arch.vscr = t->vscr;
+#endif
+                break;
+        case MSR_VSX:
+#ifdef CONFIG_VSX
+                __giveup_vsx(current);
+                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+                        vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+                break;
+        default:
+                BUG();
+        }
+
+        vcpu->arch.guest_owned_ext &= ~msr;
+        current->thread.regs->msr &= ~msr;
+        kvmppc_recalc_shadow_msr(vcpu);
+}
+
+/* Handle external providers (FPU, Altivec, VSX) */
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+                             ulong msr)
+{
+        struct thread_struct *t = &current->thread;
+        u64 *vcpu_fpr = vcpu->arch.fpr;
+        u64 *vcpu_vsx = vcpu->arch.vsr;
+        u64 *thread_fpr = (u64*)t->fpr;
+        int i;
+
+        if (!(vcpu->arch.msr & msr)) {
+                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+                return RESUME_GUEST;
+        }
+
+#ifdef DEBUG_EXT
+        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
+#endif
+
+        current->thread.regs->msr |= msr;
+
+        switch (msr) {
+        case MSR_FP:
+                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+                        thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
+
+                t->fpscr.val = vcpu->arch.fpscr;
+                t->fpexc_mode = 0;
+                kvmppc_load_up_fpu();
+                break;
+        case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+                memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+                t->vscr = vcpu->arch.vscr;
+                t->vrsave = -1;
+                kvmppc_load_up_altivec();
+#endif
+                break;
+        case MSR_VSX:
+#ifdef CONFIG_VSX
+                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+                kvmppc_load_up_vsx();
+#endif
+                break;
+        default:
+                BUG();
+        }
+
+        vcpu->arch.guest_owned_ext |= msr;
+
+        kvmppc_recalc_shadow_msr(vcpu);
+
+        return RESUME_GUEST;
+}
+
 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int exit_nr)
 {
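kvmppc_giveup_ext() and kvmppc_handle_ext() implement lazy facility switching: guest FPU/Altivec/VSX state is loaded into the live registers only when the guest actually traps with a facility-unavailable interrupt, and copied back out when the host reclaims the facility (vcpu_put, or the bracket around vcpu_run below). get_fpr_index() hides the VSX layout: with CONFIG_VSX, FPR i overlays the high doubleword of the 128-bit VSR i, so the thread's save area interleaves them and FPR i sits at slot 2 * i with the VSR low half at 2 * i + 1. A standalone sketch of that indexing, with illustrative array sizes:

    #include <stdio.h>

    #define CONFIG_VSX 1   /* set to 0 to model a non-VSX build */

    /* With VSX, slot 2*i holds FPR i and slot 2*i+1 holds the low
     * doubleword of VSR i; without VSX the FPR slots are dense. */
    static int get_fpr_index(int i)
    {
    #if CONFIG_VSX
            i *= 2;
    #endif
            return i;
    }

    int main(void)
    {
            unsigned long long thread_fpr[64] = { 0 };  /* stand-in save area */
            unsigned long long vcpu_fpr[32], vcpu_vsx[32];
            int i;

            /* The giveup_ext direction: pull live state back into the vcpu. */
            for (i = 0; i < 32; i++) {
                    vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
    #if CONFIG_VSX
                    vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
    #endif
            }
            printf("FPR5 lives in save-area slot %d (%llx/%llx)\n",
                   get_fpr_index(5), vcpu_fpr[5], vcpu_vsx[5]);
            return 0;
    }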
@@ -563,7 +708,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
         case BOOK3S_INTERRUPT_INST_STORAGE:
                 vcpu->stat.pf_instruc++;
                 /* only care about PTEG not found errors, but leave NX alone */
-                if (vcpu->arch.shadow_msr & 0x40000000) {
+                if (vcpu->arch.shadow_srr1 & 0x40000000) {
                         r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
                         vcpu->stat.sp_instruc++;
                 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -575,7 +720,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          */
                        kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
                 } else {
-                        vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
+                        vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                         kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
                         r = RESUME_GUEST;
@@ -621,6 +766,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
         case BOOK3S_INTERRUPT_PROGRAM:
         {
                 enum emulation_result er;
+                ulong flags;
+
+                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
                 if (vcpu->arch.msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -628,7 +776,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 #endif
                         if ((vcpu->arch.last_inst & 0xff0007ff) !=
                             (INS_DCBZ & 0xfffffff7)) {
-                                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+                                kvmppc_core_queue_program(vcpu, flags);
                                 r = RESUME_GUEST;
                                 break;
                         }
@@ -638,12 +786,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 er = kvmppc_emulate_instruction(run, vcpu);
                 switch (er) {
                 case EMULATE_DONE:
-                        r = RESUME_GUEST;
+                        r = RESUME_GUEST_NV;
                         break;
                 case EMULATE_FAIL:
                         printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                                __func__, vcpu->arch.pc, vcpu->arch.last_inst);
-                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+                        kvmppc_core_queue_program(vcpu, flags);
                         r = RESUME_GUEST;
                         break;
                 default:
@@ -653,23 +801,30 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
         }
         case BOOK3S_INTERRUPT_SYSCALL:
 #ifdef EXIT_DEBUG
-                printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
+                printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
 #endif
                 vcpu->stat.syscall_exits++;
                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                 r = RESUME_GUEST;
                 break;
-        case BOOK3S_INTERRUPT_MACHINE_CHECK:
         case BOOK3S_INTERRUPT_FP_UNAVAIL:
-        case BOOK3S_INTERRUPT_TRACE:
+                r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
+                break;
         case BOOK3S_INTERRUPT_ALTIVEC:
+                r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
+                break;
         case BOOK3S_INTERRUPT_VSX:
+                r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
+                break;
+        case BOOK3S_INTERRUPT_MACHINE_CHECK:
+        case BOOK3S_INTERRUPT_TRACE:
                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                 r = RESUME_GUEST;
                 break;
         default:
                 /* Ugh - bork here! What did we get? */
-                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
+                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
+                        exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
                 r = RESUME_HOST;
                 BUG();
                 break;
@@ -712,10 +867,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         int i;
 
         regs->pc = vcpu->arch.pc;
-        regs->cr = vcpu->arch.cr;
+        regs->cr = kvmppc_get_cr(vcpu);
         regs->ctr = vcpu->arch.ctr;
         regs->lr = vcpu->arch.lr;
-        regs->xer = vcpu->arch.xer;
+        regs->xer = kvmppc_get_xer(vcpu);
         regs->msr = vcpu->arch.msr;
         regs->srr0 = vcpu->arch.srr0;
         regs->srr1 = vcpu->arch.srr1;
@@ -729,7 +884,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         regs->sprg7 = vcpu->arch.sprg6;
 
         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
-                regs->gpr[i] = vcpu->arch.gpr[i];
+                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
         return 0;
 }
@@ -739,10 +894,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         int i;
 
         vcpu->arch.pc = regs->pc;
-        vcpu->arch.cr = regs->cr;
+        kvmppc_set_cr(vcpu, regs->cr);
         vcpu->arch.ctr = regs->ctr;
         vcpu->arch.lr = regs->lr;
-        vcpu->arch.xer = regs->xer;
+        kvmppc_set_xer(vcpu, regs->xer);
         kvmppc_set_msr(vcpu, regs->msr);
         vcpu->arch.srr0 = regs->srr0;
         vcpu->arch.srr1 = regs->srr1;
@@ -754,8 +909,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         vcpu->arch.sprg6 = regs->sprg5;
         vcpu->arch.sprg7 = regs->sprg6;
 
-        for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
-                vcpu->arch.gpr[i] = regs->gpr[i];
+        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
         return 0;
 }
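The register ioctls now go through kvmppc_get_gpr()/kvmppc_set_gpr() and the CR/XER equivalents instead of poking vcpu->arch fields directly, presumably so every register access has one funnel point and the backing storage can later be relocated (for instance into the shadow-vcpu area copied to the PACA earlier in this diff) without touching any call site. A sketch of that accessor pattern, under that assumption and with cut-down stand-in structures:

    #include <stdio.h>

    /* Cut-down stand-ins for the kernel structures. */
    struct vcpu_arch { unsigned long gpr[32]; };
    struct kvm_vcpu  { struct vcpu_arch arch; };

    /* One funnel point per register file: callers never touch the
     * backing array, so it can move without breaking them. */
    static inline unsigned long kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
    {
            return vcpu->arch.gpr[num];
    }

    static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num,
                                      unsigned long val)
    {
            vcpu->arch.gpr[num] = val;
    }

    int main(void)
    {
            struct kvm_vcpu vcpu;

            kvmppc_set_gpr(&vcpu, 0, 4);   /* e.g. a guest syscall number in r0 */
            printf("Syscall Nr %d\n", (int)kvmppc_get_gpr(&vcpu, 0));
            return 0;
    }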
@@ -850,7 +1005,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         int is_dirty = 0;
         int r, n;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
 
         r = kvm_get_dirty_log(kvm, log, &is_dirty);
         if (r)
@@ -858,7 +1013,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
         /* If nothing is dirty, don't bother messing with page tables. */
         if (is_dirty) {
-                memslot = &kvm->memslots[log->slot];
+                memslot = &kvm->memslots->memslots[log->slot];
 
                 ga = memslot->base_gfn << PAGE_SHIFT;
                 ga_end = ga + (memslot->npages << PAGE_SHIFT);
@@ -872,7 +1027,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
         r = 0;
 out:
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         return r;
 }
 
@@ -910,6 +1065,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
         vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
         vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
         vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+        vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
 
         vcpu->arch.shadow_msr = MSR_USER64;
 
@@ -943,6 +1099,10 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
         int ret;
+        struct thread_struct ext_bkp;
+        bool save_vec = current->thread.used_vr;
+        bool save_vsx = current->thread.used_vsr;
+        ulong ext_msr;
 
         /* No need to go into the guest when all we do is going out */
         if (signal_pending(current)) {
@@ -950,6 +1110,35 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                 return -EINTR;
         }
 
+        /* Save FPU state in stack */
+        if (current->thread.regs->msr & MSR_FP)
+                giveup_fpu(current);
+        memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
+        ext_bkp.fpscr = current->thread.fpscr;
+        ext_bkp.fpexc_mode = current->thread.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+        /* Save Altivec state in stack */
+        if (save_vec) {
+                if (current->thread.regs->msr & MSR_VEC)
+                        giveup_altivec(current);
+                memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
+                ext_bkp.vscr = current->thread.vscr;
+                ext_bkp.vrsave = current->thread.vrsave;
+        }
+        ext_bkp.used_vr = current->thread.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+        /* Save VSX state in stack */
+        if (save_vsx && (current->thread.regs->msr & MSR_VSX))
+                __giveup_vsx(current);
+        ext_bkp.used_vsr = current->thread.used_vsr;
+#endif
+
+        /* Remember the MSR with disabled extensions */
+        ext_msr = current->thread.regs->msr;
+
         /* XXX we get called with irq disabled - change that! */
         local_irq_enable();
 
@@ -957,6 +1146,32 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
         local_irq_disable();
 
+        current->thread.regs->msr = ext_msr;
+
+        /* Make sure we save the guest FPU/Altivec/VSX state */
+        kvmppc_giveup_ext(vcpu, MSR_FP);
+        kvmppc_giveup_ext(vcpu, MSR_VEC);
+        kvmppc_giveup_ext(vcpu, MSR_VSX);
+
+        /* Restore FPU state from stack */
+        memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
+        current->thread.fpscr = ext_bkp.fpscr;
+        current->thread.fpexc_mode = ext_bkp.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+        /* Restore Altivec state from stack */
+        if (save_vec && current->thread.used_vr) {
+                memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
+                current->thread.vscr = ext_bkp.vscr;
+                current->thread.vrsave = ext_bkp.vrsave;
+        }
+        current->thread.used_vr = ext_bkp.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+        current->thread.used_vsr = ext_bkp.used_vsr;
+#endif
+
         return ret;
 }
 
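Taken together, the __kvmppc_vcpu_run() changes bracket guest execution: flush and snapshot the host's live FPU/Altivec/VSX state into ext_bkp on the stack, remember the facility-disabled MSR, run the guest, reclaim whatever the guest still owns via kvmppc_giveup_ext(), then restore the host snapshot. A condensed model of that save/run/reclaim/restore shape, FP only, with stand-in structures in place of thread_struct and the vcpu:

    #include <stdio.h>

    struct fp_state { unsigned long long fpr[32]; };

    static struct fp_state thread_fp;  /* models current->thread's FP area */
    static struct fp_state vcpu_fp;    /* models vcpu->arch.fpr            */

    static void enter_guest(void)
    {
            thread_fp.fpr[0] = 0x6775657374;  /* guest clobbers live FP state */
    }

    static void vcpu_run(void)
    {
            struct fp_state ext_bkp = thread_fp; /* snapshot host state on stack */

            enter_guest();                       /* guest may load its own FP    */

            vcpu_fp = thread_fp;                 /* giveup_ext: keep guest state */
            thread_fp = ext_bkp;                 /* put the host's state back    */
    }

    int main(void)
    {
            thread_fp.fpr[0] = 0x686f7374;       /* "host" */
            vcpu_run();
            printf("host fpr0 back: %#llx, guest fpr0 kept: %#llx\n",
                   thread_fp.fpr[0], vcpu_fp.fpr[0]);
            return 0;
    }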