Diffstat (limited to 'arch/powerpc/kvm/booke.c'):

 -rw-r--r--  arch/powerpc/kvm/booke.c | 276
 1 file changed, 238 insertions(+), 38 deletions(-)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8d4e35f5372c..8462b3a1c1c7 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -62,9 +62,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 {
         int i;
 
-        printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
+        printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
         printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
-        printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);
+        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
+               vcpu->arch.shared->srr1);
 
         printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
 
@@ -130,13 +131,19 @@ void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                 struct kvm_interrupt *irq)
 {
-        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
+        unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;
+
+        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
+                prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;
+
+        kvmppc_booke_queue_irqprio(vcpu, prio);
 }
 
 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                   struct kvm_interrupt *irq)
 {
         clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
+        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
 }
 
 /* Deliver the interrupt of the corresponding priority, if possible. */
@@ -146,6 +153,26 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
         int allowed = 0;
         ulong uninitialized_var(msr_mask);
         bool update_esr = false, update_dear = false;
+        ulong crit_raw = vcpu->arch.shared->critical;
+        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
+        bool crit;
+        bool keep_irq = false;
+
+        /* Truncate crit indicators in 32 bit mode */
+        if (!(vcpu->arch.shared->msr & MSR_SF)) {
+                crit_raw &= 0xffffffff;
+                crit_r1 &= 0xffffffff;
+        }
+
+        /* Critical section when crit == r1 */
+        crit = (crit_raw == crit_r1);
+        /* ... and we're in supervisor mode */
+        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+
+        if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
+                priority = BOOKE_IRQPRIO_EXTERNAL;
+                keep_irq = true;
+        }
 
         switch (priority) {
         case BOOKE_IRQPRIO_DTLB_MISS:
@@ -169,36 +196,38 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                 break;
         case BOOKE_IRQPRIO_CRITICAL:
         case BOOKE_IRQPRIO_WATCHDOG:
-                allowed = vcpu->arch.msr & MSR_CE;
+                allowed = vcpu->arch.shared->msr & MSR_CE;
                 msr_mask = MSR_ME;
                 break;
         case BOOKE_IRQPRIO_MACHINE_CHECK:
-                allowed = vcpu->arch.msr & MSR_ME;
+                allowed = vcpu->arch.shared->msr & MSR_ME;
                 msr_mask = 0;
                 break;
         case BOOKE_IRQPRIO_EXTERNAL:
         case BOOKE_IRQPRIO_DECREMENTER:
         case BOOKE_IRQPRIO_FIT:
-                allowed = vcpu->arch.msr & MSR_EE;
+                allowed = vcpu->arch.shared->msr & MSR_EE;
+                allowed = allowed && !crit;
                 msr_mask = MSR_CE|MSR_ME|MSR_DE;
                 break;
         case BOOKE_IRQPRIO_DEBUG:
-                allowed = vcpu->arch.msr & MSR_DE;
+                allowed = vcpu->arch.shared->msr & MSR_DE;
                 msr_mask = MSR_ME;
                 break;
         }
 
         if (allowed) {
-                vcpu->arch.srr0 = vcpu->arch.pc;
-                vcpu->arch.srr1 = vcpu->arch.msr;
+                vcpu->arch.shared->srr0 = vcpu->arch.pc;
+                vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
                 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                 if (update_esr == true)
                         vcpu->arch.esr = vcpu->arch.queued_esr;
                 if (update_dear == true)
-                        vcpu->arch.dear = vcpu->arch.queued_dear;
-                kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);
+                        vcpu->arch.shared->dar = vcpu->arch.queued_dear;
+                kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
 
-                clear_bit(priority, &vcpu->arch.pending_exceptions);
+                if (!keep_irq)
+                        clear_bit(priority, &vcpu->arch.pending_exceptions);
         }
 
         return allowed;
@@ -208,6 +237,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 {
         unsigned long *pending = &vcpu->arch.pending_exceptions;
+        unsigned long old_pending = vcpu->arch.pending_exceptions;
         unsigned int priority;
 
         priority = __ffs(*pending);
@@ -219,6 +249,12 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
                                          BITS_PER_BYTE * sizeof(*pending),
                                          priority + 1);
         }
+
+        /* Tell the guest about our interrupt status */
+        if (*pending)
+                vcpu->arch.shared->int_pending = 1;
+        else if (old_pending)
+                vcpu->arch.shared->int_pending = 0;
 }
 
 /**
@@ -265,7 +301,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 break;
 
         case BOOKE_INTERRUPT_PROGRAM:
-                if (vcpu->arch.msr & MSR_PR) {
+                if (vcpu->arch.shared->msr & MSR_PR) {
                         /* Program traps generated by user-level software must be handled
                          * by the guest kernel. */
                         kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
@@ -337,7 +373,15 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 break;
 
         case BOOKE_INTERRUPT_SYSCALL:
-                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
+                if (!(vcpu->arch.shared->msr & MSR_PR) &&
+                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
+                        /* KVM PV hypercalls */
+                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+                        r = RESUME_GUEST;
+                } else {
+                        /* Guest syscalls */
+                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
+                }
                 kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                 r = RESUME_GUEST;
                 break;
@@ -466,15 +510,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
+        int i;
+
         vcpu->arch.pc = 0;
-        vcpu->arch.msr = 0;
+        vcpu->arch.shared->msr = 0;
         kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
         vcpu->arch.shadow_pid = 1;
 
-        /* Eye-catching number so we know if the guest takes an interrupt
-         * before it's programmed its own IVPR. */
+        /* Eye-catching numbers so we know if the guest takes an interrupt
+         * before it's programmed its own IVPR/IVORs. */
         vcpu->arch.ivpr = 0x55550000;
+        for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
+                vcpu->arch.ivor[i] = 0x7700 | i * 4;
 
         kvmppc_init_timing_stats(vcpu);
 
@@ -490,17 +538,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         regs->ctr = vcpu->arch.ctr;
         regs->lr = vcpu->arch.lr;
         regs->xer = kvmppc_get_xer(vcpu);
-        regs->msr = vcpu->arch.msr;
-        regs->srr0 = vcpu->arch.srr0;
-        regs->srr1 = vcpu->arch.srr1;
+        regs->msr = vcpu->arch.shared->msr;
+        regs->srr0 = vcpu->arch.shared->srr0;
+        regs->srr1 = vcpu->arch.shared->srr1;
         regs->pid = vcpu->arch.pid;
-        regs->sprg0 = vcpu->arch.sprg0;
-        regs->sprg1 = vcpu->arch.sprg1;
-        regs->sprg2 = vcpu->arch.sprg2;
-        regs->sprg3 = vcpu->arch.sprg3;
-        regs->sprg5 = vcpu->arch.sprg4;
-        regs->sprg6 = vcpu->arch.sprg5;
-        regs->sprg7 = vcpu->arch.sprg6;
+        regs->sprg0 = vcpu->arch.shared->sprg0;
+        regs->sprg1 = vcpu->arch.shared->sprg1;
+        regs->sprg2 = vcpu->arch.shared->sprg2;
+        regs->sprg3 = vcpu->arch.shared->sprg3;
+        regs->sprg4 = vcpu->arch.sprg4;
+        regs->sprg5 = vcpu->arch.sprg5;
+        regs->sprg6 = vcpu->arch.sprg6;
+        regs->sprg7 = vcpu->arch.sprg7;
 
         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -518,15 +567,17 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         vcpu->arch.lr = regs->lr;
         kvmppc_set_xer(vcpu, regs->xer);
         kvmppc_set_msr(vcpu, regs->msr);
-        vcpu->arch.srr0 = regs->srr0;
-        vcpu->arch.srr1 = regs->srr1;
-        vcpu->arch.sprg0 = regs->sprg0;
-        vcpu->arch.sprg1 = regs->sprg1;
-        vcpu->arch.sprg2 = regs->sprg2;
-        vcpu->arch.sprg3 = regs->sprg3;
-        vcpu->arch.sprg5 = regs->sprg4;
-        vcpu->arch.sprg6 = regs->sprg5;
-        vcpu->arch.sprg7 = regs->sprg6;
+        vcpu->arch.shared->srr0 = regs->srr0;
+        vcpu->arch.shared->srr1 = regs->srr1;
+        kvmppc_set_pid(vcpu, regs->pid);
+        vcpu->arch.shared->sprg0 = regs->sprg0;
+        vcpu->arch.shared->sprg1 = regs->sprg1;
+        vcpu->arch.shared->sprg2 = regs->sprg2;
+        vcpu->arch.shared->sprg3 = regs->sprg3;
+        vcpu->arch.sprg4 = regs->sprg4;
+        vcpu->arch.sprg5 = regs->sprg5;
+        vcpu->arch.sprg6 = regs->sprg6;
+        vcpu->arch.sprg7 = regs->sprg7;
 
         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -534,16 +585,165 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         return 0;
 }
 
+static void get_sregs_base(struct kvm_vcpu *vcpu,
+                           struct kvm_sregs *sregs)
+{
+        u64 tb = get_tb();
+
+        sregs->u.e.features |= KVM_SREGS_E_BASE;
+
+        sregs->u.e.csrr0 = vcpu->arch.csrr0;
+        sregs->u.e.csrr1 = vcpu->arch.csrr1;
+        sregs->u.e.mcsr = vcpu->arch.mcsr;
+        sregs->u.e.esr = vcpu->arch.esr;
+        sregs->u.e.dear = vcpu->arch.shared->dar;
+        sregs->u.e.tsr = vcpu->arch.tsr;
+        sregs->u.e.tcr = vcpu->arch.tcr;
+        sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
+        sregs->u.e.tb = tb;
+        sregs->u.e.vrsave = vcpu->arch.vrsave;
+}
+
+static int set_sregs_base(struct kvm_vcpu *vcpu,
+                          struct kvm_sregs *sregs)
+{
+        if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
+                return 0;
+
+        vcpu->arch.csrr0 = sregs->u.e.csrr0;
+        vcpu->arch.csrr1 = sregs->u.e.csrr1;
+        vcpu->arch.mcsr = sregs->u.e.mcsr;
+        vcpu->arch.esr = sregs->u.e.esr;
+        vcpu->arch.shared->dar = sregs->u.e.dear;
+        vcpu->arch.vrsave = sregs->u.e.vrsave;
+        vcpu->arch.tcr = sregs->u.e.tcr;
+
+        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC)
+                vcpu->arch.dec = sregs->u.e.dec;
+
+        kvmppc_emulate_dec(vcpu);
+
+        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
+                /*
+                 * FIXME: existing KVM timer handling is incomplete.
+                 * TSR cannot be read by the guest, and its value in
+                 * vcpu->arch is always zero.  For now, just handle
+                 * the case where the caller is trying to inject a
+                 * decrementer interrupt.
+                 */
+
+                if ((sregs->u.e.tsr & TSR_DIS) &&
+                    (vcpu->arch.tcr & TCR_DIE))
+                        kvmppc_core_queue_dec(vcpu);
+        }
+
+        return 0;
+}
+
+static void get_sregs_arch206(struct kvm_vcpu *vcpu,
+                              struct kvm_sregs *sregs)
+{
+        sregs->u.e.features |= KVM_SREGS_E_ARCH206;
+
+        sregs->u.e.pir = 0;
+        sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
+        sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
+        sregs->u.e.decar = vcpu->arch.decar;
+        sregs->u.e.ivpr = vcpu->arch.ivpr;
+}
+
+static int set_sregs_arch206(struct kvm_vcpu *vcpu,
+                             struct kvm_sregs *sregs)
+{
+        if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
+                return 0;
+
+        if (sregs->u.e.pir != 0)
+                return -EINVAL;
+
+        vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
+        vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
+        vcpu->arch.decar = sregs->u.e.decar;
+        vcpu->arch.ivpr = sregs->u.e.ivpr;
+
+        return 0;
+}
+
+void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+        sregs->u.e.features |= KVM_SREGS_E_IVOR;
+
+        sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
+        sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
+        sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
+        sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
+        sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
+        sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
+        sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
+        sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
+        sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
+        sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
+        sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
+        sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
+        sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
+        sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
+        sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
+        sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
+}
+
+int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
+                return 0;
+
+        vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
+
+        return 0;
+}
+
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                   struct kvm_sregs *sregs)
 {
-        return -ENOTSUPP;
+        sregs->pvr = vcpu->arch.pvr;
+
+        get_sregs_base(vcpu, sregs);
+        get_sregs_arch206(vcpu, sregs);
+        kvmppc_core_get_sregs(vcpu, sregs);
+        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                   struct kvm_sregs *sregs)
 {
-        return -ENOTSUPP;
+        int ret;
+
+        if (vcpu->arch.pvr != sregs->pvr)
+                return -EINVAL;
+
+        ret = set_sregs_base(vcpu, sregs);
+        if (ret < 0)
+                return ret;
+
+        ret = set_sregs_arch206(vcpu, sregs);
+        if (ret < 0)
+                return ret;
+
+        return kvmppc_core_set_sregs(vcpu, sregs);
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)