path: root/arch/powerpc/kvm/booke.c
Diffstat (limited to 'arch/powerpc/kvm/booke.c')
-rw-r--r--  arch/powerpc/kvm/booke.c | 87
1 file changed, 61 insertions(+), 26 deletions(-)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 06f5a9ecc42c..4d686cc6b260 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -69,10 +69,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 
         for (i = 0; i < 32; i += 4) {
                 printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
-                       vcpu->arch.gpr[i],
-                       vcpu->arch.gpr[i+1],
-                       vcpu->arch.gpr[i+2],
-                       vcpu->arch.gpr[i+3]);
+                       kvmppc_get_gpr(vcpu, i),
+                       kvmppc_get_gpr(vcpu, i+1),
+                       kvmppc_get_gpr(vcpu, i+2),
+                       kvmppc_get_gpr(vcpu, i+3));
         }
 }
 
@@ -82,8 +82,32 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
         set_bit(priority, &vcpu->arch.pending_exceptions);
 }
 
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
+                                        ulong dear_flags, ulong esr_flags)
 {
+        vcpu->arch.queued_dear = dear_flags;
+        vcpu->arch.queued_esr = esr_flags;
+        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
+}
+
+static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
+                                           ulong dear_flags, ulong esr_flags)
+{
+        vcpu->arch.queued_dear = dear_flags;
+        vcpu->arch.queued_esr = esr_flags;
+        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
+}
+
+static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
+                                           ulong esr_flags)
+{
+        vcpu->arch.queued_esr = esr_flags;
+        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
+}
+
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
+{
+        vcpu->arch.queued_esr = esr_flags;
         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
 }
 
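Note on the design: the new helpers deliberately do not write the guest's DEAR/ESR at queue time. They only latch the values in vcpu->arch.queued_dear / vcpu->arch.queued_esr and mark the exception pending; the architected registers are updated in kvmppc_booke_irqprio_deliver() only when the exception is actually injected (see the hunks below). This appears to be the point of the change: writing DEAR/ESR immediately could clobber values the guest has not yet consumed if delivery is deferred. A minimal sketch of the staging pattern, using only fields visible in this diff (the helper name is illustrative, not part of the patch):

    /* Illustrative only: stage fault state now, expose it to the guest
     * only when the corresponding interrupt is delivered. */
    static void booke_stage_fault(struct kvm_vcpu *vcpu, ulong dear,
                                  ulong esr, unsigned int priority)
    {
            vcpu->arch.queued_dear = dear;   /* not yet guest-visible */
            vcpu->arch.queued_esr = esr;
            set_bit(priority, &vcpu->arch.pending_exceptions);
    }
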
@@ -97,6 +121,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
         return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
 }
 
+void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
+{
+        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
+}
+
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                 struct kvm_interrupt *irq)
 {
@@ -109,14 +138,19 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 {
         int allowed = 0;
         ulong msr_mask;
+        bool update_esr = false, update_dear = false;
 
         switch (priority) {
-        case BOOKE_IRQPRIO_PROGRAM:
         case BOOKE_IRQPRIO_DTLB_MISS:
-        case BOOKE_IRQPRIO_ITLB_MISS:
-        case BOOKE_IRQPRIO_SYSCALL:
         case BOOKE_IRQPRIO_DATA_STORAGE:
+                update_dear = true;
+                /* fall through */
         case BOOKE_IRQPRIO_INST_STORAGE:
+        case BOOKE_IRQPRIO_PROGRAM:
+                update_esr = true;
+                /* fall through */
+        case BOOKE_IRQPRIO_ITLB_MISS:
+        case BOOKE_IRQPRIO_SYSCALL:
         case BOOKE_IRQPRIO_FP_UNAVAIL:
         case BOOKE_IRQPRIO_SPE_UNAVAIL:
         case BOOKE_IRQPRIO_SPE_FP_DATA:
@@ -151,6 +185,10 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
         vcpu->arch.srr0 = vcpu->arch.pc;
         vcpu->arch.srr1 = vcpu->arch.msr;
         vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
+        if (update_esr == true)
+                vcpu->arch.esr = vcpu->arch.queued_esr;
+        if (update_dear == true)
+                vcpu->arch.dear = vcpu->arch.queued_dear;
         kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);
 
         clear_bit(priority, &vcpu->arch.pending_exceptions);
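With the reordered case labels and fall-throughs above, the delivery path now updates DEAR only for DTLB_MISS and DATA_STORAGE, and ESR for those two plus INST_STORAGE and PROGRAM; the remaining priorities leave both registers untouched. Written out as predicates for clarity (a sketch, not part of the patch):

    /* Sketch: which priorities update which registers after this change. */
    static inline bool prio_updates_dear(int priority)
    {
            return priority == BOOKE_IRQPRIO_DTLB_MISS ||
                   priority == BOOKE_IRQPRIO_DATA_STORAGE;
    }

    static inline bool prio_updates_esr(int priority)
    {
            return prio_updates_dear(priority) ||
                   priority == BOOKE_IRQPRIO_INST_STORAGE ||
                   priority == BOOKE_IRQPRIO_PROGRAM;
    }
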
@@ -223,8 +261,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
         if (vcpu->arch.msr & MSR_PR) {
                 /* Program traps generated by user-level software must be handled
                  * by the guest kernel. */
-                vcpu->arch.esr = vcpu->arch.fault_esr;
-                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
+                kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                 r = RESUME_GUEST;
                 kvmppc_account_exit(vcpu, USR_PR_INST);
                 break;
@@ -280,16 +317,14 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 break;
 
         case BOOKE_INTERRUPT_DATA_STORAGE:
-                vcpu->arch.dear = vcpu->arch.fault_dear;
-                vcpu->arch.esr = vcpu->arch.fault_esr;
-                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
+                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
+                                               vcpu->arch.fault_esr);
                 kvmppc_account_exit(vcpu, DSI_EXITS);
                 r = RESUME_GUEST;
                 break;
 
         case BOOKE_INTERRUPT_INST_STORAGE:
-                vcpu->arch.esr = vcpu->arch.fault_esr;
-                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
+                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                 kvmppc_account_exit(vcpu, ISI_EXITS);
                 r = RESUME_GUEST;
                 break;
@@ -310,9 +345,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                 if (gtlb_index < 0) {
                         /* The guest didn't have a mapping for it. */
-                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
-                        vcpu->arch.dear = vcpu->arch.fault_dear;
-                        vcpu->arch.esr = vcpu->arch.fault_esr;
+                        kvmppc_core_queue_dtlb_miss(vcpu,
+                                                    vcpu->arch.fault_dear,
+                                                    vcpu->arch.fault_esr);
                         kvmppc_mmu_dtlb_miss(vcpu);
                         kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                         r = RESUME_GUEST;
@@ -426,7 +461,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
         vcpu->arch.pc = 0;
         vcpu->arch.msr = 0;
-        vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
+        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
         vcpu->arch.shadow_pid = 1;
 
@@ -444,10 +479,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         int i;
 
         regs->pc = vcpu->arch.pc;
-        regs->cr = vcpu->arch.cr;
+        regs->cr = kvmppc_get_cr(vcpu);
         regs->ctr = vcpu->arch.ctr;
         regs->lr = vcpu->arch.lr;
-        regs->xer = vcpu->arch.xer;
+        regs->xer = kvmppc_get_xer(vcpu);
         regs->msr = vcpu->arch.msr;
         regs->srr0 = vcpu->arch.srr0;
         regs->srr1 = vcpu->arch.srr1;
@@ -461,7 +496,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         regs->sprg7 = vcpu->arch.sprg6;
 
         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
-                regs->gpr[i] = vcpu->arch.gpr[i];
+                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
         return 0;
 }
@@ -471,10 +506,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         int i;
 
         vcpu->arch.pc = regs->pc;
-        vcpu->arch.cr = regs->cr;
+        kvmppc_set_cr(vcpu, regs->cr);
         vcpu->arch.ctr = regs->ctr;
         vcpu->arch.lr = regs->lr;
-        vcpu->arch.xer = regs->xer;
+        kvmppc_set_xer(vcpu, regs->xer);
         kvmppc_set_msr(vcpu, regs->msr);
         vcpu->arch.srr0 = regs->srr0;
         vcpu->arch.srr1 = regs->srr1;
@@ -486,8 +521,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         vcpu->arch.sprg6 = regs->sprg5;
         vcpu->arch.sprg7 = regs->sprg6;
 
-        for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
-                vcpu->arch.gpr[i] = regs->gpr[i];
+        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
         return 0;
 }
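
The accessors used throughout this diff (kvmppc_get_gpr()/kvmppc_set_gpr(), kvmppc_get_cr()/kvmppc_set_cr(), kvmppc_get_xer()/kvmppc_set_xer()) are defined outside this file, in arch/powerpc/include/asm/kvm_ppc.h. At this stage they are assumed to be thin inline wrappers around the same vcpu->arch fields the old code touched directly; the indirection simply gives later patches a single place to change where guest registers are stored, without touching every call site. A rough sketch of the GPR pair (the CR/XER accessors follow the same pattern; exact types per the header):

    /* Sketch of the assumed accessors; see asm/kvm_ppc.h for the real ones. */
    static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
    {
            vcpu->arch.gpr[num] = val;
    }

    static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
    {
            return vcpu->arch.gpr[num];
    }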