Diffstat (limited to 'arch/powerpc/kvm/booke.c')
-rw-r--r--  arch/powerpc/kvm/booke.c  32
1 file changed, 9 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index dec3f50a494f..b285e3d32466 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -38,11 +38,9 @@ unsigned long kvmppc_booke_handlers;
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ "exits",   VCPU_STAT(sum_exits) },
 	{ "mmio",    VCPU_STAT(mmio_exits) },
 	{ "dcr",     VCPU_STAT(dcr_exits) },
 	{ "sig",     VCPU_STAT(signal_exits) },
-	{ "light",   VCPU_STAT(light_exits) },
 	{ "itlb_r",  VCPU_STAT(itlb_real_miss_exits) },
 	{ "itlb_v",  VCPU_STAT(itlb_virt_miss_exits) },
 	{ "dtlb_r",  VCPU_STAT(dtlb_real_miss_exits) },
@@ -263,6 +261,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 
 	case BOOKE_INTERRUPT_EXTERNAL:
+		vcpu->stat.ext_intr_exits++;
+		if (need_resched())
+			cond_resched();
+		r = RESUME_GUEST;
+		break;
+
 	case BOOKE_INTERRUPT_DECREMENTER:
 		/* Since we switched IVPR back to the host's value, the host
 		 * handled this interrupt the moment we enabled interrupts.
@@ -272,12 +276,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		 * we do reschedule the host will fault over it. Perhaps we
 		 * should politely restore the host's entries to minimize
 		 * misses before ceding control. */
+		vcpu->stat.dec_exits++;
 		if (need_resched())
 			cond_resched();
-		if (exit_nr == BOOKE_INTERRUPT_DECREMENTER)
-			vcpu->stat.dec_exits++;
-		else
-			vcpu->stat.ext_intr_exits++;
 		r = RESUME_GUEST;
 		break;
 
@@ -301,6 +302,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 		case EMULATE_DO_DCR:
 			run->exit_reason = KVM_EXIT_DCR;
+			vcpu->stat.dcr_exits++;
 			r = RESUME_HOST;
 			break;
 		case EMULATE_FAIL:
@@ -379,6 +381,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			/* Guest has mapped and accessed a page which is not
 			 * actually RAM. */
 			r = kvmppc_emulate_mmio(run, vcpu);
+			vcpu->stat.mmio_exits++;
 		}
 
 		break;
@@ -445,8 +448,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	kvmppc_core_deliver_interrupts(vcpu);
 
-	/* Do some exit accounting. */
-	vcpu->stat.sum_exits++;
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
 		 * we aren't already exiting to userspace for some other
@@ -454,22 +455,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		if (signal_pending(current)) {
 			run->exit_reason = KVM_EXIT_INTR;
 			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-
 			vcpu->stat.signal_exits++;
-		} else {
-			vcpu->stat.light_exits++;
-		}
-	} else {
-		switch (run->exit_reason) {
-		case KVM_EXIT_MMIO:
-			vcpu->stat.mmio_exits++;
-			break;
-		case KVM_EXIT_DCR:
-			vcpu->stat.dcr_exits++;
-			break;
-		case KVM_EXIT_INTR:
-			vcpu->stat.signal_exits++;
-			break;
-		}
 		}
 	}
 
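The pattern this patch moves to is per-exit-type accounting at the point where each exit is handled, rather than re-deriving mmio/dcr/signal counts from run->exit_reason in a common tail; the aggregate "exits"/"light" counters (sum_exits, light_exits) are dropped along with their debugfs entries. The sketch below is a stripped-down illustration of that pattern only: the enum, struct vcpu_stat layout, handle_exit() and RESUME_* values are simplified stand-ins, not the real KVM interfaces.

/* Illustrative sketch of per-exit-site counters (not kernel code). */
#include <stdio.h>

enum exit_type { EXIT_EXTERNAL, EXIT_DECREMENTER, EXIT_DCR, EXIT_MMIO };

struct vcpu_stat {
	unsigned long ext_intr_exits;
	unsigned long dec_exits;
	unsigned long dcr_exits;
	unsigned long mmio_exits;
};

#define RESUME_GUEST 0
#define RESUME_HOST  1

/* Each case bumps its own counter where the exit is handled; no
 * common tail switches on exit_reason afterwards, and there is no
 * sum_exits/light_exits bookkeeping. */
static int handle_exit(struct vcpu_stat *stat, enum exit_type exit_nr)
{
	int r = RESUME_GUEST;

	switch (exit_nr) {
	case EXIT_EXTERNAL:
		stat->ext_intr_exits++;
		break;
	case EXIT_DECREMENTER:
		stat->dec_exits++;
		break;
	case EXIT_DCR:
		stat->dcr_exits++;
		r = RESUME_HOST;	/* userspace emulates the DCR access */
		break;
	case EXIT_MMIO:
		stat->mmio_exits++;
		r = RESUME_HOST;	/* userspace emulates the MMIO access */
		break;
	}
	return r;
}

int main(void)
{
	struct vcpu_stat stat = { 0 };

	handle_exit(&stat, EXIT_EXTERNAL);
	handle_exit(&stat, EXIT_DECREMENTER);
	handle_exit(&stat, EXIT_MMIO);

	printf("ext=%lu dec=%lu mmio=%lu\n",
	       stat.ext_intr_exits, stat.dec_exits, stat.mmio_exits);
	return 0;
}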