diff options
author | Hollis Blanchard <hollisb@us.ibm.com> | 2008-12-02 16:51:57 -0500 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2008-12-31 09:55:41 -0500 |
commit | 73e75b416ffcfa3a84952d8e389a0eca080f00e1 (patch) | |
tree | 6195be5b0fa56235550679f35ca990093dd081ca /arch/powerpc/kvm/booke.c | |
parent | c5fbdffbda79254047ec83b09c1a61a3655d052a (diff) |
KVM: ppc: Implement in-kernel exit timing statistics
Existing KVM statistics are either just counters (kvm_stat) reported for
KVM generally, or trace-based approaches like kvm_trace.
For KVM on powerpc we had the need to track the timings of the different exit
types. While this could be achieved by parsing data created with a kvm_trace
extension, that adds too much overhead (at least on embedded PowerPC), slowing
down the workloads we wanted to measure.
Therefore this patch adds an in-kernel exit timing statistic to the powerpc kvm
code. This statistic is available per vm&vcpu under the kvm debugfs directory.
As this statistic adds a low — but still nonzero — overhead, it can be enabled
via a .config entry and should be off by default.
Since this patch touched all powerpc kvm_stat code anyway this code is now
merged and simplified together with the exit timing statistic code (still
working with exit timing disabled in .config).
Signed-off-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/booke.c')
-rw-r--r-- | arch/powerpc/kvm/booke.c | 36 |
1 file changed, 22 insertions, 14 deletions
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index eb24383c87d2..0f171248e450 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/cputable.h> | 28 | #include <asm/cputable.h> |
29 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
30 | #include <asm/kvm_ppc.h> | 30 | #include <asm/kvm_ppc.h> |
31 | #include "timing.h" | ||
31 | #include <asm/cacheflush.h> | 32 | #include <asm/cacheflush.h> |
32 | #include <asm/kvm_44x.h> | 33 | #include <asm/kvm_44x.h> |
33 | 34 | ||
@@ -185,6 +186,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
185 | enum emulation_result er; | 186 | enum emulation_result er; |
186 | int r = RESUME_HOST; | 187 | int r = RESUME_HOST; |
187 | 188 | ||
189 | /* update before a new last_exit_type is rewritten */ | ||
190 | kvmppc_update_timing_stats(vcpu); | ||
191 | |||
188 | local_irq_enable(); | 192 | local_irq_enable(); |
189 | 193 | ||
190 | run->exit_reason = KVM_EXIT_UNKNOWN; | 194 | run->exit_reason = KVM_EXIT_UNKNOWN; |
@@ -198,7 +202,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
198 | break; | 202 | break; |
199 | 203 | ||
200 | case BOOKE_INTERRUPT_EXTERNAL: | 204 | case BOOKE_INTERRUPT_EXTERNAL: |
201 | vcpu->stat.ext_intr_exits++; | 205 | account_exit(vcpu, EXT_INTR_EXITS); |
202 | if (need_resched()) | 206 | if (need_resched()) |
203 | cond_resched(); | 207 | cond_resched(); |
204 | r = RESUME_GUEST; | 208 | r = RESUME_GUEST; |
@@ -208,8 +212,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
208 | /* Since we switched IVPR back to the host's value, the host | 212 | /* Since we switched IVPR back to the host's value, the host |
209 | * handled this interrupt the moment we enabled interrupts. | 213 | * handled this interrupt the moment we enabled interrupts. |
210 | * Now we just offer it a chance to reschedule the guest. */ | 214 | * Now we just offer it a chance to reschedule the guest. */ |
211 | 215 | account_exit(vcpu, DEC_EXITS); | |
212 | vcpu->stat.dec_exits++; | ||
213 | if (need_resched()) | 216 | if (need_resched()) |
214 | cond_resched(); | 217 | cond_resched(); |
215 | r = RESUME_GUEST; | 218 | r = RESUME_GUEST; |
@@ -222,20 +225,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
222 | vcpu->arch.esr = vcpu->arch.fault_esr; | 225 | vcpu->arch.esr = vcpu->arch.fault_esr; |
223 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); | 226 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); |
224 | r = RESUME_GUEST; | 227 | r = RESUME_GUEST; |
228 | account_exit(vcpu, USR_PR_INST); | ||
225 | break; | 229 | break; |
226 | } | 230 | } |
227 | 231 | ||
228 | er = kvmppc_emulate_instruction(run, vcpu); | 232 | er = kvmppc_emulate_instruction(run, vcpu); |
229 | switch (er) { | 233 | switch (er) { |
230 | case EMULATE_DONE: | 234 | case EMULATE_DONE: |
235 | /* don't overwrite subtypes, just account kvm_stats */ | ||
236 | account_exit_stat(vcpu, EMULATED_INST_EXITS); | ||
231 | /* Future optimization: only reload non-volatiles if | 237 | /* Future optimization: only reload non-volatiles if |
232 | * they were actually modified by emulation. */ | 238 | * they were actually modified by emulation. */ |
233 | vcpu->stat.emulated_inst_exits++; | ||
234 | r = RESUME_GUEST_NV; | 239 | r = RESUME_GUEST_NV; |
235 | break; | 240 | break; |
236 | case EMULATE_DO_DCR: | 241 | case EMULATE_DO_DCR: |
237 | run->exit_reason = KVM_EXIT_DCR; | 242 | run->exit_reason = KVM_EXIT_DCR; |
238 | vcpu->stat.dcr_exits++; | ||
239 | r = RESUME_HOST; | 243 | r = RESUME_HOST; |
240 | break; | 244 | break; |
241 | case EMULATE_FAIL: | 245 | case EMULATE_FAIL: |
@@ -255,6 +259,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
255 | 259 | ||
256 | case BOOKE_INTERRUPT_FP_UNAVAIL: | 260 | case BOOKE_INTERRUPT_FP_UNAVAIL: |
257 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); | 261 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); |
262 | account_exit(vcpu, FP_UNAVAIL); | ||
258 | r = RESUME_GUEST; | 263 | r = RESUME_GUEST; |
259 | break; | 264 | break; |
260 | 265 | ||
@@ -262,20 +267,20 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
262 | vcpu->arch.dear = vcpu->arch.fault_dear; | 267 | vcpu->arch.dear = vcpu->arch.fault_dear; |
263 | vcpu->arch.esr = vcpu->arch.fault_esr; | 268 | vcpu->arch.esr = vcpu->arch.fault_esr; |
264 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); | 269 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); |
265 | vcpu->stat.dsi_exits++; | 270 | account_exit(vcpu, DSI_EXITS); |
266 | r = RESUME_GUEST; | 271 | r = RESUME_GUEST; |
267 | break; | 272 | break; |
268 | 273 | ||
269 | case BOOKE_INTERRUPT_INST_STORAGE: | 274 | case BOOKE_INTERRUPT_INST_STORAGE: |
270 | vcpu->arch.esr = vcpu->arch.fault_esr; | 275 | vcpu->arch.esr = vcpu->arch.fault_esr; |
271 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); | 276 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); |
272 | vcpu->stat.isi_exits++; | 277 | account_exit(vcpu, ISI_EXITS); |
273 | r = RESUME_GUEST; | 278 | r = RESUME_GUEST; |
274 | break; | 279 | break; |
275 | 280 | ||
276 | case BOOKE_INTERRUPT_SYSCALL: | 281 | case BOOKE_INTERRUPT_SYSCALL: |
277 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); | 282 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); |
278 | vcpu->stat.syscall_exits++; | 283 | account_exit(vcpu, SYSCALL_EXITS); |
279 | r = RESUME_GUEST; | 284 | r = RESUME_GUEST; |
280 | break; | 285 | break; |
281 | 286 | ||
@@ -294,7 +299,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
294 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); | 299 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); |
295 | vcpu->arch.dear = vcpu->arch.fault_dear; | 300 | vcpu->arch.dear = vcpu->arch.fault_dear; |
296 | vcpu->arch.esr = vcpu->arch.fault_esr; | 301 | vcpu->arch.esr = vcpu->arch.fault_esr; |
297 | vcpu->stat.dtlb_real_miss_exits++; | 302 | account_exit(vcpu, DTLB_REAL_MISS_EXITS); |
298 | r = RESUME_GUEST; | 303 | r = RESUME_GUEST; |
299 | break; | 304 | break; |
300 | } | 305 | } |
@@ -312,13 +317,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
312 | * invoking the guest. */ | 317 | * invoking the guest. */ |
313 | kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid, | 318 | kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid, |
314 | gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index); | 319 | gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index); |
315 | vcpu->stat.dtlb_virt_miss_exits++; | 320 | account_exit(vcpu, DTLB_VIRT_MISS_EXITS); |
316 | r = RESUME_GUEST; | 321 | r = RESUME_GUEST; |
317 | } else { | 322 | } else { |
318 | /* Guest has mapped and accessed a page which is not | 323 | /* Guest has mapped and accessed a page which is not |
319 | * actually RAM. */ | 324 | * actually RAM. */ |
320 | r = kvmppc_emulate_mmio(run, vcpu); | 325 | r = kvmppc_emulate_mmio(run, vcpu); |
321 | vcpu->stat.mmio_exits++; | 326 | account_exit(vcpu, MMIO_EXITS); |
322 | } | 327 | } |
323 | 328 | ||
324 | break; | 329 | break; |
@@ -340,11 +345,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
340 | if (gtlb_index < 0) { | 345 | if (gtlb_index < 0) { |
341 | /* The guest didn't have a mapping for it. */ | 346 | /* The guest didn't have a mapping for it. */ |
342 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); | 347 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); |
343 | vcpu->stat.itlb_real_miss_exits++; | 348 | account_exit(vcpu, ITLB_REAL_MISS_EXITS); |
344 | break; | 349 | break; |
345 | } | 350 | } |
346 | 351 | ||
347 | vcpu->stat.itlb_virt_miss_exits++; | 352 | account_exit(vcpu, ITLB_VIRT_MISS_EXITS); |
348 | 353 | ||
349 | gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; | 354 | gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; |
350 | gpaddr = tlb_xlate(gtlbe, eaddr); | 355 | gpaddr = tlb_xlate(gtlbe, eaddr); |
@@ -378,6 +383,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
378 | mtspr(SPRN_DBSR, dbsr); | 383 | mtspr(SPRN_DBSR, dbsr); |
379 | 384 | ||
380 | run->exit_reason = KVM_EXIT_DEBUG; | 385 | run->exit_reason = KVM_EXIT_DEBUG; |
386 | account_exit(vcpu, DEBUG_EXITS); | ||
381 | r = RESUME_HOST; | 387 | r = RESUME_HOST; |
382 | break; | 388 | break; |
383 | } | 389 | } |
@@ -398,7 +404,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
398 | if (signal_pending(current)) { | 404 | if (signal_pending(current)) { |
399 | run->exit_reason = KVM_EXIT_INTR; | 405 | run->exit_reason = KVM_EXIT_INTR; |
400 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); | 406 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); |
401 | vcpu->stat.signal_exits++; | 407 | account_exit(vcpu, SIGNAL_EXITS); |
402 | } | 408 | } |
403 | } | 409 | } |
404 | 410 | ||
@@ -418,6 +424,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
418 | * before it's programmed its own IVPR. */ | 424 | * before it's programmed its own IVPR. */ |
419 | vcpu->arch.ivpr = 0x55550000; | 425 | vcpu->arch.ivpr = 0x55550000; |
420 | 426 | ||
427 | kvmppc_init_timing_stats(vcpu); | ||
428 | |||
421 | return kvmppc_core_vcpu_setup(vcpu); | 429 | return kvmppc_core_vcpu_setup(vcpu); |
422 | } | 430 | } |
423 | 431 | ||