diff options
author | Hollis Blanchard <hollisb@us.ibm.com> | 2008-12-02 16:51:57 -0500 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2008-12-31 09:55:41 -0500 |
commit | 73e75b416ffcfa3a84952d8e389a0eca080f00e1 (patch) | |
tree | 6195be5b0fa56235550679f35ca990093dd081ca /arch/powerpc/kvm | |
parent | c5fbdffbda79254047ec83b09c1a61a3655d052a (diff) |
KVM: ppc: Implement in-kernel exit timing statistics
Existing KVM statistics are either just counters (kvm_stat) reported for
KVM generally or trace-based approaches like kvm_trace.
For KVM on powerpc we had the need to track the timings of the different exit
types. While this could be achieved by parsing data created with a kvm_trace
extension, this adds too much overhead (at least on embedded PowerPC), slowing
down the workloads we wanted to measure.
Therefore this patch adds an in-kernel exit timing statistic to the powerpc kvm
code. This statistic is available per vm&vcpu under the kvm debugfs directory.
As the overhead of this statistic is small but still nonzero, it can be enabled
via a .config entry and should be off by default.
Since this patch touched all powerpc kvm_stat code anyway, this code is now
merged and simplified together with the exit timing statistic code (still
working with exit timing disabled in .config).
Signed-off-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r-- | arch/powerpc/kvm/44x_emulate.c | 11 | ||||
-rw-r--r-- | arch/powerpc/kvm/44x_tlb.c | 3 | ||||
-rw-r--r-- | arch/powerpc/kvm/Kconfig | 11 | ||||
-rw-r--r-- | arch/powerpc/kvm/Makefile | 1 | ||||
-rw-r--r-- | arch/powerpc/kvm/booke.c | 36 | ||||
-rw-r--r-- | arch/powerpc/kvm/booke.h | 5 | ||||
-rw-r--r-- | arch/powerpc/kvm/booke_interrupts.S | 24 | ||||
-rw-r--r-- | arch/powerpc/kvm/emulate.c | 4 | ||||
-rw-r--r-- | arch/powerpc/kvm/powerpc.c | 8 | ||||
-rw-r--r-- | arch/powerpc/kvm/timing.c | 262 | ||||
-rw-r--r-- | arch/powerpc/kvm/timing.h | 102 |
11 files changed, 449 insertions, 18 deletions
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c index 9ef79c78ede9..69f88d53c428 100644 --- a/arch/powerpc/kvm/44x_emulate.c +++ b/arch/powerpc/kvm/44x_emulate.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/dcr-regs.h> | 22 | #include <asm/dcr-regs.h> |
23 | #include <asm/disassemble.h> | 23 | #include <asm/disassemble.h> |
24 | #include <asm/kvm_44x.h> | 24 | #include <asm/kvm_44x.h> |
25 | #include "timing.h" | ||
25 | 26 | ||
26 | #include "booke.h" | 27 | #include "booke.h" |
27 | #include "44x_tlb.h" | 28 | #include "44x_tlb.h" |
@@ -58,11 +59,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
58 | int ws; | 59 | int ws; |
59 | 60 | ||
60 | switch (get_op(inst)) { | 61 | switch (get_op(inst)) { |
61 | |||
62 | case OP_RFI: | 62 | case OP_RFI: |
63 | switch (get_xop(inst)) { | 63 | switch (get_xop(inst)) { |
64 | case XOP_RFI: | 64 | case XOP_RFI: |
65 | kvmppc_emul_rfi(vcpu); | 65 | kvmppc_emul_rfi(vcpu); |
66 | kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS); | ||
66 | *advance = 0; | 67 | *advance = 0; |
67 | break; | 68 | break; |
68 | 69 | ||
@@ -78,10 +79,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
78 | case XOP_MFMSR: | 79 | case XOP_MFMSR: |
79 | rt = get_rt(inst); | 80 | rt = get_rt(inst); |
80 | vcpu->arch.gpr[rt] = vcpu->arch.msr; | 81 | vcpu->arch.gpr[rt] = vcpu->arch.msr; |
82 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); | ||
81 | break; | 83 | break; |
82 | 84 | ||
83 | case XOP_MTMSR: | 85 | case XOP_MTMSR: |
84 | rs = get_rs(inst); | 86 | rs = get_rs(inst); |
87 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); | ||
85 | kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); | 88 | kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); |
86 | break; | 89 | break; |
87 | 90 | ||
@@ -89,11 +92,13 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
89 | rs = get_rs(inst); | 92 | rs = get_rs(inst); |
90 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | 93 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) |
91 | | (vcpu->arch.gpr[rs] & MSR_EE); | 94 | | (vcpu->arch.gpr[rs] & MSR_EE); |
95 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | ||
92 | break; | 96 | break; |
93 | 97 | ||
94 | case XOP_WRTEEI: | 98 | case XOP_WRTEEI: |
95 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | 99 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) |
96 | | (inst & MSR_EE); | 100 | | (inst & MSR_EE); |
101 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | ||
97 | break; | 102 | break; |
98 | 103 | ||
99 | case XOP_MFDCR: | 104 | case XOP_MFDCR: |
@@ -127,6 +132,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
127 | run->dcr.is_write = 0; | 132 | run->dcr.is_write = 0; |
128 | vcpu->arch.io_gpr = rt; | 133 | vcpu->arch.io_gpr = rt; |
129 | vcpu->arch.dcr_needed = 1; | 134 | vcpu->arch.dcr_needed = 1; |
135 | account_exit(vcpu, DCR_EXITS); | ||
130 | emulated = EMULATE_DO_DCR; | 136 | emulated = EMULATE_DO_DCR; |
131 | } | 137 | } |
132 | 138 | ||
@@ -146,6 +152,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
146 | run->dcr.data = vcpu->arch.gpr[rs]; | 152 | run->dcr.data = vcpu->arch.gpr[rs]; |
147 | run->dcr.is_write = 1; | 153 | run->dcr.is_write = 1; |
148 | vcpu->arch.dcr_needed = 1; | 154 | vcpu->arch.dcr_needed = 1; |
155 | account_exit(vcpu, DCR_EXITS); | ||
149 | emulated = EMULATE_DO_DCR; | 156 | emulated = EMULATE_DO_DCR; |
150 | } | 157 | } |
151 | 158 | ||
@@ -276,6 +283,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
276 | return EMULATE_FAIL; | 283 | return EMULATE_FAIL; |
277 | } | 284 | } |
278 | 285 | ||
286 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); | ||
279 | return EMULATE_DONE; | 287 | return EMULATE_DONE; |
280 | } | 288 | } |
281 | 289 | ||
@@ -357,6 +365,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
357 | return EMULATE_FAIL; | 365 | return EMULATE_FAIL; |
358 | } | 366 | } |
359 | 367 | ||
368 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); | ||
360 | return EMULATE_DONE; | 369 | return EMULATE_DONE; |
361 | } | 370 | } |
362 | 371 | ||
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index ff16d0e38433..9a34b8edb9e2 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/mmu-44x.h> | 27 | #include <asm/mmu-44x.h> |
28 | #include <asm/kvm_ppc.h> | 28 | #include <asm/kvm_ppc.h> |
29 | #include <asm/kvm_44x.h> | 29 | #include <asm/kvm_44x.h> |
30 | #include "timing.h" | ||
30 | 31 | ||
31 | #include "44x_tlb.h" | 32 | #include "44x_tlb.h" |
32 | 33 | ||
@@ -470,6 +471,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) | |||
470 | KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0, | 471 | KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0, |
471 | tlbe->word1, tlbe->word2, handler); | 472 | tlbe->word1, tlbe->word2, handler); |
472 | 473 | ||
474 | kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); | ||
473 | return EMULATE_DONE; | 475 | return EMULATE_DONE; |
474 | } | 476 | } |
475 | 477 | ||
@@ -493,5 +495,6 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc) | |||
493 | } | 495 | } |
494 | vcpu->arch.gpr[rt] = gtlb_index; | 496 | vcpu->arch.gpr[rt] = gtlb_index; |
495 | 497 | ||
498 | kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); | ||
496 | return EMULATE_DONE; | 499 | return EMULATE_DONE; |
497 | } | 500 | } |
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index e4ab1c7fd925..6dbdc4817d80 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig | |||
@@ -32,6 +32,17 @@ config KVM_440 | |||
32 | 32 | ||
33 | If unsure, say N. | 33 | If unsure, say N. |
34 | 34 | ||
35 | config KVM_EXIT_TIMING | ||
36 | bool "Detailed exit timing" | ||
37 | depends on KVM | ||
38 | ---help--- | ||
39 | Calculate elapsed time for every exit/enter cycle. A per-vcpu | ||
40 | report is available in debugfs kvm/vm#_vcpu#_timing. | ||
41 | The overhead is relatively small, however it is not recommended for | ||
42 | production environments. | ||
43 | |||
44 | If unsure, say N. | ||
45 | |||
35 | config KVM_TRACE | 46 | config KVM_TRACE |
36 | bool "KVM trace support" | 47 | bool "KVM trace support" |
37 | depends on KVM && MARKERS && SYSFS | 48 | depends on KVM && MARKERS && SYSFS |
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index f045fad0f4f1..df7ba59e6d53 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile | |||
@@ -9,6 +9,7 @@ common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | |||
9 | common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o) | 9 | common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o) |
10 | 10 | ||
11 | kvm-objs := $(common-objs-y) powerpc.o emulate.o | 11 | kvm-objs := $(common-objs-y) powerpc.o emulate.o |
12 | obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o | ||
12 | obj-$(CONFIG_KVM) += kvm.o | 13 | obj-$(CONFIG_KVM) += kvm.o |
13 | 14 | ||
14 | AFLAGS_booke_interrupts.o := -I$(obj) | 15 | AFLAGS_booke_interrupts.o := -I$(obj) |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index eb24383c87d2..0f171248e450 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/cputable.h> | 28 | #include <asm/cputable.h> |
29 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
30 | #include <asm/kvm_ppc.h> | 30 | #include <asm/kvm_ppc.h> |
31 | #include "timing.h" | ||
31 | #include <asm/cacheflush.h> | 32 | #include <asm/cacheflush.h> |
32 | #include <asm/kvm_44x.h> | 33 | #include <asm/kvm_44x.h> |
33 | 34 | ||
@@ -185,6 +186,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
185 | enum emulation_result er; | 186 | enum emulation_result er; |
186 | int r = RESUME_HOST; | 187 | int r = RESUME_HOST; |
187 | 188 | ||
189 | /* update before a new last_exit_type is rewritten */ | ||
190 | kvmppc_update_timing_stats(vcpu); | ||
191 | |||
188 | local_irq_enable(); | 192 | local_irq_enable(); |
189 | 193 | ||
190 | run->exit_reason = KVM_EXIT_UNKNOWN; | 194 | run->exit_reason = KVM_EXIT_UNKNOWN; |
@@ -198,7 +202,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
198 | break; | 202 | break; |
199 | 203 | ||
200 | case BOOKE_INTERRUPT_EXTERNAL: | 204 | case BOOKE_INTERRUPT_EXTERNAL: |
201 | vcpu->stat.ext_intr_exits++; | 205 | account_exit(vcpu, EXT_INTR_EXITS); |
202 | if (need_resched()) | 206 | if (need_resched()) |
203 | cond_resched(); | 207 | cond_resched(); |
204 | r = RESUME_GUEST; | 208 | r = RESUME_GUEST; |
@@ -208,8 +212,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
208 | /* Since we switched IVPR back to the host's value, the host | 212 | /* Since we switched IVPR back to the host's value, the host |
209 | * handled this interrupt the moment we enabled interrupts. | 213 | * handled this interrupt the moment we enabled interrupts. |
210 | * Now we just offer it a chance to reschedule the guest. */ | 214 | * Now we just offer it a chance to reschedule the guest. */ |
211 | 215 | account_exit(vcpu, DEC_EXITS); | |
212 | vcpu->stat.dec_exits++; | ||
213 | if (need_resched()) | 216 | if (need_resched()) |
214 | cond_resched(); | 217 | cond_resched(); |
215 | r = RESUME_GUEST; | 218 | r = RESUME_GUEST; |
@@ -222,20 +225,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
222 | vcpu->arch.esr = vcpu->arch.fault_esr; | 225 | vcpu->arch.esr = vcpu->arch.fault_esr; |
223 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); | 226 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); |
224 | r = RESUME_GUEST; | 227 | r = RESUME_GUEST; |
228 | account_exit(vcpu, USR_PR_INST); | ||
225 | break; | 229 | break; |
226 | } | 230 | } |
227 | 231 | ||
228 | er = kvmppc_emulate_instruction(run, vcpu); | 232 | er = kvmppc_emulate_instruction(run, vcpu); |
229 | switch (er) { | 233 | switch (er) { |
230 | case EMULATE_DONE: | 234 | case EMULATE_DONE: |
235 | /* don't overwrite subtypes, just account kvm_stats */ | ||
236 | account_exit_stat(vcpu, EMULATED_INST_EXITS); | ||
231 | /* Future optimization: only reload non-volatiles if | 237 | /* Future optimization: only reload non-volatiles if |
232 | * they were actually modified by emulation. */ | 238 | * they were actually modified by emulation. */ |
233 | vcpu->stat.emulated_inst_exits++; | ||
234 | r = RESUME_GUEST_NV; | 239 | r = RESUME_GUEST_NV; |
235 | break; | 240 | break; |
236 | case EMULATE_DO_DCR: | 241 | case EMULATE_DO_DCR: |
237 | run->exit_reason = KVM_EXIT_DCR; | 242 | run->exit_reason = KVM_EXIT_DCR; |
238 | vcpu->stat.dcr_exits++; | ||
239 | r = RESUME_HOST; | 243 | r = RESUME_HOST; |
240 | break; | 244 | break; |
241 | case EMULATE_FAIL: | 245 | case EMULATE_FAIL: |
@@ -255,6 +259,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
255 | 259 | ||
256 | case BOOKE_INTERRUPT_FP_UNAVAIL: | 260 | case BOOKE_INTERRUPT_FP_UNAVAIL: |
257 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); | 261 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); |
262 | account_exit(vcpu, FP_UNAVAIL); | ||
258 | r = RESUME_GUEST; | 263 | r = RESUME_GUEST; |
259 | break; | 264 | break; |
260 | 265 | ||
@@ -262,20 +267,20 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
262 | vcpu->arch.dear = vcpu->arch.fault_dear; | 267 | vcpu->arch.dear = vcpu->arch.fault_dear; |
263 | vcpu->arch.esr = vcpu->arch.fault_esr; | 268 | vcpu->arch.esr = vcpu->arch.fault_esr; |
264 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); | 269 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); |
265 | vcpu->stat.dsi_exits++; | 270 | account_exit(vcpu, DSI_EXITS); |
266 | r = RESUME_GUEST; | 271 | r = RESUME_GUEST; |
267 | break; | 272 | break; |
268 | 273 | ||
269 | case BOOKE_INTERRUPT_INST_STORAGE: | 274 | case BOOKE_INTERRUPT_INST_STORAGE: |
270 | vcpu->arch.esr = vcpu->arch.fault_esr; | 275 | vcpu->arch.esr = vcpu->arch.fault_esr; |
271 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); | 276 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); |
272 | vcpu->stat.isi_exits++; | 277 | account_exit(vcpu, ISI_EXITS); |
273 | r = RESUME_GUEST; | 278 | r = RESUME_GUEST; |
274 | break; | 279 | break; |
275 | 280 | ||
276 | case BOOKE_INTERRUPT_SYSCALL: | 281 | case BOOKE_INTERRUPT_SYSCALL: |
277 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); | 282 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); |
278 | vcpu->stat.syscall_exits++; | 283 | account_exit(vcpu, SYSCALL_EXITS); |
279 | r = RESUME_GUEST; | 284 | r = RESUME_GUEST; |
280 | break; | 285 | break; |
281 | 286 | ||
@@ -294,7 +299,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
294 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); | 299 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); |
295 | vcpu->arch.dear = vcpu->arch.fault_dear; | 300 | vcpu->arch.dear = vcpu->arch.fault_dear; |
296 | vcpu->arch.esr = vcpu->arch.fault_esr; | 301 | vcpu->arch.esr = vcpu->arch.fault_esr; |
297 | vcpu->stat.dtlb_real_miss_exits++; | 302 | account_exit(vcpu, DTLB_REAL_MISS_EXITS); |
298 | r = RESUME_GUEST; | 303 | r = RESUME_GUEST; |
299 | break; | 304 | break; |
300 | } | 305 | } |
@@ -312,13 +317,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
312 | * invoking the guest. */ | 317 | * invoking the guest. */ |
313 | kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid, | 318 | kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid, |
314 | gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index); | 319 | gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index); |
315 | vcpu->stat.dtlb_virt_miss_exits++; | 320 | account_exit(vcpu, DTLB_VIRT_MISS_EXITS); |
316 | r = RESUME_GUEST; | 321 | r = RESUME_GUEST; |
317 | } else { | 322 | } else { |
318 | /* Guest has mapped and accessed a page which is not | 323 | /* Guest has mapped and accessed a page which is not |
319 | * actually RAM. */ | 324 | * actually RAM. */ |
320 | r = kvmppc_emulate_mmio(run, vcpu); | 325 | r = kvmppc_emulate_mmio(run, vcpu); |
321 | vcpu->stat.mmio_exits++; | 326 | account_exit(vcpu, MMIO_EXITS); |
322 | } | 327 | } |
323 | 328 | ||
324 | break; | 329 | break; |
@@ -340,11 +345,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
340 | if (gtlb_index < 0) { | 345 | if (gtlb_index < 0) { |
341 | /* The guest didn't have a mapping for it. */ | 346 | /* The guest didn't have a mapping for it. */ |
342 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); | 347 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); |
343 | vcpu->stat.itlb_real_miss_exits++; | 348 | account_exit(vcpu, ITLB_REAL_MISS_EXITS); |
344 | break; | 349 | break; |
345 | } | 350 | } |
346 | 351 | ||
347 | vcpu->stat.itlb_virt_miss_exits++; | 352 | account_exit(vcpu, ITLB_VIRT_MISS_EXITS); |
348 | 353 | ||
349 | gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; | 354 | gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; |
350 | gpaddr = tlb_xlate(gtlbe, eaddr); | 355 | gpaddr = tlb_xlate(gtlbe, eaddr); |
@@ -378,6 +383,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
378 | mtspr(SPRN_DBSR, dbsr); | 383 | mtspr(SPRN_DBSR, dbsr); |
379 | 384 | ||
380 | run->exit_reason = KVM_EXIT_DEBUG; | 385 | run->exit_reason = KVM_EXIT_DEBUG; |
386 | account_exit(vcpu, DEBUG_EXITS); | ||
381 | r = RESUME_HOST; | 387 | r = RESUME_HOST; |
382 | break; | 388 | break; |
383 | } | 389 | } |
@@ -398,7 +404,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
398 | if (signal_pending(current)) { | 404 | if (signal_pending(current)) { |
399 | run->exit_reason = KVM_EXIT_INTR; | 405 | run->exit_reason = KVM_EXIT_INTR; |
400 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); | 406 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); |
401 | vcpu->stat.signal_exits++; | 407 | account_exit(vcpu, SIGNAL_EXITS); |
402 | } | 408 | } |
403 | } | 409 | } |
404 | 410 | ||
@@ -418,6 +424,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
418 | * before it's programmed its own IVPR. */ | 424 | * before it's programmed its own IVPR. */ |
419 | vcpu->arch.ivpr = 0x55550000; | 425 | vcpu->arch.ivpr = 0x55550000; |
420 | 426 | ||
427 | kvmppc_init_timing_stats(vcpu); | ||
428 | |||
421 | return kvmppc_core_vcpu_setup(vcpu); | 429 | return kvmppc_core_vcpu_setup(vcpu); |
422 | } | 430 | } |
423 | 431 | ||
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index 48d905fd60ab..cf7c94ca24bf 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | #include "timing.h" | ||
25 | 26 | ||
26 | /* interrupt priortity ordering */ | 27 | /* interrupt priortity ordering */ |
27 | #define BOOKE_IRQPRIO_DATA_STORAGE 0 | 28 | #define BOOKE_IRQPRIO_DATA_STORAGE 0 |
@@ -50,8 +51,10 @@ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | |||
50 | 51 | ||
51 | vcpu->arch.msr = new_msr; | 52 | vcpu->arch.msr = new_msr; |
52 | 53 | ||
53 | if (vcpu->arch.msr & MSR_WE) | 54 | if (vcpu->arch.msr & MSR_WE) { |
54 | kvm_vcpu_block(vcpu); | 55 | kvm_vcpu_block(vcpu); |
56 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); | ||
57 | }; | ||
55 | } | 58 | } |
56 | 59 | ||
57 | #endif /* __KVM_BOOKE_H__ */ | 60 | #endif /* __KVM_BOOKE_H__ */ |
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index eb2186823e4e..084ebcd7dd83 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S | |||
@@ -107,6 +107,18 @@ _GLOBAL(kvmppc_resume_host) | |||
107 | li r6, 1 | 107 | li r6, 1 |
108 | slw r6, r6, r5 | 108 | slw r6, r6, r5 |
109 | 109 | ||
110 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
111 | /* save exit time */ | ||
112 | 1: | ||
113 | mfspr r7, SPRN_TBRU | ||
114 | mfspr r8, SPRN_TBRL | ||
115 | mfspr r9, SPRN_TBRU | ||
116 | cmpw r9, r7 | ||
117 | bne 1b | ||
118 | stw r8, VCPU_TIMING_EXIT_TBL(r4) | ||
119 | stw r9, VCPU_TIMING_EXIT_TBU(r4) | ||
120 | #endif | ||
121 | |||
110 | /* Save the faulting instruction and all GPRs for emulation. */ | 122 | /* Save the faulting instruction and all GPRs for emulation. */ |
111 | andi. r7, r6, NEED_INST_MASK | 123 | andi. r7, r6, NEED_INST_MASK |
112 | beq ..skip_inst_copy | 124 | beq ..skip_inst_copy |
@@ -375,6 +387,18 @@ lightweight_exit: | |||
375 | lwz r3, VCPU_SPRG7(r4) | 387 | lwz r3, VCPU_SPRG7(r4) |
376 | mtspr SPRN_SPRG7, r3 | 388 | mtspr SPRN_SPRG7, r3 |
377 | 389 | ||
390 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
391 | /* save enter time */ | ||
392 | 1: | ||
393 | mfspr r6, SPRN_TBRU | ||
394 | mfspr r7, SPRN_TBRL | ||
395 | mfspr r8, SPRN_TBRU | ||
396 | cmpw r8, r6 | ||
397 | bne 1b | ||
398 | stw r7, VCPU_TIMING_LAST_ENTER_TBL(r4) | ||
399 | stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4) | ||
400 | #endif | ||
401 | |||
378 | /* Finish loading guest volatiles and jump to guest. */ | 402 | /* Finish loading guest volatiles and jump to guest. */ |
379 | lwz r3, VCPU_CTR(r4) | 403 | lwz r3, VCPU_CTR(r4) |
380 | mtctr r3 | 404 | mtctr r3 |
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 4c30fa0c31ea..d1d38daa93fb 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/byteorder.h> | 28 | #include <asm/byteorder.h> |
29 | #include <asm/kvm_ppc.h> | 29 | #include <asm/kvm_ppc.h> |
30 | #include <asm/disassemble.h> | 30 | #include <asm/disassemble.h> |
31 | #include "timing.h" | ||
31 | 32 | ||
32 | void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | 33 | void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) |
33 | { | 34 | { |
@@ -73,6 +74,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
73 | enum emulation_result emulated = EMULATE_DONE; | 74 | enum emulation_result emulated = EMULATE_DONE; |
74 | int advance = 1; | 75 | int advance = 1; |
75 | 76 | ||
77 | /* this default type might be overwritten by subcategories */ | ||
78 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | ||
79 | |||
76 | switch (get_op(inst)) { | 80 | switch (get_op(inst)) { |
77 | case 3: /* trap */ | 81 | case 3: /* trap */ |
78 | vcpu->arch.esr |= ESR_PTR; | 82 | vcpu->arch.esr |= ESR_PTR; |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 7ad150e0fbbf..1deda37cb771 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -28,9 +28,9 @@ | |||
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/kvm_ppc.h> | 29 | #include <asm/kvm_ppc.h> |
30 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
31 | #include "timing.h" | ||
31 | #include "../mm/mmu_decl.h" | 32 | #include "../mm/mmu_decl.h" |
32 | 33 | ||
33 | |||
34 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | 34 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) |
35 | { | 35 | { |
36 | return gfn; | 36 | return gfn; |
@@ -171,11 +171,15 @@ void kvm_arch_flush_shadow(struct kvm *kvm) | |||
171 | 171 | ||
172 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | 172 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) |
173 | { | 173 | { |
174 | return kvmppc_core_vcpu_create(kvm, id); | 174 | struct kvm_vcpu *vcpu; |
175 | vcpu = kvmppc_core_vcpu_create(kvm, id); | ||
176 | kvmppc_create_vcpu_debugfs(vcpu, id); | ||
177 | return vcpu; | ||
175 | } | 178 | } |
176 | 179 | ||
177 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | 180 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) |
178 | { | 181 | { |
182 | kvmppc_remove_vcpu_debugfs(vcpu); | ||
179 | kvmppc_core_vcpu_free(vcpu); | 183 | kvmppc_core_vcpu_free(vcpu); |
180 | } | 184 | } |
181 | 185 | ||
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c new file mode 100644 index 000000000000..f42d2728a6a5 --- /dev/null +++ b/arch/powerpc/kvm/timing.c | |||
@@ -0,0 +1,262 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2007 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | ||
19 | */ | ||
20 | |||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/seq_file.h> | ||
24 | #include <linux/debugfs.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | |||
27 | #include "timing.h" | ||
28 | #include <asm/time.h> | ||
29 | #include <asm-generic/div64.h> | ||
30 | |||
31 | void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) | ||
32 | { | ||
33 | int i; | ||
34 | |||
35 | /* pause guest execution to avoid concurrent updates */ | ||
36 | local_irq_disable(); | ||
37 | mutex_lock(&vcpu->mutex); | ||
38 | |||
39 | vcpu->arch.last_exit_type = 0xDEAD; | ||
40 | for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { | ||
41 | vcpu->arch.timing_count_type[i] = 0; | ||
42 | vcpu->arch.timing_max_duration[i] = 0; | ||
43 | vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF; | ||
44 | vcpu->arch.timing_sum_duration[i] = 0; | ||
45 | vcpu->arch.timing_sum_quad_duration[i] = 0; | ||
46 | } | ||
47 | vcpu->arch.timing_last_exit = 0; | ||
48 | vcpu->arch.timing_exit.tv64 = 0; | ||
49 | vcpu->arch.timing_last_enter.tv64 = 0; | ||
50 | |||
51 | mutex_unlock(&vcpu->mutex); | ||
52 | local_irq_enable(); | ||
53 | } | ||
54 | |||
55 | static void add_exit_timing(struct kvm_vcpu *vcpu, | ||
56 | u64 duration, int type) | ||
57 | { | ||
58 | u64 old; | ||
59 | |||
60 | do_div(duration, tb_ticks_per_usec); | ||
61 | if (unlikely(duration > 0xFFFFFFFF)) { | ||
62 | printk(KERN_ERR"%s - duration too big -> overflow" | ||
63 | " duration %lld type %d exit #%d\n", | ||
64 | __func__, duration, type, | ||
65 | vcpu->arch.timing_count_type[type]); | ||
66 | return; | ||
67 | } | ||
68 | |||
69 | vcpu->arch.timing_count_type[type]++; | ||
70 | |||
71 | /* sum */ | ||
72 | old = vcpu->arch.timing_sum_duration[type]; | ||
73 | vcpu->arch.timing_sum_duration[type] += duration; | ||
74 | if (unlikely(old > vcpu->arch.timing_sum_duration[type])) { | ||
75 | printk(KERN_ERR"%s - wrap adding sum of durations" | ||
76 | " old %lld new %lld type %d exit # of type %d\n", | ||
77 | __func__, old, vcpu->arch.timing_sum_duration[type], | ||
78 | type, vcpu->arch.timing_count_type[type]); | ||
79 | } | ||
80 | |||
81 | /* square sum */ | ||
82 | old = vcpu->arch.timing_sum_quad_duration[type]; | ||
83 | vcpu->arch.timing_sum_quad_duration[type] += (duration*duration); | ||
84 | if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) { | ||
85 | printk(KERN_ERR"%s - wrap adding sum of squared durations" | ||
86 | " old %lld new %lld type %d exit # of type %d\n", | ||
87 | __func__, old, | ||
88 | vcpu->arch.timing_sum_quad_duration[type], | ||
89 | type, vcpu->arch.timing_count_type[type]); | ||
90 | } | ||
91 | |||
92 | /* set min/max */ | ||
93 | if (unlikely(duration < vcpu->arch.timing_min_duration[type])) | ||
94 | vcpu->arch.timing_min_duration[type] = duration; | ||
95 | if (unlikely(duration > vcpu->arch.timing_max_duration[type])) | ||
96 | vcpu->arch.timing_max_duration[type] = duration; | ||
97 | } | ||
98 | |||
99 | void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) | ||
100 | { | ||
101 | u64 exit = vcpu->arch.timing_last_exit; | ||
102 | u64 enter = vcpu->arch.timing_last_enter.tv64; | ||
103 | |||
104 | /* save exit time, used next exit when the reenter time is known */ | ||
105 | vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64; | ||
106 | |||
107 | if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0)) | ||
108 | return; /* skip incomplete cycle (e.g. after reset) */ | ||
109 | |||
110 | /* update statistics for average and standard deviation */ | ||
111 | add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type); | ||
112 | /* enter -> timing_last_exit is time spent in guest - log this too */ | ||
113 | add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter), | ||
114 | TIMEINGUEST); | ||
115 | } | ||
116 | |||
117 | static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = { | ||
118 | [MMIO_EXITS] = "MMIO", | ||
119 | [DCR_EXITS] = "DCR", | ||
120 | [SIGNAL_EXITS] = "SIGNAL", | ||
121 | [ITLB_REAL_MISS_EXITS] = "ITLBREAL", | ||
122 | [ITLB_VIRT_MISS_EXITS] = "ITLBVIRT", | ||
123 | [DTLB_REAL_MISS_EXITS] = "DTLBREAL", | ||
124 | [DTLB_VIRT_MISS_EXITS] = "DTLBVIRT", | ||
125 | [SYSCALL_EXITS] = "SYSCALL", | ||
126 | [ISI_EXITS] = "ISI", | ||
127 | [DSI_EXITS] = "DSI", | ||
128 | [EMULATED_INST_EXITS] = "EMULINST", | ||
129 | [EMULATED_MTMSRWE_EXITS] = "EMUL_WAIT", | ||
130 | [EMULATED_WRTEE_EXITS] = "EMUL_WRTEE", | ||
131 | [EMULATED_MTSPR_EXITS] = "EMUL_MTSPR", | ||
132 | [EMULATED_MFSPR_EXITS] = "EMUL_MFSPR", | ||
133 | [EMULATED_MTMSR_EXITS] = "EMUL_MTMSR", | ||
134 | [EMULATED_MFMSR_EXITS] = "EMUL_MFMSR", | ||
135 | [EMULATED_TLBSX_EXITS] = "EMUL_TLBSX", | ||
136 | [EMULATED_TLBWE_EXITS] = "EMUL_TLBWE", | ||
137 | [EMULATED_RFI_EXITS] = "EMUL_RFI", | ||
138 | [DEC_EXITS] = "DEC", | ||
139 | [EXT_INTR_EXITS] = "EXTINT", | ||
140 | [HALT_WAKEUP] = "HALT", | ||
141 | [USR_PR_INST] = "USR_PR_INST", | ||
142 | [FP_UNAVAIL] = "FP_UNAVAIL", | ||
143 | [DEBUG_EXITS] = "DEBUG", | ||
144 | [TIMEINGUEST] = "TIMEINGUEST" | ||
145 | }; | ||
146 | |||
147 | static int kvmppc_exit_timing_show(struct seq_file *m, void *private) | ||
148 | { | ||
149 | struct kvm_vcpu *vcpu = m->private; | ||
150 | int i; | ||
151 | u64 min, max; | ||
152 | |||
153 | for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { | ||
154 | if (vcpu->arch.timing_min_duration[i] == 0xFFFFFFFF) | ||
155 | min = 0; | ||
156 | else | ||
157 | min = vcpu->arch.timing_min_duration[i]; | ||
158 | if (vcpu->arch.timing_max_duration[i] == 0) | ||
159 | max = 0; | ||
160 | else | ||
161 | max = vcpu->arch.timing_max_duration[i]; | ||
162 | |||
163 | seq_printf(m, "%12s: count %10d min %10lld " | ||
164 | "max %10lld sum %20lld sum_quad %20lld\n", | ||
165 | kvm_exit_names[i], vcpu->arch.timing_count_type[i], | ||
166 | vcpu->arch.timing_min_duration[i], | ||
167 | vcpu->arch.timing_max_duration[i], | ||
168 | vcpu->arch.timing_sum_duration[i], | ||
169 | vcpu->arch.timing_sum_quad_duration[i]); | ||
170 | } | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static ssize_t kvmppc_exit_timing_write(struct file *file, | ||
175 | const char __user *user_buf, | ||
176 | size_t count, loff_t *ppos) | ||
177 | { | ||
178 | size_t len; | ||
179 | int err; | ||
180 | const char __user *p; | ||
181 | char c; | ||
182 | |||
183 | len = 0; | ||
184 | p = user_buf; | ||
185 | while (len < count) { | ||
186 | if (get_user(c, p++)) | ||
187 | err = -EFAULT; | ||
188 | if (c == 0 || c == '\n') | ||
189 | break; | ||
190 | len++; | ||
191 | } | ||
192 | |||
193 | if (len > 1) { | ||
194 | err = -EINVAL; | ||
195 | goto done; | ||
196 | } | ||
197 | |||
198 | if (copy_from_user(&c, user_buf, sizeof(c))) { | ||
199 | err = -EFAULT; | ||
200 | goto done; | ||
201 | } | ||
202 | |||
203 | if (c == 'c') { | ||
204 | struct seq_file *seqf = (struct seq_file *)file->private_data; | ||
205 | struct kvm_vcpu *vcpu = seqf->private; | ||
206 | /* write does not affect out buffers previsously generated with | ||
207 | * show. Seq file is locked here to prevent races of init with | ||
208 | * a show call */ | ||
209 | mutex_lock(&seqf->lock); | ||
210 | kvmppc_init_timing_stats(vcpu); | ||
211 | mutex_unlock(&seqf->lock); | ||
212 | err = count; | ||
213 | } else { | ||
214 | err = -EINVAL; | ||
215 | goto done; | ||
216 | } | ||
217 | |||
218 | done: | ||
219 | return err; | ||
220 | } | ||
221 | |||
222 | static int kvmppc_exit_timing_open(struct inode *inode, struct file *file) | ||
223 | { | ||
224 | return single_open(file, kvmppc_exit_timing_show, inode->i_private); | ||
225 | } | ||
226 | |||
227 | static struct file_operations kvmppc_exit_timing_fops = { | ||
228 | .owner = THIS_MODULE, | ||
229 | .open = kvmppc_exit_timing_open, | ||
230 | .read = seq_read, | ||
231 | .write = kvmppc_exit_timing_write, | ||
232 | .llseek = seq_lseek, | ||
233 | .release = single_release, | ||
234 | }; | ||
235 | |||
236 | void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id) | ||
237 | { | ||
238 | static char dbg_fname[50]; | ||
239 | struct dentry *debugfs_file; | ||
240 | |||
241 | snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%03u_timing", | ||
242 | current->pid, id); | ||
243 | debugfs_file = debugfs_create_file(dbg_fname, 0666, | ||
244 | kvm_debugfs_dir, vcpu, | ||
245 | &kvmppc_exit_timing_fops); | ||
246 | |||
247 | if (!debugfs_file) { | ||
248 | printk(KERN_ERR"%s: error creating debugfs file %s\n", | ||
249 | __func__, dbg_fname); | ||
250 | return; | ||
251 | } | ||
252 | |||
253 | vcpu->arch.debugfs_exit_timing = debugfs_file; | ||
254 | } | ||
255 | |||
256 | void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) | ||
257 | { | ||
258 | if (vcpu->arch.debugfs_exit_timing) { | ||
259 | debugfs_remove(vcpu->arch.debugfs_exit_timing); | ||
260 | vcpu->arch.debugfs_exit_timing = NULL; | ||
261 | } | ||
262 | } | ||
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h new file mode 100644 index 000000000000..1af7181fa2b5 --- /dev/null +++ b/arch/powerpc/kvm/timing.h | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __POWERPC_KVM_EXITTIMING_H__ | ||
21 | #define __POWERPC_KVM_EXITTIMING_H__ | ||
22 | |||
23 | #include <linux/kvm_host.h> | ||
24 | #include <asm/kvm_host.h> | ||
25 | |||
#ifdef CONFIG_KVM_EXIT_TIMING
/* Reset all per-vcpu exit timing counters (implemented in timing.c). */
void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
/* Fold the just-completed exit into the per-type timing buckets. */
void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu);
/* Create/remove the per-vcpu debugfs file exposing the statistics. */
void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id);
void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu);

/* Record which exit type is currently being handled; presumably read
 * back by kvmppc_update_timing_stats() — see timing.c. */
static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type)
{
	vcpu->arch.last_exit_type = type;
}

#else
/* if exit timing is not configured there is no need to build the c file */
static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu,
						unsigned int id) {}
static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {}
#endif /* CONFIG_KVM_EXIT_TIMING */
46 | |||
47 | /* account the exit in kvm_stats */ | ||
48 | static inline void account_exit_stat(struct kvm_vcpu *vcpu, int type) | ||
49 | { | ||
50 | /* type has to be known at build time for optimization */ | ||
51 | BUILD_BUG_ON(__builtin_constant_p(type)); | ||
52 | switch (type) { | ||
53 | case EXT_INTR_EXITS: | ||
54 | vcpu->stat.ext_intr_exits++; | ||
55 | break; | ||
56 | case DEC_EXITS: | ||
57 | vcpu->stat.dec_exits++; | ||
58 | break; | ||
59 | case EMULATED_INST_EXITS: | ||
60 | vcpu->stat.emulated_inst_exits++; | ||
61 | break; | ||
62 | case DCR_EXITS: | ||
63 | vcpu->stat.dcr_exits++; | ||
64 | break; | ||
65 | case DSI_EXITS: | ||
66 | vcpu->stat.dsi_exits++; | ||
67 | break; | ||
68 | case ISI_EXITS: | ||
69 | vcpu->stat.isi_exits++; | ||
70 | break; | ||
71 | case SYSCALL_EXITS: | ||
72 | vcpu->stat.syscall_exits++; | ||
73 | break; | ||
74 | case DTLB_REAL_MISS_EXITS: | ||
75 | vcpu->stat.dtlb_real_miss_exits++; | ||
76 | break; | ||
77 | case DTLB_VIRT_MISS_EXITS: | ||
78 | vcpu->stat.dtlb_virt_miss_exits++; | ||
79 | break; | ||
80 | case MMIO_EXITS: | ||
81 | vcpu->stat.mmio_exits++; | ||
82 | break; | ||
83 | case ITLB_REAL_MISS_EXITS: | ||
84 | vcpu->stat.itlb_real_miss_exits++; | ||
85 | break; | ||
86 | case ITLB_VIRT_MISS_EXITS: | ||
87 | vcpu->stat.itlb_virt_miss_exits++; | ||
88 | break; | ||
89 | case SIGNAL_EXITS: | ||
90 | vcpu->stat.signal_exits++; | ||
91 | break; | ||
92 | } | ||
93 | } | ||
94 | |||
/*
 * Convenience wrapper called on each guest exit: records the exit type
 * for the (config-dependent) exit timing code and accounts the exit in
 * kvm_stats via account_exit_stat().
 */
static inline void account_exit(struct kvm_vcpu *vcpu, int type)
{
	kvmppc_set_exit_type(vcpu, type);
	account_exit_stat(vcpu, type);
}
101 | |||
102 | #endif /* __POWERPC_KVM_EXITTIMING_H__ */ | ||