-rw-r--r--  arch/mips/kvm/kvm_mips_emul.c |  2 --
-rw-r--r--  arch/mips/kvm/kvm_tlb.c       | 14 --------------
-rw-r--r--  arch/mips/kvm/kvm_trap_emul.c | 12 ------------
3 files changed, 0 insertions, 28 deletions
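
For context on why these guards can go: kvm_debug() is built on pr_debug(), which already generates no code unless DEBUG is defined (or dynamic debug is enabled), so an extra #ifdef DEBUG around each call site only repeats that check. The snippet below is a minimal stand-alone sketch of that pattern, not the kernel's actual headers; the printf-based macro and the demo main() are assumptions used purely for illustration.

/*
 * Minimal sketch (assumed, simplified model -- not the kernel's real
 * pr_debug/kvm_debug definitions) of why a call-site "#ifdef DEBUG"
 * around kvm_debug() is redundant: the macro itself already expands
 * to no generated code unless the file is built with -DDEBUG.
 */
#include <stdio.h>

#ifdef DEBUG
#define kvm_debug(fmt, ...) printf("kvm: " fmt, ##__VA_ARGS__)
#else
/* "if (0)" keeps the arguments type-checked but emits no code. */
#define kvm_debug(fmt, ...) \
	do { if (0) printf("kvm: " fmt, ##__VA_ARGS__); } while (0)
#endif

int main(void)
{
	int idx = 42;

	/* No #ifdef needed here: a no-op unless built with -DDEBUG. */
	kvm_debug("Host TLB lookup, idx: %2d\n", idx);
	return 0;
}

Built with and without -DDEBUG, the call site reads the same either way, which is what lets the diffs below drop the guards without changing behaviour.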
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index c81ab791b8f2..8d4840090082 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -2319,11 +2319,9 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
 			er = EMULATE_FAIL;
 		}
 	} else {
-#ifdef DEBUG
 		kvm_debug
 		    ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
 		     tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
-#endif
 		/* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
 		kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
 						     NULL);
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index 994fc2384180..15ad06d717fd 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -232,11 +232,9 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
 		tlb_write_indexed();
 	tlbw_use_hazard();
 
-#ifdef DEBUG
 	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
 		  vcpu->arch.pc, idx, read_c0_entryhi(),
 		  read_c0_entrylo0(), read_c0_entrylo1());
-#endif
 
 	/* Flush D-cache */
 	if (flush_dcache_mask) {
@@ -343,11 +341,9 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 	mtc0_tlbw_hazard();
 	tlbw_use_hazard();
 
-#ifdef DEBUG
 	kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
 	     vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
 	     read_c0_entrylo0(), read_c0_entrylo1());
-#endif
 
 	/* Restore old ASID */
 	write_c0_entryhi(old_entryhi);
@@ -395,10 +391,8 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
 		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
 
-#ifdef DEBUG
 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
 		  tlb->tlb_lo0, tlb->tlb_lo1);
-#endif
 
 	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
 				       tlb->tlb_mask);
@@ -419,10 +413,8 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 		}
 	}
 
-#ifdef DEBUG
 	kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
 		  __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
-#endif
 
 	return index;
 }
@@ -456,9 +448,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
 
 	local_irq_restore(flags);
 
-#ifdef DEBUG
 	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
-#endif
 
 	return idx;
 }
@@ -503,11 +493,9 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
 
 	local_irq_restore(flags);
 
-#ifdef DEBUG
 	if (idx > 0)
 		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
 			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);
-#endif
 
 	return 0;
 }
@@ -675,9 +663,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	unsigned long flags;
 	int newasid = 0;
 
-#ifdef DEBUG
 	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
-#endif
 
 	/* Alocate new kernel and user ASIDs if needed */
 
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
index b171db324cf0..693f952b2fbb 100644
--- a/arch/mips/kvm/kvm_trap_emul.c
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -32,9 +32,7 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
 		gpa = KVM_INVALID_ADDR;
 	}
 
-#ifdef DEBUG
 	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
-#endif
 
 	return gpa;
 }
@@ -85,11 +83,9 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
 
 	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-#ifdef DEBUG
 		kvm_debug
 		    ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
 		     cause, opc, badvaddr);
-#endif
 		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
 
 		if (er == EMULATE_DONE)
@@ -138,11 +134,9 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
 		}
 	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-#ifdef DEBUG
 		kvm_debug
 		    ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
 		     cause, opc, badvaddr);
-#endif
 		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
 		if (er == EMULATE_DONE)
 			ret = RESUME_GUEST;
@@ -188,10 +182,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
 		}
 	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-#ifdef DEBUG
 		kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
 			  vcpu->arch.pc, badvaddr);
-#endif
 
 		/* User Address (UA) fault, this could happen if
 		 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
@@ -236,9 +228,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 
 	if (KVM_GUEST_KERNEL_MODE(vcpu)
 	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
-#ifdef DEBUG
 		kvm_debug("Emulate Store to MMIO space\n");
-#endif
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 		if (er == EMULATE_FAIL) {
 			printk("Emulate Store to MMIO space failed\n");
@@ -268,9 +258,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 	int ret = RESUME_GUEST;
 
 	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
-#ifdef DEBUG
 		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
-#endif
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 		if (er == EMULATE_FAIL) {
 			printk("Emulate Load from MMIO space failed\n");
