author    Deng-Cheng Zhu <dengcheng.zhu@imgtec.com>  2014-06-26 15:11:35 -0400
committer Paolo Bonzini <pbonzini@redhat.com>        2014-06-30 10:52:02 -0400
commit    6ad78a5c75c5bcfdac4551f7d09b777b3dc3c19c (patch)
tree      63b2a0178cc1e29a97b0df00255d63fe7e8a0e40
parent    d116e812f9026e3cca46ce1009e577afec62916d (diff)
MIPS: KVM: Use KVM internal logger
Replace printks with kvm_[err|info|debug].

Signed-off-by: Deng-Cheng Zhu <dengcheng.zhu@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  arch/mips/kvm/kvm_mips.c       |  23
-rw-r--r--  arch/mips/kvm/kvm_mips_emul.c  | 107
-rw-r--r--  arch/mips/kvm/kvm_mips_stats.c |   6
-rw-r--r--  arch/mips/kvm/kvm_tlb.c        |  60
-rw-r--r--  arch/mips/kvm/kvm_trap_emul.c  |  31
5 files changed, 110 insertions(+), 117 deletions(-)
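
For context on the conversion below: kvm_err(), kvm_info() and kvm_debug() are KVM's internal logging helpers. They forward to the kernel's pr_err()/pr_info()/pr_debug() wrappers and tag each message, so error and info output keeps its printk severity while kvm_debug() compiles away unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled. A minimal sketch of what such wrappers look like (an approximation for illustration, not a verbatim copy of include/linux/kvm_host.h):

/*
 * Approximate sketch of KVM's internal logging macros (illustrative;
 * the real definitions live in include/linux/kvm_host.h).  Each level
 * forwards to the matching pr_*() helper from <linux/printk.h> and
 * prefixes the message so KVM output is easy to filter in dmesg.
 */
#include <linux/printk.h>

#define kvm_err(fmt, ...)   pr_err("kvm: " fmt, ##__VA_ARGS__)
#define kvm_info(fmt, ...)  pr_info("kvm: " fmt, ##__VA_ARGS__)
/* pr_debug() is a no-op unless DEBUG or CONFIG_DYNAMIC_DEBUG is set. */
#define kvm_debug(fmt, ...) pr_debug("kvm: " fmt, ##__VA_ARGS__)
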
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index 52be52adf030..330b3af701a6 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -817,8 +817,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 		ga = memslot->base_gfn << PAGE_SHIFT;
 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
 
-		printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
 			 ga_end);
 
 		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
@@ -925,24 +925,25 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
 	if (!vcpu)
 		return -1;
 
-	printk("VCPU Register Dump:\n");
-	printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
-	printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+	kvm_debug("VCPU Register Dump:\n");
+	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
+	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
 
 	for (i = 0; i < 32; i += 4) {
-		printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
 		       vcpu->arch.gprs[i],
 		       vcpu->arch.gprs[i + 1],
 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
 	}
-	printk("\thi: 0x%08lx\n", vcpu->arch.hi);
-	printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
+	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
+	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
 
 	cop0 = vcpu->arch.cop0;
-	printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
-	       kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
+	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+		  kvm_read_c0_guest_status(cop0),
+		  kvm_read_c0_guest_cause(cop0));
 
-	printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
 
 	return 0;
 }
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index 9ec9f1d54b9b..bdd1421b78fc 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -183,18 +183,18 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 
 	/* And now the FPA/cp1 branch instructions. */
 	case cop1_op:
-		printk("%s: unsupported cop1_op\n", __func__);
+		kvm_err("%s: unsupported cop1_op\n", __func__);
 		break;
 	}
 
 	return nextpc;
 
 unaligned:
-	printk("%s: unaligned epc\n", __func__);
+	kvm_err("%s: unaligned epc\n", __func__);
 	return nextpc;
 
 sigill:
-	printk("%s: DSP branch but not DSP ASE\n", __func__);
+	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
 	return nextpc;
 }
 
@@ -751,8 +751,8 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
 		kvm_clear_c0_guest_status(cop0, ST0_ERL);
 		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
 	} else {
-		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
 			vcpu->arch.pc);
 		er = EMULATE_FAIL;
 	}
 
@@ -795,7 +795,7 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 	enum emulation_result er = EMULATE_FAIL;
 	uint32_t pc = vcpu->arch.pc;
 
-	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+	kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
 	return er;
 }
 
@@ -809,13 +809,12 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 	uint32_t pc = vcpu->arch.pc;
 
 	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
-		printk("%s: illegal index: %d\n", __func__, index);
-		printk
-		    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
-		     pc, index, kvm_read_c0_guest_entryhi(cop0),
-		     kvm_read_c0_guest_entrylo0(cop0),
-		     kvm_read_c0_guest_entrylo1(cop0),
-		     kvm_read_c0_guest_pagemask(cop0));
+		kvm_debug("%s: illegal index: %d\n", __func__, index);
+		kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+			  pc, index, kvm_read_c0_guest_entryhi(cop0),
+			  kvm_read_c0_guest_entrylo0(cop0),
+			  kvm_read_c0_guest_entrylo1(cop0),
+			  kvm_read_c0_guest_pagemask(cop0));
 		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
 	}
 
@@ -853,7 +852,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
 
 	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
-		printk("%s: illegal index: %d\n", __func__, index);
+		kvm_err("%s: illegal index: %d\n", __func__, index);
 		return EMULATE_FAIL;
 	}
 
@@ -938,7 +937,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 			er = kvm_mips_emul_tlbp(vcpu);
 			break;
 		case rfe_op:
-			printk("!!!COP0_RFE!!!\n");
+			kvm_err("!!!COP0_RFE!!!\n");
 			break;
 		case eret_op:
 			er = kvm_mips_emul_eret(vcpu);
@@ -987,8 +986,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 			if ((rd == MIPS_CP0_TLB_INDEX)
 			    && (vcpu->arch.gprs[rt] >=
 				KVM_MIPS_GUEST_TLB_SIZE)) {
-				printk("Invalid TLB Index: %ld",
+				kvm_err("Invalid TLB Index: %ld",
 					vcpu->arch.gprs[rt]);
 				er = EMULATE_FAIL;
 				break;
 			}
@@ -998,8 +997,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 				kvm_change_c0_guest_ebase(cop0,
 							  ~(C0_EBASE_CORE_MASK),
 							  vcpu->arch.gprs[rt]);
-				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
+				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
 					kvm_read_c0_guest_ebase(cop0));
 			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
 				uint32_t nasid =
 					vcpu->arch.gprs[rt] & ASID_MASK;
@@ -1072,9 +1071,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 		break;
 
 	case dmtc_op:
-		printk
-		    ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
-		     vcpu->arch.pc, rt, rd, sel);
+		kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+			vcpu->arch.pc, rt, rd, sel);
 		er = EMULATE_FAIL;
 		break;
 
@@ -1119,9 +1117,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 		}
 		break;
 	default:
-		printk
-		    ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
-		     vcpu->arch.pc, copz);
+		kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+			vcpu->arch.pc, copz);
 		er = EMULATE_FAIL;
 		break;
 	}
@@ -1242,7 +1239,7 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
 		break;
 
 	default:
-		printk("Store not yet supported");
+		kvm_err("Store not yet supported");
 		er = EMULATE_FAIL;
 		break;
 	}
@@ -1351,7 +1348,7 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
 		break;
 
 	default:
-		printk("Load not yet supported");
+		kvm_err("Load not yet supported");
 		er = EMULATE_FAIL;
 		break;
 	}
@@ -1370,7 +1367,7 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
 	gfn = va >> PAGE_SHIFT;
 
 	if (gfn >= kvm->arch.guest_pmap_npages) {
-		printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
+		kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
 		kvm_mips_dump_host_tlbs();
 		kvm_arch_vcpu_dump_regs(vcpu);
 		return -1;
@@ -1378,7 +1375,8 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
 	pfn = kvm->arch.guest_pmap[gfn];
 	pa = (pfn << PAGE_SHIFT) | offset;
 
-	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
+	kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
+		  CKSEG0ADDR(pa));
 
 	local_flush_icache_range(CKSEG0ADDR(pa), 32);
 	return 0;
@@ -1444,8 +1442,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 		else if (cache == MIPS_CACHE_ICACHE)
 			r4k_blast_icache();
 		else {
-			printk("%s: unsupported CACHE INDEX operation\n",
+			kvm_err("%s: unsupported CACHE INDEX operation\n",
 				__func__);
 			return EMULATE_FAIL;
 		}
 
@@ -1504,9 +1502,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 			}
 		}
 	} else {
-		printk
-		    ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-		     cache, op, base, arch->gprs[base], offset);
+		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+			cache, op, base, arch->gprs[base], offset);
 		er = EMULATE_FAIL;
 		preempt_enable();
 		goto dont_update_pc;
@@ -1536,9 +1533,8 @@ skip_fault:
 		kvm_mips_trans_cache_va(inst, opc, vcpu);
 #endif
 	} else {
-		printk
-		    ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-		     cache, op, base, arch->gprs[base], offset);
+		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+			cache, op, base, arch->gprs[base], offset);
 		er = EMULATE_FAIL;
 		preempt_enable();
 		goto dont_update_pc;
@@ -1590,8 +1586,8 @@ enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
 		break;
 
 	default:
-		printk("Instruction emulation not supported (%p/%#x)\n", opc,
+		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
 			inst);
 		kvm_arch_vcpu_dump_regs(vcpu);
 		er = EMULATE_FAIL;
 		break;
@@ -1628,7 +1624,7 @@ enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
 		arch->pc = KVM_GUEST_KSEG0 + 0x180;
 
 	} else {
-		printk("Trying to deliver SYSCALL when EXL is already set\n");
+		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
 		er = EMULATE_FAIL;
 	}
 
@@ -1984,7 +1980,7 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
 		arch->pc = KVM_GUEST_KSEG0 + 0x180;
 
 	} else {
-		printk("Trying to deliver BP when EXL is already set\n");
+		kvm_err("Trying to deliver BP when EXL is already set\n");
 		er = EMULATE_FAIL;
 	}
 
@@ -2032,7 +2028,7 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
 	inst = kvm_get_inst(opc, vcpu);
 
 	if (inst == KVM_INVALID_INST) {
-		printk("%s: Cannot get inst @ %p\n", __func__, opc);
+		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
 		return EMULATE_FAIL;
 	}
 
@@ -2099,7 +2095,7 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 	unsigned long curr_pc;
 
 	if (run->mmio.len > sizeof(*gpr)) {
-		printk("Bad MMIO length: %d", run->mmio.len);
+		kvm_err("Bad MMIO length: %d", run->mmio.len);
 		er = EMULATE_FAIL;
 		goto done;
 	}
@@ -2173,7 +2169,7 @@ static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
 			  exccode, kvm_read_c0_guest_epc(cop0),
 			  kvm_read_c0_guest_badvaddr(cop0));
 	} else {
-		printk("Trying to deliver EXC when EXL is already set\n");
+		kvm_err("Trying to deliver EXC when EXL is already set\n");
 		er = EMULATE_FAIL;
 	}
 
@@ -2213,8 +2209,8 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
 			 * address error exception to the guest
 			 */
 			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
-				printk("%s: LD MISS @ %#lx\n", __func__,
+				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
 					badvaddr);
 				cause &= ~0xff;
 				cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
 				er = EMULATE_PRIV_FAIL;
@@ -2227,8 +2223,8 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
 			 * address error exception to the guest
 			 */
 			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
-				printk("%s: ST MISS @ %#lx\n", __func__,
+				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
 					badvaddr);
 				cause &= ~0xff;
 				cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
 				er = EMULATE_PRIV_FAIL;
@@ -2236,8 +2232,8 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
 			break;
 
 		case T_ADDR_ERR_ST:
-			printk("%s: address error ST @ %#lx\n", __func__,
+			kvm_debug("%s: address error ST @ %#lx\n", __func__,
 				badvaddr);
 			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
 				cause &= ~0xff;
 				cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
@@ -2245,8 +2241,8 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
 				er = EMULATE_PRIV_FAIL;
 			break;
 		case T_ADDR_ERR_LD:
-			printk("%s: address error LD @ %#lx\n", __func__,
+			kvm_debug("%s: address error LD @ %#lx\n", __func__,
 				badvaddr);
 			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
 				cause &= ~0xff;
 				cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
@@ -2301,7 +2297,8 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
 		} else if (exccode == T_TLB_ST_MISS) {
 			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
 		} else {
-			printk("%s: invalid exc code: %d\n", __func__, exccode);
+			kvm_err("%s: invalid exc code: %d\n", __func__,
+				exccode);
 			er = EMULATE_FAIL;
 		}
 	} else {
@@ -2319,8 +2316,8 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
 				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
 								vcpu);
 			} else {
-				printk("%s: invalid exc code: %d\n", __func__,
+				kvm_err("%s: invalid exc code: %d\n", __func__,
 					exccode);
 				er = EMULATE_FAIL;
 			}
 		} else {
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
index 6efef38a324d..1ae9f88b4d38 100644
--- a/arch/mips/kvm/kvm_mips_stats.c
+++ b/arch/mips/kvm/kvm_mips_stats.c
@@ -68,12 +68,12 @@ int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
 	int i, j;
 
-	printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+	kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
 	for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
 		for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
 			if (vcpu->arch.cop0->stat[i][j])
-				printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+				kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
 					vcpu->arch.cop0->stat[i][j]);
 		}
 	}
 #endif
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index bb7418bd95b9..29a5bdb19a51 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -77,8 +77,8 @@ void kvm_mips_dump_host_tlbs(void)
 	old_entryhi = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
 
-	printk("HOST TLBs:\n");
-	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+	kvm_info("HOST TLBs:\n");
+	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
 
 	for (i = 0; i < current_cpu_data.tlbsize; i++) {
 		write_c0_index(i);
@@ -92,19 +92,19 @@ void kvm_mips_dump_host_tlbs(void)
 		tlb.tlb_lo1 = read_c0_entrylo1();
 		tlb.tlb_mask = read_c0_pagemask();
 
-		printk("TLB%c%3d Hi 0x%08lx ",
+		kvm_info("TLB%c%3d Hi 0x%08lx ",
 			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
 			 i, tlb.tlb_hi);
-		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
 			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
 			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
 			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
 			 (tlb.tlb_lo0 >> 3) & 7);
-		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
 			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
 			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
 			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
 			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
 	}
 	write_c0_entryhi(old_entryhi);
 	write_c0_pagemask(old_pagemask);
@@ -119,24 +119,24 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
 	struct kvm_mips_tlb tlb;
 	int i;
 
-	printk("Guest TLBs:\n");
-	printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+	kvm_info("Guest TLBs:\n");
+	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
 
 	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 		tlb = vcpu->arch.guest_tlb[i];
-		printk("TLB%c%3d Hi 0x%08lx ",
+		kvm_info("TLB%c%3d Hi 0x%08lx ",
 			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
 			 i, tlb.tlb_hi);
-		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
 			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
 			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
 			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
 			 (tlb.tlb_lo0 >> 3) & 7);
-		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
 			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
 			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
 			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
 			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
 	}
 }
 EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
index 106335b36861..bd2f6bc64d45 100644
--- a/arch/mips/kvm/kvm_trap_emul.c
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -27,7 +27,7 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
 	if ((kseg == CKSEG0) || (kseg == CKSEG1))
 		gpa = CPHYSADDR(gva);
 	else {
-		printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
 		kvm_mips_dump_host_tlbs();
 		gpa = KVM_INVALID_ADDR;
 	}
@@ -98,17 +98,15 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
 		 * when we are not using HIGHMEM. Need to address this in a
 		 * HIGHMEM kernel
 		 */
-		printk
-		    ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
 		kvm_mips_dump_host_tlbs();
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	} else {
-		printk
-		    ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
 		kvm_mips_dump_host_tlbs();
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -208,9 +206,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
 			ret = RESUME_HOST;
 		}
 	} else {
-		printk
-		    ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
 		kvm_mips_dump_host_tlbs();
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -233,7 +230,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 		kvm_debug("Emulate Store to MMIO space\n");
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 		if (er == EMULATE_FAIL) {
-			printk("Emulate Store to MMIO space failed\n");
+			kvm_err("Emulate Store to MMIO space failed\n");
 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			ret = RESUME_HOST;
 		} else {
@@ -241,9 +238,8 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 			ret = RESUME_HOST;
 		}
 	} else {
-		printk
-		    ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
@@ -263,7 +259,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 		if (er == EMULATE_FAIL) {
-			printk("Emulate Load from MMIO space failed\n");
+			kvm_err("Emulate Load from MMIO space failed\n");
 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			ret = RESUME_HOST;
 		} else {
@@ -271,9 +267,8 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 			ret = RESUME_HOST;
 		}
 	} else {
-		printk
-		    ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, opc, badvaddr);
+		kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+			cause, opc, badvaddr);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 		er = EMULATE_FAIL;