author     Christian Borntraeger <borntraeger@de.ibm.com>  2015-07-21 06:44:57 -0400
committer  Christian Borntraeger <borntraeger@de.ibm.com>  2015-07-29 05:02:35 -0400
commit     7cbde76b3d55ee299568eb943854c425b015b30c (patch)
tree       38867b324d65dd22ef97089fd67b2abb9dcf4311 /arch/s390/kvm
parent     1cb9cf726efeb77e05ee4f27f32700c46ecb1b8a (diff)
KVM: s390: adapt debug entries for instruction handling
Use the default log level 3 for state-changing and/or seldom events, and 4 for the others. Also change some numbers from %x to %d and vice versa to match the documentation. If hex, prepend the numbers with 0x.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
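As a quick illustration of the format-string rule this commit applies (a sketch only, not part of the patch; the standalone userspace program and its sample values are invented for the example): register numbers and similar decimal quantities are printed with %d/%u, while addresses and other hexadecimal values get an explicit 0x prefix so the base is unambiguous in the debug log.

/*
 * Illustrative sketch -- not part of the patch. It only demonstrates the
 * formatting convention used in the VCPU_EVENT messages below: decimal
 * values via %d/%u, hexadecimal values with a leading "0x".
 */
#include <stdio.h>

int main(void)
{
        unsigned int reg1 = 1, reg3 = 3;           /* register numbers: decimal */
        unsigned long long ga = 0x7fffe000ULL;     /* guest address: hex, 0x-prefixed */

        /* mirrors the style of "LCTL: r1:%d, r3:%d, addr: 0x%llx" in the diff below */
        printf("LCTL: r1:%d, r3:%d, addr: 0x%llx\n", reg1, reg3, ga);
        return 0;
}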
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--   arch/s390/kvm/priv.c   22
-rw-r--r--   arch/s390/kvm/sigp.c   10
2 files changed, 17 insertions, 15 deletions
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 2658a7919c5e..afefa3bb2f13 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -53,6 +53,7 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 		kvm_s390_set_psw_cc(vcpu, 3);
 		return 0;
 	}
+	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
 	val = (val - hostclk) & ~0x3fUL;
 
 	mutex_lock(&vcpu->kvm->lock);
@@ -127,7 +128,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
+	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
 	trace_kvm_s390_handle_prefix(vcpu, 0, address);
 	return 0;
 }
@@ -153,7 +154,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
+	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
 	trace_kvm_s390_handle_stap(vcpu, ga);
 	return 0;
 }
@@ -165,6 +166,7 @@ static int __skey_check_enable(struct kvm_vcpu *vcpu)
 		return rc;
 
 	rc = s390_enable_skey();
+	VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest");
 	trace_kvm_s390_skey_related_inst(vcpu);
 	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
 	return rc;
@@ -368,7 +370,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 			    &fac, sizeof(fac));
 	if (rc)
 		return rc;
-	VCPU_EVENT(vcpu, 5, "store facility list value %x", fac);
+	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
 	trace_kvm_s390_handle_stfl(vcpu, fac);
 	return 0;
 }
@@ -466,7 +468,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
+	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
 	return 0;
 }
 
@@ -519,7 +521,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	ar_t ar;
 
 	vcpu->stat.instruction_stsi++;
-	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
+	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -756,7 +758,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 	struct gmap *gmap;
 	int i;
 
-	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
+	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
 	gmap = vcpu->arch.gmap;
 	vcpu->stat.instruction_essa++;
 	if (!vcpu->kvm->arch.use_cmma)
@@ -827,7 +829,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
 	if (ga & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
 
 	nr_regs = ((reg3 - reg1) & 0xf) + 1;
@@ -866,7 +868,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
 	if (ga & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
 
 	reg = reg1;
@@ -900,7 +902,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	if (ga & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
 
 	nr_regs = ((reg3 - reg1) & 0xf) + 1;
@@ -938,7 +940,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
 	if (ga & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
 
 	reg = reg1;
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 5318ea3ad1d3..da690b69f9fe 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -368,7 +368,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
 	return rc;
 }
 
-static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
+static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
+					   u16 cpu_addr)
 {
 	if (!vcpu->kvm->arch.user_sigp)
 		return 0;
@@ -411,9 +412,8 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
 	default:
 		vcpu->stat.instruction_sigp_unknown++;
 	}
-
-	VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space",
-		   order_code);
+	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
+		   order_code, cpu_addr);
 
 	return 1;
 }
@@ -432,7 +432,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
 	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
-	if (handle_sigp_order_in_user_space(vcpu, order_code))
+	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
 		return -EOPNOTSUPP;
 
 	if (r1 % 2)