aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaolo Bonzini <pbonzini@redhat.com>2016-08-19 11:51:20 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2016-09-07 13:34:29 -0400
commitbbe41b950813ec643a14d2a6005475f66625292e (patch)
treec4f9cd6587c9923d43cb876822565960ac2681ee
parent119a9c01a5922600a78106d9bbdd6aaafc851a27 (diff)
KVM: x86: ratelimit and decrease severity for guest-triggered printk
These are mostly related to nested VMX. They needn't have a loglevel as high as KERN_WARNING, and mustn't be allowed to pollute the host logs. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/kvm/vmx.c18
1 file changed, 9 insertions, 9 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c1a81486764..2029c0064289 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6726,7 +6726,7 @@ static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
6726{ 6726{
6727 /* TODO: not to reset guest simply here. */ 6727 /* TODO: not to reset guest simply here. */
6728 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 6728 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
6729 pr_warn("kvm: nested vmx abort, indicator %d\n", indicator); 6729 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
6730} 6730}
6731 6731
6732static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) 6732static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
@@ -9598,7 +9598,7 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
9598 maxphyaddr = cpuid_maxphyaddr(vcpu); 9598 maxphyaddr = cpuid_maxphyaddr(vcpu);
9599 if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr || 9599 if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
9600 (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) { 9600 (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
9601 pr_warn_ratelimited( 9601 pr_debug_ratelimited(
9602 "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)", 9602 "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
9603 addr_field, maxphyaddr, count, addr); 9603 addr_field, maxphyaddr, count, addr);
9604 return -EINVAL; 9604 return -EINVAL;
@@ -9671,13 +9671,13 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
9671 for (i = 0; i < count; i++) { 9671 for (i = 0; i < count; i++) {
9672 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), 9672 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
9673 &e, sizeof(e))) { 9673 &e, sizeof(e))) {
9674 pr_warn_ratelimited( 9674 pr_debug_ratelimited(
9675 "%s cannot read MSR entry (%u, 0x%08llx)\n", 9675 "%s cannot read MSR entry (%u, 0x%08llx)\n",
9676 __func__, i, gpa + i * sizeof(e)); 9676 __func__, i, gpa + i * sizeof(e));
9677 goto fail; 9677 goto fail;
9678 } 9678 }
9679 if (nested_vmx_load_msr_check(vcpu, &e)) { 9679 if (nested_vmx_load_msr_check(vcpu, &e)) {
9680 pr_warn_ratelimited( 9680 pr_debug_ratelimited(
9681 "%s check failed (%u, 0x%x, 0x%x)\n", 9681 "%s check failed (%u, 0x%x, 0x%x)\n",
9682 __func__, i, e.index, e.reserved); 9682 __func__, i, e.index, e.reserved);
9683 goto fail; 9683 goto fail;
@@ -9685,7 +9685,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
9685 msr.index = e.index; 9685 msr.index = e.index;
9686 msr.data = e.value; 9686 msr.data = e.value;
9687 if (kvm_set_msr(vcpu, &msr)) { 9687 if (kvm_set_msr(vcpu, &msr)) {
9688 pr_warn_ratelimited( 9688 pr_debug_ratelimited(
9689 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", 9689 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
9690 __func__, i, e.index, e.value); 9690 __func__, i, e.index, e.value);
9691 goto fail; 9691 goto fail;
@@ -9706,13 +9706,13 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
9706 if (kvm_vcpu_read_guest(vcpu, 9706 if (kvm_vcpu_read_guest(vcpu,
9707 gpa + i * sizeof(e), 9707 gpa + i * sizeof(e),
9708 &e, 2 * sizeof(u32))) { 9708 &e, 2 * sizeof(u32))) {
9709 pr_warn_ratelimited( 9709 pr_debug_ratelimited(
9710 "%s cannot read MSR entry (%u, 0x%08llx)\n", 9710 "%s cannot read MSR entry (%u, 0x%08llx)\n",
9711 __func__, i, gpa + i * sizeof(e)); 9711 __func__, i, gpa + i * sizeof(e));
9712 return -EINVAL; 9712 return -EINVAL;
9713 } 9713 }
9714 if (nested_vmx_store_msr_check(vcpu, &e)) { 9714 if (nested_vmx_store_msr_check(vcpu, &e)) {
9715 pr_warn_ratelimited( 9715 pr_debug_ratelimited(
9716 "%s check failed (%u, 0x%x, 0x%x)\n", 9716 "%s check failed (%u, 0x%x, 0x%x)\n",
9717 __func__, i, e.index, e.reserved); 9717 __func__, i, e.index, e.reserved);
9718 return -EINVAL; 9718 return -EINVAL;
@@ -9720,7 +9720,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
9720 msr_info.host_initiated = false; 9720 msr_info.host_initiated = false;
9721 msr_info.index = e.index; 9721 msr_info.index = e.index;
9722 if (kvm_get_msr(vcpu, &msr_info)) { 9722 if (kvm_get_msr(vcpu, &msr_info)) {
9723 pr_warn_ratelimited( 9723 pr_debug_ratelimited(
9724 "%s cannot read MSR (%u, 0x%x)\n", 9724 "%s cannot read MSR (%u, 0x%x)\n",
9725 __func__, i, e.index); 9725 __func__, i, e.index);
9726 return -EINVAL; 9726 return -EINVAL;
@@ -9729,7 +9729,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
9729 gpa + i * sizeof(e) + 9729 gpa + i * sizeof(e) +
9730 offsetof(struct vmx_msr_entry, value), 9730 offsetof(struct vmx_msr_entry, value),
9731 &msr_info.data, sizeof(msr_info.data))) { 9731 &msr_info.data, sizeof(msr_info.data))) {
9732 pr_warn_ratelimited( 9732 pr_debug_ratelimited(
9733 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", 9733 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
9734 __func__, i, e.index, msr_info.data); 9734 __func__, i, e.index, msr_info.data);
9735 return -EINVAL; 9735 return -EINVAL;