diff options
author | Jan Kiszka <jan.kiszka@siemens.com> | 2011-09-12 05:26:22 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2011-09-25 12:52:43 -0400 |
commit | bd80158aff71a80292f96d9baea1a65bc0ce87b3 (patch) | |
tree | 7e5dc6ed6cb0be43d2c6ce20c4771965ce803d51 /arch | |
parent | 7712de872c8ec00a657b867ab0296913f69addac (diff) |
KVM: Clean up and extend rate-limited output
The use of printk_ratelimit is discouraged; replace it with
pr*_ratelimited or __ratelimit. While at it, convert the remaining
guest-triggerable printks to rate-limited variants.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kvm/i8259.c | 15 | ||||
-rw-r--r-- | arch/x86/kvm/mmu_audit.c | 6 | ||||
-rw-r--r-- | arch/x86/kvm/vmx.c | 13 |
3 files changed, 17 insertions, 17 deletions
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 6b869ce0cc19..cac4746d7ffb 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c | |||
@@ -34,6 +34,9 @@ | |||
34 | #include <linux/kvm_host.h> | 34 | #include <linux/kvm_host.h> |
35 | #include "trace.h" | 35 | #include "trace.h" |
36 | 36 | ||
37 | #define pr_pic_unimpl(fmt, ...) \ | ||
38 | pr_err_ratelimited("kvm: pic: " fmt, ## __VA_ARGS__) | ||
39 | |||
37 | static void pic_irq_request(struct kvm *kvm, int level); | 40 | static void pic_irq_request(struct kvm *kvm, int level); |
38 | 41 | ||
39 | static void pic_lock(struct kvm_pic *s) | 42 | static void pic_lock(struct kvm_pic *s) |
@@ -306,10 +309,10 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) | |||
306 | } | 309 | } |
307 | s->init_state = 1; | 310 | s->init_state = 1; |
308 | if (val & 0x02) | 311 | if (val & 0x02) |
309 | printk(KERN_ERR "single mode not supported"); | 312 | pr_pic_unimpl("single mode not supported"); |
310 | if (val & 0x08) | 313 | if (val & 0x08) |
311 | printk(KERN_ERR | 314 | pr_pic_unimpl( |
312 | "level sensitive irq not supported"); | 315 | "level sensitive irq not supported"); |
313 | } else if (val & 0x08) { | 316 | } else if (val & 0x08) { |
314 | if (val & 0x04) | 317 | if (val & 0x04) |
315 | s->poll = 1; | 318 | s->poll = 1; |
@@ -467,8 +470,7 @@ static int picdev_write(struct kvm_pic *s, | |||
467 | return -EOPNOTSUPP; | 470 | return -EOPNOTSUPP; |
468 | 471 | ||
469 | if (len != 1) { | 472 | if (len != 1) { |
470 | if (printk_ratelimit()) | 473 | pr_pic_unimpl("non byte write\n"); |
471 | printk(KERN_ERR "PIC: non byte write\n"); | ||
472 | return 0; | 474 | return 0; |
473 | } | 475 | } |
474 | pic_lock(s); | 476 | pic_lock(s); |
@@ -496,8 +498,7 @@ static int picdev_read(struct kvm_pic *s, | |||
496 | return -EOPNOTSUPP; | 498 | return -EOPNOTSUPP; |
497 | 499 | ||
498 | if (len != 1) { | 500 | if (len != 1) { |
499 | if (printk_ratelimit()) | 501 | pr_pic_unimpl("non byte read\n"); |
500 | printk(KERN_ERR "PIC: non byte read\n"); | ||
501 | return 0; | 502 | return 0; |
502 | } | 503 | } |
503 | pic_lock(s); | 504 | pic_lock(s); |
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c index 2460a265be23..746ec259d024 100644 --- a/arch/x86/kvm/mmu_audit.c +++ b/arch/x86/kvm/mmu_audit.c | |||
@@ -121,16 +121,16 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level) | |||
121 | 121 | ||
122 | static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep) | 122 | static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep) |
123 | { | 123 | { |
124 | static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); | ||
124 | unsigned long *rmapp; | 125 | unsigned long *rmapp; |
125 | struct kvm_mmu_page *rev_sp; | 126 | struct kvm_mmu_page *rev_sp; |
126 | gfn_t gfn; | 127 | gfn_t gfn; |
127 | 128 | ||
128 | |||
129 | rev_sp = page_header(__pa(sptep)); | 129 | rev_sp = page_header(__pa(sptep)); |
130 | gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt); | 130 | gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt); |
131 | 131 | ||
132 | if (!gfn_to_memslot(kvm, gfn)) { | 132 | if (!gfn_to_memslot(kvm, gfn)) { |
133 | if (!printk_ratelimit()) | 133 | if (!__ratelimit(&ratelimit_state)) |
134 | return; | 134 | return; |
135 | audit_printk(kvm, "no memslot for gfn %llx\n", gfn); | 135 | audit_printk(kvm, "no memslot for gfn %llx\n", gfn); |
136 | audit_printk(kvm, "index %ld of sp (gfn=%llx)\n", | 136 | audit_printk(kvm, "index %ld of sp (gfn=%llx)\n", |
@@ -141,7 +141,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep) | |||
141 | 141 | ||
142 | rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level); | 142 | rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level); |
143 | if (!*rmapp) { | 143 | if (!*rmapp) { |
144 | if (!printk_ratelimit()) | 144 | if (!__ratelimit(&ratelimit_state)) |
145 | return; | 145 | return; |
146 | audit_printk(kvm, "no rmap for writable spte %llx\n", | 146 | audit_printk(kvm, "no rmap for writable spte %llx\n", |
147 | *sptep); | 147 | *sptep); |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 21217b65b129..a0d6bd9ad442 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -2762,8 +2762,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu) | |||
2762 | 2762 | ||
2763 | guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); | 2763 | guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); |
2764 | if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { | 2764 | if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { |
2765 | printk(KERN_DEBUG "%s: tss fixup for long mode. \n", | 2765 | pr_debug_ratelimited("%s: tss fixup for long mode. \n", |
2766 | __func__); | 2766 | __func__); |
2767 | vmcs_write32(GUEST_TR_AR_BYTES, | 2767 | vmcs_write32(GUEST_TR_AR_BYTES, |
2768 | (guest_tr_ar & ~AR_TYPE_MASK) | 2768 | (guest_tr_ar & ~AR_TYPE_MASK) |
2769 | | AR_TYPE_BUSY_64_TSS); | 2769 | | AR_TYPE_BUSY_64_TSS); |
@@ -5634,8 +5634,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) | |||
5634 | return 0; | 5634 | return 0; |
5635 | 5635 | ||
5636 | if (unlikely(vmx->fail)) { | 5636 | if (unlikely(vmx->fail)) { |
5637 | printk(KERN_INFO "%s failed vm entry %x\n", | 5637 | pr_info_ratelimited("%s failed vm entry %x\n", __func__, |
5638 | __func__, vmcs_read32(VM_INSTRUCTION_ERROR)); | 5638 | vmcs_read32(VM_INSTRUCTION_ERROR)); |
5639 | return 1; | 5639 | return 1; |
5640 | } | 5640 | } |
5641 | 5641 | ||
@@ -6612,9 +6612,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
6612 | if (vmcs12->vm_entry_msr_load_count > 0 || | 6612 | if (vmcs12->vm_entry_msr_load_count > 0 || |
6613 | vmcs12->vm_exit_msr_load_count > 0 || | 6613 | vmcs12->vm_exit_msr_load_count > 0 || |
6614 | vmcs12->vm_exit_msr_store_count > 0) { | 6614 | vmcs12->vm_exit_msr_store_count > 0) { |
6615 | if (printk_ratelimit()) | 6615 | pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n", |
6616 | printk(KERN_WARNING | 6616 | __func__); |
6617 | "%s: VMCS MSR_{LOAD,STORE} unsupported\n", __func__); | ||
6618 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); | 6617 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); |
6619 | return 1; | 6618 | return 1; |
6620 | } | 6619 | } |