author     Paolo Bonzini <pbonzini@redhat.com>   2013-10-02 10:56:16 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2015-01-08 16:48:04 -0500
commit     fa4a2c080e37d362ae603f4ea157fe779bd85cb5
tree       1c10a3a3c21d937ac817b3ddb28d5122dfbb8a5b /arch
parent     4c1a50de9223e1bb7ce5decdd69bdf34a864f03e
KVM: x86: mmu: replace assertions with MMU_WARN_ON, a conditional WARN_ON
This makes the direction of the conditions consistent with code that
is already using WARN_ON.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
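
A note for readers of the diff below: ASSERT() fired when its argument was false, while WARN_ON()-style macros fire when the argument is true, so every converted call site negates its predicate. A minimal userspace sketch of that inversion follows; the ASSERT() body mirrors the removed code (with printk swapped for printf), and the MMU_WARN_ON() body is a simplified stand-in for the kernel's WARN_ON(), not the real implementation.

/* Userspace sketch of the condition inversion; macro bodies are
 * simplified stand-ins, not the kernel implementations. */
#include <stdio.h>

#define ASSERT(x) \
	if (!(x)) { \
		printf("assertion failed %s:%d: %s\n", \
		       __FILE__, __LINE__, #x); \
	}

#define MMU_WARN_ON(x) do { \
	if (x) \
		printf("WARN %s:%d: %s\n", __FILE__, __LINE__, #x); \
} while (0)

int main(void)
{
	int page_valid = 1;	/* pretend a root page is (wrongly) valid */

	ASSERT(!page_valid);		/* old style: assert the good state */
	MMU_WARN_ON(page_valid);	/* new style: warn on the bad state */
	return 0;
}

Both macros flag the same bad state; only the polarity of the argument differs, which is what "direction of the conditions" refers to above.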
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/mmu.c | 42 ++++++++++++++----------------------------
1 file changed, 14 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b31eff8fa43d..a0985ebb5512 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -63,30 +63,16 @@ enum {
 #undef MMU_DEBUG
 
 #ifdef MMU_DEBUG
+static bool dbg = 0;
+module_param(dbg, bool, 0644);
 
 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
-
+#define MMU_WARN_ON(x) WARN_ON(x)
 #else
-
 #define pgprintk(x...) do { } while (0)
 #define rmap_printk(x...) do { } while (0)
-
-#endif
-
-#ifdef MMU_DEBUG
-static bool dbg = 0;
-module_param(dbg, bool, 0644);
-#endif
-
-#ifndef MMU_DEBUG
-#define ASSERT(x) do { } while (0)
-#else
-#define ASSERT(x) \
-	if (!(x)) { \
-		printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
-		       __FILE__, __LINE__, #x); \
-	}
+#define MMU_WARN_ON(x) do { } while (0)
 #endif
 
 #define PTE_PREFETCH_NUM 8
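
One behavioral note on the hunk above (an observation, not something stated in the commit message): with MMU_DEBUG undefined, MMU_WARN_ON(x) expands to an empty do/while, so its argument is never evaluated, unlike a bare WARN_ON(), which evaluates its condition even in production builds. Predicates passed to MMU_WARN_ON() must therefore be side-effect free. A standalone sketch, assuming a userspace build; root_is_valid() and checks_run are hypothetical names invented for this demo:

/* With MMU_DEBUG undefined, MMU_WARN_ON() discards its argument at
 * compile time rather than merely silencing the warning. */
#include <stdio.h>

/* #define MMU_DEBUG */		/* left undefined, as mmu.c does by default */

#ifdef MMU_DEBUG
#define MMU_WARN_ON(x) do { \
	if (x) \
		fprintf(stderr, "WARN: %s\n", #x); \
} while (0)
#else
#define MMU_WARN_ON(x) do { } while (0)	/* (x) never evaluated */
#endif

static int checks_run;

static int root_is_valid(void)
{
	checks_run++;		/* side effect, to observe evaluation */
	return 1;
}

int main(void)
{
	MMU_WARN_ON(!root_is_valid());
	/* Prints 0: the predicate was compiled out entirely. */
	printf("checks_run = %d\n", checks_run);
	return 0;
}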
@@ -1536,7 +1522,7 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
 
 static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
 {
-	ASSERT(is_empty_shadow_page(sp->spt));
+	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
 	hlist_del(&sp->hash_link);
 	list_del(&sp->link);
 	free_page((unsigned long)sp->spt);
@@ -3041,7 +3027,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 		for (i = 0; i < 4; ++i) {
 			hpa_t root = vcpu->arch.mmu.pae_root[i];
 
-			ASSERT(!VALID_PAGE(root));
+			MMU_WARN_ON(VALID_PAGE(root));
 			spin_lock(&vcpu->kvm->mmu_lock);
 			make_mmu_pages_available(vcpu);
 			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
@@ -3079,7 +3065,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 
-		ASSERT(!VALID_PAGE(root));
+		MMU_WARN_ON(VALID_PAGE(root));
 
 		spin_lock(&vcpu->kvm->mmu_lock);
 		make_mmu_pages_available(vcpu);
@@ -3104,7 +3090,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
-		ASSERT(!VALID_PAGE(root));
+		MMU_WARN_ON(VALID_PAGE(root));
 		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
 			pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
 			if (!is_present_gpte(pdptr)) {
@@ -3329,7 +3315,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	if (r)
 		return r;
 
-	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	gfn = gva >> PAGE_SHIFT;
 
@@ -3395,7 +3381,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	int write = error_code & PFERR_WRITE_MASK;
 	bool map_writable;
 
-	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
 		r = handle_mmio_page_fault(vcpu, gpa, error_code, true);
@@ -3716,7 +3702,7 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
 	update_permission_bitmask(vcpu, context, false);
 	update_last_pte_bitmap(vcpu, context);
 
-	ASSERT(is_pae(vcpu));
+	MMU_WARN_ON(!is_pae(vcpu));
 	context->page_fault = paging64_page_fault;
 	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->sync_page = paging64_sync_page;
@@ -3806,7 +3792,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
 	struct kvm_mmu *context = &vcpu->arch.mmu;
 
-	ASSERT(!VALID_PAGE(context->root_hpa));
+	MMU_WARN_ON(VALID_PAGE(context->root_hpa));
 
 	if (!is_paging(vcpu))
 		nonpaging_init_context(vcpu, context);
@@ -3829,7 +3815,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
 {
 	struct kvm_mmu *context = &vcpu->arch.mmu;
 
-	ASSERT(!VALID_PAGE(context->root_hpa));
+	MMU_WARN_ON(VALID_PAGE(context->root_hpa));
 
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 
@@ -4293,7 +4279,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 
 void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
-	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	init_kvm_mmu(vcpu);
 }