author     Paolo Bonzini <pbonzini@redhat.com>   2013-10-02 10:56:15 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2015-01-08 16:48:03 -0500
commit     4c1a50de9223e1bb7ce5decdd69bdf34a864f03e (patch)
tree       729ce13cbde94f911ac36079a6319f9f8f077552 /arch
parent     ad896af0b50ed656e38a31fca1fdb7bb7533db45 (diff)
KVM: x86: mmu: remove ASSERT(vcpu)
Because ASSERT is just a printk, a NULL vcpu would not be caught: execution continues and the very next dereference oopses right away anyway. The ASSERT(vcpu) checks thus hardly add anything.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
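For context, the ASSERT() macro in mmu.c is roughly a warning printk when MMU_DEBUG is defined and compiles to nothing otherwise; it never halts execution. The standalone userspace sketch below (not kernel code; the ASSERT macro body, struct vcpu, and reset_context here are simplified stand-ins) illustrates the reasoning in the message: the print-only assertion fires, execution continues, and the NULL pointer is dereferenced on the next line, so the crash happens regardless.

#include <stdio.h>

/* Print-only assertion, analogous in spirit to the MMU_DEBUG ASSERT(). */
#define ASSERT(x)                                                        \
	do {                                                             \
		if (!(x))                                                \
			fprintf(stderr, "assertion failed %s:%d: %s\n",  \
				__FILE__, __LINE__, #x);                 \
	} while (0)

/* Hypothetical stand-in for the kernel's vcpu/mmu state. */
struct vcpu {
	unsigned long root_hpa;
};

static void reset_context(struct vcpu *vcpu)
{
	ASSERT(vcpu);                 /* only prints, does not stop the caller */
	ASSERT(vcpu->root_hpa != 0);  /* a NULL vcpu is dereferenced here anyway */
}

int main(void)
{
	struct vcpu v = { .root_hpa = 1 };

	reset_context(&v);    /* fine */
	reset_context(NULL);  /* prints the assertion, then segfaults on the dereference */
	return 0;
}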
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/mmu.c | 13 -------------
1 file changed, 0 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d6d3d6f0ff1b..b31eff8fa43d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3329,7 +3329,6 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	if (r)
 		return r;
 
-	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	gfn = gva >> PAGE_SHIFT;
@@ -3396,7 +3395,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	int write = error_code & PFERR_WRITE_MASK;
 	bool map_writable;
 
-	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
@@ -3808,7 +3806,6 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
 	struct kvm_mmu *context = &vcpu->arch.mmu;
 
-	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(context->root_hpa));
 
 	if (!is_paging(vcpu))
@@ -3832,7 +3829,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
 {
 	struct kvm_mmu *context = &vcpu->arch.mmu;
 
-	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(context->root_hpa));
 
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
@@ -3914,8 +3910,6 @@ static void init_kvm_mmu(struct kvm_vcpu *vcpu)
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 {
-	ASSERT(vcpu);
-
 	kvm_mmu_unload(vcpu);
 	init_kvm_mmu(vcpu);
 }
@@ -4271,8 +4265,6 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 	struct page *page;
 	int i;
 
-	ASSERT(vcpu);
-
 	/*
 	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
 	 * Therefore we need to allocate shadow page tables in the first
@@ -4291,8 +4283,6 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 
 int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
-	ASSERT(vcpu);
-
 	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 	vcpu->arch.mmu.translate_gpa = translate_gpa;
@@ -4303,7 +4293,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 
 void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
-	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	init_kvm_mmu(vcpu);
@@ -4611,8 +4600,6 @@ EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	ASSERT(vcpu);
-
 	kvm_mmu_unload(vcpu);
 	free_mmu_pages(vcpu);
 	mmu_free_memory_caches(vcpu);