From 941ac9a9d07bedb4062fd0c4d32eb2ef80a42359 Mon Sep 17 00:00:00 2001 From: Amulya Date: Tue, 28 Aug 2018 12:34:55 +0530 Subject: nvgpu: common: MISRA 10.1 boolean fixes Fix violations where a variable of type non-boolean is used as a boolean in gpu/nvgpu/common. JIRA NVGPU-646 Change-Id: I9773d863b715f83ae1772b75d5373f77244bc8ca Signed-off-by: Amulya Reviewed-on: https://git-master.nvidia.com/r/1807132 GVS: Gerrit_Virtual_Submit Tested-by: Amulya Murthyreddy Reviewed-by: Vijayakumar Subbu Reviewed-by: mobile promotions Tested-by: mobile promotions --- drivers/gpu/nvgpu/common/mm/bitmap_allocator.c | 38 ++++----- drivers/gpu/nvgpu/common/mm/buddy_allocator.c | 89 ++++++++++++---------- drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h | 6 +- drivers/gpu/nvgpu/common/mm/comptags.c | 2 +- drivers/gpu/nvgpu/common/mm/gmmu.c | 37 ++++----- drivers/gpu/nvgpu/common/mm/lockless_allocator.c | 12 +-- drivers/gpu/nvgpu/common/mm/mm.c | 18 ++--- drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c | 10 +-- drivers/gpu/nvgpu/common/mm/nvgpu_mem.c | 34 ++++----- drivers/gpu/nvgpu/common/mm/page_allocator.c | 55 ++++++------- drivers/gpu/nvgpu/common/mm/pd_cache.c | 20 ++--- drivers/gpu/nvgpu/common/mm/vm.c | 61 ++++++++------- drivers/gpu/nvgpu/common/mm/vm_area.c | 14 ++-- drivers/gpu/nvgpu/common/pmu/pmu.c | 8 +- drivers/gpu/nvgpu/common/pmu/pmu_fw.c | 4 +- drivers/gpu/nvgpu/common/pmu/pmu_ipc.c | 44 +++++------ drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c | 10 +-- drivers/gpu/nvgpu/common/pmu/pmu_pg.c | 23 +++--- drivers/gpu/nvgpu/include/nvgpu/allocator.h | 16 ++-- drivers/gpu/nvgpu/include/nvgpu/vm.h | 4 +- 20 files changed, 259 insertions(+), 246 deletions(-) diff --git a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c index 6b9db23c..e5b9b378 100644 --- a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c @@ -42,10 +42,10 @@ static u64 nvgpu_bitmap_alloc_base(struct nvgpu_allocator *a) return ba->base; } -static int nvgpu_bitmap_alloc_inited(struct nvgpu_allocator *a) +static bool nvgpu_bitmap_alloc_inited(struct nvgpu_allocator *a) { struct nvgpu_bitmap_allocator *ba = a->priv; - int inited = ba->inited; + bool inited = ba->inited; nvgpu_smp_rmb(); return inited; @@ -160,7 +160,7 @@ static struct nvgpu_bitmap_alloc *find_alloc_metadata( struct nvgpu_rbtree_node *node = NULL; nvgpu_rbtree_search(addr, &node, a->allocs); - if (!node) { + if (node == NULL) { return NULL; } @@ -180,7 +180,7 @@ static int nvgpu_bitmap_store_alloc(struct nvgpu_bitmap_allocator *a, struct nvgpu_bitmap_alloc *alloc = nvgpu_kmem_cache_alloc(a->meta_data_cache); - if (!alloc) { + if (alloc == NULL) { return -ENOMEM; } @@ -243,8 +243,8 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *na, u64 len) * either of these possibilities assume that the caller will keep what * data it needs around to successfully free this allocation. 
*/ - if (!(a->flags & GPU_ALLOC_NO_ALLOC_PAGE) && - nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size)) { + if ((a->flags & GPU_ALLOC_NO_ALLOC_PAGE) == 0ULL && + nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size) != 0) { goto fail_reset_bitmap; } @@ -280,7 +280,7 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *na, u64 addr) } alloc = find_alloc_metadata(a, addr); - if (!alloc) { + if (alloc == NULL) { goto done; } @@ -299,7 +299,7 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *na, u64 addr) a->bytes_freed += alloc->length; done: - if (a->meta_data_cache && alloc) { + if (a->meta_data_cache != NULL && alloc != NULL) { nvgpu_kmem_cache_free(a->meta_data_cache, alloc); } alloc_unlock(na); @@ -377,27 +377,29 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, { int err; struct nvgpu_bitmap_allocator *a; + bool is_blk_size_pwr_2 = (blk_size & (blk_size - 1ULL)) == 0ULL; + bool is_base_aligned = (base & (blk_size - 1ULL)) == 0ULL; + bool is_length_aligned = (length & (blk_size - 1ULL)) == 0ULL; - if (WARN_ON(blk_size & (blk_size - 1U))) { + if (WARN_ON(!is_blk_size_pwr_2)) { return -EINVAL; } /* - * blk_size must be a power-of-2; base length also need to be aligned - * to blk_size. + * blk_size must be a power-of-2; base and length also need to be + * aligned to blk_size. */ - if (blk_size & (blk_size - 1U) || - base & (blk_size - 1U) || length & (blk_size - 1U)) { + if (!is_blk_size_pwr_2 || !is_base_aligned || !is_length_aligned) { return -EINVAL; } - if (base == 0U) { + if (base == 0ULL) { base = blk_size; length -= blk_size; } a = nvgpu_kzalloc(g, sizeof(struct nvgpu_bitmap_allocator)); - if (!a) { + if (a == NULL) { return -ENOMEM; } @@ -406,10 +408,10 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, goto fail; } - if (!(flags & GPU_ALLOC_NO_ALLOC_PAGE)) { + if ((flags & GPU_ALLOC_NO_ALLOC_PAGE) == 0ULL) { a->meta_data_cache = nvgpu_kmem_cache_create(g, sizeof(struct nvgpu_bitmap_alloc)); - if (!a->meta_data_cache) { + if (a->meta_data_cache == NULL) { err = -ENOMEM; goto fail; } @@ -426,7 +428,7 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, a->bitmap = nvgpu_kcalloc(g, BITS_TO_LONGS(a->num_bits), sizeof(*a->bitmap)); - if (!a->bitmap) { + if (a->bitmap == NULL) { err = -ENOMEM; goto fail; } diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c index 516e5035..c0d1335e 100644 --- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c @@ -129,7 +129,7 @@ static struct nvgpu_buddy *balloc_new_buddy(struct nvgpu_buddy_allocator *a, struct nvgpu_buddy *new_buddy; new_buddy = nvgpu_kmem_cache_alloc(a->buddy_cache); - if (!new_buddy) { + if (new_buddy == NULL) { return NULL; } @@ -160,7 +160,7 @@ static void balloc_buddy_list_do_add(struct nvgpu_buddy_allocator *a, * This lets the code that checks if there are available blocks check * without cycling through the entire list. 
*/ - if (a->flags & GPU_ALLOC_GVA_SPACE && + if ((a->flags & GPU_ALLOC_GVA_SPACE) != 0ULL && b->pte_size == BALLOC_PTE_SIZE_BIG) { nvgpu_list_add_tail(&b->buddy_entry, list); } else { @@ -247,7 +247,7 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a) order = balloc_max_order_in(a, bstart, bend); buddy = balloc_new_buddy(a, NULL, bstart, order); - if (!buddy) { + if (buddy == NULL) { goto cleanup; } @@ -374,7 +374,7 @@ static void balloc_coalesce(struct nvgpu_buddy_allocator *a, * If both our buddy and I are both not allocated and not split then * we can coalesce ourselves. */ - if (!b->buddy) { + if (b->buddy == NULL) { return; } if (buddy_is_alloced(b->buddy) || buddy_is_split(b->buddy)) { @@ -412,14 +412,14 @@ static int balloc_split_buddy(struct nvgpu_buddy_allocator *a, u64 half; left = balloc_new_buddy(a, b, b->start, b->order - 1U); - if (!left) { + if (left == NULL) { return -ENOMEM; } half = (b->end - b->start) / 2U; right = balloc_new_buddy(a, b, b->start + half, b->order - 1U); - if (!right) { + if (right == NULL) { nvgpu_kmem_cache_free(a->buddy_cache, left); return -ENOMEM; } @@ -448,7 +448,7 @@ static int balloc_split_buddy(struct nvgpu_buddy_allocator *a, * we can leave the buddies PTE field alone since the PDE block has yet * to be assigned a PTE size. */ - if (a->flags & GPU_ALLOC_GVA_SPACE && + if ((a->flags & GPU_ALLOC_GVA_SPACE) != 0ULL && left->order < a->pte_blk_order) { left->pte_size = pte_size; right->pte_size = pte_size; @@ -492,7 +492,7 @@ static struct nvgpu_buddy *balloc_free_buddy(struct nvgpu_buddy_allocator *a, struct nvgpu_buddy *bud; nvgpu_rbtree_search(addr, &node, a->alloced_buddies); - if (!node) { + if (node == NULL) { return NULL; } @@ -518,7 +518,7 @@ static struct nvgpu_buddy *balloc_find_buddy(struct nvgpu_buddy_allocator *a, return NULL; } - if (a->flags & GPU_ALLOC_GVA_SPACE && + if ((a->flags & GPU_ALLOC_GVA_SPACE) != 0ULL && pte_size == BALLOC_PTE_SIZE_BIG) { bud = nvgpu_list_last_entry(balloc_get_order_list(a, order), nvgpu_buddy, buddy_entry); @@ -551,14 +551,15 @@ static u64 balloc_do_alloc(struct nvgpu_buddy_allocator *a, u64 split_order; struct nvgpu_buddy *bud = NULL; - split_order = order; - while (split_order <= a->max_order && - !(bud = balloc_find_buddy(a, split_order, pte_size))) { - split_order++; + for (split_order = order; split_order <= a->max_order; split_order++) { + bud = balloc_find_buddy(a, split_order, pte_size); + if (bud != NULL) { + break; + } } /* Out of memory! */ - if (!bud) { + if (bud == NULL) { return 0; } @@ -582,15 +583,15 @@ static u64 balloc_do_alloc(struct nvgpu_buddy_allocator *a, * TODO: Right now this uses the unoptimal approach of going through all * outstanding allocations and checking their base/ends. This could be better. */ -static int balloc_is_range_free(struct nvgpu_buddy_allocator *a, +static bool balloc_is_range_free(struct nvgpu_buddy_allocator *a, u64 base, u64 end) { struct nvgpu_rbtree_node *node = NULL; struct nvgpu_buddy *bud; nvgpu_rbtree_enum_start(0, &node, a->alloced_buddies); - if (!node) { - return 1; /* No allocs yet. */ + if (node == NULL) { + return true; /* No allocs yet. 
*/ } bud = nvgpu_buddy_from_rbtree_node(node); @@ -598,17 +599,17 @@ static int balloc_is_range_free(struct nvgpu_buddy_allocator *a, while (bud->start < end) { if ((bud->start > base && bud->start < end) || (bud->end > base && bud->end < end)) { - return 0; + return false; } nvgpu_rbtree_enum_next(&node, node); - if (!node) { + if (node == NULL) { break; } bud = nvgpu_buddy_from_rbtree_node(node); } - return 1; + return true; } static void balloc_alloc_fixed(struct nvgpu_buddy_allocator *a, @@ -633,7 +634,7 @@ static struct nvgpu_fixed_alloc *balloc_free_fixed( struct nvgpu_rbtree_node *node = NULL; nvgpu_rbtree_search(addr, &node, a->fixed_allocs); - if (!node) { + if (node == NULL) { return NULL; } @@ -787,7 +788,7 @@ static u64 balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a, bud = balloc_make_fixed_buddy(a, balloc_base_unshift(a, inc_base), align_order, pte_size); - if (!bud) { + if (bud == NULL) { alloc_dbg(balloc_owner(a), "Fixed buddy failed: {0x%llx, %llu}!", balloc_base_unshift(a, inc_base), @@ -891,7 +892,7 @@ static u64 nvgpu_buddy_balloc_pte(struct nvgpu_allocator *na, u64 len, alloc_dbg(balloc_owner(a), "Alloc failed: no mem!"); } - a->alloc_made = 1; + a->alloc_made = true; alloc_unlock(na); @@ -930,7 +931,7 @@ static u64 nvgpu_balloc_fixed_buddy_locked(struct nvgpu_allocator *na, } falloc = nvgpu_kmalloc(nvgpu_alloc_to_gpu(na), sizeof(*falloc)); - if (!falloc) { + if (falloc == NULL) { goto fail; } @@ -946,7 +947,7 @@ static u64 nvgpu_balloc_fixed_buddy_locked(struct nvgpu_allocator *na, } ret = balloc_do_alloc_fixed(a, falloc, base, len, pte_size); - if (!ret) { + if (ret == 0ULL) { alloc_dbg(balloc_owner(a), "Alloc-fixed failed ?? 0x%llx -> 0x%llx", base, base + len); @@ -988,7 +989,7 @@ static u64 nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na, alloc_lock(na); alloc = nvgpu_balloc_fixed_buddy_locked(na, base, len, page_size); - a->alloc_made = 1; + a->alloc_made = true; alloc_unlock(na); return alloc; @@ -1003,7 +1004,7 @@ static void nvgpu_buddy_bfree(struct nvgpu_allocator *na, u64 addr) struct nvgpu_fixed_alloc *falloc; struct nvgpu_buddy_allocator *a = na->priv; - if (!addr) { + if (addr == 0ULL) { return; } @@ -1020,7 +1021,7 @@ static void nvgpu_buddy_bfree(struct nvgpu_allocator *na, u64 addr) } bud = balloc_free_buddy(a, addr); - if (!bud) { + if (bud == NULL) { goto done; } @@ -1090,7 +1091,7 @@ static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *na, /* Should not be possible to fail... */ addr = nvgpu_balloc_fixed_buddy_locked(na, co->base, co->length, BALLOC_PTE_SIZE_ANY); - if (!addr) { + if (addr == 0ULL) { err = -ENOMEM; nvgpu_warn(na->g, "%s: Failed to reserve a valid carveout!", @@ -1133,10 +1134,10 @@ static u64 nvgpu_buddy_alloc_base(struct nvgpu_allocator *a) return ba->start; } -static int nvgpu_buddy_alloc_inited(struct nvgpu_allocator *a) +static bool nvgpu_buddy_alloc_inited(struct nvgpu_allocator *a) { struct nvgpu_buddy_allocator *ba = a->priv; - int inited = ba->initialized; + bool inited = ba->initialized; nvgpu_smp_rmb(); return inited; @@ -1292,12 +1293,15 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, int err; u64 pde_size; struct nvgpu_buddy_allocator *a; + bool is_gva_space = (flags & GPU_ALLOC_GVA_SPACE) != 0ULL; + bool is_blk_size_pwr_2 = (blk_size & (blk_size - 1ULL)) == 0ULL; + u64 base_big_page, size_big_page; /* blk_size must be greater than 0 and a power of 2. 
*/ if (blk_size == 0U) { return -EINVAL; } - if (blk_size & (blk_size - 1U)) { + if (!is_blk_size_pwr_2) { return -EINVAL; } @@ -1306,12 +1310,12 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, } /* If this is to manage a GVA space we need a VM. */ - if (flags & GPU_ALLOC_GVA_SPACE && !vm) { + if (is_gva_space && vm == NULL) { return -EINVAL; } a = nvgpu_kzalloc(g, sizeof(struct nvgpu_buddy_allocator)); - if (!a) { + if (a == NULL) { return -ENOMEM; } @@ -1336,8 +1340,8 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, } a->vm = vm; - if (flags & GPU_ALLOC_GVA_SPACE) { - pde_size = 1ULL << nvgpu_vm_pde_coverage_bit_count(vm); + if (is_gva_space) { + pde_size = BIT64(nvgpu_vm_pde_coverage_bit_count(vm)); a->pte_blk_order = balloc_get_order(a, pde_size); } @@ -1346,9 +1350,10 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, * must be PDE aligned. If big_pages are not enabled then this * requirement is not necessary. */ - if (flags & GPU_ALLOC_GVA_SPACE && vm->big_pages && - (base & ((vm->big_page_size << 10) - 1U) || - size & ((vm->big_page_size << 10) - 1U))) { + base_big_page = base & ((vm->big_page_size << 10U) - 1U); + size_big_page = size & ((vm->big_page_size << 10U) - 1U); + if (is_gva_space && vm->big_pages && + (base_big_page != 0ULL || size_big_page != 0ULL)) { return -EINVAL; } @@ -1359,7 +1364,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, balloc_compute_max_order(a); a->buddy_cache = nvgpu_kmem_cache_create(g, sizeof(struct nvgpu_buddy)); - if (!a->buddy_cache) { + if (a->buddy_cache == NULL) { err = -ENOMEM; goto fail; } @@ -1373,7 +1378,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, } nvgpu_smp_wmb(); - a->initialized = 1; + a->initialized = true; #ifdef CONFIG_DEBUG_FS nvgpu_init_alloc_debug(g, na); @@ -1382,7 +1387,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, alloc_dbg(na, " base 0x%llx", a->base); alloc_dbg(na, " size 0x%llx", a->length); alloc_dbg(na, " blk_size 0x%llx", a->blk_size); - if (flags & GPU_ALLOC_GVA_SPACE) { + if (is_gva_space) { alloc_dbg(balloc_owner(a), " pde_size 0x%llx", balloc_order_to_len(a, a->pte_blk_order)); diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h index a90530b6..7a22f030 100644 --- a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h +++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -175,8 +175,8 @@ struct nvgpu_buddy_allocator { */ u64 pte_blk_order; - int initialized; - int alloc_made; /* True after the first alloc. */ + bool initialized; + bool alloc_made; /* True after the first alloc. 
*/ u64 flags; diff --git a/drivers/gpu/nvgpu/common/mm/comptags.c b/drivers/gpu/nvgpu/common/mm/comptags.c index 3bde3a53..f6216648 100644 --- a/drivers/gpu/nvgpu/common/mm/comptags.c +++ b/drivers/gpu/nvgpu/common/mm/comptags.c @@ -88,7 +88,7 @@ int gk20a_comptag_allocator_init(struct gk20a *g, size--; allocator->bitmap = nvgpu_vzalloc(g, BITS_TO_LONGS(size) * sizeof(long)); - if (!allocator->bitmap) + if (allocator->bitmap == NULL) return -ENOMEM; allocator->size = size; diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c index e21ffd8d..02e32b20 100644 --- a/drivers/gpu/nvgpu/common/mm/gmmu.c +++ b/drivers/gpu/nvgpu/common/mm/gmmu.c @@ -79,7 +79,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm, struct nvgpu_sgt *sgt = nvgpu_sgt_create_from_mem(g, mem); - if (!sgt) { + if (sgt == NULL) { return 0; } @@ -122,7 +122,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm, nvgpu_sgt_free(g, sgt); - if (!vaddr) { + if (vaddr == 0ULL) { nvgpu_err(g, "failed to map buffer!"); return 0; } @@ -201,7 +201,7 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm) pdb_size = ALIGN(pd_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE); err = nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size); - if (WARN_ON(err)) { + if (WARN_ON(err != 0)) { return err; } @@ -324,7 +324,7 @@ static int pd_allocate_children(struct vm_gk20a *vm, pd->num_entries = pd_entries(l, attrs); pd->entries = nvgpu_vzalloc(g, sizeof(struct nvgpu_gmmu_pd) * pd->num_entries); - if (!pd->entries) { + if (pd->entries == NULL) { return -ENOMEM; } @@ -433,7 +433,7 @@ static int __set_pd_level(struct vm_gk20a *vm, * to be the table of PDEs. When the next level is PTEs the * target addr is the real physical address we are aiming for. */ - target_addr = next_pd ? + target_addr = (next_pd != NULL) ? nvgpu_pde_phys_addr(g, next_pd) : phys_addr; @@ -486,7 +486,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm, struct nvgpu_sgl *sgl; int err = 0; - if (!sgt) { + if (sgt == NULL) { /* * This is considered an unmap. Just pass in 0 as the physical * address for the entire GPU range. @@ -543,7 +543,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm, /* * Cut out sgl ents for space_to_skip. */ - if (space_to_skip && + if (space_to_skip != 0ULL && space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) { space_to_skip -= nvgpu_sgt_get_length(sgt, sgl); continue; @@ -630,10 +630,10 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm, "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | " "kind=%#02x APT=%-6s %c%c%c%c%c", vm->name, - sgt ? "MAP" : "UNMAP", + (sgt != NULL) ? "MAP" : "UNMAP", virt_addr, length, - sgt ? nvgpu_sgt_get_phys(g, sgt, sgt->sgl) : 0, + (sgt != NULL) ? nvgpu_sgt_get_phys(g, sgt, sgt->sgl) : 0, space_to_skip, page_size >> 10, nvgpu_gmmu_perm_str(attrs->rw_flag), @@ -654,7 +654,8 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm, nvgpu_mb(); - __gmmu_dbg(g, attrs, "%-5s Done!", sgt ? "MAP" : "UNMAP"); + __gmmu_dbg(g, attrs, "%-5s Done!", + (sgt != NULL) ? "MAP" : "UNMAP"); return err; } @@ -700,7 +701,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm, .sparse = sparse, .priv = priv, .coherent = flags & NVGPU_VM_MAP_IO_COHERENT, - .valid = !(flags & NVGPU_VM_MAP_UNMAPPED_PTE), + .valid = (flags & NVGPU_VM_MAP_UNMAPPED_PTE) == 0U, .aperture = aperture }; @@ -727,9 +728,9 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm, * Only allocate a new GPU VA range if we haven't already been passed a * GPU VA range. This facilitates fixed mappings. 
*/ - if (!vaddr) { + if (vaddr == 0ULL) { vaddr = __nvgpu_vm_alloc_va(vm, size, pgsz_idx); - if (!vaddr) { + if (vaddr == 0ULL) { nvgpu_err(g, "failed to allocate va space"); err = -ENOMEM; goto fail_alloc; @@ -744,7 +745,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm, goto fail_validate; } - if (!batch) { + if (batch == NULL) { g->ops.fb.tlb_invalidate(g, vm->pdb.mem); } else { batch->need_tlb_invalidate = true; @@ -800,7 +801,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm, nvgpu_err(g, "failed to update gmmu ptes on unmap"); } - if (!batch) { + if (batch == NULL) { gk20a_mm_l2_flush(g, true); g->ops.fb.tlb_invalidate(g, vm->pdb.mem); } else { @@ -823,7 +824,7 @@ u32 __nvgpu_pte_words(struct gk20a *g) */ do { next_l = l + 1; - if (!next_l->update_entry) { + if (next_l->update_entry == NULL) { break; } @@ -859,7 +860,7 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd_next = pd->entries + pd_idx; /* Invalid entry! */ - if (!pd_next->mem) { + if (pd_next->mem == NULL) { return -EINVAL; } @@ -875,7 +876,7 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm, pd_offs_out); } - if (!pd->mem) { + if (pd->mem == NULL) { return -EINVAL; } diff --git a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c index 79bf4cd6..59fae76d 100644 --- a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c @@ -41,10 +41,10 @@ static u64 nvgpu_lockless_alloc_base(struct nvgpu_allocator *a) return pa->base; } -static int nvgpu_lockless_alloc_inited(struct nvgpu_allocator *a) +static bool nvgpu_lockless_alloc_inited(struct nvgpu_allocator *a) { struct nvgpu_lockless_allocator *pa = a->priv; - int inited = pa->inited; + bool inited = pa->inited; nvgpu_smp_rmb(); return inited; @@ -169,7 +169,7 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, u64 count; struct nvgpu_lockless_allocator *a; - if (!blk_size) { + if (blk_size == 0ULL) { return -EINVAL; } @@ -178,12 +178,12 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, * In order to control memory footprint, we require count < INT_MAX */ count = length / blk_size; - if (!base || !count || count > INT_MAX) { + if (base == 0ULL || count == 0ULL || count > INT_MAX) { return -EINVAL; } a = nvgpu_kzalloc(g, sizeof(struct nvgpu_lockless_allocator)); - if (!a) { + if (a == NULL) { return -ENOMEM; } @@ -193,7 +193,7 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, } a->next = nvgpu_vzalloc(g, sizeof(*a->next) * count); - if (!a->next) { + if (a->next == NULL) { err = -ENOMEM; goto fail; } diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c index f97d9ebd..03325cce 100644 --- a/drivers/gpu/nvgpu/common/mm/mm.c +++ b/drivers/gpu/nvgpu/common/mm/mm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -42,7 +42,7 @@ static u32 nvgpu_vm_get_pte_size_fixed_map(struct vm_gk20a *vm, struct nvgpu_vm_area *vm_area; vm_area = nvgpu_vm_area_find(vm, base); - if (!vm_area) { + if (vm_area == NULL) { return GMMU_PAGE_SIZE_SMALL; } @@ -55,7 +55,7 @@ static u32 nvgpu_vm_get_pte_size_fixed_map(struct vm_gk20a *vm, static u32 nvgpu_vm_get_pte_size_split_addr(struct vm_gk20a *vm, u64 base, u64 size) { - if (!base) { + if (base == 0ULL) { if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) { return GMMU_PAGE_SIZE_BIG; } @@ -233,7 +233,7 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm) true, false, "system"); - if (!mm->pmu.vm) { + if (mm->pmu.vm == NULL) { return -ENOMEM; } @@ -275,7 +275,7 @@ static int nvgpu_init_cde_vm(struct mm_gk20a *mm) NV_MM_DEFAULT_KERNEL_SIZE, NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE, false, false, "cde"); - if (!mm->cde.vm) { + if (mm->cde.vm == NULL) { return -ENOMEM; } return 0; @@ -291,7 +291,7 @@ static int nvgpu_init_ce_vm(struct mm_gk20a *mm) NV_MM_DEFAULT_KERNEL_SIZE, NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE, false, false, "ce"); - if (!mm->ce.vm) { + if (mm->ce.vm == NULL) { return -ENOMEM; } return 0; @@ -386,7 +386,7 @@ static int nvgpu_init_bar1_vm(struct mm_gk20a *mm) mm->bar1.aperture_size, true, false, "bar1"); - if (!mm->bar1.vm) { + if (mm->bar1.vm == NULL) { return -ENOMEM; } @@ -442,8 +442,8 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g) * this requires fixed allocations in vidmem which must be * allocated before all other buffers */ - if (g->ops.pmu.alloc_blob_space - && !nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) { + if (g->ops.pmu.alloc_blob_space != NULL && + !nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) { err = g->ops.pmu.alloc_blob_space(g, 0, &g->acr.ucode_blob); if (err) { return err; diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c index bf624162..68d68ad6 100644 --- a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c @@ -45,10 +45,10 @@ u64 nvgpu_alloc_base(struct nvgpu_allocator *a) return 0; } -u64 nvgpu_alloc_initialized(struct nvgpu_allocator *a) +bool nvgpu_alloc_initialized(struct nvgpu_allocator *a) { - if (!a->ops || !a->ops->inited) { - return 0; + if (a->ops == NULL || a->ops->inited == NULL) { + return false; } return a->ops->inited(a); @@ -151,7 +151,7 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g, { int err; - if (!ops) { + if (ops == NULL) { return -EINVAL; } @@ -159,7 +159,7 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g, * This is the bare minimum operations required for a sensible * allocator. 
*/ - if (!ops->alloc || !ops->free || !ops->fini) { + if (ops->alloc == NULL || ops->free == NULL || ops->fini == NULL) { return -EINVAL; } diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c index e251f3c4..5cfaded0 100644 --- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c +++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c @@ -128,7 +128,7 @@ bool nvgpu_sgt_iommuable(struct gk20a *g, struct nvgpu_sgt *sgt) void nvgpu_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt) { - if (sgt && sgt->ops->sgt_free) { + if (sgt != NULL && sgt->ops->sgt_free != NULL) { sgt->ops->sgt_free(g, sgt); } } @@ -138,7 +138,7 @@ u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys) /* ensure it is not vidmem allocation */ WARN_ON(nvgpu_addr_is_vidmem_page_alloc(phys)); - if (nvgpu_iommuable(g) && g->ops.mm.get_iommu_bit) { + if (nvgpu_iommuable(g) && g->ops.mm.get_iommu_bit != NULL) { return phys | 1ULL << g->ops.mm.get_iommu_bit(g); } @@ -165,7 +165,7 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt) */ if (nvgpu_iommuable(g) && nvgpu_sgt_iommuable(g, sgt) && - nvgpu_sgt_get_dma(sgt, sgt->sgl)) { + nvgpu_sgt_get_dma(sgt, sgt->sgl) != 0ULL) { return 1ULL << __ffs(nvgpu_sgt_get_dma(sgt, sgt->sgl)); } @@ -195,7 +195,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w) if (mem->aperture == APERTURE_SYSMEM) { u32 *ptr = mem->cpu_va; - WARN_ON(!ptr); + WARN_ON(ptr == NULL); data = ptr[w]; } else if (mem->aperture == APERTURE_VIDMEM) { nvgpu_pramin_rd_n(g, mem, w * sizeof(u32), sizeof(u32), &data); @@ -208,20 +208,20 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w) u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset) { - WARN_ON(offset & 3U); + WARN_ON((offset & 3U) != 0U); return nvgpu_mem_rd32(g, mem, offset / sizeof(u32)); } void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, void *dest, u32 size) { - WARN_ON(offset & 3U); - WARN_ON(size & 3U); + WARN_ON((offset & 3U) != 0U); + WARN_ON((size & 3U) != 0U); if (mem->aperture == APERTURE_SYSMEM) { u8 *src = (u8 *)mem->cpu_va + offset; - WARN_ON(!mem->cpu_va); + WARN_ON(mem->cpu_va == NULL); memcpy(dest, src, size); } else if (mem->aperture == APERTURE_VIDMEM) { nvgpu_pramin_rd_n(g, mem, offset, size, dest); @@ -235,7 +235,7 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data) if (mem->aperture == APERTURE_SYSMEM) { u32 *ptr = mem->cpu_va; - WARN_ON(!ptr); + WARN_ON(ptr == NULL); ptr[w] = data; } else if (mem->aperture == APERTURE_VIDMEM) { nvgpu_pramin_wr_n(g, mem, w * sizeof(u32), sizeof(u32), &data); @@ -249,20 +249,20 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data) void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data) { - WARN_ON(offset & 3U); + WARN_ON((offset & 3U) != 0U); nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data); } void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, void *src, u32 size) { - WARN_ON(offset & 3U); - WARN_ON(size & 3U); + WARN_ON((offset & 3U) != 0U); + WARN_ON((size & 3U) != 0U); if (mem->aperture == APERTURE_SYSMEM) { u8 *dest = (u8 *)mem->cpu_va + offset; - WARN_ON(!mem->cpu_va); + WARN_ON(mem->cpu_va == NULL); memcpy(dest, src, size); } else if (mem->aperture == APERTURE_VIDMEM) { nvgpu_pramin_wr_n(g, mem, offset, size, src); @@ -277,16 +277,16 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 c, u32 size) { - 
WARN_ON(offset & 3U); - WARN_ON(size & 3U); - WARN_ON(c & ~0xffU); + WARN_ON((offset & 3U) != 0U); + WARN_ON((size & 3U) != 0U); + WARN_ON((c & ~0xffU) != 0U); c &= 0xffU; if (mem->aperture == APERTURE_SYSMEM) { u8 *dest = (u8 *)mem->cpu_va + offset; - WARN_ON(!mem->cpu_va); + WARN_ON(mem->cpu_va == NULL); memset(dest, c, size); } else if (mem->aperture == APERTURE_VIDMEM) { u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24); diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c index c8bc17c7..35c7e120 100644 --- a/drivers/gpu/nvgpu/common/mm/page_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c @@ -116,7 +116,7 @@ static u64 nvgpu_page_alloc_base(struct nvgpu_allocator *a) return nvgpu_alloc_base(&va->source_allocator); } -static int nvgpu_page_alloc_inited(struct nvgpu_allocator *a) +static bool nvgpu_page_alloc_inited(struct nvgpu_allocator *a) { struct nvgpu_page_allocator *va = a->priv; @@ -264,7 +264,7 @@ static struct nvgpu_page_alloc *find_page_alloc( struct nvgpu_rbtree_node *node = NULL; nvgpu_rbtree_search(addr, &node, a->allocs); - if (!node) { + if (node == NULL) { return NULL; } @@ -282,7 +282,7 @@ static struct page_alloc_slab_page *alloc_slab_page( struct page_alloc_slab_page *slab_page; slab_page = nvgpu_kmem_cache_alloc(a->slab_page_cache); - if (!slab_page) { + if (slab_page == NULL) { palloc_dbg(a, "OOM: unable to alloc slab_page struct!"); return NULL; } @@ -290,7 +290,7 @@ static struct page_alloc_slab_page *alloc_slab_page( memset(slab_page, 0, sizeof(*slab_page)); slab_page->page_addr = nvgpu_alloc(&a->source_allocator, a->page_size); - if (!slab_page->page_addr) { + if (slab_page->page_addr == 0ULL) { nvgpu_kmem_cache_free(a->slab_page_cache, slab_page); palloc_dbg(a, "OOM: vidmem is full!"); return NULL; @@ -354,9 +354,9 @@ static int do_slab_alloc(struct nvgpu_page_allocator *a, del_slab_page_from_empty(slab, slab_page); } - if (!slab_page) { + if (slab_page == NULL) { slab_page = alloc_slab_page(a, slab); - if (!slab_page) { + if (slab_page == NULL) { return -ENOMEM; } } @@ -423,7 +423,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_slab( slab = &a->slabs[slab_nr]; alloc = nvgpu_kmem_cache_alloc(a->alloc_cache); - if (!alloc) { + if (alloc == NULL) { palloc_dbg(a, "OOM: could not alloc page_alloc struct!"); goto fail; } @@ -431,7 +431,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_slab( alloc->sgt.ops = &page_alloc_sgl_ops; sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl)); - if (!sgl) { + if (sgl == NULL) { palloc_dbg(a, "OOM: could not alloc sgl struct!"); goto fail; } @@ -524,7 +524,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages( int i = 0; alloc = nvgpu_kmem_cache_alloc(a->alloc_cache); - if (!alloc) { + if (alloc == NULL) { goto fail; } @@ -545,7 +545,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages( * requested size. The buddy allocator guarantees any given * single alloc is contiguous. 
*/ - if (a->flags & GPU_ALLOC_FORCE_CONTIG && i != 0) { + if ((a->flags & GPU_ALLOC_FORCE_CONTIG) != 0ULL && i != 0) { goto fail_cleanup; } @@ -563,23 +563,23 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages( chunk_len); /* Divide by 2 and try again */ - if (!chunk_addr) { + if (chunk_addr == 0ULL) { palloc_dbg(a, "balloc failed: 0x%llx", chunk_len); chunk_len >>= 1; max_chunk_len = chunk_len; } - } while (!chunk_addr && chunk_len >= a->page_size); + } while (chunk_addr == 0ULL && chunk_len >= a->page_size); chunk_pages = chunk_len >> a->page_shift; - if (!chunk_addr) { + if (chunk_addr == 0ULL) { palloc_dbg(a, "bailing @ 0x%llx", chunk_len); goto fail_cleanup; } sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl)); - if (!sgl) { + if (sgl == NULL) { nvgpu_free(&a->source_allocator, chunk_addr); goto fail_cleanup; } @@ -638,7 +638,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages( pages = ALIGN(len, a->page_size) >> a->page_shift; alloc = do_nvgpu_alloc_pages(a, pages); - if (!alloc) { + if (alloc == NULL) { palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)", pages << a->page_shift, pages); return NULL; @@ -679,18 +679,18 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *na, u64 len) * If we want contig pages we have to round up to a power of two. It's * easier to do that here than in the buddy allocator. */ - real_len = a->flags & GPU_ALLOC_FORCE_CONTIG ? + real_len = ((a->flags & GPU_ALLOC_FORCE_CONTIG) != 0ULL) ? roundup_pow_of_two(len) : len; alloc_lock(na); - if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES && + if ((a->flags & GPU_ALLOC_4K_VIDMEM_PAGES) != 0ULL && real_len <= (a->page_size / 2U)) { alloc = nvgpu_alloc_slab(a, real_len); } else { alloc = nvgpu_alloc_pages(a, real_len); } - if (!alloc) { + if (alloc == NULL) { alloc_unlock(na); return 0; } @@ -728,7 +728,7 @@ static void nvgpu_page_free(struct nvgpu_allocator *na, u64 base) ((struct nvgpu_page_alloc *)(uintptr_t)base)->base); } - if (!alloc) { + if (alloc == NULL) { palloc_dbg(a, "Hrm, found no alloc?"); goto done; } @@ -760,13 +760,13 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages_fixed( alloc = nvgpu_kmem_cache_alloc(a->alloc_cache); sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl)); - if (!alloc || !sgl) { + if (alloc == NULL || sgl == NULL) { goto fail; } alloc->sgt.ops = &page_alloc_sgl_ops; alloc->base = nvgpu_alloc_fixed(&a->source_allocator, base, length, 0); - if (!alloc->base) { + if (alloc->base == 0ULL) { WARN(1, "nvgpu: failed to fixed alloc pages @ 0x%010llx", base); goto fail; } @@ -811,7 +811,7 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *na, alloc_lock(na); alloc = nvgpu_alloc_pages_fixed(a, base, aligned_len, 0); - if (!alloc) { + if (alloc == NULL) { alloc_unlock(na); return 0; } @@ -850,7 +850,7 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *na, if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) { alloc = find_page_alloc(a, base); - if (!alloc) { + if (alloc == NULL) { goto done; } } else { @@ -985,7 +985,7 @@ static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a) a->slabs = nvgpu_kcalloc(nvgpu_alloc_to_gpu(a->owner), nr_slabs, sizeof(struct page_alloc_slab)); - if (!a->slabs) { + if (a->slabs == NULL) { return -ENOMEM; } a->nr_slabs = nr_slabs; @@ -1018,7 +1018,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, } a = nvgpu_kzalloc(g, sizeof(struct nvgpu_page_allocator)); - if (!a) { + if (a == NULL) { return -ENOMEM; } @@ -1031,7 +1031,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, sizeof(struct 
nvgpu_page_alloc)); a->slab_page_cache = nvgpu_kmem_cache_create(g, sizeof(struct page_alloc_slab_page)); - if (!a->alloc_cache || !a->slab_page_cache) { + if (a->alloc_cache == NULL || a->slab_page_cache == NULL) { err = -ENOMEM; goto fail; } @@ -1044,7 +1044,8 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, a->owner = na; a->flags = flags; - if (flags & GPU_ALLOC_4K_VIDMEM_PAGES && blk_size > SZ_4K) { + if ((flags & GPU_ALLOC_4K_VIDMEM_PAGES) != 0ULL && + blk_size > SZ_4K) { err = nvgpu_page_alloc_init_slabs(a); if (err) { goto fail; diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c index dae6d34e..a8ed10e7 100644 --- a/drivers/gpu/nvgpu/common/mm/pd_cache.c +++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c @@ -102,7 +102,7 @@ int nvgpu_pd_cache_init(struct gk20a *g) } cache = nvgpu_kzalloc(g, sizeof(*cache)); - if (!cache) { + if (cache == NULL) { nvgpu_err(g, "Failed to alloc pd_cache!"); return -ENOMEM; } @@ -132,7 +132,7 @@ void nvgpu_pd_cache_fini(struct gk20a *g) u32 i; struct nvgpu_pd_cache *cache = g->mm.pd_cache; - if (!cache) { + if (cache == NULL) { return; } @@ -159,7 +159,7 @@ int nvgpu_pd_cache_alloc_direct(struct gk20a *g, pd_dbg(g, "PD-Alloc [D] %u bytes", bytes); pd->mem = nvgpu_kzalloc(g, sizeof(*pd->mem)); - if (!pd->mem) { + if (pd->mem == NULL) { nvgpu_err(g, "OOM allocating nvgpu_mem struct!"); return -ENOMEM; } @@ -205,7 +205,7 @@ static int nvgpu_pd_cache_alloc_new(struct gk20a *g, pd_dbg(g, "PD-Alloc [C] New: offs=0"); pentry = nvgpu_kzalloc(g, sizeof(*pentry)); - if (!pentry) { + if (pentry == NULL) { nvgpu_err(g, "OOM allocating pentry!"); return -ENOMEM; } @@ -313,7 +313,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache, pd_dbg(g, "PD-Alloc [C] %u bytes", bytes); - if (bytes & (bytes - 1U) || + if ((bytes & (bytes - 1U)) != 0U || (bytes >= PAGE_SIZE || bytes < NVGPU_PD_CACHE_MIN)) { pd_dbg(g, "PD-Alloc [C] Invalid (bytes=%u)!", bytes); @@ -321,7 +321,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache, } pentry = nvgpu_pd_cache_get_partial(cache, bytes); - if (!pentry) { + if (pentry == NULL) { err = nvgpu_pd_cache_alloc_new(g, cache, pd, bytes); } else { err = nvgpu_pd_cache_alloc_from_partial(g, cache, pentry, pd); @@ -357,7 +357,7 @@ int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes) return 0; } - if (WARN_ON(!g->mm.pd_cache)) { + if (WARN_ON(g->mm.pd_cache == NULL)) { return -ENOMEM; } @@ -372,7 +372,7 @@ void nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd) { pd_dbg(g, "PD-Free [D] 0x%p", pd->mem); - if (!pd->mem) { + if (pd->mem == NULL) { return; } @@ -425,7 +425,7 @@ static struct nvgpu_pd_mem_entry *nvgpu_pd_cache_look_up( nvgpu_rbtree_search((u64)(uintptr_t)pd->mem, &node, cache->mem_tree); - if (!node) { + if (node == NULL) { return NULL; } @@ -440,7 +440,7 @@ static void nvgpu_pd_cache_free(struct gk20a *g, struct nvgpu_pd_cache *cache, pd_dbg(g, "PD-Free [C] 0x%p", pd->mem); pentry = nvgpu_pd_cache_look_up(g, cache, pd); - if (!pentry) { + if (pentry == NULL) { WARN(1, "Attempting to free non-existent pd"); return; } diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c index 17e49969..98bad70b 100644 --- a/drivers/gpu/nvgpu/common/mm/vm.c +++ b/drivers/gpu/nvgpu/common/mm/vm.c @@ -59,7 +59,7 @@ static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer, int vm_aspace_id(struct vm_gk20a *vm) { - return vm->as_share ? 
vm->as_share->id : -1; + return (vm->as_share != NULL) ? vm->as_share->id : -1; } /* @@ -112,7 +112,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm, nvgpu_pd_cache_free_direct(g, pdb); - if (!pdb->entries) { + if (pdb->entries == NULL) { return; } @@ -153,7 +153,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx) size = (size + ((u64)page_size - 1U)) & ~((u64)page_size - 1U); addr = nvgpu_alloc_pte(vma, size, page_size); - if (!addr) { + if (addr == 0ULL) { nvgpu_err(g, "(%s) oom: sz=0x%llx", vma->name, size); return 0; } @@ -200,14 +200,16 @@ void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm, /* * Determine if the passed address space can support big pages or not. */ -int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size) +bool nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size) { - u64 mask = ((u64)vm->big_page_size << 10) - 1U; + u64 mask = ((u64)vm->big_page_size << 10ULL) - 1ULL; + u64 base_big_page = base & mask; + u64 size_big_page = size & mask; - if (base & mask || size & mask) { - return 0; + if (base_big_page != 0ULL || size_big_page != 0ULL) { + return false; } - return 1; + return true; } /* @@ -233,12 +235,12 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm) } sema_sea = nvgpu_semaphore_sea_create(g); - if (!sema_sea) { + if (sema_sea == NULL) { return -ENOMEM; } err = nvgpu_semaphore_pool_alloc(sema_sea, &vm->sema_pool); - if (err) { + if (err != 0) { return err; } @@ -254,7 +256,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm) mm->channel.kernel_size, 512U * PAGE_SIZE, SZ_4K); - if (!sema_sea->gpu_va) { + if (sema_sea->gpu_va == 0ULL) { nvgpu_free(&vm->kernel, sema_sea->gpu_va); nvgpu_vm_put(vm); return -ENOMEM; @@ -387,7 +389,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm, } kernel_vma_flags = (kernel_reserved + low_hole) == aperture_size ? - 0U : GPU_ALLOC_GVA_SPACE; + 0ULL : GPU_ALLOC_GVA_SPACE; /* * A "user" area only makes sense for the GVA spaces. 
For VMs where @@ -579,7 +581,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g, { struct vm_gk20a *vm = nvgpu_kzalloc(g, sizeof(*vm)); - if (!vm) { + if (vm == NULL) { return NULL; } @@ -615,7 +617,8 @@ static void __nvgpu_vm_remove(struct vm_gk20a *vm) } } - if (nvgpu_mem_is_valid(&g->syncpt_mem) && vm->syncpt_ro_map_gpu_va) { + if (nvgpu_mem_is_valid(&g->syncpt_mem) && + vm->syncpt_ro_map_gpu_va != 0ULL) { nvgpu_gmmu_unmap(vm, &g->syncpt_mem, vm->syncpt_ro_map_gpu_va); } @@ -701,7 +704,7 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf( struct nvgpu_rbtree_node *root = vm->mapped_buffers; nvgpu_rbtree_search(addr, &node, root); - if (!node) { + if (node == NULL) { return NULL; } @@ -715,7 +718,7 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_range( struct nvgpu_rbtree_node *root = vm->mapped_buffers; nvgpu_rbtree_range_search(addr, &node, root); - if (!node) { + if (node == NULL) { return NULL; } @@ -729,7 +732,7 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than( struct nvgpu_rbtree_node *root = vm->mapped_buffers; nvgpu_rbtree_less_than_search(addr, &node, root); - if (!node) { + if (node == NULL) { return NULL; } @@ -755,7 +758,7 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm, buffer_list = nvgpu_big_zalloc(vm->mm->g, sizeof(*buffer_list) * vm->num_user_mapped_buffers); - if (!buffer_list) { + if (buffer_list == NULL) { nvgpu_mutex_release(&vm->update_gmmu_lock); return -ENOMEM; } @@ -841,7 +844,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, u8 pte_kind; if (vm->userspace_managed && - !(flags & NVGPU_VM_MAP_FIXED_OFFSET)) { + (flags & NVGPU_VM_MAP_FIXED_OFFSET) == 0U) { nvgpu_err(g, "non-fixed-offset mapping not available on " "userspace managed address spaces"); @@ -883,7 +886,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, * Generate a new mapping! */ mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer)); - if (!mapped_buffer) { + if (mapped_buffer == NULL) { nvgpu_warn(g, "oom allocating tracking buffer"); return ERR_PTR(-ENOMEM); } @@ -895,7 +898,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, binfo.pgsz_idx = nvgpu_vm_get_pte_size(vm, map_addr, min_t(u64, binfo.size, align)); } - map_size = map_size ? map_size : binfo.size; + map_size = (map_size != 0ULL) ? map_size : binfo.size; map_size = ALIGN(map_size, SZ_4K); if ((map_size > binfo.size) || @@ -929,7 +932,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, } if ((binfo.compr_kind != NVGPU_KIND_INVALID) && - (flags & NVGPU_VM_MAP_FIXED_OFFSET)) { + ((flags & NVGPU_VM_MAP_FIXED_OFFSET) != 0U)) { /* * Fixed-address compressible mapping is * requested. Make sure we're respecting the alignment @@ -1008,7 +1011,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, /* * Figure out the kind and ctag offset for the GMMU page tables */ - if (binfo.compr_kind != NVGPU_KIND_INVALID && ctag_offset) { + if (binfo.compr_kind != NVGPU_KIND_INVALID && ctag_offset != 0U) { /* * Adjust the ctag_offset as per the buffer map offset */ @@ -1054,7 +1057,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, gk20a_comptags_finish_clear(os_buf, map_addr != 0U); } - if (!map_addr) { + if (map_addr == 0ULL) { err = -ENOMEM; goto clean_up; } @@ -1096,7 +1099,7 @@ clean_up: mapped_buffer->pgsz_idx, mapped_buffer->va_allocated, gk20a_mem_flag_none, - mapped_buffer->vm_area ? + (mapped_buffer->vm_area != NULL) ? 
mapped_buffer->vm_area->sparse : false, NULL); } @@ -1125,7 +1128,7 @@ static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer, mapped_buffer->pgsz_idx, mapped_buffer->va_allocated, gk20a_mem_flag_none, - mapped_buffer->vm_area ? + (mapped_buffer->vm_area != NULL) ? mapped_buffer->vm_area->sparse : false, batch); @@ -1185,8 +1188,8 @@ static int nvgpu_vm_unmap_sync_buffer(struct vm_gk20a *vm, break; } nvgpu_msleep(10); - } while (!nvgpu_timeout_expired_msg(&timeout, - "sync-unmap failed on 0x%llx")); + } while (nvgpu_timeout_expired_msg(&timeout, + "sync-unmap failed on 0x%llx") == 0); if (nvgpu_timeout_expired(&timeout)) { ret = -ETIMEDOUT; @@ -1205,7 +1208,7 @@ void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset, nvgpu_mutex_acquire(&vm->update_gmmu_lock); mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset); - if (!mapped_buffer) { + if (mapped_buffer == NULL) { goto done; } diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c index d096de5d..ac4708af 100644 --- a/drivers/gpu/nvgpu/common/mm/vm_area.c +++ b/drivers/gpu/nvgpu/common/mm/vm_area.c @@ -66,13 +66,13 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm, /* Find the space reservation, but it's ok to have none for * userspace-managed address spaces */ vm_area = nvgpu_vm_area_find(vm, map_addr); - if (!vm_area && !vm->userspace_managed) { + if (vm_area == NULL && !vm->userspace_managed) { nvgpu_warn(g, "fixed offset mapping without space allocation"); return -EINVAL; } /* Mapped area should fit inside va, if there's one */ - if (vm_area && map_end > vm_area->addr + vm_area->size) { + if (vm_area != NULL && map_end > vm_area->addr + vm_area->size) { nvgpu_warn(g, "fixed offset mapping size overflows va node"); return -EINVAL; } @@ -82,7 +82,7 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm, * that is less than our buffer end */ buffer = __nvgpu_vm_find_mapped_buf_less_than( vm, map_addr + map_size); - if (buffer && buffer->addr + buffer->size > map_addr) { + if (buffer != NULL && buffer->addr + buffer->size > map_addr) { nvgpu_warn(g, "overlapping buffer map requested"); return -EINVAL; } @@ -138,7 +138,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, } vm_area = nvgpu_kzalloc(g, sizeof(*vm_area)); - if (!vm_area) { + if (vm_area == NULL) { goto clean_up_err; } @@ -155,7 +155,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, page_size); } - if (!vaddr_start) { + if (vaddr_start == 0ULL) { goto clean_up_err; } @@ -183,7 +183,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, false, NULL, APERTURE_INVALID); - if (!map_addr) { + if (map_addr == 0ULL) { nvgpu_mutex_release(&vm->update_gmmu_lock); goto clean_up_err; } @@ -215,7 +215,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr) nvgpu_mutex_acquire(&vm->update_gmmu_lock); vm_area = nvgpu_vm_area_find(vm, addr); - if (!vm_area) { + if (vm_area == NULL) { nvgpu_mutex_release(&vm->update_gmmu_lock); return 0; } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c index 0395e463..6d1d5f00 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu.c @@ -170,8 +170,8 @@ void nvgpu_kill_task_pg_init(struct gk20a *g) break; } nvgpu_udelay(2); - } while (!nvgpu_timeout_expired_msg(&timeout, - "timeout - waiting PMU state machine thread stop")); + } while (nvgpu_timeout_expired_msg(&timeout, + "timeout - waiting PMU state machine thread stop") == 0); } } @@ -214,7 +214,7 @@ static 
int nvgpu_init_pmu_setup_sw(struct gk20a *g) pmu->mutex_cnt = g->ops.pmu.pmu_mutex_size(); pmu->mutex = nvgpu_kzalloc(g, pmu->mutex_cnt * sizeof(struct pmu_mutex)); - if (!pmu->mutex) { + if (pmu->mutex == NULL) { err = -ENOMEM; goto err; } @@ -226,7 +226,7 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g) pmu->seq = nvgpu_kzalloc(g, PMU_MAX_NUM_SEQUENCES * sizeof(struct pmu_sequence)); - if (!pmu->seq) { + if (pmu->seq == NULL) { err = -ENOMEM; goto err_free_mutex; } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c index bf54e0d6..a94453fb 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c @@ -1738,12 +1738,12 @@ int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g) nvgpu_log_fn(g, " "); - if (pmu->fw) { + if (pmu->fw != NULL) { return nvgpu_init_pmu_fw_support(pmu); } pmu->fw = nvgpu_request_firmware(g, NVGPU_PMU_NS_UCODE_IMAGE, 0); - if (!pmu->fw) { + if (pmu->fw == NULL) { nvgpu_err(g, "failed to load pmu ucode!!"); return err; } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c index 9fe999ae..6f88260f 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c @@ -221,7 +221,7 @@ invalid_cmd: "payload in=%p, in_size=%d, in_offset=%d,\n" "payload out=%p, out_size=%d, out_offset=%d", queue_id, cmd->hdr.size, cmd->hdr.unit_id, - msg, msg ? msg->hdr.unit_id : ~0, + msg, (msg != NULL) ? msg->hdr.unit_id : ~0, &payload->in, payload->in.size, payload->in.offset, &payload->out, payload->out.size, payload->out.offset); @@ -243,7 +243,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, do { err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size); - if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout)) { + if (err == -EAGAIN && nvgpu_timeout_expired(&timeout) == 0) { nvgpu_usleep_range(1000, 2000); } else { break; @@ -273,7 +273,7 @@ static int pmu_cmd_payload_extract_rpc(struct gk20a *g, struct pmu_cmd *cmd, dmem_alloc_size = payload->rpc.size_rpc + payload->rpc.size_scratch; dmem_alloc_offset = nvgpu_alloc(&pmu->dmem, dmem_alloc_size); - if (!dmem_alloc_offset) { + if (dmem_alloc_offset == 0U) { err = -ENOMEM; goto clean_up; } @@ -312,11 +312,11 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, nvgpu_log_fn(g, " "); - if (payload) { + if (payload != NULL) { seq->out_payload = payload->out.buf; } - if (payload && payload->in.offset != 0U) { + if (payload != NULL && payload->in.offset != 0U) { pv->set_pmu_allocation_ptr(pmu, &in, ((u8 *)&cmd->cmd + payload->in.offset)); @@ -331,14 +331,14 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) = nvgpu_alloc(&pmu->dmem, pv->pmu_allocation_get_dmem_size(pmu, in)); - if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in))) { + if (*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) == 0U) { goto clean_up; } if (payload->in.fb_size != 0x0U) { seq->in_mem = nvgpu_kzalloc(g, sizeof(struct nvgpu_mem)); - if (!seq->in_mem) { + if (seq->in_mem == NULL) { err = -ENOMEM; goto clean_up; } @@ -365,7 +365,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, pv->pmu_allocation_get_dmem_offset(pmu, in)); } - if (payload && payload->out.offset != 0U) { + if (payload != NULL && payload->out.offset != 0U) { pv->set_pmu_allocation_ptr(pmu, &out, ((u8 *)&cmd->cmd + payload->out.offset)); pv->pmu_allocation_set_dmem_size(pmu, out, @@ -376,15 
+376,15 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, nvgpu_alloc(&pmu->dmem, pv->pmu_allocation_get_dmem_size(pmu, out)); - if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, - out))) { + if (*(pv->pmu_allocation_get_dmem_offset_addr(pmu, + out)) == 0U) { goto clean_up; } if (payload->out.fb_size != 0x0U) { seq->out_mem = nvgpu_kzalloc(g, sizeof(struct nvgpu_mem)); - if (!seq->out_mem) { + if (seq->out_mem == NULL) { err = -ENOMEM; goto clean_up; } @@ -439,16 +439,16 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, nvgpu_log_fn(g, " "); - if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) { - if (!cmd) { + if (cmd == NULL || seq_desc == NULL || !pmu->pmu_ready) { + if (cmd == NULL) { nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__); - } else if (!seq_desc) { + } else if (seq_desc == NULL) { nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__); } else { nvgpu_warn(g, "%s(): PMU is not ready", __func__); } - WARN_ON(1); + WARN_ON(true); return -EINVAL; } @@ -612,7 +612,7 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg) err = g->ops.perf.handle_pmu_perf_event(g, (void *)&msg->msg.perf); } else { - WARN_ON(1); + WARN_ON(true); } break; case PMU_UNIT_THERM: @@ -641,7 +641,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr, PMU_MSG_HDR_SIZE, &bytes_read); - if (err || bytes_read != PMU_MSG_HDR_SIZE) { + if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) { nvgpu_err(g, "fail to read msg from queue %d", queue->id); *status = err | -EINVAL; goto clean_up; @@ -657,7 +657,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, /* read again after rewind */ err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr, PMU_MSG_HDR_SIZE, &bytes_read); - if (err || bytes_read != PMU_MSG_HDR_SIZE) { + if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) { nvgpu_err(g, "fail to read msg from queue %d", queue->id); *status = err | -EINVAL; @@ -676,7 +676,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, read_size = msg->hdr.size - PMU_MSG_HDR_SIZE; err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->msg, read_size, &bytes_read); - if (err || bytes_read != read_size) { + if (err != 0 || bytes_read != read_size) { nvgpu_err(g, "fail to read msg from queue %d", queue->id); *status = err; @@ -750,7 +750,7 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, nvgpu_usleep_range(delay, delay * 2U); delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); - } while (!nvgpu_timeout_expired(&timeout)); + } while (nvgpu_timeout_expired(&timeout) == 0); return -ETIMEDOUT; } @@ -887,7 +887,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc, if (caller_cb == NULL) { rpc_payload = nvgpu_kzalloc(g, sizeof(struct rpc_handler_payload) + size_rpc); - if (!rpc_payload) { + if (rpc_payload == NULL) { status = ENOMEM; goto exit; } @@ -907,7 +907,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc, } rpc_payload = nvgpu_kzalloc(g, sizeof(struct rpc_handler_payload)); - if (!rpc_payload) { + if (rpc_payload == NULL) { status = ENOMEM; goto exit; } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c index a99e86ce..12ab4422 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c @@ -51,7 +51,7 @@ static u8 get_perfmon_id(struct nvgpu_pmu *pmu) default: unit_id = PMU_UNIT_INVALID; nvgpu_err(g, "no support for %x", ver); - 
WARN_ON(1); + WARN_ON(true); } return unit_id; @@ -75,11 +75,11 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu) g->ops.pmu.pmu_init_perfmon_counter(g); - if (!pmu->sample_buffer) { + if (pmu->sample_buffer == 0U) { pmu->sample_buffer = nvgpu_alloc(&pmu->dmem, 2U * sizeof(u16)); } - if (!pmu->sample_buffer) { + if (pmu->sample_buffer == 0U) { nvgpu_err(g, "failed to allocate perfmon sample buffer"); return -ENOMEM; } @@ -240,7 +240,7 @@ int nvgpu_pmu_load_update(struct gk20a *g) void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles, u32 *total_cycles) { - if (!g->power_on || gk20a_busy(g)) { + if (!g->power_on || gk20a_busy(g) != 0) { *busy_cycles = 0; *total_cycles = 0; return; @@ -254,7 +254,7 @@ void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles, void nvgpu_pmu_reset_load_counters(struct gk20a *g) { - if (!g->power_on || gk20a_busy(g)) { + if (!g->power_on || gk20a_busy(g) != 0) { return; } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c index 0758279d..d2615b1a 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c @@ -89,9 +89,9 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, } if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) { - if (g->ops.pmu.pmu_pg_engines_feature_list && + if (g->ops.pmu.pmu_pg_engines_feature_list != NULL && g->ops.pmu.pmu_pg_engines_feature_list(g, - PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != + PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) { pmu->initialized = true; nvgpu_pmu_state_change(g, PMU_STATE_STARTED, @@ -117,9 +117,9 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg) u32 status = 0; if (enable_pg == true) { - if (g->ops.pmu.pmu_pg_engines_feature_list && + if (g->ops.pmu.pmu_pg_engines_feature_list != NULL && g->ops.pmu.pmu_pg_engines_feature_list(g, - PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != + PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) { if (g->ops.pmu.pmu_lpwr_enable_pg) { status = g->ops.pmu.pmu_lpwr_enable_pg(g, @@ -129,9 +129,9 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg) status = nvgpu_pmu_enable_elpg(g); } } else if (enable_pg == false) { - if (g->ops.pmu.pmu_pg_engines_feature_list && + if (g->ops.pmu.pmu_pg_engines_feature_list != NULL && g->ops.pmu.pmu_pg_engines_feature_list(g, - PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != + PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) { if (g->ops.pmu.pmu_lpwr_disable_pg) { status = g->ops.pmu.pmu_lpwr_disable_pg(g, @@ -207,7 +207,7 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g) nvgpu_warn(g, "%s(): possible elpg refcnt mismatch. elpg refcnt=%d", __func__, pmu->elpg_refcnt); - WARN_ON(1); + WARN_ON(true); } /* do NOT enable elpg until golden ctx is created, @@ -273,7 +273,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g) nvgpu_warn(g, "%s(): possible elpg refcnt mismatch. 
elpg refcnt=%d", __func__, pmu->elpg_refcnt); - WARN_ON(1); + WARN_ON(true); ret = 0; goto exit_unlock; } @@ -481,7 +481,8 @@ int nvgpu_pmu_init_powergating(struct gk20a *g) pg_engine_id++) { if (BIT(pg_engine_id) & pg_engine_id_list) { - if (pmu && pmu->pmu_state == PMU_STATE_INIT_RECEIVED) { + if (pmu != NULL && + pmu->pmu_state == PMU_STATE_INIT_RECEIVED) { nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTING, false); } @@ -636,9 +637,9 @@ static void ap_callback_init_and_enable_ctrl( void *param, u32 seq_desc, u32 status) { /* Define p_ap (i.e pointer to pmu_ap structure) */ - WARN_ON(!msg); + WARN_ON(msg == NULL); - if (!status) { + if (status == 0U) { switch (msg->msg.pg.ap_msg.cmn.msg_id) { case PMU_AP_MSG_ID_INIT_ACK: nvgpu_pmu_dbg(g, "reply PMU_AP_CMD_ID_INIT"); diff --git a/drivers/gpu/nvgpu/include/nvgpu/allocator.h b/drivers/gpu/nvgpu/include/nvgpu/allocator.h index 2bff0efd..d722673d 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/allocator.h +++ b/drivers/gpu/nvgpu/include/nvgpu/allocator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -83,7 +83,7 @@ struct nvgpu_allocator_ops { u64 (*base)(struct nvgpu_allocator *allocator); u64 (*length)(struct nvgpu_allocator *allocator); u64 (*end)(struct nvgpu_allocator *allocator); - int (*inited)(struct nvgpu_allocator *allocator); + bool (*inited)(struct nvgpu_allocator *allocator); u64 (*space)(struct nvgpu_allocator *allocator); /* Destructor. */ @@ -188,11 +188,11 @@ nvgpu_alloc_carveout_from_co_entry(struct nvgpu_list_node *node) * pointing to the allocation base (requires GPU_ALLOC_FORCE_CONTIG to be * set as well). */ -#define GPU_ALLOC_GVA_SPACE BIT(0) -#define GPU_ALLOC_NO_ALLOC_PAGE BIT(1) -#define GPU_ALLOC_4K_VIDMEM_PAGES BIT(2) -#define GPU_ALLOC_FORCE_CONTIG BIT(3) -#define GPU_ALLOC_NO_SCATTER_GATHER BIT(4) +#define GPU_ALLOC_GVA_SPACE BIT64(0) +#define GPU_ALLOC_NO_ALLOC_PAGE BIT64(1) +#define GPU_ALLOC_4K_VIDMEM_PAGES BIT64(2) +#define GPU_ALLOC_FORCE_CONTIG BIT64(3) +#define GPU_ALLOC_NO_SCATTER_GATHER BIT64(4) static inline void alloc_lock(struct nvgpu_allocator *a) { @@ -256,7 +256,7 @@ void nvgpu_alloc_release_carveout(struct nvgpu_allocator *a, u64 nvgpu_alloc_base(struct nvgpu_allocator *a); u64 nvgpu_alloc_length(struct nvgpu_allocator *a); u64 nvgpu_alloc_end(struct nvgpu_allocator *a); -u64 nvgpu_alloc_initialized(struct nvgpu_allocator *a); +bool nvgpu_alloc_initialized(struct nvgpu_allocator *a); u64 nvgpu_alloc_space(struct nvgpu_allocator *a); void nvgpu_alloc_destroy(struct nvgpu_allocator *allocator); diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h index b47d4ee0..23dac0ac 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/vm.h +++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -220,7 +220,7 @@ void nvgpu_vm_get(struct vm_gk20a *vm); void nvgpu_vm_put(struct vm_gk20a *vm); int vm_aspace_id(struct vm_gk20a *vm); -int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size); +bool nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size); int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm); -- cgit v1.2.2
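Editor's note: the diff above applies one pattern throughout common/ — expressions that MISRA C:2012 Rule 10.1 does not treat as essentially boolean (pointer tests, integer results, bit-mask results) are rewritten as explicit comparisons against NULL, 0, or 0ULL, and int fields that only ever hold 0/1 (such as the allocators' "inited"/"initialized" flags) become bool. The sketch below is illustrative only and is not code from the driver; the struct and function names are hypothetical, although GPU_ALLOC_NO_ALLOC_PAGE mirrors the flag defined in include/nvgpu/allocator.h.

	#include <stdbool.h>
	#include <stddef.h>

	#define GPU_ALLOC_NO_ALLOC_PAGE (1ULL << 1)

	struct alloc_state {
		unsigned long long flags;
		void *meta_data_cache;
	};

	/*
	 * Non-compliant with Rule 10.1: a pointer and a bit-mask result
	 * are used directly as the operands of a logical operator.
	 */
	static bool uses_meta_cache_before(const struct alloc_state *a)
	{
		return a->meta_data_cache &&
		       !(a->flags & GPU_ALLOC_NO_ALLOC_PAGE);
	}

	/*
	 * Compliant form matching the patch: each operand is made
	 * essentially boolean with an explicit comparison.
	 */
	static bool uses_meta_cache_after(const struct alloc_state *a)
	{
		return (a->meta_data_cache != NULL) &&
		       ((a->flags & GPU_ALLOC_NO_ALLOC_PAGE) == 0ULL);
	}

The same rewrite is visible in, for example, nvgpu_bitmap_alloc() and nvgpu_bitmap_free() above, where "!(a->flags & GPU_ALLOC_NO_ALLOC_PAGE)" and "if (!alloc)" become "== 0ULL" and "== NULL" comparisons without changing behavior.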