author     Amulya <Amurthyreddy@nvidia.com>  2018-08-28 03:04:55 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-09-19 06:24:12 -0400
commit     941ac9a9d07bedb4062fd0c4d32eb2ef80a42359 (patch)
tree       c53622d96a4c2e7c18693ecf4059d7e403cd7808 /drivers/gpu/nvgpu/common/mm/gmmu.c
parent     2805f03aa0496502b64ff760f667bfe9d8a27928 (diff)
nvgpu: common: MISRA 10.1 boolean fixes
Fix violations where a variable of non-boolean type is used as a
boolean in gpu/nvgpu/common.

JIRA NVGPU-646

Change-Id: I9773d863b715f83ae1772b75d5373f77244bc8ca
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807132
GVS: Gerrit_Virtual_Submit
Tested-by: Amulya Murthyreddy <amurthyreddy@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
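For reference, the pattern applied throughout the hunks below replaces implicit boolean conversions of pointers, unsigned values, flag masks and error codes with explicit comparisons. A minimal standalone sketch of the before/after forms, using hypothetical names rather than nvgpu code:

#include <stddef.h>

/*
 * Hypothetical example only; it is not taken from nvgpu. It shows the
 * comparison style this change applies: explicit == / != tests instead
 * of implicit boolean conversion of pointers, unsigned values and masks.
 */
static int check_mapping(const void *buf, unsigned long long vaddr,
                         unsigned int flags, int err)
{
        /* was: if (!buf)            */
        if (buf == NULL) {
                return -1;
        }
        /* was: if (!vaddr)          */
        if (vaddr == 0ULL) {
                return -1;
        }
        /* was: if (!(flags & 0x1))  */
        if ((flags & 0x1U) == 0U) {
                return -1;
        }
        /* was: if (err)             */
        if (err != 0) {
                return err;
        }
        return 0;
}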
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/gmmu.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/gmmu.c  37
1 file changed, 19 insertions, 18 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index e21ffd8d..02e32b20 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -79,7 +79,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 
         struct nvgpu_sgt *sgt = nvgpu_sgt_create_from_mem(g, mem);
 
-        if (!sgt) {
+        if (sgt == NULL) {
                 return 0;
         }
 
@@ -122,7 +122,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 
         nvgpu_sgt_free(g, sgt);
 
-        if (!vaddr) {
+        if (vaddr == 0ULL) {
                 nvgpu_err(g, "failed to map buffer!");
                 return 0;
         }
@@ -201,7 +201,7 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
         pdb_size = ALIGN(pd_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE);
 
         err = nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size);
-        if (WARN_ON(err)) {
+        if (WARN_ON(err != 0)) {
                 return err;
         }
 
@@ -324,7 +324,7 @@ static int pd_allocate_children(struct vm_gk20a *vm,
         pd->num_entries = pd_entries(l, attrs);
         pd->entries = nvgpu_vzalloc(g, sizeof(struct nvgpu_gmmu_pd) *
                                     pd->num_entries);
-        if (!pd->entries) {
+        if (pd->entries == NULL) {
                 return -ENOMEM;
         }
 
@@ -433,7 +433,7 @@ static int __set_pd_level(struct vm_gk20a *vm,
                  * to be the table of PDEs. When the next level is PTEs the
                  * target addr is the real physical address we are aiming for.
                  */
-                target_addr = next_pd ?
+                target_addr = (next_pd != NULL) ?
                         nvgpu_pde_phys_addr(g, next_pd) :
                         phys_addr;
 
@@ -486,7 +486,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
         struct nvgpu_sgl *sgl;
         int err = 0;
 
-        if (!sgt) {
+        if (sgt == NULL) {
                 /*
                  * This is considered an unmap. Just pass in 0 as the physical
                  * address for the entire GPU range.
@@ -543,7 +543,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
                 /*
                  * Cut out sgl ents for space_to_skip.
                  */
-                if (space_to_skip &&
+                if (space_to_skip != 0ULL &&
                     space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
                         space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
                         continue;
@@ -630,10 +630,10 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
630 "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | " 630 "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | "
631 "kind=%#02x APT=%-6s %c%c%c%c%c", 631 "kind=%#02x APT=%-6s %c%c%c%c%c",
632 vm->name, 632 vm->name,
633 sgt ? "MAP" : "UNMAP", 633 (sgt != NULL) ? "MAP" : "UNMAP",
634 virt_addr, 634 virt_addr,
635 length, 635 length,
636 sgt ? nvgpu_sgt_get_phys(g, sgt, sgt->sgl) : 0, 636 (sgt != NULL) ? nvgpu_sgt_get_phys(g, sgt, sgt->sgl) : 0,
637 space_to_skip, 637 space_to_skip,
638 page_size >> 10, 638 page_size >> 10,
639 nvgpu_gmmu_perm_str(attrs->rw_flag), 639 nvgpu_gmmu_perm_str(attrs->rw_flag),
@@ -654,7 +654,8 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 
         nvgpu_mb();
 
-        __gmmu_dbg(g, attrs, "%-5s Done!", sgt ? "MAP" : "UNMAP");
+        __gmmu_dbg(g, attrs, "%-5s Done!",
+                   (sgt != NULL) ? "MAP" : "UNMAP");
 
         return err;
 }
@@ -700,7 +701,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
                 .sparse = sparse,
                 .priv = priv,
                 .coherent = flags & NVGPU_VM_MAP_IO_COHERENT,
-                .valid = !(flags & NVGPU_VM_MAP_UNMAPPED_PTE),
+                .valid = (flags & NVGPU_VM_MAP_UNMAPPED_PTE) == 0U,
                 .aperture = aperture
         };
 
@@ -727,9 +728,9 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
          * Only allocate a new GPU VA range if we haven't already been passed a
          * GPU VA range. This facilitates fixed mappings.
          */
-        if (!vaddr) {
+        if (vaddr == 0ULL) {
                 vaddr = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
-                if (!vaddr) {
+                if (vaddr == 0ULL) {
                         nvgpu_err(g, "failed to allocate va space");
                         err = -ENOMEM;
                         goto fail_alloc;
@@ -744,7 +745,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
                 goto fail_validate;
         }
 
-        if (!batch) {
+        if (batch == NULL) {
                 g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
         } else {
                 batch->need_tlb_invalidate = true;
@@ -800,7 +801,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
                 nvgpu_err(g, "failed to update gmmu ptes on unmap");
         }
 
-        if (!batch) {
+        if (batch == NULL) {
                 gk20a_mm_l2_flush(g, true);
                 g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
         } else {
@@ -823,7 +824,7 @@ u32 __nvgpu_pte_words(struct gk20a *g)
          */
         do {
                 next_l = l + 1;
-                if (!next_l->update_entry) {
+                if (next_l->update_entry == NULL) {
                         break;
                 }
 
@@ -859,7 +860,7 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
                 struct nvgpu_gmmu_pd *pd_next = pd->entries + pd_idx;
 
                 /* Invalid entry! */
-                if (!pd_next->mem) {
+                if (pd_next->mem == NULL) {
                         return -EINVAL;
                 }
 
@@ -875,7 +876,7 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
                              pd_offs_out);
         }
 
-        if (!pd->mem) {
+        if (pd->mem == NULL) {
                 return -EINVAL;
         }
 