author    Amulya <Amurthyreddy@nvidia.com>    2018-08-28 03:04:55 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-09-19 06:24:12 -0400
commit    941ac9a9d07bedb4062fd0c4d32eb2ef80a42359 (patch)
tree      c53622d96a4c2e7c18693ecf4059d7e403cd7808 /drivers/gpu/nvgpu/common/mm/vm.c
parent    2805f03aa0496502b64ff760f667bfe9d8a27928 (diff)
nvgpu: common: MISRA 10.1 boolean fixes
Fix violations where a variable of type non-boolean is used as a boolean in gpu/nvgpu/common.

JIRA NVGPU-646

Change-Id: I9773d863b715f83ae1772b75d5373f77244bc8ca
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807132
GVS: Gerrit_Virtual_Submit
Tested-by: Amulya Murthyreddy <amurthyreddy@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
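MISRA C:2012 Rule 10.1 requires the controlling expression of an if/while and the operands of the logical operators to be essentially boolean, so the changes below replace implicit truth tests on pointers, integers, and bit-masks with explicit comparisons against NULL, 0, or 0U. A minimal, hypothetical sketch of the transformation, using illustrative names that are not taken from this patch:

#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>

#define MAP_FIXED_FLAG 0x1U	/* illustrative flag, not an nvgpu define */

/*
 * Before the fix, a condition like this used non-boolean operands directly:
 *
 *	if (!buf || err || (flags & MAP_FIXED_FLAG))
 *		return false;
 *
 * After the fix, every operand is compared explicitly, so the controlling
 * expression is built only from boolean sub-expressions.
 */
static bool request_is_valid(const void *buf, int err, uint32_t flags)
{
	if ((buf == NULL) || (err != 0) || ((flags & MAP_FIXED_FLAG) != 0U)) {
		return false;
	}
	return true;
}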
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c  61
1 file changed, 32 insertions, 29 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 17e49969..98bad70b 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -59,7 +59,7 @@ static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer,
 
 int vm_aspace_id(struct vm_gk20a *vm)
 {
-	return vm->as_share ? vm->as_share->id : -1;
+	return (vm->as_share != NULL) ? vm->as_share->id : -1;
 }
 
 /*
@@ -112,7 +112,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
 
 	nvgpu_pd_cache_free_direct(g, pdb);
 
-	if (!pdb->entries) {
+	if (pdb->entries == NULL) {
 		return;
 	}
 
@@ -153,7 +153,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 	size = (size + ((u64)page_size - 1U)) & ~((u64)page_size - 1U);
 
 	addr = nvgpu_alloc_pte(vma, size, page_size);
-	if (!addr) {
+	if (addr == 0ULL) {
 		nvgpu_err(g, "(%s) oom: sz=0x%llx", vma->name, size);
 		return 0;
 	}
@@ -200,14 +200,16 @@ void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm,
 /*
  * Determine if the passed address space can support big pages or not.
  */
-int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size)
+bool nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size)
 {
-	u64 mask = ((u64)vm->big_page_size << 10) - 1U;
+	u64 mask = ((u64)vm->big_page_size << 10ULL) - 1ULL;
+	u64 base_big_page = base & mask;
+	u64 size_big_page = size & mask;
 
-	if (base & mask || size & mask) {
-		return 0;
+	if (base_big_page != 0ULL || size_big_page != 0ULL) {
+		return false;
 	}
-	return 1;
+	return true;
 }
 
 /*
@@ -233,12 +235,12 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
 	}
 
 	sema_sea = nvgpu_semaphore_sea_create(g);
-	if (!sema_sea) {
+	if (sema_sea == NULL) {
 		return -ENOMEM;
 	}
 
 	err = nvgpu_semaphore_pool_alloc(sema_sea, &vm->sema_pool);
-	if (err) {
+	if (err != 0) {
 		return err;
 	}
 
@@ -254,7 +256,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
 					mm->channel.kernel_size,
 					512U * PAGE_SIZE,
 					SZ_4K);
-	if (!sema_sea->gpu_va) {
+	if (sema_sea->gpu_va == 0ULL) {
 		nvgpu_free(&vm->kernel, sema_sea->gpu_va);
 		nvgpu_vm_put(vm);
 		return -ENOMEM;
@@ -387,7 +389,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	}
 
 	kernel_vma_flags = (kernel_reserved + low_hole) == aperture_size ?
-		0U : GPU_ALLOC_GVA_SPACE;
+		0ULL : GPU_ALLOC_GVA_SPACE;
 
 	/*
 	 * A "user" area only makes sense for the GVA spaces. For VMs where
@@ -579,7 +581,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
 {
 	struct vm_gk20a *vm = nvgpu_kzalloc(g, sizeof(*vm));
 
-	if (!vm) {
+	if (vm == NULL) {
 		return NULL;
 	}
 
@@ -615,7 +617,8 @@ static void __nvgpu_vm_remove(struct vm_gk20a *vm)
 		}
 	}
 
-	if (nvgpu_mem_is_valid(&g->syncpt_mem) && vm->syncpt_ro_map_gpu_va) {
+	if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
+			vm->syncpt_ro_map_gpu_va != 0ULL) {
 		nvgpu_gmmu_unmap(vm, &g->syncpt_mem,
 				vm->syncpt_ro_map_gpu_va);
 	}
@@ -701,7 +704,7 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf(
 	struct nvgpu_rbtree_node *root = vm->mapped_buffers;
 
 	nvgpu_rbtree_search(addr, &node, root);
-	if (!node) {
+	if (node == NULL) {
 		return NULL;
 	}
 
@@ -715,7 +718,7 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_range(
 	struct nvgpu_rbtree_node *root = vm->mapped_buffers;
 
 	nvgpu_rbtree_range_search(addr, &node, root);
-	if (!node) {
+	if (node == NULL) {
 		return NULL;
 	}
 
@@ -729,7 +732,7 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than(
 	struct nvgpu_rbtree_node *root = vm->mapped_buffers;
 
 	nvgpu_rbtree_less_than_search(addr, &node, root);
-	if (!node) {
+	if (node == NULL) {
 		return NULL;
 	}
 
@@ -755,7 +758,7 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
 
 	buffer_list = nvgpu_big_zalloc(vm->mm->g, sizeof(*buffer_list) *
 				       vm->num_user_mapped_buffers);
-	if (!buffer_list) {
+	if (buffer_list == NULL) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		return -ENOMEM;
 	}
@@ -841,7 +844,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 	u8 pte_kind;
 
 	if (vm->userspace_managed &&
-	    !(flags & NVGPU_VM_MAP_FIXED_OFFSET)) {
+	    (flags & NVGPU_VM_MAP_FIXED_OFFSET) == 0U) {
 		nvgpu_err(g,
 			  "non-fixed-offset mapping not available on "
 			  "userspace managed address spaces");
@@ -883,7 +886,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 	 * Generate a new mapping!
 	 */
 	mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer));
-	if (!mapped_buffer) {
+	if (mapped_buffer == NULL) {
 		nvgpu_warn(g, "oom allocating tracking buffer");
 		return ERR_PTR(-ENOMEM);
 	}
@@ -895,7 +898,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 		binfo.pgsz_idx = nvgpu_vm_get_pte_size(vm, map_addr,
 					min_t(u64, binfo.size, align));
 	}
-	map_size = map_size ? map_size : binfo.size;
+	map_size = (map_size != 0ULL) ? map_size : binfo.size;
 	map_size = ALIGN(map_size, SZ_4K);
 
 	if ((map_size > binfo.size) ||
@@ -929,7 +932,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 	}
 
 	if ((binfo.compr_kind != NVGPU_KIND_INVALID) &&
-	    (flags & NVGPU_VM_MAP_FIXED_OFFSET)) {
+	    ((flags & NVGPU_VM_MAP_FIXED_OFFSET) != 0U)) {
 		/*
 		 * Fixed-address compressible mapping is
 		 * requested. Make sure we're respecting the alignment
@@ -1008,7 +1011,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 	/*
 	 * Figure out the kind and ctag offset for the GMMU page tables
 	 */
-	if (binfo.compr_kind != NVGPU_KIND_INVALID && ctag_offset) {
+	if (binfo.compr_kind != NVGPU_KIND_INVALID && ctag_offset != 0U) {
 		/*
 		 * Adjust the ctag_offset as per the buffer map offset
 		 */
@@ -1054,7 +1057,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 		gk20a_comptags_finish_clear(os_buf, map_addr != 0U);
 	}
 
-	if (!map_addr) {
+	if (map_addr == 0ULL) {
 		err = -ENOMEM;
 		goto clean_up;
 	}
@@ -1096,7 +1099,7 @@ clean_up:
 				     mapped_buffer->pgsz_idx,
 				     mapped_buffer->va_allocated,
 				     gk20a_mem_flag_none,
-				     mapped_buffer->vm_area ?
+				     (mapped_buffer->vm_area != NULL) ?
 				     mapped_buffer->vm_area->sparse : false,
 				     NULL);
 	}
@@ -1125,7 +1128,7 @@ static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer,
 			   mapped_buffer->pgsz_idx,
 			   mapped_buffer->va_allocated,
 			   gk20a_mem_flag_none,
-			   mapped_buffer->vm_area ?
+			   (mapped_buffer->vm_area != NULL) ?
 			   mapped_buffer->vm_area->sparse : false,
 			   batch);
 
@@ -1185,8 +1188,8 @@ static int nvgpu_vm_unmap_sync_buffer(struct vm_gk20a *vm,
 			break;
 		}
 		nvgpu_msleep(10);
-	} while (!nvgpu_timeout_expired_msg(&timeout,
-					    "sync-unmap failed on 0x%llx"));
+	} while (nvgpu_timeout_expired_msg(&timeout,
+					   "sync-unmap failed on 0x%llx") == 0);
 
 	if (nvgpu_timeout_expired(&timeout)) {
 		ret = -ETIMEDOUT;
@@ -1205,7 +1208,7 @@ void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset,
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
 	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
-	if (!mapped_buffer) {
+	if (mapped_buffer == NULL) {
 		goto done;
 	}
 
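The return-type change to nvgpu_big_pages_possible() is representative of the series: a 0/1 int becomes a real bool, and the mask tests inside it become explicit comparisons. A self-contained sketch of the reworked shape and of a hypothetical caller follows; the stand-in type and the caller are assumptions for illustration, not code from this file:

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the nvgpu VM type; big_page_size is in KiB. */
struct vm_stub {
	uint32_t big_page_size;
};

/*
 * Mirrors the reworked helper: returns bool, and each mask test is an
 * explicit comparison instead of an implicit boolean.
 */
static bool big_pages_possible(const struct vm_stub *vm, uint64_t base,
			       uint64_t size)
{
	uint64_t mask = ((uint64_t)vm->big_page_size << 10ULL) - 1ULL;

	if (((base & mask) != 0ULL) || ((size & mask) != 0ULL)) {
		return false;
	}
	return true;
}

/* Hypothetical caller: the bool result is used directly, no "!= 0" needed. */
static uint32_t pick_pgsz_idx(const struct vm_stub *vm, uint64_t base,
			      uint64_t size)
{
	return big_pages_possible(vm, base, size) ? 1U : 0U;
}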