path: root/drivers/gpu/nvgpu/common/mm/vm_area.c
author     Amulya <Amurthyreddy@nvidia.com>                      2018-08-28 03:04:55 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2018-09-19 06:24:12 -0400
commit     941ac9a9d07bedb4062fd0c4d32eb2ef80a42359 (patch)
tree       c53622d96a4c2e7c18693ecf4059d7e403cd7808 /drivers/gpu/nvgpu/common/mm/vm_area.c
parent     2805f03aa0496502b64ff760f667bfe9d8a27928 (diff)
nvgpu: common: MISRA 10.1 boolean fixes
Fix violations where a variable of type non-boolean is used as a
boolean in gpu/nvgpu/common.

JIRA NVGPU-646

Change-Id: I9773d863b715f83ae1772b75d5373f77244bc8ca
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807132
GVS: Gerrit_Virtual_Submit
Tested-by: Amulya Murthyreddy <amurthyreddy@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
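For context, the rule being enforced is MISRA C:2012 Rule 10.1: a non-boolean operand (here, pointers and u64 addresses) must not be used as a boolean, so "if (!ptr)" becomes an explicit comparison against NULL and "if (!addr)" becomes a comparison against 0ULL. The following minimal sketch shows that before/after shape in isolation; struct area, lookup_area() and alloc_addr() are hypothetical stand-ins for illustration only, not nvgpu types or APIs.

#include <stddef.h>  /* NULL */
#include <stdio.h>

/* Hypothetical stand-ins for illustration only; not nvgpu types or APIs. */
struct area { unsigned long long addr; };

static struct area *lookup_area(struct area *pool) { return pool; }
static unsigned long long alloc_addr(struct area *a)
{
        return (a != NULL) ? a->addr : 0ULL;
}

static int validate(struct area *pool)
{
        struct area *a = lookup_area(pool);
        unsigned long long vaddr = alloc_addr(a);

        /*
         * Before (violates MISRA 10.1 -- non-boolean used as a boolean):
         *     if (!a) { ... }
         *     if (!vaddr) { ... }
         *
         * After, the shape applied throughout this commit:
         */
        if (a == NULL) {
                return -1;
        }
        if (vaddr == 0ULL) {
                return -1;
        }
        return 0;
}

int main(void)
{
        struct area pool = { .addr = 0x100000ULL };

        printf("valid pool:   %d\n", validate(&pool)); /* prints 0  */
        printf("missing pool: %d\n", validate(NULL));  /* prints -1 */
        return 0;
}

The behaviour is unchanged in every hunk below; only the truth-value tests are made explicit.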
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm_area.c')
 drivers/gpu/nvgpu/common/mm/vm_area.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index d096de5d..ac4708af 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -66,13 +66,13 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
         /* Find the space reservation, but it's ok to have none for
          * userspace-managed address spaces */
         vm_area = nvgpu_vm_area_find(vm, map_addr);
-        if (!vm_area && !vm->userspace_managed) {
+        if (vm_area == NULL && !vm->userspace_managed) {
                 nvgpu_warn(g, "fixed offset mapping without space allocation");
                 return -EINVAL;
         }
 
         /* Mapped area should fit inside va, if there's one */
-        if (vm_area && map_end > vm_area->addr + vm_area->size) {
+        if (vm_area != NULL && map_end > vm_area->addr + vm_area->size) {
                 nvgpu_warn(g, "fixed offset mapping size overflows va node");
                 return -EINVAL;
         }
@@ -82,7 +82,7 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
          * that is less than our buffer end */
         buffer = __nvgpu_vm_find_mapped_buf_less_than(
                 vm, map_addr + map_size);
-        if (buffer && buffer->addr + buffer->size > map_addr) {
+        if (buffer != NULL && buffer->addr + buffer->size > map_addr) {
                 nvgpu_warn(g, "overlapping buffer map requested");
                 return -EINVAL;
         }
@@ -138,7 +138,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
         }
 
         vm_area = nvgpu_kzalloc(g, sizeof(*vm_area));
-        if (!vm_area) {
+        if (vm_area == NULL) {
                 goto clean_up_err;
         }
 
@@ -155,7 +155,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
                         page_size);
         }
 
-        if (!vaddr_start) {
+        if (vaddr_start == 0ULL) {
                 goto clean_up_err;
         }
 
@@ -183,7 +183,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
                         false,
                         NULL,
                         APERTURE_INVALID);
-        if (!map_addr) {
+        if (map_addr == 0ULL) {
                 nvgpu_mutex_release(&vm->update_gmmu_lock);
                 goto clean_up_err;
         }
@@ -215,7 +215,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr)
 
         nvgpu_mutex_acquire(&vm->update_gmmu_lock);
         vm_area = nvgpu_vm_area_find(vm, addr);
-        if (!vm_area) {
+        if (vm_area == NULL) {
                 nvgpu_mutex_release(&vm->update_gmmu_lock);
                 return 0;
         }