summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/vm.c
diff options
context:
space:
mode:
authorSai Nikhil <snikhil@nvidia.com>2018-08-17 01:20:17 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-08-29 11:59:31 -0400
commit2f97e683feed3c3ba3c8722c4f6ab7466bcef0c0 (patch)
treec0f90c3dc6909122cfde071efff8ff24d2b61471 /drivers/gpu/nvgpu/common/mm/vm.c
parent19cd7ffb5def933db323fe682ec4a263eb1923f9 (diff)
gpu: nvgpu: common: fix MISRA Rule 10.4
MISRA Rule 10.4 only allows the usage of arithmetic operations on operands of the same essential type category. Adding "U" at the end of the integer literals to have the same type of operands when an arithmetic operation is performed. This fixes violations where an arithmetic operation is performed on signed and unsigned int types. In balloc_get_order_list() the argument "int order" has been changed to a u64 because all callers of this function pass a u64 argument. JIRA NVGPU-992 Change-Id: Ie2964f9f1dfb2865a9bd6e6cdd65e7cda6c1f638 Signed-off-by: Sai Nikhil <snikhil@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1784419 Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com> Reviewed-by: Adeel Raza <araza@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--drivers/gpu/nvgpu/common/mm/vm.c14
1 file changed, 7 insertions, 7 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index e556be12..b364f4d6 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -150,7 +150,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
150 } 150 }
151 151
152 /* Be certain we round up to page_size if needed */ 152 /* Be certain we round up to page_size if needed */
153 size = (size + ((u64)page_size - 1)) & ~((u64)page_size - 1); 153 size = (size + ((u64)page_size - 1U)) & ~((u64)page_size - 1U);
154 154
155 addr = nvgpu_alloc(vma, size); 155 addr = nvgpu_alloc(vma, size);
156 if (!addr) { 156 if (!addr) {
@@ -202,7 +202,7 @@ void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm,
202 */ 202 */
203int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size) 203int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size)
204{ 204{
205 u64 mask = ((u64)vm->big_page_size << 10) - 1; 205 u64 mask = ((u64)vm->big_page_size << 10) - 1U;
206 206
207 if (base & mask || size & mask) { 207 if (base & mask || size & mask) {
208 return 0; 208 return 0;
@@ -252,7 +252,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
252 sema_sea->gpu_va = nvgpu_alloc_fixed(&vm->kernel, 252 sema_sea->gpu_va = nvgpu_alloc_fixed(&vm->kernel,
253 vm->va_limit - 253 vm->va_limit -
254 mm->channel.kernel_size, 254 mm->channel.kernel_size,
255 512 * PAGE_SIZE, 255 512U * PAGE_SIZE,
256 SZ_4K); 256 SZ_4K);
257 if (!sema_sea->gpu_va) { 257 if (!sema_sea->gpu_va) {
258 nvgpu_free(&vm->kernel, sema_sea->gpu_va); 258 nvgpu_free(&vm->kernel, sema_sea->gpu_va);
@@ -296,7 +296,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
296 return -ENOMEM; 296 return -ENOMEM;
297 } 297 }
298 298
299 if (WARN_ON(vm->guest_managed && kernel_reserved != 0)) { 299 if (WARN_ON(vm->guest_managed && kernel_reserved != 0U)) {
300 return -EINVAL; 300 return -EINVAL;
301 } 301 }
302 302
@@ -387,7 +387,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
387 } 387 }
388 388
389 kernel_vma_flags = (kernel_reserved + low_hole) == aperture_size ? 389 kernel_vma_flags = (kernel_reserved + low_hole) == aperture_size ?
390 0 : GPU_ALLOC_GVA_SPACE; 390 0U : GPU_ALLOC_GVA_SPACE;
391 391
392 /* 392 /*
393 * A "user" area only makes sense for the GVA spaces. For VMs where 393 * A "user" area only makes sense for the GVA spaces. For VMs where
@@ -967,7 +967,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
967 g, gk20a_cbc_op_clear, 967 g, gk20a_cbc_op_clear,
968 comptags.offset, 968 comptags.offset,
969 (comptags.offset + 969 (comptags.offset +
970 comptags.lines - 1)); 970 comptags.lines - 1U));
971 gk20a_comptags_finish_clear( 971 gk20a_comptags_finish_clear(
972 os_buf, err == 0); 972 os_buf, err == 0);
973 if (err) { 973 if (err) {
@@ -1036,7 +1036,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
1036 aperture); 1036 aperture);
1037 1037
1038 if (clear_ctags) { 1038 if (clear_ctags) {
1039 gk20a_comptags_finish_clear(os_buf, map_addr != 0); 1039 gk20a_comptags_finish_clear(os_buf, map_addr != 0U);
1040 } 1040 }
1041 1041
1042 if (!map_addr) { 1042 if (!map_addr) {