Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c  |  14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index e556be12..b364f4d6 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -150,7 +150,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 	}
 
 	/* Be certain we round up to page_size if needed */
-	size = (size + ((u64)page_size - 1)) & ~((u64)page_size - 1);
+	size = (size + ((u64)page_size - 1U)) & ~((u64)page_size - 1U);
 
 	addr = nvgpu_alloc(vma, size);
 	if (!addr) {
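The expression rewritten in this hunk is the standard power-of-two align-up idiom: adding page_size - 1 and masking with its complement rounds size up to the next page boundary. A minimal standalone sketch of the idiom, assuming page_size is a power of two (align_up and the sample values are illustrative, not nvgpu code):

#include <stdint.h>
#include <stdio.h>

/* Round size up to the next multiple of page_size.
 * Valid only when page_size is a power of two. */
static uint64_t align_up(uint64_t size, uint64_t page_size)
{
	return (size + (page_size - 1U)) & ~(page_size - 1U);
}

int main(void)
{
	/* 5000 bytes round up to 8192 with 4 KiB pages. */
	printf("%llu\n", (unsigned long long)align_up(5000U, 4096U));
	return 0;
}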
@@ -202,7 +202,7 @@ void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm,
  */
 int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size)
 {
-	u64 mask = ((u64)vm->big_page_size << 10) - 1;
+	u64 mask = ((u64)vm->big_page_size << 10) - 1U;
 
 	if (base & mask || size & mask) {
 		return 0;
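The mask built from vm->big_page_size << 10 is all ones below the big-page granule, so a nonzero base & mask or size & mask means misalignment. A hedged sketch of the same check (big_pages_possible and its parameter names are stand-ins here, and the << 10 simply mirrors the expression in the hunk above):

#include <stdbool.h>
#include <stdint.h>

/* True when both base and size sit on the granule implied by
 * big_page_size << 10; a power-of-two granule is assumed. */
static bool big_pages_possible(uint64_t base, uint64_t size,
			       uint32_t big_page_size)
{
	uint64_t mask = ((uint64_t)big_page_size << 10) - 1U;

	return (base & mask) == 0U && (size & mask) == 0U;
}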
@@ -252,7 +252,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
 	sema_sea->gpu_va = nvgpu_alloc_fixed(&vm->kernel,
 					     vm->va_limit -
 					     mm->channel.kernel_size,
-					     512 * PAGE_SIZE,
+					     512U * PAGE_SIZE,
 					     SZ_4K);
 	if (!sema_sea->gpu_va) {
 		nvgpu_free(&vm->kernel, sema_sea->gpu_va);
@@ -296,7 +296,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 		return -ENOMEM;
 	}
 
-	if (WARN_ON(vm->guest_managed && kernel_reserved != 0)) {
+	if (WARN_ON(vm->guest_managed && kernel_reserved != 0U)) {
 		return -EINVAL;
 	}
 
@@ -387,7 +387,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	}
 
 	kernel_vma_flags = (kernel_reserved + low_hole) == aperture_size ?
-		0 : GPU_ALLOC_GVA_SPACE;
+		0U : GPU_ALLOC_GVA_SPACE;
 
 	/*
 	 * A "user" area only makes sense for the GVA spaces. For VMs where
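Without the suffix, this conditional mixes a signed 0 with the unsigned GPU_ALLOC_GVA_SPACE flag, so the two arms have different essential type categories. A small sketch of the corrected pattern (FLAG_X and pick_flags are made-up names):

#include <stdbool.h>
#include <stdint.h>

#define FLAG_X 0x4U /* stand-in for an unsigned flag such as GPU_ALLOC_GVA_SPACE */

static uint32_t pick_flags(bool covers_whole_aperture)
{
	/* 0U keeps both arms of the ternary unsigned, so the result
	 * has the same type whichever branch is taken. */
	return covers_whole_aperture ? 0U : FLAG_X;
}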
@@ -967,7 +967,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 					g, gk20a_cbc_op_clear,
 					comptags.offset,
 					(comptags.offset +
-					 comptags.lines - 1));
+					 comptags.lines - 1U));
 				gk20a_comptags_finish_clear(
 					os_buf, err == 0);
 				if (err) {
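The clear operation takes an inclusive range, so the last compression-tag line is offset + lines - 1. The same computation in isolation (ctag_range_end is an illustrative name, not an nvgpu helper):

#include <stdint.h>

/* Inclusive end index of a run of `lines` comptag lines starting at
 * `offset`; the 1U keeps the subtraction in unsigned arithmetic. */
static uint32_t ctag_range_end(uint32_t offset, uint32_t lines)
{
	return offset + lines - 1U;
}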
@@ -1036,7 +1036,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 				   aperture);
 
 	if (clear_ctags) {
-		gk20a_comptags_finish_clear(os_buf, map_addr != 0);
+		gk20a_comptags_finish_clear(os_buf, map_addr != 0U);
 	}
 
 	if (!map_addr) {
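Every hunk in this patch applies the same cleanup: integer constants paired with unsigned operands gain a U suffix so both operands of each operator share the unsigned essential type category, which reads as a MISRA C:2012 essential-type cleanup (cf. Rule 10.4, though the commit message is not shown on this page). The pattern in isolation (function names are illustrative):

#include <stdint.h>

/* Before: the literal 1 is essentially signed, giving the subtraction
 * mixed essential type categories. */
static uint64_t mask_before(uint64_t x) { return x - 1; }

/* After: 1U is essentially unsigned, matching x's category. */
static uint64_t mask_after(uint64_t x) { return x - 1U; }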