Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/gmmu.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/gmmu.c | 10
1 files changed, 5 insertions, 5 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 02273393..47d1e8ee 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -98,7 +98,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 	 * therefor we should not try and free it. But otherwise, if we do
 	 * manage the VA alloc, we obviously must free it.
 	 */
-	if (addr != 0) {
+	if (addr != 0U) {
 		mem->free_gpu_va = false;
 	} else {
 		mem->free_gpu_va = true;
@@ -300,7 +300,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 static u32 pd_index(const struct gk20a_mmu_level *l, u64 virt,
 		    struct nvgpu_gmmu_attrs *attrs)
 {
-	u64 pd_mask = (1ULL << ((u64)l->hi_bit[attrs->pgsz] + 1)) - 1ULL;
+	u64 pd_mask = (1ULL << ((u64)l->hi_bit[attrs->pgsz] + 1U)) - 1ULL;
 	u32 pd_shift = (u64)l->lo_bit[attrs->pgsz];
 
 	/*
@@ -399,7 +399,7 @@ static int __set_pd_level(struct vm_gk20a *vm,
 		 * start at a PDE boundary.
 		 */
 		chunk_size = min(length,
-				 pde_range - (virt_addr & (pde_range - 1)));
+				 pde_range - (virt_addr & (pde_range - 1U)));
 
 		/*
 		 * If the next level has an update_entry function then we know
@@ -573,7 +573,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 		virt_addr += chunk_length;
 		length -= chunk_length;
 
-		if (length == 0) {
+		if (length == 0U) {
 			break;
 		}
 	}
@@ -615,7 +615,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 
 	page_size = vm->gmmu_page_sizes[attrs->pgsz];
 
-	if (space_to_skip & (page_size - 1)) {
+	if (space_to_skip & (page_size - 1U)) {
 		return -EINVAL;
 	}
 
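For reference, below is a minimal stand-alone sketch of the mask/shift arithmetic touched in the pd_index() hunk above, with the unsigned literal suffixes this patch adds. The function and the hi_bit/lo_bit values here are hypothetical, chosen only for illustration; in the driver the bit positions come from the per-chip gk20a_mmu_level tables.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical sketch: extract the page-directory index for one MMU
 * level by masking the virtual address up to hi_bit and shifting down
 * by lo_bit, mirroring the arithmetic in pd_index().
 */
static uint32_t pd_index_sketch(uint64_t virt, uint32_t hi_bit, uint32_t lo_bit)
{
	/*
	 * Unsigned literals (1ULL, 1U) keep the whole expression in
	 * unsigned arithmetic, which is what the U suffixes added by
	 * this change are for.
	 */
	uint64_t pd_mask = (1ULL << ((uint64_t)hi_bit + 1U)) - 1ULL;

	return (uint32_t)((virt & pd_mask) >> lo_bit);
}

int main(void)
{
	/* Example VA and a made-up level spanning bits 37..26. */
	uint64_t virt = 0x12345678000ULL;

	printf("pd index = %u\n", pd_index_sketch(virt, 37U, 26U));
	return 0;
}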