author     Sai Nikhil <snikhil@nvidia.com>                      2018-08-17 01:20:17 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-08-29 11:59:31 -0400
commit     2f97e683feed3c3ba3c8722c4f6ab7466bcef0c0 (patch)
tree       c0f90c3dc6909122cfde071efff8ff24d2b61471 /drivers/gpu/nvgpu/common/mm
parent     19cd7ffb5def933db323fe682ec4a263eb1923f9 (diff)
gpu: nvgpu: common: fix MISRA Rule 10.4
MISRA Rule 10.4 only allows arithmetic operations on operands of the
same essential type category. Add a "U" suffix to integer literals so
that both operands have the same essential type when an arithmetic
operation is performed. This fixes violations where an arithmetic
operation is performed on signed and unsigned int types.

In balloc_get_order_list() the argument "int order" has been changed
to a u64 because all callers of this function pass a u64 argument.

JIRA NVGPU-992

Change-Id: Ie2964f9f1dfb2865a9bd6e6cdd65e7cda6c1f638
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1784419
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
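To illustrate the rule with a minimal, hypothetical C sketch (not code from
this change; the helper names are invented): an unsuffixed literal such as
"1" is essentially signed, so combining it with an unsigned operand mixes
essential type categories, while the "U" suffix keeps both operands in the
unsigned category. The same reasoning applies to the (U64(1) << order)
change, which additionally avoids shifting a 32-bit int by a large order.

    #include <stdint.h>

    typedef uint64_t u64;

    /* Non-compliant under MISRA Rule 10.4: "1" is essentially signed,
     * while blk_size is essentially unsigned. */
    static u64 align_down_noncompliant(u64 addr, u64 blk_size)
    {
            return addr & ~(blk_size - 1);
    }

    /* Compliant: "1U" makes both operands essentially unsigned. */
    static u64 align_down_compliant(u64 addr, u64 blk_size)
    {
            return addr & ~(blk_size - 1U);
    }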
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/bitmap_allocator.c     |  8
-rw-r--r--  drivers/gpu/nvgpu/common/mm/buddy_allocator.c      | 52
-rw-r--r--  drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h |  4
-rw-r--r--  drivers/gpu/nvgpu/common/mm/comptags.c             |  6
-rw-r--r--  drivers/gpu/nvgpu/common/mm/gmmu.c                 | 10
-rw-r--r--  drivers/gpu/nvgpu/common/mm/nvgpu_mem.c            | 20
-rw-r--r--  drivers/gpu/nvgpu/common/mm/page_allocator.c       | 10
-rw-r--r--  drivers/gpu/nvgpu/common/mm/pd_cache.c             | 14
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c                   | 14
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm_area.c              |  2
10 files changed, 70 insertions, 70 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
index 5316783d..1edfda51 100644
--- a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
@@ -378,7 +378,7 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	int err;
 	struct nvgpu_bitmap_allocator *a;
 
-	if (WARN_ON(blk_size & (blk_size - 1))) {
+	if (WARN_ON(blk_size & (blk_size - 1U))) {
 		return -EINVAL;
 	}
 
@@ -386,12 +386,12 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	 * blk_size must be a power-of-2; base length also need to be aligned
 	 * to blk_size.
 	 */
-	if (blk_size & (blk_size - 1) ||
-	    base & (blk_size - 1) || length & (blk_size - 1)) {
+	if (blk_size & (blk_size - 1U) ||
+	    base & (blk_size - 1U) || length & (blk_size - 1U)) {
 		return -EINVAL;
 	}
 
-	if (base == 0) {
+	if (base == 0U) {
 		base = blk_size;
 		length -= blk_size;
 	}
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index e684e637..a9f90069 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -74,7 +74,7 @@ static void balloc_compute_max_order(struct nvgpu_buddy_allocator *a)
 {
 	u64 true_max_order = ilog2(a->blks);
 
-	if (a->max_order == 0) {
+	if (a->max_order == 0U) {
 		a->max_order = true_max_order;
 		return;
 	}
@@ -95,7 +95,7 @@ static void balloc_allocator_align(struct nvgpu_buddy_allocator *a)
 {
 	a->start = ALIGN(a->base, a->blk_size);
 	WARN_ON(a->start != a->base);
-	a->end = (a->base + a->length) & ~(a->blk_size - 1);
+	a->end = (a->base + a->length) & ~(a->blk_size - 1U);
 	a->count = a->end - a->start;
 	a->blks = a->count >> a->blk_shift;
 }
@@ -119,7 +119,7 @@ static struct nvgpu_buddy *balloc_new_buddy(struct nvgpu_buddy_allocator *a,
 	new_buddy->parent = parent;
 	new_buddy->start = start;
 	new_buddy->order = order;
-	new_buddy->end = start + (1 << order) * a->blk_size;
+	new_buddy->end = start + (U64(1) << order) * a->blk_size;
 	new_buddy->pte_size = BALLOC_PTE_SIZE_ANY;
 
 	return new_buddy;
@@ -185,7 +185,7 @@ static void balloc_blist_rem(struct nvgpu_buddy_allocator *a,
 
 static u64 balloc_get_order(struct nvgpu_buddy_allocator *a, u64 len)
 {
-	if (len == 0) {
+	if (len == 0U) {
 		return 0;
 	}
 
@@ -200,7 +200,7 @@ static u64 __balloc_max_order_in(struct nvgpu_buddy_allocator *a,
 {
 	u64 size = (end - start) >> a->blk_shift;
 
-	if (size > 0) {
+	if (size > 0U) {
 		return min_t(u64, ilog2(size), a->max_order);
 	} else {
 		return GPU_BALLOC_MAX_ORDER;
@@ -212,7 +212,7 @@ static u64 __balloc_max_order_in(struct nvgpu_buddy_allocator *a,
  */
 static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
 {
-	int i;
+	u32 i;
 	u64 bstart, bend, order;
 	struct nvgpu_buddy *buddy;
 
@@ -220,7 +220,7 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
 	bend = a->end;
 
 	/* First make sure the LLs are valid. */
-	for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
+	for (i = 0U; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
 		nvgpu_init_list_node(balloc_get_order_list(a, i));
 	}
 
@@ -239,7 +239,7 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
 	return 0;
 
 cleanup:
-	for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
+	for (i = 0U; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
 		if (!nvgpu_list_empty(balloc_get_order_list(a, i))) {
 			buddy = nvgpu_list_first_entry(
 				balloc_get_order_list(a, i),
@@ -257,7 +257,7 @@ cleanup:
  */
 static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na)
 {
-	int i;
+	u32 i;
 	struct nvgpu_rbtree_node *node = NULL;
 	struct nvgpu_buddy *bud;
 	struct nvgpu_fixed_alloc *falloc;
@@ -299,8 +299,8 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na)
 	/*
 	 * Now clean up the unallocated buddies.
 	 */
-	for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
-		BUG_ON(a->buddy_list_alloced[i] != 0);
+	for (i = 0U; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
+		BUG_ON(a->buddy_list_alloced[i] != 0U);
 
 		while (!nvgpu_list_empty(balloc_get_order_list(a, i))) {
 			bud = nvgpu_list_first_entry(
@@ -310,19 +310,19 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na)
 			nvgpu_kmem_cache_free(a->buddy_cache, bud);
 		}
 
-		if (a->buddy_list_len[i] != 0) {
+		if (a->buddy_list_len[i] != 0U) {
 			nvgpu_info(na->g,
 				"Excess buddies!!! (%d: %llu)",
 				i, a->buddy_list_len[i]);
 			BUG();
 		}
-		if (a->buddy_list_split[i] != 0) {
+		if (a->buddy_list_split[i] != 0U) {
 			nvgpu_info(na->g,
 				"Excess split nodes!!! (%d: %llu)",
 				i, a->buddy_list_split[i]);
 			BUG();
 		}
-		if (a->buddy_list_alloced[i] != 0) {
+		if (a->buddy_list_alloced[i] != 0U) {
 			nvgpu_info(na->g,
 				"Excess alloced nodes!!! (%d: %llu)",
 				i, a->buddy_list_alloced[i]);
@@ -392,14 +392,14 @@ static int balloc_split_buddy(struct nvgpu_buddy_allocator *a,
 	struct nvgpu_buddy *left, *right;
 	u64 half;
 
-	left = balloc_new_buddy(a, b, b->start, b->order - 1);
+	left = balloc_new_buddy(a, b, b->start, b->order - 1U);
 	if (!left) {
 		return -ENOMEM;
 	}
 
-	half = (b->end - b->start) / 2;
+	half = (b->end - b->start) / 2U;
 
-	right = balloc_new_buddy(a, b, b->start + half, b->order - 1);
+	right = balloc_new_buddy(a, b, b->start + half, b->order - 1U);
 	if (!right) {
 		nvgpu_kmem_cache_free(a->buddy_cache, left);
 		return -ENOMEM;
@@ -624,7 +624,7 @@ static void __balloc_get_parent_range(struct nvgpu_buddy_allocator *a,
 	u64 shifted_base = balloc_base_shift(a, base);
 
 	order++;
-	base_mask = ~((a->blk_size << order) - 1);
+	base_mask = ~((a->blk_size << order) - 1U);
 
 	shifted_base &= base_mask;
 
@@ -720,7 +720,7 @@ static u64 __balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a,
 	u64 align_order;
 
 	shifted_base = balloc_base_shift(a, base);
-	if (shifted_base == 0) {
+	if (shifted_base == 0U) {
 		align_order = __fls(len >> a->blk_shift);
 	} else {
 		align_order = min_t(u64,
@@ -871,11 +871,11 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
 	struct nvgpu_buddy_allocator *a = na->priv;
 
 	/* If base isn't aligned to an order 0 block, fail. */
-	if (base & (a->blk_size - 1)) {
+	if (base & (a->blk_size - 1U)) {
 		goto fail;
 	}
 
-	if (len == 0) {
+	if (len == 0U) {
 		goto fail;
 	}
 
@@ -1255,10 +1255,10 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	struct nvgpu_buddy_allocator *a;
 
 	/* blk_size must be greater than 0 and a power of 2. */
-	if (blk_size == 0) {
+	if (blk_size == 0U) {
 		return -EINVAL;
 	}
-	if (blk_size & (blk_size - 1)) {
+	if (blk_size & (blk_size - 1U)) {
 		return -EINVAL;
 	}
 
@@ -1291,7 +1291,7 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	 * If base is 0 then modfy base to be the size of one block so that we
 	 * can return errors by returning addr == 0.
 	 */
-	if (a->base == 0) {
+	if (a->base == 0U) {
 		a->base = a->blk_size;
 		a->length -= a->blk_size;
 	}
@@ -1308,8 +1308,8 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	 * requirement is not necessary.
 	 */
 	if (flags & GPU_ALLOC_GVA_SPACE && vm->big_pages &&
-	    (base & ((vm->big_page_size << 10) - 1) ||
-	     size & ((vm->big_page_size << 10) - 1))) {
+	    (base & ((vm->big_page_size << 10) - 1U) ||
+	     size & ((vm->big_page_size << 10) - 1U))) {
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
index c9e332a5..fe3926b9 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
@@ -159,7 +159,7 @@ struct nvgpu_buddy_allocator {
 	/*
 	 * Impose an upper bound on the maximum order.
 	 */
-#define GPU_BALLOC_ORDER_LIST_LEN (GPU_BALLOC_MAX_ORDER + 1)
+#define GPU_BALLOC_ORDER_LIST_LEN (GPU_BALLOC_MAX_ORDER + 1U)
 
 	struct nvgpu_list_node buddy_list[GPU_BALLOC_ORDER_LIST_LEN];
 	u64 buddy_list_len[GPU_BALLOC_ORDER_LIST_LEN];
@@ -190,7 +190,7 @@ static inline struct nvgpu_buddy_allocator *buddy_allocator(
 }
 
 static inline struct nvgpu_list_node *balloc_get_order_list(
-	struct nvgpu_buddy_allocator *a, int order)
+	struct nvgpu_buddy_allocator *a, u64 order)
 {
 	return &a->buddy_list[order];
 }
diff --git a/drivers/gpu/nvgpu/common/mm/comptags.c b/drivers/gpu/nvgpu/common/mm/comptags.c
index 0926e78e..e6c99702 100644
--- a/drivers/gpu/nvgpu/common/mm/comptags.c
+++ b/drivers/gpu/nvgpu/common/mm/comptags.c
@@ -37,7 +37,7 @@ int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator,
 					      0, len, 0);
 	if (addr < allocator->size) {
 		/* number zero is reserved; bitmap base is 1 */
-		*offset = 1 + addr;
+		*offset = 1U + addr;
 		bitmap_set(allocator->bitmap, addr, len);
 	} else {
 		err = -ENOMEM;
@@ -51,9 +51,9 @@ void gk20a_comptaglines_free(struct gk20a_comptag_allocator *allocator,
 			    u32 offset, u32 len)
 {
 	/* number zero is reserved; bitmap base is 1 */
-	u32 addr = offset - 1;
+	u32 addr = offset - 1U;
 
-	WARN_ON(offset == 0);
+	WARN_ON(offset == 0U);
 	WARN_ON(addr > allocator->size);
 	WARN_ON(addr + len > allocator->size);
 
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 02273393..47d1e8ee 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -98,7 +98,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 	 * therefor we should not try and free it. But otherwise, if we do
 	 * manage the VA alloc, we obviously must free it.
 	 */
-	if (addr != 0) {
+	if (addr != 0U) {
 		mem->free_gpu_va = false;
 	} else {
 		mem->free_gpu_va = true;
@@ -300,7 +300,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 static u32 pd_index(const struct gk20a_mmu_level *l, u64 virt,
 		    struct nvgpu_gmmu_attrs *attrs)
 {
-	u64 pd_mask = (1ULL << ((u64)l->hi_bit[attrs->pgsz] + 1)) - 1ULL;
+	u64 pd_mask = (1ULL << ((u64)l->hi_bit[attrs->pgsz] + 1U)) - 1ULL;
 	u32 pd_shift = (u64)l->lo_bit[attrs->pgsz];
 
 	/*
@@ -399,7 +399,7 @@ static int __set_pd_level(struct vm_gk20a *vm,
 	 * start at a PDE boundary.
 	 */
 	chunk_size = min(length,
-			 pde_range - (virt_addr & (pde_range - 1)));
+			 pde_range - (virt_addr & (pde_range - 1U)));
 
 	/*
 	 * If the next level has an update_entry function then we know
@@ -573,7 +573,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 		virt_addr += chunk_length;
 		length -= chunk_length;
 
-		if (length == 0) {
+		if (length == 0U) {
 			break;
 		}
 	}
@@ -615,7 +615,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 
 	page_size = vm->gmmu_page_sizes[attrs->pgsz];
 
-	if (space_to_skip & (page_size - 1)) {
+	if (space_to_skip & (page_size - 1U)) {
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 345b947d..ab75b136 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -205,15 +205,15 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 
 u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
 {
-	WARN_ON(offset & 3);
+	WARN_ON(offset & 3U);
 	return nvgpu_mem_rd32(g, mem, offset / sizeof(u32));
 }
 
 void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
 		    u32 offset, void *dest, u32 size)
 {
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
+	WARN_ON(offset & 3U);
+	WARN_ON(size & 3U);
 
 	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *src = (u8 *)mem->cpu_va + offset;
@@ -246,15 +246,15 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 
 void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data)
 {
-	WARN_ON(offset & 3);
+	WARN_ON(offset & 3U);
 	nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data);
 }
 
 void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		    void *src, u32 size)
 {
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
+	WARN_ON(offset & 3U);
+	WARN_ON(size & 3U);
 
 	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *dest = (u8 *)mem->cpu_va + offset;
@@ -274,11 +274,11 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		  u32 c, u32 size)
 {
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
-	WARN_ON(c & ~0xff);
+	WARN_ON(offset & 3U);
+	WARN_ON(size & 3U);
+	WARN_ON(c & ~0xffU);
 
-	c &= 0xff;
+	c &= 0xffU;
 
 	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *dest = (u8 *)mem->cpu_va + offset;
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index f6d70435..3225f170 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -317,8 +317,8 @@ static void free_slab_page(struct nvgpu_page_allocator *a,
 	palloc_dbg(a, "Freeing slab page @ 0x%012llx", slab_page->page_addr);
 
 	BUG_ON((slab_page->state != SP_NONE && slab_page->state != SP_EMPTY) ||
-	       slab_page->nr_objects_alloced != 0 ||
-	       slab_page->bitmap != 0);
+	       slab_page->nr_objects_alloced != 0U ||
+	       slab_page->bitmap != 0U);
 
 	nvgpu_free(&a->source_allocator, slab_page->page_addr);
 	a->pages_freed++;
@@ -471,7 +471,7 @@ static void __nvgpu_free_slab(struct nvgpu_page_allocator *a,
 
 	slab_page->nr_objects_alloced--;
 
-	if (slab_page->nr_objects_alloced == 0) {
+	if (slab_page->nr_objects_alloced == 0U) {
 		new_state = SP_EMPTY;
 	} else {
 		new_state = SP_PARTIAL;
@@ -684,7 +684,7 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *na, u64 len)
 
 	alloc_lock(na);
 	if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES &&
-	    real_len <= (a->page_size / 2)) {
+	    real_len <= (a->page_size / 2U)) {
 		alloc = __nvgpu_alloc_slab(a, real_len);
 	} else {
 		alloc = __nvgpu_alloc_pages(a, real_len);
@@ -698,7 +698,7 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *na, u64 len)
 	__insert_page_alloc(a, alloc);
 
 	a->nr_allocs++;
-	if (real_len > a->page_size / 2) {
+	if (real_len > a->page_size / 2U) {
 		a->pages_alloced += alloc->length >> a->page_shift;
 	}
 	alloc_unlock(na);
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index db48d168..335ef360 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -76,20 +76,20 @@
 
 static u32 nvgpu_pd_cache_nr(u32 bytes)
 {
-	return ilog2(bytes >> (NVGPU_PD_CACHE_MIN_SHIFT - 1));
+	return ilog2(bytes >> (NVGPU_PD_CACHE_MIN_SHIFT - 1U));
 }
 
 static u32 nvgpu_pd_cache_get_mask(struct nvgpu_pd_mem_entry *pentry)
 {
 	u32 mask_offset = 1 << (PAGE_SIZE / pentry->pd_size);
 
-	return mask_offset - 1;
+	return mask_offset - 1U;
 }
 
 int nvgpu_pd_cache_init(struct gk20a *g)
 {
 	struct nvgpu_pd_cache *cache;
-	int i;
+	u32 i;
 
 	/*
 	 * This gets called from finalize_poweron() so we need to make sure we
@@ -105,7 +105,7 @@ int nvgpu_pd_cache_init(struct gk20a *g)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < NVGPU_PD_CACHE_COUNT; i++) {
+	for (i = 0U; i < NVGPU_PD_CACHE_COUNT; i++) {
 		nvgpu_init_list_node(&cache->full[i]);
 		nvgpu_init_list_node(&cache->partial[i]);
 	}
@@ -121,14 +121,14 @@ int nvgpu_pd_cache_init(struct gk20a *g)
 
 void nvgpu_pd_cache_fini(struct gk20a *g)
 {
-	int i;
+	u32 i;
 	struct nvgpu_pd_cache *cache = g->mm.pd_cache;
 
 	if (!cache) {
 		return;
 	}
 
-	for (i = 0; i < NVGPU_PD_CACHE_COUNT; i++) {
+	for (i = 0U; i < NVGPU_PD_CACHE_COUNT; i++) {
 		WARN_ON(!nvgpu_list_empty(&cache->full[i]));
 		WARN_ON(!nvgpu_list_empty(&cache->partial[i]));
 	}
@@ -305,7 +305,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache,
 
 	pd_dbg(g, "PD-Alloc [C] %u bytes", bytes);
 
-	if (bytes & (bytes - 1) ||
+	if (bytes & (bytes - 1U) ||
 	    (bytes >= PAGE_SIZE ||
 	     bytes < NVGPU_PD_CACHE_MIN)) {
 		pd_dbg(g, "PD-Alloc [C] Invalid (bytes=%u)!", bytes);
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index e556be12..b364f4d6 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -150,7 +150,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 	}
 
 	/* Be certain we round up to page_size if needed */
-	size = (size + ((u64)page_size - 1)) & ~((u64)page_size - 1);
+	size = (size + ((u64)page_size - 1U)) & ~((u64)page_size - 1U);
 
 	addr = nvgpu_alloc(vma, size);
 	if (!addr) {
@@ -202,7 +202,7 @@ void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm,
  */
 int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size)
 {
-	u64 mask = ((u64)vm->big_page_size << 10) - 1;
+	u64 mask = ((u64)vm->big_page_size << 10) - 1U;
 
 	if (base & mask || size & mask) {
 		return 0;
@@ -252,7 +252,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
 	sema_sea->gpu_va = nvgpu_alloc_fixed(&vm->kernel,
 					     vm->va_limit -
 					     mm->channel.kernel_size,
-					     512 * PAGE_SIZE,
+					     512U * PAGE_SIZE,
 					     SZ_4K);
 	if (!sema_sea->gpu_va) {
 		nvgpu_free(&vm->kernel, sema_sea->gpu_va);
@@ -296,7 +296,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 		return -ENOMEM;
 	}
 
-	if (WARN_ON(vm->guest_managed && kernel_reserved != 0)) {
+	if (WARN_ON(vm->guest_managed && kernel_reserved != 0U)) {
 		return -EINVAL;
 	}
 
@@ -387,7 +387,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	}
 
 	kernel_vma_flags = (kernel_reserved + low_hole) == aperture_size ?
-		0 : GPU_ALLOC_GVA_SPACE;
+		0U : GPU_ALLOC_GVA_SPACE;
 
 	/*
 	 * A "user" area only makes sense for the GVA spaces. For VMs where
@@ -967,7 +967,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 					g, gk20a_cbc_op_clear,
 					comptags.offset,
 					(comptags.offset +
-					 comptags.lines - 1));
+					 comptags.lines - 1U));
 				gk20a_comptags_finish_clear(
 					os_buf, err == 0);
 				if (err) {
@@ -1036,7 +1036,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 					  aperture);
 
 	if (clear_ctags) {
-		gk20a_comptags_finish_clear(os_buf, map_addr != 0);
+		gk20a_comptags_finish_clear(os_buf, map_addr != 0U);
 	}
 
 	if (!map_addr) {
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index 7e2b5c34..c2c0d569 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -57,7 +57,7 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
 		return -EINVAL;
 	}
 
-	if (map_addr & (vm->gmmu_page_sizes[pgsz_idx] - 1)) {
+	if (map_addr & (vm->gmmu_page_sizes[pgsz_idx] - 1U)) {
 		nvgpu_err(g, "map offset must be buffer page size aligned 0x%llx",
 			  map_addr);
 		return -EINVAL;