author	Sai Nikhil <snikhil@nvidia.com>	2018-08-17 01:20:17 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-29 11:59:31 -0400
commit	2f97e683feed3c3ba3c8722c4f6ab7466bcef0c0 (patch)
tree	c0f90c3dc6909122cfde071efff8ff24d2b61471
parent	19cd7ffb5def933db323fe682ec4a263eb1923f9 (diff)
gpu: nvgpu: common: fix MISRA Rule 10.4
MISRA Rule 10.4 only allows arithmetic operations on operands of the
same essential type category. Add "U" at the end of integer literals
so that both operands of an arithmetic operation have the same
essential type. This fixes violations where an arithmetic operation
is performed on signed and unsigned int types.

In balloc_get_order_list() the argument "int order" has been changed
to a u64 because all callers of this function pass a u64 argument.

JIRA NVGPU-992

Change-Id: Ie2964f9f1dfb2865a9bd6e6cdd65e7cda6c1f638
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1784419
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--  drivers/gpu/nvgpu/common/mm/bitmap_allocator.c      |   8
-rw-r--r--  drivers/gpu/nvgpu/common/mm/buddy_allocator.c       |  52
-rw-r--r--  drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h  |   4
-rw-r--r--  drivers/gpu/nvgpu/common/mm/comptags.c              |   6
-rw-r--r--  drivers/gpu/nvgpu/common/mm/gmmu.c                  |  10
-rw-r--r--  drivers/gpu/nvgpu/common/mm/nvgpu_mem.c             |  20
-rw-r--r--  drivers/gpu/nvgpu/common/mm/page_allocator.c        |  10
-rw-r--r--  drivers/gpu/nvgpu/common/mm/pd_cache.c              |  14
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c                    |  14
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm_area.c               |   2
-rw-r--r--  drivers/gpu/nvgpu/common/vbios/bios.c               | 102
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/allocator.h         |  12
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/gmmu.h              |   6
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/posix/types.h       |   2
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/vm.h                |  12
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/vm_area.h           |   4
16 files changed, 139 insertions, 139 deletions
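
Most of the hunks below follow one pattern: an arithmetic expression mixed an unsigned operand with a plain (essentially signed) integer literal, and the literal gains a "U" suffix. As a minimal sketch of the rule (illustrative only, not code from this patch; the function name is ours):

#include <stdint.h>

typedef uint64_t u64;

/*
 * MISRA C:2012 Rule 10.4: both operands of an arithmetic operator
 * must share an essential type category.
 */
static u64 round_down(u64 addr, u64 blk_size)
{
	/* Non-compliant: "1" is essentially signed, blk_size unsigned. */
	/* return addr & ~(blk_size - 1); */

	/* Compliant: the U suffix makes the literal unsigned too. */
	return addr & ~(blk_size - 1U);
}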
diff --git a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
index 5316783d..1edfda51 100644
--- a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
@@ -378,7 +378,7 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	int err;
 	struct nvgpu_bitmap_allocator *a;
 
-	if (WARN_ON(blk_size & (blk_size - 1))) {
+	if (WARN_ON(blk_size & (blk_size - 1U))) {
 		return -EINVAL;
 	}
 
@@ -386,12 +386,12 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	 * blk_size must be a power-of-2; base length also need to be aligned
 	 * to blk_size.
 	 */
-	if (blk_size & (blk_size - 1) ||
-	    base & (blk_size - 1) || length & (blk_size - 1)) {
+	if (blk_size & (blk_size - 1U) ||
+	    base & (blk_size - 1U) || length & (blk_size - 1U)) {
 		return -EINVAL;
 	}
 
-	if (base == 0) {
+	if (base == 0U) {
 		base = blk_size;
 		length -= blk_size;
 	}
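
The blk_size & (blk_size - 1U) test above is the usual single-set-bit check: a nonzero power of two has exactly one bit set, so clearing its lowest set bit yields zero. A standalone sketch (the function name is ours, not the driver's):

#include <stdbool.h>
#include <stdint.h>

static bool is_power_of_two(uint64_t x)
{
	/* x & (x - 1U) clears the lowest set bit of x. */
	return (x != 0U) && ((x & (x - 1U)) == 0U);
}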
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index e684e637..a9f90069 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -74,7 +74,7 @@ static void balloc_compute_max_order(struct nvgpu_buddy_allocator *a)
 {
 	u64 true_max_order = ilog2(a->blks);
 
-	if (a->max_order == 0) {
+	if (a->max_order == 0U) {
 		a->max_order = true_max_order;
 		return;
 	}
@@ -95,7 +95,7 @@ static void balloc_allocator_align(struct nvgpu_buddy_allocator *a)
 {
 	a->start = ALIGN(a->base, a->blk_size);
 	WARN_ON(a->start != a->base);
-	a->end = (a->base + a->length) & ~(a->blk_size - 1);
+	a->end = (a->base + a->length) & ~(a->blk_size - 1U);
 	a->count = a->end - a->start;
 	a->blks = a->count >> a->blk_shift;
 }
@@ -119,7 +119,7 @@ static struct nvgpu_buddy *balloc_new_buddy(struct nvgpu_buddy_allocator *a,
 	new_buddy->parent = parent;
 	new_buddy->start = start;
 	new_buddy->order = order;
-	new_buddy->end = start + (1 << order) * a->blk_size;
+	new_buddy->end = start + (U64(1) << order) * a->blk_size;
 	new_buddy->pte_size = BALLOC_PTE_SIZE_ANY;
 
 	return new_buddy;
@@ -185,7 +185,7 @@ static void balloc_blist_rem(struct nvgpu_buddy_allocator *a,
 
 static u64 balloc_get_order(struct nvgpu_buddy_allocator *a, u64 len)
 {
-	if (len == 0) {
+	if (len == 0U) {
 		return 0;
 	}
 
@@ -200,7 +200,7 @@ static u64 __balloc_max_order_in(struct nvgpu_buddy_allocator *a,
 {
 	u64 size = (end - start) >> a->blk_shift;
 
-	if (size > 0) {
+	if (size > 0U) {
 		return min_t(u64, ilog2(size), a->max_order);
 	} else {
 		return GPU_BALLOC_MAX_ORDER;
@@ -212,7 +212,7 @@ static u64 __balloc_max_order_in(struct nvgpu_buddy_allocator *a,
  */
 static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
 {
-	int i;
+	u32 i;
 	u64 bstart, bend, order;
 	struct nvgpu_buddy *buddy;
 
@@ -220,7 +220,7 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
 	bend = a->end;
 
 	/* First make sure the LLs are valid. */
-	for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
+	for (i = 0U; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
 		nvgpu_init_list_node(balloc_get_order_list(a, i));
 	}
 
@@ -239,7 +239,7 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
 	return 0;
 
 cleanup:
-	for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
+	for (i = 0U; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
 		if (!nvgpu_list_empty(balloc_get_order_list(a, i))) {
 			buddy = nvgpu_list_first_entry(
 				balloc_get_order_list(a, i),
@@ -257,7 +257,7 @@ cleanup:
  */
 static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na)
 {
-	int i;
+	u32 i;
 	struct nvgpu_rbtree_node *node = NULL;
 	struct nvgpu_buddy *bud;
 	struct nvgpu_fixed_alloc *falloc;
@@ -299,8 +299,8 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na)
 	/*
 	 * Now clean up the unallocated buddies.
 	 */
-	for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
-		BUG_ON(a->buddy_list_alloced[i] != 0);
+	for (i = 0U; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
+		BUG_ON(a->buddy_list_alloced[i] != 0U);
 
 		while (!nvgpu_list_empty(balloc_get_order_list(a, i))) {
 			bud = nvgpu_list_first_entry(
@@ -310,19 +310,19 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na)
 			nvgpu_kmem_cache_free(a->buddy_cache, bud);
 		}
 
-		if (a->buddy_list_len[i] != 0) {
+		if (a->buddy_list_len[i] != 0U) {
 			nvgpu_info(na->g,
 				   "Excess buddies!!! (%d: %llu)",
 				   i, a->buddy_list_len[i]);
 			BUG();
 		}
-		if (a->buddy_list_split[i] != 0) {
+		if (a->buddy_list_split[i] != 0U) {
 			nvgpu_info(na->g,
 				   "Excess split nodes!!! (%d: %llu)",
 				   i, a->buddy_list_split[i]);
 			BUG();
 		}
-		if (a->buddy_list_alloced[i] != 0) {
+		if (a->buddy_list_alloced[i] != 0U) {
 			nvgpu_info(na->g,
 				   "Excess alloced nodes!!! (%d: %llu)",
 				   i, a->buddy_list_alloced[i]);
@@ -392,14 +392,14 @@ static int balloc_split_buddy(struct nvgpu_buddy_allocator *a,
 	struct nvgpu_buddy *left, *right;
 	u64 half;
 
-	left = balloc_new_buddy(a, b, b->start, b->order - 1);
+	left = balloc_new_buddy(a, b, b->start, b->order - 1U);
 	if (!left) {
 		return -ENOMEM;
 	}
 
-	half = (b->end - b->start) / 2;
+	half = (b->end - b->start) / 2U;
 
-	right = balloc_new_buddy(a, b, b->start + half, b->order - 1);
+	right = balloc_new_buddy(a, b, b->start + half, b->order - 1U);
 	if (!right) {
 		nvgpu_kmem_cache_free(a->buddy_cache, left);
 		return -ENOMEM;
@@ -624,7 +624,7 @@ static void __balloc_get_parent_range(struct nvgpu_buddy_allocator *a,
 	u64 shifted_base = balloc_base_shift(a, base);
 
 	order++;
-	base_mask = ~((a->blk_size << order) - 1);
+	base_mask = ~((a->blk_size << order) - 1U);
 
 	shifted_base &= base_mask;
 
@@ -720,7 +720,7 @@ static u64 __balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a,
 	u64 align_order;
 
 	shifted_base = balloc_base_shift(a, base);
-	if (shifted_base == 0) {
+	if (shifted_base == 0U) {
 		align_order = __fls(len >> a->blk_shift);
 	} else {
 		align_order = min_t(u64,
@@ -871,11 +871,11 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
 	struct nvgpu_buddy_allocator *a = na->priv;
 
 	/* If base isn't aligned to an order 0 block, fail. */
-	if (base & (a->blk_size - 1)) {
+	if (base & (a->blk_size - 1U)) {
 		goto fail;
 	}
 
-	if (len == 0) {
+	if (len == 0U) {
 		goto fail;
 	}
 
@@ -1255,10 +1255,10 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	struct nvgpu_buddy_allocator *a;
 
 	/* blk_size must be greater than 0 and a power of 2. */
-	if (blk_size == 0) {
+	if (blk_size == 0U) {
 		return -EINVAL;
 	}
-	if (blk_size & (blk_size - 1)) {
+	if (blk_size & (blk_size - 1U)) {
 		return -EINVAL;
 	}
 
@@ -1291,7 +1291,7 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	 * If base is 0 then modfy base to be the size of one block so that we
 	 * can return errors by returning addr == 0.
 	 */
-	if (a->base == 0) {
+	if (a->base == 0U) {
 		a->base = a->blk_size;
 		a->length -= a->blk_size;
 	}
@@ -1308,8 +1308,8 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	 * requirement is not necessary.
 	 */
 	if (flags & GPU_ALLOC_GVA_SPACE && vm->big_pages &&
-	    (base & ((vm->big_page_size << 10) - 1) ||
-	     size & ((vm->big_page_size << 10) - 1))) {
+	    (base & ((vm->big_page_size << 10) - 1U) ||
+	     size & ((vm->big_page_size << 10) - 1U))) {
 		return -EINVAL;
 	}
 
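
Besides the U suffixes, balloc_new_buddy() above gains U64(1) << order in place of 1 << order. The distinction matters beyond MISRA: a plain 1 is an int, so the shift is undefined once order reaches the width of int, whereas widening the literal first keeps the whole computation in u64. A hedged sketch (the local U64() stand-in is an assumption, not the driver's header):

#include <stdint.h>

typedef uint64_t u64;

#define U64(x) ((u64)(x))	/* hypothetical stand-in */

static u64 order_to_len(u64 order, u64 blk_size)
{
	/* (1 << order) would be an int shift, undefined for order >= 31. */
	return (U64(1) << order) * blk_size;
}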
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
index c9e332a5..fe3926b9 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
@@ -159,7 +159,7 @@ struct nvgpu_buddy_allocator {
 	/*
 	 * Impose an upper bound on the maximum order.
 	 */
-#define GPU_BALLOC_ORDER_LIST_LEN (GPU_BALLOC_MAX_ORDER + 1)
+#define GPU_BALLOC_ORDER_LIST_LEN (GPU_BALLOC_MAX_ORDER + 1U)
 
 	struct nvgpu_list_node buddy_list[GPU_BALLOC_ORDER_LIST_LEN];
 	u64 buddy_list_len[GPU_BALLOC_ORDER_LIST_LEN];
@@ -190,7 +190,7 @@ static inline struct nvgpu_buddy_allocator *buddy_allocator(
 }
 
 static inline struct nvgpu_list_node *balloc_get_order_list(
-	struct nvgpu_buddy_allocator *a, int order)
+	struct nvgpu_buddy_allocator *a, u64 order)
 {
 	return &a->buddy_list[order];
 }
diff --git a/drivers/gpu/nvgpu/common/mm/comptags.c b/drivers/gpu/nvgpu/common/mm/comptags.c
index 0926e78e..e6c99702 100644
--- a/drivers/gpu/nvgpu/common/mm/comptags.c
+++ b/drivers/gpu/nvgpu/common/mm/comptags.c
@@ -37,7 +37,7 @@ int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator,
 					     0, len, 0);
 	if (addr < allocator->size) {
 		/* number zero is reserved; bitmap base is 1 */
-		*offset = 1 + addr;
+		*offset = 1U + addr;
 		bitmap_set(allocator->bitmap, addr, len);
 	} else {
 		err = -ENOMEM;
@@ -51,9 +51,9 @@ void gk20a_comptaglines_free(struct gk20a_comptag_allocator *allocator,
 			     u32 offset, u32 len)
 {
 	/* number zero is reserved; bitmap base is 1 */
-	u32 addr = offset - 1;
+	u32 addr = offset - 1U;
 
-	WARN_ON(offset == 0);
+	WARN_ON(offset == 0U);
 	WARN_ON(addr > allocator->size);
 	WARN_ON(addr + len > allocator->size);
 
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 02273393..47d1e8ee 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -98,7 +98,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 	 * therefor we should not try and free it. But otherwise, if we do
 	 * manage the VA alloc, we obviously must free it.
 	 */
-	if (addr != 0) {
+	if (addr != 0U) {
 		mem->free_gpu_va = false;
 	} else {
 		mem->free_gpu_va = true;
@@ -300,7 +300,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 static u32 pd_index(const struct gk20a_mmu_level *l, u64 virt,
 		    struct nvgpu_gmmu_attrs *attrs)
 {
-	u64 pd_mask = (1ULL << ((u64)l->hi_bit[attrs->pgsz] + 1)) - 1ULL;
+	u64 pd_mask = (1ULL << ((u64)l->hi_bit[attrs->pgsz] + 1U)) - 1ULL;
 	u32 pd_shift = (u64)l->lo_bit[attrs->pgsz];
 
 	/*
@@ -399,7 +399,7 @@ static int __set_pd_level(struct vm_gk20a *vm,
 	 * start at a PDE boundary.
 	 */
 	chunk_size = min(length,
-			 pde_range - (virt_addr & (pde_range - 1)));
+			 pde_range - (virt_addr & (pde_range - 1U)));
 
 	/*
 	 * If the next level has an update_entry function then we know
@@ -573,7 +573,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 		virt_addr += chunk_length;
 		length -= chunk_length;
 
-		if (length == 0) {
+		if (length == 0U) {
 			break;
 		}
 	}
@@ -615,7 +615,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 
 	page_size = vm->gmmu_page_sizes[attrs->pgsz];
 
-	if (space_to_skip & (page_size - 1)) {
+	if (space_to_skip & (page_size - 1U)) {
 		return -EINVAL;
 	}
 
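
For context on the pd_index() hunk above: the mask keeps VA bits [0, hi_bit] and the shift then discards the bits below lo_bit, yielding the page-directory index at that level. A sketch with made-up bit positions (hi_bit = 37 and lo_bit = 29 are assumptions, not values from a real gk20a_mmu_level table):

#include <stdint.h>

static uint32_t pd_index_sketch(uint64_t virt)
{
	const uint64_t hi_bit = 37U, lo_bit = 29U;
	uint64_t pd_mask = (1ULL << (hi_bit + 1U)) - 1ULL;

	/* Keep bits [0, hi_bit], then drop bits below lo_bit. */
	return (uint32_t)((virt & pd_mask) >> lo_bit);
}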
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 345b947d..ab75b136 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -205,15 +205,15 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 
 u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
 {
-	WARN_ON(offset & 3);
+	WARN_ON(offset & 3U);
 	return nvgpu_mem_rd32(g, mem, offset / sizeof(u32));
 }
 
 void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
 		    u32 offset, void *dest, u32 size)
 {
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
+	WARN_ON(offset & 3U);
+	WARN_ON(size & 3U);
 
 	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *src = (u8 *)mem->cpu_va + offset;
@@ -246,15 +246,15 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 
 void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data)
 {
-	WARN_ON(offset & 3);
+	WARN_ON(offset & 3U);
 	nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data);
 }
 
 void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		    void *src, u32 size)
 {
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
+	WARN_ON(offset & 3U);
+	WARN_ON(size & 3U);
 
 	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *dest = (u8 *)mem->cpu_va + offset;
@@ -274,11 +274,11 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		  u32 c, u32 size)
 {
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
-	WARN_ON(c & ~0xff);
+	WARN_ON(offset & 3U);
+	WARN_ON(size & 3U);
+	WARN_ON(c & ~0xffU);
 
-	c &= 0xff;
+	c &= 0xffU;
 
 	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *dest = (u8 *)mem->cpu_va + offset;
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index f6d70435..3225f170 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -317,8 +317,8 @@ static void free_slab_page(struct nvgpu_page_allocator *a,
 	palloc_dbg(a, "Freeing slab page @ 0x%012llx", slab_page->page_addr);
 
 	BUG_ON((slab_page->state != SP_NONE && slab_page->state != SP_EMPTY) ||
-	       slab_page->nr_objects_alloced != 0 ||
-	       slab_page->bitmap != 0);
+	       slab_page->nr_objects_alloced != 0U ||
+	       slab_page->bitmap != 0U);
 
 	nvgpu_free(&a->source_allocator, slab_page->page_addr);
 	a->pages_freed++;
@@ -471,7 +471,7 @@ static void __nvgpu_free_slab(struct nvgpu_page_allocator *a,
 
 	slab_page->nr_objects_alloced--;
 
-	if (slab_page->nr_objects_alloced == 0) {
+	if (slab_page->nr_objects_alloced == 0U) {
 		new_state = SP_EMPTY;
 	} else {
 		new_state = SP_PARTIAL;
@@ -684,7 +684,7 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *na, u64 len)
 
 	alloc_lock(na);
 	if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES &&
-	    real_len <= (a->page_size / 2)) {
+	    real_len <= (a->page_size / 2U)) {
 		alloc = __nvgpu_alloc_slab(a, real_len);
 	} else {
 		alloc = __nvgpu_alloc_pages(a, real_len);
@@ -698,7 +698,7 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *na, u64 len)
 	__insert_page_alloc(a, alloc);
 
 	a->nr_allocs++;
-	if (real_len > a->page_size / 2) {
+	if (real_len > a->page_size / 2U) {
 		a->pages_alloced += alloc->length >> a->page_shift;
 	}
 	alloc_unlock(na);
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index db48d168..335ef360 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -76,20 +76,20 @@
 
 static u32 nvgpu_pd_cache_nr(u32 bytes)
 {
-	return ilog2(bytes >> (NVGPU_PD_CACHE_MIN_SHIFT - 1));
+	return ilog2(bytes >> (NVGPU_PD_CACHE_MIN_SHIFT - 1U));
 }
 
 static u32 nvgpu_pd_cache_get_mask(struct nvgpu_pd_mem_entry *pentry)
 {
 	u32 mask_offset = 1 << (PAGE_SIZE / pentry->pd_size);
 
-	return mask_offset - 1;
+	return mask_offset - 1U;
 }
 
 int nvgpu_pd_cache_init(struct gk20a *g)
 {
 	struct nvgpu_pd_cache *cache;
-	int i;
+	u32 i;
 
 	/*
 	 * This gets called from finalize_poweron() so we need to make sure we
@@ -105,7 +105,7 @@ int nvgpu_pd_cache_init(struct gk20a *g)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < NVGPU_PD_CACHE_COUNT; i++) {
+	for (i = 0U; i < NVGPU_PD_CACHE_COUNT; i++) {
 		nvgpu_init_list_node(&cache->full[i]);
 		nvgpu_init_list_node(&cache->partial[i]);
 	}
@@ -121,14 +121,14 @@ int nvgpu_pd_cache_init(struct gk20a *g)
 
 void nvgpu_pd_cache_fini(struct gk20a *g)
 {
-	int i;
+	u32 i;
 	struct nvgpu_pd_cache *cache = g->mm.pd_cache;
 
 	if (!cache) {
 		return;
 	}
 
-	for (i = 0; i < NVGPU_PD_CACHE_COUNT; i++) {
+	for (i = 0U; i < NVGPU_PD_CACHE_COUNT; i++) {
 		WARN_ON(!nvgpu_list_empty(&cache->full[i]));
 		WARN_ON(!nvgpu_list_empty(&cache->partial[i]));
 	}
@@ -305,7 +305,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache,
 
 	pd_dbg(g, "PD-Alloc [C] %u bytes", bytes);
 
-	if (bytes & (bytes - 1) ||
+	if (bytes & (bytes - 1U) ||
 	    (bytes >= PAGE_SIZE ||
 	     bytes < NVGPU_PD_CACHE_MIN)) {
 		pd_dbg(g, "PD-Alloc [C] Invalid (bytes=%u)!", bytes);
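
Several pd_cache.c and buddy_allocator.c hunks change loop counters from int to u32: the loop bounds (NVGPU_PD_CACHE_COUNT, GPU_BALLOC_ORDER_LIST_LEN) are now essentially unsigned, so the counter must match to keep i < bound within one type category. A minimal sketch (the visitor callback is ours, for illustration):

#include <stdint.h>

#define NVGPU_PD_CACHE_COUNT 4U

static void visit_all_lists(void (*visit)(uint32_t idx))
{
	uint32_t i;	/* "int i" would make i < 4U a mixed comparison */

	for (i = 0U; i < NVGPU_PD_CACHE_COUNT; i++) {
		visit(i);
	}
}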
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index e556be12..b364f4d6 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -150,7 +150,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 	}
 
 	/* Be certain we round up to page_size if needed */
-	size = (size + ((u64)page_size - 1)) & ~((u64)page_size - 1);
+	size = (size + ((u64)page_size - 1U)) & ~((u64)page_size - 1U);
 
 	addr = nvgpu_alloc(vma, size);
 	if (!addr) {
@@ -202,7 +202,7 @@ void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm,
  */
 int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size)
 {
-	u64 mask = ((u64)vm->big_page_size << 10) - 1;
+	u64 mask = ((u64)vm->big_page_size << 10) - 1U;
 
 	if (base & mask || size & mask) {
 		return 0;
@@ -252,7 +252,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
 	sema_sea->gpu_va = nvgpu_alloc_fixed(&vm->kernel,
 					     vm->va_limit -
 					     mm->channel.kernel_size,
-					     512 * PAGE_SIZE,
+					     512U * PAGE_SIZE,
 					     SZ_4K);
 	if (!sema_sea->gpu_va) {
 		nvgpu_free(&vm->kernel, sema_sea->gpu_va);
@@ -296,7 +296,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 		return -ENOMEM;
 	}
 
-	if (WARN_ON(vm->guest_managed && kernel_reserved != 0)) {
+	if (WARN_ON(vm->guest_managed && kernel_reserved != 0U)) {
 		return -EINVAL;
 	}
 
@@ -387,7 +387,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	}
 
 	kernel_vma_flags = (kernel_reserved + low_hole) == aperture_size ?
-		0 : GPU_ALLOC_GVA_SPACE;
+		0U : GPU_ALLOC_GVA_SPACE;
 
 	/*
 	 * A "user" area only makes sense for the GVA spaces. For VMs where
@@ -967,7 +967,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 						g, gk20a_cbc_op_clear,
 						comptags.offset,
 						(comptags.offset +
-						 comptags.lines - 1));
+						 comptags.lines - 1U));
 					gk20a_comptags_finish_clear(
 						os_buf, err == 0);
 					if (err) {
@@ -1036,7 +1036,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 			  aperture);
 
 	if (clear_ctags) {
-		gk20a_comptags_finish_clear(os_buf, map_addr != 0);
+		gk20a_comptags_finish_clear(os_buf, map_addr != 0U);
 	}
 
 	if (!map_addr) {
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index 7e2b5c34..c2c0d569 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -57,7 +57,7 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
 		return -EINVAL;
 	}
 
-	if (map_addr & (vm->gmmu_page_sizes[pgsz_idx] - 1)) {
+	if (map_addr & (vm->gmmu_page_sizes[pgsz_idx] - 1U)) {
 		nvgpu_err(g, "map offset must be buffer page size aligned 0x%llx",
 			  map_addr);
 		return -EINVAL;
diff --git a/drivers/gpu/nvgpu/common/vbios/bios.c b/drivers/gpu/nvgpu/common/vbios/bios.c
index 0760a6cd..fc82c2e9 100644
--- a/drivers/gpu/nvgpu/common/vbios/bios.c
+++ b/drivers/gpu/nvgpu/common/vbios/bios.c
@@ -26,15 +26,15 @@
 
 #include "gk20a/gk20a.h"
 
-#define BIT_HEADER_ID 0xb8ff
-#define BIT_HEADER_SIGNATURE 0x00544942
-#define PCI_EXP_ROM_SIG 0xaa55
-#define PCI_EXP_ROM_SIG_NV 0x4e56
+#define BIT_HEADER_ID 0xb8ffU
+#define BIT_HEADER_SIGNATURE 0x00544942U
+#define PCI_EXP_ROM_SIG 0xaa55U
+#define PCI_EXP_ROM_SIG_NV 0x4e56U
 
-#define INIT_DONE 0x71
-#define INIT_RESUME 0x72
-#define INIT_CONDITION 0x75
-#define INIT_XMEMSEL_ZM_NV_REG_ARRAY 0x8f
+#define INIT_DONE 0x71U
+#define INIT_RESUME 0x72U
+#define INIT_CONDITION 0x75U
+#define INIT_XMEMSEL_ZM_NV_REG_ARRAY 0x8fU
 
 struct condition_entry {
 	u32 cond_addr;
@@ -67,18 +67,18 @@ struct bit {
 	u8 header_checksum;
 } __packed;
 
-#define TOKEN_ID_BIOSDATA 0x42
-#define TOKEN_ID_NVINIT_PTRS 0x49
-#define TOKEN_ID_FALCON_DATA 0x70
-#define TOKEN_ID_PERF_PTRS 0x50
-#define TOKEN_ID_CLOCK_PTRS 0x43
-#define TOKEN_ID_VIRT_PTRS 0x56
-#define TOKEN_ID_MEMORY_PTRS 0x4D
+#define TOKEN_ID_BIOSDATA 0x42U
+#define TOKEN_ID_NVINIT_PTRS 0x49U
+#define TOKEN_ID_FALCON_DATA 0x70U
+#define TOKEN_ID_PERF_PTRS 0x50U
+#define TOKEN_ID_CLOCK_PTRS 0x43U
+#define TOKEN_ID_VIRT_PTRS 0x56U
+#define TOKEN_ID_MEMORY_PTRS 0x4DU
 
-#define NVLINK_CONFIG_DATA_HDR_VER_10 0x1
-#define NVLINK_CONFIG_DATA_HDR_10_SIZE 16
-#define NVLINK_CONFIG_DATA_HDR_11_SIZE 17
-#define NVLINK_CONFIG_DATA_HDR_12_SIZE 21
+#define NVLINK_CONFIG_DATA_HDR_VER_10 0x1U
+#define NVLINK_CONFIG_DATA_HDR_10_SIZE 16U
+#define NVLINK_CONFIG_DATA_HDR_11_SIZE 17U
+#define NVLINK_CONFIG_DATA_HDR_12_SIZE 21U
 
 struct nvlink_config_data_hdr_v1 {
 	u8 version;
@@ -91,8 +91,8 @@ struct nvlink_config_data_hdr_v1 {
 	u32 ac_coupling_mask;
 } __packed;
 
-#define MEMORY_PTRS_V1 1
-#define MEMORY_PTRS_V2 2
+#define MEMORY_PTRS_V1 1U
+#define MEMORY_PTRS_V2 2U
 
 struct memory_ptrs_v1 {
 	u8 rsvd0[2];
@@ -155,11 +155,11 @@ struct falcon_ucode_table_entry_v1 {
 	u32 desc_ptr;
 } __packed;
 
-#define TARGET_ID_PMU 0x01
-#define APPLICATION_ID_DEVINIT 0x04
-#define APPLICATION_ID_PRE_OS 0x01
+#define TARGET_ID_PMU 0x01U
+#define APPLICATION_ID_DEVINIT 0x04U
+#define APPLICATION_ID_PRE_OS 0x01U
 
-#define FALCON_UCODE_FLAGS_VERSION_AVAILABLE 0x1
+#define FALCON_UCODE_FLAGS_VERSION_AVAILABLE 0x1U
 #define FALCON_UCODE_IS_VERSION_AVAILABLE(hdr) \
 	((hdr.v2.v_desc & FALCON_UCODE_FLAGS_VERSION_AVAILABLE) == \
 	FALCON_UCODE_FLAGS_VERSION_AVAILABLE)
@@ -170,10 +170,10 @@ struct falcon_ucode_table_entry_v1 {
  */
 
 #define FALCON_UCODE_GET_VERSION(hdr) \
-	((hdr.v2.v_desc >> 8) & 0xff)
+	((hdr.v2.v_desc >> 8) & 0xffU)
 
 #define FALCON_UCODE_GET_DESC_SIZE(hdr) \
-	((hdr.v2.v_desc >> 16) & 0xffff)
+	((hdr.v2.v_desc >> 16) & 0xffffU)
 
 struct falcon_ucode_desc_v1 {
 	union {
@@ -228,7 +228,7 @@ struct application_interface_entry_v1 {
 	u32 dmem_offset;
 } __packed;
 
-#define APPINFO_ID_DEVINIT 0x01
+#define APPINFO_ID_DEVINIT 0x01U
 
 struct devinit_engine_interface {
 	u16 version;
@@ -316,7 +316,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
 			pci_data->last_image,
 			pci_data->max_runtime_image_len);
 
-		if (pci_data->code_type == 0x3) {
+		if (pci_data->code_type == 0x3U) {
 			pci_ext_data = (struct pci_ext_data_struct *)
 				&g->bios.data[(offset +
 					       pci_rom->pci_data_struct_ptr +
@@ -332,9 +332,9 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
 				     pci_ext_data->flags);
 
 			nvgpu_log_fn(g, "expansion rom offset %x",
-				     pci_data->image_len * 512);
+				     pci_data->image_len * 512U);
 			g->bios.expansion_rom_offset =
-				pci_data->image_len * 512;
+				(u32)pci_data->image_len * 512U;
 			offset += pci_ext_data->sub_image_len * 512;
 			last = pci_ext_data->priv_last_image;
 		} else {
@@ -344,9 +344,9 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
 	}
 
 	nvgpu_log_info(g, "read bios");
-	for (i = 0; i < g->bios.size - 6; i++) {
+	for (i = 0; i < g->bios.size - 6U; i++) {
 		if (nvgpu_bios_rdu16(g, i) == BIT_HEADER_ID &&
-		    nvgpu_bios_rdu32(g, i+2) == BIT_HEADER_SIGNATURE) {
+		    nvgpu_bios_rdu32(g, i+2U) == BIT_HEADER_SIGNATURE) {
 			nvgpu_bios_parse_bit(g, i);
 			found = true;
 		}
@@ -394,7 +394,7 @@ u32 nvgpu_bios_get_nvlink_config_data(struct gk20a *g)
 {
 	struct nvlink_config_data_hdr_v1 config;
 
-	if (g->bios.nvlink_config_data_offset == 0) {
+	if (g->bios.nvlink_config_data_offset == 0U) {
 		return -EINVAL;
 	}
 
@@ -460,7 +460,7 @@ static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, int dmem_offset)
 		interface.script_phys_base,
 		interface.script_size);
 
-	if (interface.version != 1) {
+	if (interface.version != 1U) {
 		return;
 	}
 	g->bios.devinit_tables_phys_base = interface.tables_phys_base;
@@ -478,7 +478,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
 		hdr.version, hdr.header_size,
 		hdr.entry_size, hdr.entry_count);
 
-	if (hdr.version != 1) {
+	if (hdr.version != 1U) {
 		return 0;
 	}
 
@@ -588,7 +588,7 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
 		hdr.entry_size, hdr.entry_count,
 		hdr.desc_version, hdr.desc_size);
 
-	if (hdr.version != 1) {
+	if (hdr.version != 1U) {
 		return -EINVAL;
 	}
 
@@ -697,9 +697,9 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
 					(table_id * data_size)),
 				perf_table_id_offset);
 
-	if (perf_table_id_offset != 0) {
+	if (perf_table_id_offset != 0U) {
 		/* check is perf_table_id_offset is > 64k */
-		if (perf_table_id_offset & ~0xFFFF) {
+		if (perf_table_id_offset & ~0xFFFFU) {
 			perf_table_ptr =
 				&g->bios.data[g->bios.expansion_rom_offset +
 					      perf_table_id_offset];
@@ -747,7 +747,7 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
 		nvgpu_bios_parse_nvinit_ptrs(g, bit_token.data_ptr);
 		break;
 	case TOKEN_ID_FALCON_DATA:
-		if (bit_token.data_version == 2) {
+		if (bit_token.data_version == 2U) {
 			nvgpu_bios_parse_falcon_data_v2(g,
 							bit_token.data_ptr);
 		}
@@ -790,7 +790,7 @@ s8 nvgpu_bios_read_s8(struct gk20a *g, u32 offset)
 {
 	u32 val;
 	val = __nvgpu_bios_readbyte(g, offset);
-	val = val & 0x80 ? (val | ~0xff) : val;
+	val = val & 0x80U ? (val | ~0xffU) : val;
 
 	return (s8) val;
 }
@@ -800,7 +800,7 @@ u16 nvgpu_bios_read_u16(struct gk20a *g, u32 offset)
 	u16 val;
 
 	val = __nvgpu_bios_readbyte(g, offset) |
-		(__nvgpu_bios_readbyte(g, offset+1) << 8);
+		(__nvgpu_bios_readbyte(g, offset+1U) << 8U);
 
 	return val;
 }
@@ -810,9 +810,9 @@ u32 nvgpu_bios_read_u32(struct gk20a *g, u32 offset)
 	u32 val;
 
 	val = __nvgpu_bios_readbyte(g, offset) |
-		(__nvgpu_bios_readbyte(g, offset+1) << 8) |
-		(__nvgpu_bios_readbyte(g, offset+2) << 16) |
-		(__nvgpu_bios_readbyte(g, offset+3) << 24);
+		(__nvgpu_bios_readbyte(g, offset+1U) << 8U) |
+		(__nvgpu_bios_readbyte(g, offset+2U) << 16U) |
+		(__nvgpu_bios_readbyte(g, offset+3U) << 24U);
 
 	return val;
 }
@@ -825,7 +825,7 @@ static void nvgpu_bios_init_xmemsel_zm_nv_reg_array(struct gk20a *g, bool *condi
 
 	if (*condition) {
 
-		strap = gk20a_readl(g, gc6_sci_strap_r()) & 0xf;
+		strap = gk20a_readl(g, gc6_sci_strap_r()) & 0xfU;
 
 		index = g->bios.mem_strap_xlat_tbl_ptr ?
 			nvgpu_bios_read_u8(g, g->bios.mem_strap_xlat_tbl_ptr +
@@ -849,9 +849,9 @@ static void gp106_init_condition(struct gk20a *g, bool *condition,
 	entry.cond_addr = nvgpu_bios_read_u32(g, g->bios.condition_table_ptr +
 		sizeof(entry)*condition_id);
 	entry.cond_mask = nvgpu_bios_read_u32(g, g->bios.condition_table_ptr +
-		sizeof(entry)*condition_id + 4);
+		sizeof(entry)*condition_id + 4U);
 	entry.cond_compare = nvgpu_bios_read_u32(g, g->bios.condition_table_ptr +
-		sizeof(entry)*condition_id + 8);
+		sizeof(entry)*condition_id + 8U);
 
 	if ((gk20a_readl(g, entry.cond_addr) & entry.cond_mask)
 		!= entry.cond_compare) {
@@ -879,9 +879,9 @@ int nvgpu_bios_execute_script(struct gk20a *g, u32 offset)
 
 	case INIT_XMEMSEL_ZM_NV_REG_ARRAY:
 		operand[0] = nvgpu_bios_read_u32(g, ip);
-		operand[1] = nvgpu_bios_read_u8(g, ip+4);
-		operand[2] = nvgpu_bios_read_u8(g, ip+5);
-		ip += 6;
+		operand[1] = nvgpu_bios_read_u8(g, ip+4U);
+		operand[2] = nvgpu_bios_read_u8(g, ip+5U);
+		ip += 6U;
 
 		nvgpu_bios_init_xmemsel_zm_nv_reg_array(g, &condition,
 			operand[0], operand[1], operand[2], ip);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/allocator.h b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
index a38e8d51..698aafb3 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/allocator.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
@@ -186,11 +186,11 @@ nvgpu_alloc_carveout_from_co_entry(struct nvgpu_list_node *node)
  * pointing to the allocation base (requires GPU_ALLOC_FORCE_CONTIG to be
  * set as well).
  */
-#define GPU_ALLOC_GVA_SPACE 0x1
-#define GPU_ALLOC_NO_ALLOC_PAGE 0x2
-#define GPU_ALLOC_4K_VIDMEM_PAGES 0x4
-#define GPU_ALLOC_FORCE_CONTIG 0x8
-#define GPU_ALLOC_NO_SCATTER_GATHER 0x10
+#define GPU_ALLOC_GVA_SPACE BIT(0)
+#define GPU_ALLOC_NO_ALLOC_PAGE BIT(1)
+#define GPU_ALLOC_4K_VIDMEM_PAGES BIT(2)
+#define GPU_ALLOC_FORCE_CONTIG BIT(3)
+#define GPU_ALLOC_NO_SCATTER_GATHER BIT(4)
 
 static inline void alloc_lock(struct nvgpu_allocator *a)
 {
@@ -236,7 +236,7 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 				  const char *name, u64 base, u64 length,
 				  u64 struct_size, u64 flags);
 
-#define GPU_BALLOC_MAX_ORDER 31
+#define GPU_BALLOC_MAX_ORDER 31U
 
 /*
  * Allocator APIs.
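
The allocator.h hunk above swaps literal flag values for BIT(); in the kernel, BIT(n) expands to an unsigned-long shift (1UL << (n)), so the flags stay in the unsigned category and the single-bit intent is explicit. A usage sketch, assuming a local BIT() with the kernel's usual definition:

#define BIT(nr) (1UL << (nr))	/* assumed definition */

#define GPU_ALLOC_GVA_SPACE BIT(0)
#define GPU_ALLOC_NO_ALLOC_PAGE BIT(1)

static int gva_space_requested(unsigned long flags)
{
	return (flags & GPU_ALLOC_GVA_SPACE) != 0UL;
}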
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
index a83b0dd8..e58f5498 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
@@ -54,9 +54,9 @@ enum gk20a_mem_rw_flag {
  * structure is of course depending on this. The MIN_SHIFT define is the right
  * number of bits to shift to determine which list to use in the array of lists.
  */
-#define NVGPU_PD_CACHE_MIN 256
-#define NVGPU_PD_CACHE_MIN_SHIFT 9
-#define NVGPU_PD_CACHE_COUNT 4
+#define NVGPU_PD_CACHE_MIN 256U
+#define NVGPU_PD_CACHE_MIN_SHIFT 9U
+#define NVGPU_PD_CACHE_COUNT 4U
 
 struct nvgpu_pd_mem_entry {
 	struct nvgpu_mem mem;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/posix/types.h b/drivers/gpu/nvgpu/include/nvgpu/posix/types.h
index 4b525923..97686eec 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/posix/types.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/posix/types.h
@@ -72,7 +72,7 @@ typedef long long s64;
 	})
 #define min3(a, b, c) min(min(a, b), c)
 
-#define PAGE_SIZE 4096
+#define PAGE_SIZE 4096U
 
 #define ARRAY_SIZE(array) \
 	(sizeof(array) / sizeof((array)[0]))
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index ad8c7cca..b47d4ee0 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -207,12 +207,12 @@ struct vm_gk20a {
 /*
  * Mapping flags.
  */
-#define NVGPU_VM_MAP_FIXED_OFFSET (1 << 0)
-#define NVGPU_VM_MAP_CACHEABLE (1 << 1)
-#define NVGPU_VM_MAP_IO_COHERENT (1 << 2)
-#define NVGPU_VM_MAP_UNMAPPED_PTE (1 << 3)
-#define NVGPU_VM_MAP_DIRECT_KIND_CTRL (1 << 4)
-#define NVGPU_VM_MAP_L3_ALLOC (1 << 5)
+#define NVGPU_VM_MAP_FIXED_OFFSET BIT32(0)
+#define NVGPU_VM_MAP_CACHEABLE BIT32(1)
+#define NVGPU_VM_MAP_IO_COHERENT BIT32(2)
+#define NVGPU_VM_MAP_UNMAPPED_PTE BIT32(3)
+#define NVGPU_VM_MAP_DIRECT_KIND_CTRL BIT32(4)
+#define NVGPU_VM_MAP_L3_ALLOC BIT32(5)
 
 #define NVGPU_KIND_INVALID -1
 
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm_area.h b/drivers/gpu/nvgpu/include/nvgpu/vm_area.h
index a055ada3..53e1cb85 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm_area.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm_area.h
@@ -60,8 +60,8 @@ nvgpu_vm_area_from_vm_area_list(struct nvgpu_list_node *node)
 /*
  * Alloc space flags.
  */
-#define NVGPU_VM_AREA_ALLOC_FIXED_OFFSET (1 << 0)
-#define NVGPU_VM_AREA_ALLOC_SPARSE (1 << 1)
+#define NVGPU_VM_AREA_ALLOC_FIXED_OFFSET BIT(0)
+#define NVGPU_VM_AREA_ALLOC_SPARSE BIT(1)
 
 int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
 		u64 *addr, u32 flags);