Diffstat (limited to 'drivers/gpu')
20 files changed, 259 insertions, 246 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
index 6b9db23c..e5b9b378 100644
--- a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
@@ -42,10 +42,10 @@ static u64 nvgpu_bitmap_alloc_base(struct nvgpu_allocator *a)
 	return ba->base;
 }
 
-static int nvgpu_bitmap_alloc_inited(struct nvgpu_allocator *a)
+static bool nvgpu_bitmap_alloc_inited(struct nvgpu_allocator *a)
 {
 	struct nvgpu_bitmap_allocator *ba = a->priv;
-	int inited = ba->inited;
+	bool inited = ba->inited;
 
 	nvgpu_smp_rmb();
 	return inited;
@@ -160,7 +160,7 @@ static struct nvgpu_bitmap_alloc *find_alloc_metadata(
 	struct nvgpu_rbtree_node *node = NULL;
 
 	nvgpu_rbtree_search(addr, &node, a->allocs);
-	if (!node) {
+	if (node == NULL) {
 		return NULL;
 	}
 
@@ -180,7 +180,7 @@ static int nvgpu_bitmap_store_alloc(struct nvgpu_bitmap_allocator *a,
 	struct nvgpu_bitmap_alloc *alloc =
 		nvgpu_kmem_cache_alloc(a->meta_data_cache);
 
-	if (!alloc) {
+	if (alloc == NULL) {
 		return -ENOMEM;
 	}
 
@@ -243,8 +243,8 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *na, u64 len)
 	 * either of these possibilities assume that the caller will keep what
 	 * data it needs around to successfully free this allocation.
 	 */
-	if (!(a->flags & GPU_ALLOC_NO_ALLOC_PAGE) &&
-	    nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size)) {
+	if ((a->flags & GPU_ALLOC_NO_ALLOC_PAGE) == 0ULL &&
+	    nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size) != 0) {
 		goto fail_reset_bitmap;
 	}
 
@@ -280,7 +280,7 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *na, u64 addr)
 	}
 
 	alloc = find_alloc_metadata(a, addr);
-	if (!alloc) {
+	if (alloc == NULL) {
 		goto done;
 	}
 
@@ -299,7 +299,7 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *na, u64 addr)
 	a->bytes_freed += alloc->length;
 
 done:
-	if (a->meta_data_cache && alloc) {
+	if (a->meta_data_cache != NULL && alloc != NULL) {
 		nvgpu_kmem_cache_free(a->meta_data_cache, alloc);
 	}
 	alloc_unlock(na);
@@ -377,27 +377,29 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 {
 	int err;
 	struct nvgpu_bitmap_allocator *a;
+	bool is_blk_size_pwr_2 = (blk_size & (blk_size - 1ULL)) == 0ULL;
+	bool is_base_aligned = (base & (blk_size - 1ULL)) == 0ULL;
+	bool is_length_aligned = (length & (blk_size - 1ULL)) == 0ULL;
 
-	if (WARN_ON(blk_size & (blk_size - 1U))) {
+	if (WARN_ON(!is_blk_size_pwr_2)) {
 		return -EINVAL;
 	}
 
 	/*
-	 * blk_size must be a power-of-2; base length also need to be aligned
-	 * to blk_size.
+	 * blk_size must be a power-of-2; base and length also need to be
+	 * aligned to blk_size.
 	 */
-	if (blk_size & (blk_size - 1U) ||
-	    base & (blk_size - 1U) || length & (blk_size - 1U)) {
+	if (!is_blk_size_pwr_2 || !is_base_aligned || !is_length_aligned) {
 		return -EINVAL;
 	}
 
-	if (base == 0U) {
+	if (base == 0ULL) {
 		base = blk_size;
 		length -= blk_size;
 	}
 
 	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_bitmap_allocator));
-	if (!a) {
+	if (a == NULL) {
 		return -ENOMEM;
 	}
 
@@ -406,10 +408,10 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 		goto fail;
 	}
 
-	if (!(flags & GPU_ALLOC_NO_ALLOC_PAGE)) {
+	if ((flags & GPU_ALLOC_NO_ALLOC_PAGE) == 0ULL) {
 		a->meta_data_cache = nvgpu_kmem_cache_create(g,
 			sizeof(struct nvgpu_bitmap_alloc));
-		if (!a->meta_data_cache) {
+		if (a->meta_data_cache == NULL) {
 			err = -ENOMEM;
 			goto fail;
 		}
@@ -426,7 +428,7 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 
 	a->bitmap = nvgpu_kcalloc(g, BITS_TO_LONGS(a->num_bits),
 				  sizeof(*a->bitmap));
-	if (!a->bitmap) {
+	if (a->bitmap == NULL) {
 		err = -ENOMEM;
 		goto fail;
 	}
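Note: nearly every hunk in this file is the same mechanical transformation: controlling expressions are made essentially Boolean, so pointer tests become explicit NULL compares, u64 bitmask tests are compared against 0ULL, and int-used-as-flag values become bool. A minimal standalone sketch of the style (the flag value and names here are illustrative, not taken from the driver):

/* Sketch only: how the patch phrases "is power of two" and "flag not set". */
#include <stdbool.h>
#include <stdint.h>

#define GPU_ALLOC_NO_ALLOC_PAGE 0x2ULL	/* assumed value, for illustration */

static bool is_pwr_2(uint64_t blk_size)
{
	/* A power of two has exactly one bit set. */
	return (blk_size & (blk_size - 1ULL)) == 0ULL;
}

static bool wants_alloc_page(uint64_t flags)
{
	/* Explicit compare against 0ULL instead of !(flags & ...). */
	return (flags & GPU_ALLOC_NO_ALLOC_PAGE) == 0ULL;
}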
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index 516e5035..c0d1335e 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -129,7 +129,7 @@ static struct nvgpu_buddy *balloc_new_buddy(struct nvgpu_buddy_allocator *a,
 	struct nvgpu_buddy *new_buddy;
 
 	new_buddy = nvgpu_kmem_cache_alloc(a->buddy_cache);
-	if (!new_buddy) {
+	if (new_buddy == NULL) {
 		return NULL;
 	}
 
@@ -160,7 +160,7 @@ static void balloc_buddy_list_do_add(struct nvgpu_buddy_allocator *a,
 	 * This lets the code that checks if there are available blocks check
 	 * without cycling through the entire list.
 	 */
-	if (a->flags & GPU_ALLOC_GVA_SPACE &&
+	if ((a->flags & GPU_ALLOC_GVA_SPACE) != 0ULL &&
 	    b->pte_size == BALLOC_PTE_SIZE_BIG) {
 		nvgpu_list_add_tail(&b->buddy_entry, list);
 	} else {
@@ -247,7 +247,7 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
 		order = balloc_max_order_in(a, bstart, bend);
 
 		buddy = balloc_new_buddy(a, NULL, bstart, order);
-		if (!buddy) {
+		if (buddy == NULL) {
 			goto cleanup;
 		}
 
@@ -374,7 +374,7 @@ static void balloc_coalesce(struct nvgpu_buddy_allocator *a,
 	 * If both our buddy and I are both not allocated and not split then
 	 * we can coalesce ourselves.
 	 */
-	if (!b->buddy) {
+	if (b->buddy == NULL) {
 		return;
 	}
 	if (buddy_is_alloced(b->buddy) || buddy_is_split(b->buddy)) {
@@ -412,14 +412,14 @@ static int balloc_split_buddy(struct nvgpu_buddy_allocator *a,
 	u64 half;
 
 	left = balloc_new_buddy(a, b, b->start, b->order - 1U);
-	if (!left) {
+	if (left == NULL) {
 		return -ENOMEM;
 	}
 
 	half = (b->end - b->start) / 2U;
 
 	right = balloc_new_buddy(a, b, b->start + half, b->order - 1U);
-	if (!right) {
+	if (right == NULL) {
 		nvgpu_kmem_cache_free(a->buddy_cache, left);
 		return -ENOMEM;
 	}
@@ -448,7 +448,7 @@ static int balloc_split_buddy(struct nvgpu_buddy_allocator *a,
 	 * we can leave the buddies PTE field alone since the PDE block has yet
 	 * to be assigned a PTE size.
	 */
-	if (a->flags & GPU_ALLOC_GVA_SPACE &&
+	if ((a->flags & GPU_ALLOC_GVA_SPACE) != 0ULL &&
 	    left->order < a->pte_blk_order) {
 		left->pte_size = pte_size;
 		right->pte_size = pte_size;
@@ -492,7 +492,7 @@ static struct nvgpu_buddy *balloc_free_buddy(struct nvgpu_buddy_allocator *a,
 	struct nvgpu_buddy *bud;
 
 	nvgpu_rbtree_search(addr, &node, a->alloced_buddies);
-	if (!node) {
+	if (node == NULL) {
 		return NULL;
 	}
 
@@ -518,7 +518,7 @@ static struct nvgpu_buddy *balloc_find_buddy(struct nvgpu_buddy_allocator *a,
 		return NULL;
 	}
 
-	if (a->flags & GPU_ALLOC_GVA_SPACE &&
+	if ((a->flags & GPU_ALLOC_GVA_SPACE) != 0ULL &&
 	    pte_size == BALLOC_PTE_SIZE_BIG) {
 		bud = nvgpu_list_last_entry(balloc_get_order_list(a, order),
 					    nvgpu_buddy, buddy_entry);
@@ -551,14 +551,15 @@ static u64 balloc_do_alloc(struct nvgpu_buddy_allocator *a,
 	u64 split_order;
 	struct nvgpu_buddy *bud = NULL;
 
-	split_order = order;
-	while (split_order <= a->max_order &&
-	       !(bud = balloc_find_buddy(a, split_order, pte_size))) {
-		split_order++;
+	for (split_order = order; split_order <= a->max_order; split_order++) {
+		bud = balloc_find_buddy(a, split_order, pte_size);
+		if (bud != NULL) {
+			break;
+		}
 	}
 
 	/* Out of memory! */
-	if (!bud) {
+	if (bud == NULL) {
 		return 0;
 	}
 
@@ -582,15 +583,15 @@ static u64 balloc_do_alloc(struct nvgpu_buddy_allocator *a,
  * TODO: Right now this uses the unoptimal approach of going through all
  * outstanding allocations and checking their base/ends. This could be better.
  */
-static int balloc_is_range_free(struct nvgpu_buddy_allocator *a,
+static bool balloc_is_range_free(struct nvgpu_buddy_allocator *a,
 				u64 base, u64 end)
 {
 	struct nvgpu_rbtree_node *node = NULL;
 	struct nvgpu_buddy *bud;
 
 	nvgpu_rbtree_enum_start(0, &node, a->alloced_buddies);
-	if (!node) {
-		return 1; /* No allocs yet. */
+	if (node == NULL) {
+		return true; /* No allocs yet. */
 	}
 
 	bud = nvgpu_buddy_from_rbtree_node(node);
@@ -598,17 +599,17 @@ static int balloc_is_range_free(struct nvgpu_buddy_allocator *a,
 	while (bud->start < end) {
 		if ((bud->start > base && bud->start < end) ||
 		    (bud->end > base && bud->end < end)) {
-			return 0;
+			return false;
 		}
 
 		nvgpu_rbtree_enum_next(&node, node);
-		if (!node) {
+		if (node == NULL) {
 			break;
 		}
 		bud = nvgpu_buddy_from_rbtree_node(node);
 	}
 
-	return 1;
+	return true;
 }
 
 static void balloc_alloc_fixed(struct nvgpu_buddy_allocator *a,
@@ -633,7 +634,7 @@ static struct nvgpu_fixed_alloc *balloc_free_fixed(
 	struct nvgpu_rbtree_node *node = NULL;
 
 	nvgpu_rbtree_search(addr, &node, a->fixed_allocs);
-	if (!node) {
+	if (node == NULL) {
 		return NULL;
 	}
 
@@ -787,7 +788,7 @@ static u64 balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a,
 		bud = balloc_make_fixed_buddy(a,
 			balloc_base_unshift(a, inc_base),
 			align_order, pte_size);
-		if (!bud) {
+		if (bud == NULL) {
 			alloc_dbg(balloc_owner(a),
 				  "Fixed buddy failed: {0x%llx, %llu}!",
 				  balloc_base_unshift(a, inc_base),
@@ -891,7 +892,7 @@ static u64 nvgpu_buddy_balloc_pte(struct nvgpu_allocator *na, u64 len,
 		alloc_dbg(balloc_owner(a), "Alloc failed: no mem!");
 	}
 
-	a->alloc_made = 1;
+	a->alloc_made = true;
 
 	alloc_unlock(na);
 
@@ -930,7 +931,7 @@ static u64 nvgpu_balloc_fixed_buddy_locked(struct nvgpu_allocator *na,
 	}
 
 	falloc = nvgpu_kmalloc(nvgpu_alloc_to_gpu(na), sizeof(*falloc));
-	if (!falloc) {
+	if (falloc == NULL) {
 		goto fail;
 	}
 
@@ -946,7 +947,7 @@ static u64 nvgpu_balloc_fixed_buddy_locked(struct nvgpu_allocator *na,
 	}
 
 	ret = balloc_do_alloc_fixed(a, falloc, base, len, pte_size);
-	if (!ret) {
+	if (ret == 0ULL) {
 		alloc_dbg(balloc_owner(a),
 			  "Alloc-fixed failed ?? 0x%llx -> 0x%llx",
 			  base, base + len);
@@ -988,7 +989,7 @@ static u64 nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
 
 	alloc_lock(na);
 	alloc = nvgpu_balloc_fixed_buddy_locked(na, base, len, page_size);
-	a->alloc_made = 1;
+	a->alloc_made = true;
 	alloc_unlock(na);
 
 	return alloc;
@@ -1003,7 +1004,7 @@ static void nvgpu_buddy_bfree(struct nvgpu_allocator *na, u64 addr)
 	struct nvgpu_fixed_alloc *falloc;
 	struct nvgpu_buddy_allocator *a = na->priv;
 
-	if (!addr) {
+	if (addr == 0ULL) {
 		return;
 	}
 
@@ -1020,7 +1021,7 @@ static void nvgpu_buddy_bfree(struct nvgpu_allocator *na, u64 addr)
 	}
 
 	bud = balloc_free_buddy(a, addr);
-	if (!bud) {
+	if (bud == NULL) {
 		goto done;
 	}
 
@@ -1090,7 +1091,7 @@ static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *na,
 	/* Should not be possible to fail... */
 	addr = nvgpu_balloc_fixed_buddy_locked(na, co->base, co->length,
 					       BALLOC_PTE_SIZE_ANY);
-	if (!addr) {
+	if (addr == 0ULL) {
 		err = -ENOMEM;
 		nvgpu_warn(na->g,
 			   "%s: Failed to reserve a valid carveout!",
@@ -1133,10 +1134,10 @@ static u64 nvgpu_buddy_alloc_base(struct nvgpu_allocator *a)
 	return ba->start;
 }
 
-static int nvgpu_buddy_alloc_inited(struct nvgpu_allocator *a)
+static bool nvgpu_buddy_alloc_inited(struct nvgpu_allocator *a)
 {
 	struct nvgpu_buddy_allocator *ba = a->priv;
-	int inited = ba->initialized;
+	bool inited = ba->initialized;
 
 	nvgpu_smp_rmb();
 	return inited;
@@ -1292,12 +1293,15 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	int err;
 	u64 pde_size;
 	struct nvgpu_buddy_allocator *a;
+	bool is_gva_space = (flags & GPU_ALLOC_GVA_SPACE) != 0ULL;
+	bool is_blk_size_pwr_2 = (blk_size & (blk_size - 1ULL)) == 0ULL;
+	u64 base_big_page, size_big_page;
 
 	/* blk_size must be greater than 0 and a power of 2. */
 	if (blk_size == 0U) {
 		return -EINVAL;
 	}
-	if (blk_size & (blk_size - 1U)) {
+	if (!is_blk_size_pwr_2) {
 		return -EINVAL;
 	}
 
@@ -1306,12 +1310,12 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	}
 
 	/* If this is to manage a GVA space we need a VM. */
-	if (flags & GPU_ALLOC_GVA_SPACE && !vm) {
+	if (is_gva_space && vm == NULL) {
 		return -EINVAL;
 	}
 
 	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_buddy_allocator));
-	if (!a) {
+	if (a == NULL) {
 		return -ENOMEM;
 	}
 
@@ -1336,8 +1340,8 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	}
 
 	a->vm = vm;
-	if (flags & GPU_ALLOC_GVA_SPACE) {
-		pde_size = 1ULL << nvgpu_vm_pde_coverage_bit_count(vm);
+	if (is_gva_space) {
+		pde_size = BIT64(nvgpu_vm_pde_coverage_bit_count(vm));
 		a->pte_blk_order = balloc_get_order(a, pde_size);
 	}
 
@@ -1346,9 +1350,10 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	 * must be PDE aligned. If big_pages are not enabled then this
 	 * requirement is not necessary.
	 */
-	if (flags & GPU_ALLOC_GVA_SPACE && vm->big_pages &&
-	    (base & ((vm->big_page_size << 10) - 1U) ||
-	     size & ((vm->big_page_size << 10) - 1U))) {
+	base_big_page = base & ((vm->big_page_size << 10U) - 1U);
+	size_big_page = size & ((vm->big_page_size << 10U) - 1U);
+	if (is_gva_space && vm->big_pages &&
+	    (base_big_page != 0ULL || size_big_page != 0ULL)) {
 		return -EINVAL;
 	}
 
@@ -1359,7 +1364,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	balloc_compute_max_order(a);
 
 	a->buddy_cache = nvgpu_kmem_cache_create(g, sizeof(struct nvgpu_buddy));
-	if (!a->buddy_cache) {
+	if (a->buddy_cache == NULL) {
 		err = -ENOMEM;
 		goto fail;
 	}
@@ -1373,7 +1378,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	}
 
 	nvgpu_smp_wmb();
-	a->initialized = 1;
+	a->initialized = true;
 
 #ifdef CONFIG_DEBUG_FS
 	nvgpu_init_alloc_debug(g, na);
@@ -1382,7 +1387,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	alloc_dbg(na, " base 0x%llx", a->base);
 	alloc_dbg(na, " size 0x%llx", a->length);
 	alloc_dbg(na, " blk_size 0x%llx", a->blk_size);
-	if (flags & GPU_ALLOC_GVA_SPACE) {
+	if (is_gva_space) {
 		alloc_dbg(balloc_owner(a),
 			  " pde_size 0x%llx",
 			  balloc_order_to_len(a, a->pte_blk_order));
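Note: the one structural rewrite here is in balloc_do_alloc(), where a while loop that assigned bud inside its controlling expression becomes a for loop with the assignment in the body; the search order is unchanged. A standalone sketch of the equivalence, with find() as a stand-in for balloc_find_buddy():

#include <stddef.h>
#include <stdint.h>

struct buddy { uint64_t order; };

/* Stand-in; always failing keeps the sketch self-contained. */
static struct buddy *find(uint64_t order)
{
	(void)order;
	return NULL;
}

static struct buddy *search(uint64_t order, uint64_t max_order)
{
	struct buddy *bud = NULL;
	uint64_t split_order;

	/* Old form: while (split_order <= max_order &&
	 *                  !(bud = find(split_order))) split_order++;
	 * Same walk, but no assignment inside the condition. */
	for (split_order = order; split_order <= max_order; split_order++) {
		bud = find(split_order);
		if (bud != NULL) {
			break;
		}
	}

	return bud; /* NULL here means the allocator is out of memory. */
}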
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
index a90530b6..7a22f030 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -175,8 +175,8 @@ struct nvgpu_buddy_allocator {
	 */
 	u64 pte_blk_order;
 
-	int initialized;
-	int alloc_made; /* True after the first alloc. */
+	bool initialized;
+	bool alloc_made; /* True after the first alloc. */
 
 	u64 flags;
 
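Note: this header change is what lets buddy_allocator.c above assign true/false and return bool from its inited accessor; the state really is two-valued, so the type now says so. A sketch of the field shape (the struct name is illustrative):

#include <stdbool.h>

struct allocator_state {
	bool initialized;	/* published with a write barrier once init completes */
	bool alloc_made;	/* true after the first alloc */
};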
diff --git a/drivers/gpu/nvgpu/common/mm/comptags.c b/drivers/gpu/nvgpu/common/mm/comptags.c
index 3bde3a53..f6216648 100644
--- a/drivers/gpu/nvgpu/common/mm/comptags.c
+++ b/drivers/gpu/nvgpu/common/mm/comptags.c
@@ -88,7 +88,7 @@ int gk20a_comptag_allocator_init(struct gk20a *g,
 	size--;
 	allocator->bitmap = nvgpu_vzalloc(g,
 					  BITS_TO_LONGS(size) * sizeof(long));
-	if (!allocator->bitmap)
+	if (allocator->bitmap == NULL)
 		return -ENOMEM;
 
 	allocator->size = size;
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index e21ffd8d..02e32b20 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -79,7 +79,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 
 	struct nvgpu_sgt *sgt = nvgpu_sgt_create_from_mem(g, mem);
 
-	if (!sgt) {
+	if (sgt == NULL) {
 		return 0;
 	}
 
@@ -122,7 +122,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 
 	nvgpu_sgt_free(g, sgt);
 
-	if (!vaddr) {
+	if (vaddr == 0ULL) {
 		nvgpu_err(g, "failed to map buffer!");
 		return 0;
 	}
@@ -201,7 +201,7 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 	pdb_size = ALIGN(pd_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE);
 
 	err = nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size);
-	if (WARN_ON(err)) {
+	if (WARN_ON(err != 0)) {
 		return err;
 	}
 
@@ -324,7 +324,7 @@ static int pd_allocate_children(struct vm_gk20a *vm,
 	pd->num_entries = pd_entries(l, attrs);
 	pd->entries = nvgpu_vzalloc(g, sizeof(struct nvgpu_gmmu_pd) *
 				    pd->num_entries);
-	if (!pd->entries) {
+	if (pd->entries == NULL) {
 		return -ENOMEM;
 	}
 
@@ -433,7 +433,7 @@ static int __set_pd_level(struct vm_gk20a *vm,
 		 * to be the table of PDEs. When the next level is PTEs the
 		 * target addr is the real physical address we are aiming for.
		 */
-		target_addr = next_pd ?
+		target_addr = (next_pd != NULL) ?
 			nvgpu_pde_phys_addr(g, next_pd) :
 			phys_addr;
 
@@ -486,7 +486,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 	struct nvgpu_sgl *sgl;
 	int err = 0;
 
-	if (!sgt) {
+	if (sgt == NULL) {
 		/*
 		 * This is considered an unmap. Just pass in 0 as the physical
 		 * address for the entire GPU range.
@@ -543,7 +543,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 		/*
 		 * Cut out sgl ents for space_to_skip.
		 */
-		if (space_to_skip &&
+		if (space_to_skip != 0ULL &&
 		    space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
 			space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
 			continue;
@@ -630,10 +630,10 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 		   "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | "
 		   "kind=%#02x APT=%-6s %c%c%c%c%c",
 		   vm->name,
-		   sgt ? "MAP" : "UNMAP",
+		   (sgt != NULL) ? "MAP" : "UNMAP",
 		   virt_addr,
 		   length,
-		   sgt ? nvgpu_sgt_get_phys(g, sgt, sgt->sgl) : 0,
+		   (sgt != NULL) ? nvgpu_sgt_get_phys(g, sgt, sgt->sgl) : 0,
 		   space_to_skip,
 		   page_size >> 10,
 		   nvgpu_gmmu_perm_str(attrs->rw_flag),
@@ -654,7 +654,8 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 
 	nvgpu_mb();
 
-	__gmmu_dbg(g, attrs, "%-5s Done!", sgt ? "MAP" : "UNMAP");
+	__gmmu_dbg(g, attrs, "%-5s Done!",
+		   (sgt != NULL) ? "MAP" : "UNMAP");
 
 	return err;
 }
@@ -700,7 +701,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 		.sparse = sparse,
 		.priv = priv,
 		.coherent = flags & NVGPU_VM_MAP_IO_COHERENT,
-		.valid = !(flags & NVGPU_VM_MAP_UNMAPPED_PTE),
+		.valid = (flags & NVGPU_VM_MAP_UNMAPPED_PTE) == 0U,
 		.aperture = aperture
 	};
 
@@ -727,9 +728,9 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	 * Only allocate a new GPU VA range if we haven't already been passed a
 	 * GPU VA range. This facilitates fixed mappings.
	 */
-	if (!vaddr) {
+	if (vaddr == 0ULL) {
 		vaddr = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
-		if (!vaddr) {
+		if (vaddr == 0ULL) {
 			nvgpu_err(g, "failed to allocate va space");
 			err = -ENOMEM;
 			goto fail_alloc;
@@ -744,7 +745,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 		goto fail_validate;
 	}
 
-	if (!batch) {
+	if (batch == NULL) {
 		g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
 	} else {
 		batch->need_tlb_invalidate = true;
@@ -800,7 +801,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 		nvgpu_err(g, "failed to update gmmu ptes on unmap");
 	}
 
-	if (!batch) {
+	if (batch == NULL) {
 		gk20a_mm_l2_flush(g, true);
 		g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
 	} else {
@@ -823,7 +824,7 @@ u32 __nvgpu_pte_words(struct gk20a *g)
	 */
 	do {
 		next_l = l + 1;
-		if (!next_l->update_entry) {
+		if (next_l->update_entry == NULL) {
 			break;
 		}
 
@@ -859,7 +860,7 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 		struct nvgpu_gmmu_pd *pd_next = pd->entries + pd_idx;
 
 		/* Invalid entry! */
-		if (!pd_next->mem) {
+		if (pd_next->mem == NULL) {
 			return -EINVAL;
 		}
 
@@ -875,7 +876,7 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 					pd_offs_out);
 	}
 
-	if (!pd->mem) {
+	if (pd->mem == NULL) {
 		return -EINVAL;
 	}
 
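Note: in gmmu.c the same rule reaches ternaries and designated initializers: a pointer feeding ?: is compared with NULL, and a bitmask test that lands in a boolean field is written as an explicit compare so the stored value is a genuine bool. A compilable sketch (the flag value and all names are made up for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAP_UNMAPPED_PTE 0x4U	/* assumed value, for illustration */

struct attrs { bool valid; };

void describe(const void *sgt, uint32_t flags)
{
	struct attrs a = {
		/* !(flags & MAP_UNMAPPED_PTE) yields an int; the compare yields a bool. */
		.valid = (flags & MAP_UNMAPPED_PTE) == 0U,
	};

	printf("%s valid=%d\n", (sgt != NULL) ? "MAP" : "UNMAP", (int)a.valid);
}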
diff --git a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c
index 79bf4cd6..59fae76d 100644
--- a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c
@@ -41,10 +41,10 @@ static u64 nvgpu_lockless_alloc_base(struct nvgpu_allocator *a)
 	return pa->base;
 }
 
-static int nvgpu_lockless_alloc_inited(struct nvgpu_allocator *a)
+static bool nvgpu_lockless_alloc_inited(struct nvgpu_allocator *a)
 {
 	struct nvgpu_lockless_allocator *pa = a->priv;
-	int inited = pa->inited;
+	bool inited = pa->inited;
 
 	nvgpu_smp_rmb();
 	return inited;
@@ -169,7 +169,7 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	u64 count;
 	struct nvgpu_lockless_allocator *a;
 
-	if (!blk_size) {
+	if (blk_size == 0ULL) {
 		return -EINVAL;
 	}
 
@@ -178,12 +178,12 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	 * In order to control memory footprint, we require count < INT_MAX
	 */
 	count = length / blk_size;
-	if (!base || !count || count > INT_MAX) {
+	if (base == 0ULL || count == 0ULL || count > INT_MAX) {
 		return -EINVAL;
 	}
 
 	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_lockless_allocator));
-	if (!a) {
+	if (a == NULL) {
 		return -ENOMEM;
 	}
 
@@ -193,7 +193,7 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	}
 
 	a->next = nvgpu_vzalloc(g, sizeof(*a->next) * count);
-	if (!a->next) {
+	if (a->next == NULL) {
 		err = -ENOMEM;
 		goto fail;
 	}
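Note: the three *_alloc_inited() accessors (bitmap, buddy, lockless) all pair with their init paths the same way: the writer finishes setup, issues nvgpu_smp_wmb(), then sets the flag; the reader loads the flag and issues nvgpu_smp_rmb() before returning it. A rough C11 analogue of that pairing, with fences standing in for the nvgpu barrier wrappers:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool inited;

void publish(void)
{
	/* ... fully initialize the allocator's fields first ... */
	atomic_thread_fence(memory_order_release);	/* ~ nvgpu_smp_wmb() */
	atomic_store_explicit(&inited, true, memory_order_relaxed);
}

bool is_inited(void)
{
	bool i = atomic_load_explicit(&inited, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* ~ nvgpu_smp_rmb() */
	return i;
}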
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index f97d9ebd..03325cce 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -42,7 +42,7 @@ static u32 nvgpu_vm_get_pte_size_fixed_map(struct vm_gk20a *vm,
 	struct nvgpu_vm_area *vm_area;
 
 	vm_area = nvgpu_vm_area_find(vm, base);
-	if (!vm_area) {
+	if (vm_area == NULL) {
 		return GMMU_PAGE_SIZE_SMALL;
 	}
 
@@ -55,7 +55,7 @@ static u32 nvgpu_vm_get_pte_size_fixed_map(struct vm_gk20a *vm,
 static u32 nvgpu_vm_get_pte_size_split_addr(struct vm_gk20a *vm,
 					    u64 base, u64 size)
 {
-	if (!base) {
+	if (base == 0ULL) {
 		if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
 			return GMMU_PAGE_SIZE_BIG;
 		}
@@ -233,7 +233,7 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm)
 				   true,
 				   false,
 				   "system");
-	if (!mm->pmu.vm) {
+	if (mm->pmu.vm == NULL) {
 		return -ENOMEM;
 	}
 
@@ -275,7 +275,7 @@ static int nvgpu_init_cde_vm(struct mm_gk20a *mm)
 				   NV_MM_DEFAULT_KERNEL_SIZE,
 				   NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
 				   false, false, "cde");
-	if (!mm->cde.vm) {
+	if (mm->cde.vm == NULL) {
 		return -ENOMEM;
 	}
 	return 0;
@@ -291,7 +291,7 @@ static int nvgpu_init_ce_vm(struct mm_gk20a *mm)
 				  NV_MM_DEFAULT_KERNEL_SIZE,
 				  NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
 				  false, false, "ce");
-	if (!mm->ce.vm) {
+	if (mm->ce.vm == NULL) {
 		return -ENOMEM;
 	}
 	return 0;
@@ -386,7 +386,7 @@ static int nvgpu_init_bar1_vm(struct mm_gk20a *mm)
 				    mm->bar1.aperture_size,
 				    true, false,
 				    "bar1");
-	if (!mm->bar1.vm) {
+	if (mm->bar1.vm == NULL) {
 		return -ENOMEM;
 	}
 
@@ -442,8 +442,8 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
 	 * this requires fixed allocations in vidmem which must be
 	 * allocated before all other buffers
	 */
-	if (g->ops.pmu.alloc_blob_space
-	    && !nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) {
+	if (g->ops.pmu.alloc_blob_space != NULL &&
+	    !nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) {
 		err = g->ops.pmu.alloc_blob_space(g, 0, &g->acr.ucode_blob);
 		if (err) {
 			return err;
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
index bf624162..68d68ad6 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
@@ -45,10 +45,10 @@ u64 nvgpu_alloc_base(struct nvgpu_allocator *a)
 	return 0;
 }
 
-u64 nvgpu_alloc_initialized(struct nvgpu_allocator *a)
+bool nvgpu_alloc_initialized(struct nvgpu_allocator *a)
 {
-	if (!a->ops || !a->ops->inited) {
-		return 0;
+	if (a->ops == NULL || a->ops->inited == NULL) {
+		return false;
 	}
 
 	return a->ops->inited(a);
@@ -151,7 +151,7 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
 {
 	int err;
 
-	if (!ops) {
+	if (ops == NULL) {
 		return -EINVAL;
 	}
 
@@ -159,7 +159,7 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
 	 * This is the bare minimum operations required for a sensible
 	 * allocator.
	 */
-	if (!ops->alloc || !ops->free || !ops->fini) {
+	if (ops->alloc == NULL || ops->free == NULL || ops->fini == NULL) {
 		return -EINVAL;
 	}
 
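Note: nvgpu_alloc_initialized() now answers bool and checks both the ops table and the inited hook before calling through, so a half-constructed allocator reports false rather than a bare 0. A self-contained sketch of that shape (type names are illustrative):

#include <stdbool.h>
#include <stddef.h>

struct ops { bool (*inited)(void *priv); };
struct allocator { const struct ops *ops; void *priv; };

bool allocator_initialized(const struct allocator *a)
{
	if (a->ops == NULL || a->ops->inited == NULL) {
		return false;
	}
	return a->ops->inited(a->priv);
}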
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index e251f3c4..5cfaded0 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -128,7 +128,7 @@ bool nvgpu_sgt_iommuable(struct gk20a *g, struct nvgpu_sgt *sgt)
 
 void nvgpu_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
 {
-	if (sgt && sgt->ops->sgt_free) {
+	if (sgt != NULL && sgt->ops->sgt_free != NULL) {
 		sgt->ops->sgt_free(g, sgt);
 	}
 }
@@ -138,7 +138,7 @@ u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys)
 	/* ensure it is not vidmem allocation */
 	WARN_ON(nvgpu_addr_is_vidmem_page_alloc(phys));
 
-	if (nvgpu_iommuable(g) && g->ops.mm.get_iommu_bit) {
+	if (nvgpu_iommuable(g) && g->ops.mm.get_iommu_bit != NULL) {
 		return phys | 1ULL << g->ops.mm.get_iommu_bit(g);
 	}
 
@@ -165,7 +165,7 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
	 */
 	if (nvgpu_iommuable(g) &&
 	    nvgpu_sgt_iommuable(g, sgt) &&
-	    nvgpu_sgt_get_dma(sgt, sgt->sgl)) {
+	    nvgpu_sgt_get_dma(sgt, sgt->sgl) != 0ULL) {
 		return 1ULL << __ffs(nvgpu_sgt_get_dma(sgt, sgt->sgl));
 	}
 
 
@@ -195,7 +195,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 	if (mem->aperture == APERTURE_SYSMEM) {
 		u32 *ptr = mem->cpu_va;
 
-		WARN_ON(!ptr);
+		WARN_ON(ptr == NULL);
 		data = ptr[w];
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_rd_n(g, mem, w * sizeof(u32), sizeof(u32), &data);
@@ -208,20 +208,20 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 
 u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
 {
-	WARN_ON(offset & 3U);
+	WARN_ON((offset & 3U) != 0U);
 	return nvgpu_mem_rd32(g, mem, offset / sizeof(u32));
 }
 
 void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
 		    u32 offset, void *dest, u32 size)
 {
-	WARN_ON(offset & 3U);
-	WARN_ON(size & 3U);
+	WARN_ON((offset & 3U) != 0U);
+	WARN_ON((size & 3U) != 0U);
 
 	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *src = (u8 *)mem->cpu_va + offset;
 
-		WARN_ON(!mem->cpu_va);
+		WARN_ON(mem->cpu_va == NULL);
 		memcpy(dest, src, size);
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_rd_n(g, mem, offset, size, dest);
@@ -235,7 +235,7 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 	if (mem->aperture == APERTURE_SYSMEM) {
 		u32 *ptr = mem->cpu_va;
 
-		WARN_ON(!ptr);
+		WARN_ON(ptr == NULL);
 		ptr[w] = data;
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_wr_n(g, mem, w * sizeof(u32), sizeof(u32), &data);
@@ -249,20 +249,20 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 
 void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data)
 {
-	WARN_ON(offset & 3U);
+	WARN_ON((offset & 3U) != 0U);
 	nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data);
 }
 
 void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		    void *src, u32 size)
 {
-	WARN_ON(offset & 3U);
-	WARN_ON(size & 3U);
+	WARN_ON((offset & 3U) != 0U);
+	WARN_ON((size & 3U) != 0U);
 
 	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *dest = (u8 *)mem->cpu_va + offset;
 
-		WARN_ON(!mem->cpu_va);
+		WARN_ON(mem->cpu_va == NULL);
 		memcpy(dest, src, size);
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		nvgpu_pramin_wr_n(g, mem, offset, size, src);
@@ -277,16 +277,16 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		  u32 c, u32 size)
 {
-	WARN_ON(offset & 3U);
-	WARN_ON(size & 3U);
-	WARN_ON(c & ~0xffU);
+	WARN_ON((offset & 3U) != 0U);
+	WARN_ON((size & 3U) != 0U);
+	WARN_ON((c & ~0xffU) != 0U);
 
 	c &= 0xffU;
 
 	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *dest = (u8 *)mem->cpu_va + offset;
 
-		WARN_ON(!mem->cpu_va);
+		WARN_ON(mem->cpu_va == NULL);
 		memset(dest, c, size);
 	} else if (mem->aperture == APERTURE_VIDMEM) {
 		u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
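Note: every WARN_ON() change in this file has the form WARN_ON((x & mask) != 0U): the masked value is an unsigned int rather than a boolean, so the compare states the intent without changing when the warning fires. A sketch with assert() standing in for the kernel's WARN_ON():

#include <assert.h>
#include <stdint.h>

void check_word_aligned(uint32_t offset, uint32_t size)
{
	/* 32-bit accessors require 4-byte aligned offset and size. */
	assert((offset & 3U) == 0U);
	assert((size & 3U) == 0U);
}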
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index c8bc17c7..35c7e120 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -116,7 +116,7 @@ static u64 nvgpu_page_alloc_base(struct nvgpu_allocator *a)
 	return nvgpu_alloc_base(&va->source_allocator);
 }
 
-static int nvgpu_page_alloc_inited(struct nvgpu_allocator *a)
+static bool nvgpu_page_alloc_inited(struct nvgpu_allocator *a)
 {
 	struct nvgpu_page_allocator *va = a->priv;
 
@@ -264,7 +264,7 @@ static struct nvgpu_page_alloc *find_page_alloc(
 	struct nvgpu_rbtree_node *node = NULL;
 
 	nvgpu_rbtree_search(addr, &node, a->allocs);
-	if (!node) {
+	if (node == NULL) {
 		return NULL;
 	}
 
@@ -282,7 +282,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 	struct page_alloc_slab_page *slab_page;
 
 	slab_page = nvgpu_kmem_cache_alloc(a->slab_page_cache);
-	if (!slab_page) {
+	if (slab_page == NULL) {
 		palloc_dbg(a, "OOM: unable to alloc slab_page struct!");
 		return NULL;
 	}
@@ -290,7 +290,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 	memset(slab_page, 0, sizeof(*slab_page));
 
 	slab_page->page_addr = nvgpu_alloc(&a->source_allocator, a->page_size);
-	if (!slab_page->page_addr) {
+	if (slab_page->page_addr == 0ULL) {
 		nvgpu_kmem_cache_free(a->slab_page_cache, slab_page);
 		palloc_dbg(a, "OOM: vidmem is full!");
 		return NULL;
@@ -354,9 +354,9 @@ static int do_slab_alloc(struct nvgpu_page_allocator *a,
 		del_slab_page_from_empty(slab, slab_page);
 	}
 
-	if (!slab_page) {
+	if (slab_page == NULL) {
 		slab_page = alloc_slab_page(a, slab);
-		if (!slab_page) {
+		if (slab_page == NULL) {
 			return -ENOMEM;
 		}
 	}
@@ -423,7 +423,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_slab(
 	slab = &a->slabs[slab_nr];
 
 	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
-	if (!alloc) {
+	if (alloc == NULL) {
 		palloc_dbg(a, "OOM: could not alloc page_alloc struct!");
 		goto fail;
 	}
@@ -431,7 +431,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_slab(
 	alloc->sgt.ops = &page_alloc_sgl_ops;
 
 	sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl));
-	if (!sgl) {
+	if (sgl == NULL) {
 		palloc_dbg(a, "OOM: could not alloc sgl struct!");
 		goto fail;
 	}
@@ -524,7 +524,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
 	int i = 0;
 
 	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
-	if (!alloc) {
+	if (alloc == NULL) {
 		goto fail;
 	}
 
@@ -545,7 +545,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
 		 * requested size. The buddy allocator guarantees any given
 		 * single alloc is contiguous.
		 */
-		if (a->flags & GPU_ALLOC_FORCE_CONTIG && i != 0) {
+		if ((a->flags & GPU_ALLOC_FORCE_CONTIG) != 0ULL && i != 0) {
 			goto fail_cleanup;
 		}
 
@@ -563,23 +563,23 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
 					   chunk_len);
 
 			/* Divide by 2 and try again */
-			if (!chunk_addr) {
+			if (chunk_addr == 0ULL) {
 				palloc_dbg(a, "balloc failed: 0x%llx",
 					   chunk_len);
 				chunk_len >>= 1;
 				max_chunk_len = chunk_len;
 			}
-		} while (!chunk_addr && chunk_len >= a->page_size);
+		} while (chunk_addr == 0ULL && chunk_len >= a->page_size);
 
 		chunk_pages = chunk_len >> a->page_shift;
 
-		if (!chunk_addr) {
+		if (chunk_addr == 0ULL) {
 			palloc_dbg(a, "bailing @ 0x%llx", chunk_len);
 			goto fail_cleanup;
 		}
 
 		sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl));
-		if (!sgl) {
+		if (sgl == NULL) {
 			nvgpu_free(&a->source_allocator, chunk_addr);
 			goto fail_cleanup;
 		}
@@ -638,7 +638,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages(
 	pages = ALIGN(len, a->page_size) >> a->page_shift;
 
 	alloc = do_nvgpu_alloc_pages(a, pages);
-	if (!alloc) {
+	if (alloc == NULL) {
 		palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)",
 			   pages << a->page_shift, pages);
 		return NULL;
@@ -679,18 +679,18 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *na, u64 len)
 	 * If we want contig pages we have to round up to a power of two. It's
 	 * easier to do that here than in the buddy allocator.
	 */
-	real_len = a->flags & GPU_ALLOC_FORCE_CONTIG ?
+	real_len = ((a->flags & GPU_ALLOC_FORCE_CONTIG) != 0ULL) ?
 		roundup_pow_of_two(len) : len;
 
 	alloc_lock(na);
-	if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES &&
+	if ((a->flags & GPU_ALLOC_4K_VIDMEM_PAGES) != 0ULL &&
 	    real_len <= (a->page_size / 2U)) {
 		alloc = nvgpu_alloc_slab(a, real_len);
 	} else {
 		alloc = nvgpu_alloc_pages(a, real_len);
 	}
 
-	if (!alloc) {
+	if (alloc == NULL) {
 		alloc_unlock(na);
 		return 0;
 	}
@@ -728,7 +728,7 @@ static void nvgpu_page_free(struct nvgpu_allocator *na, u64 base)
 			((struct nvgpu_page_alloc *)(uintptr_t)base)->base);
 	}
 
-	if (!alloc) {
+	if (alloc == NULL) {
 		palloc_dbg(a, "Hrm, found no alloc?");
 		goto done;
 	}
@@ -760,13 +760,13 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages_fixed(
 
 	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
 	sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl));
-	if (!alloc || !sgl) {
+	if (alloc == NULL || sgl == NULL) {
 		goto fail;
 	}
 
 	alloc->sgt.ops = &page_alloc_sgl_ops;
 	alloc->base = nvgpu_alloc_fixed(&a->source_allocator, base, length, 0);
-	if (!alloc->base) {
+	if (alloc->base == 0ULL) {
 		WARN(1, "nvgpu: failed to fixed alloc pages @ 0x%010llx", base);
 		goto fail;
 	}
@@ -811,7 +811,7 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *na,
 	alloc_lock(na);
 
 	alloc = nvgpu_alloc_pages_fixed(a, base, aligned_len, 0);
-	if (!alloc) {
+	if (alloc == NULL) {
 		alloc_unlock(na);
 		return 0;
 	}
@@ -850,7 +850,7 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *na,
 
 	if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
 		alloc = find_page_alloc(a, base);
-		if (!alloc) {
+		if (alloc == NULL) {
 			goto done;
 		}
 	} else {
@@ -985,7 +985,7 @@ static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a)
 	a->slabs = nvgpu_kcalloc(nvgpu_alloc_to_gpu(a->owner),
 				 nr_slabs,
 				 sizeof(struct page_alloc_slab));
-	if (!a->slabs) {
+	if (a->slabs == NULL) {
 		return -ENOMEM;
 	}
 	a->nr_slabs = nr_slabs;
@@ -1018,7 +1018,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	}
 
 	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_page_allocator));
-	if (!a) {
+	if (a == NULL) {
 		return -ENOMEM;
 	}
1024 | 1024 | ||
@@ -1031,7 +1031,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, | |||
1031 | sizeof(struct nvgpu_page_alloc)); | 1031 | sizeof(struct nvgpu_page_alloc)); |
1032 | a->slab_page_cache = nvgpu_kmem_cache_create(g, | 1032 | a->slab_page_cache = nvgpu_kmem_cache_create(g, |
1033 | sizeof(struct page_alloc_slab_page)); | 1033 | sizeof(struct page_alloc_slab_page)); |
1034 | if (!a->alloc_cache || !a->slab_page_cache) { | 1034 | if (a->alloc_cache == NULL || a->slab_page_cache == NULL) { |
1035 | err = -ENOMEM; | 1035 | err = -ENOMEM; |
1036 | goto fail; | 1036 | goto fail; |
1037 | } | 1037 | } |
@@ -1044,7 +1044,8 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, | |||
1044 | a->owner = na; | 1044 | a->owner = na; |
1045 | a->flags = flags; | 1045 | a->flags = flags; |
1046 | 1046 | ||
1047 | if (flags & GPU_ALLOC_4K_VIDMEM_PAGES && blk_size > SZ_4K) { | 1047 | if ((flags & GPU_ALLOC_4K_VIDMEM_PAGES) != 0ULL && |
1048 | blk_size > SZ_4K) { | ||
1048 | err = nvgpu_page_alloc_init_slabs(a); | 1049 | err = nvgpu_page_alloc_init_slabs(a); |
1049 | if (err) { | 1050 | if (err) { |
1050 | goto fail; | 1051 | goto fail; |
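The page-allocator hunks above read as MISRA-style cleanups: controlling expressions are made essentially Boolean (alloc == NULL, chunk_addr == 0ULL) and u64 flag tests gain explicit 0ULL comparisons, with no behavioural change intended. The one algorithmically interesting spot is do_nvgpu_alloc_pages(): when a contiguous carve-out of chunk_len bytes fails, the length is halved and retried until it drops below the page size, and GPU_ALLOC_FORCE_CONTIG bails out as soon as a second chunk would be needed. Below is a minimal userspace sketch of that halving loop; balloc_sim(), alloc_pages_sim() and SIM_PAGE_SIZE are invented stand-ins, not the driver's API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define SIM_PAGE_SIZE 4096ULL

/*
 * Stand-in for the buddy allocator: pretend contiguous carve-outs of
 * half a MiB or less always succeed and anything larger always fails.
 */
static u64 balloc_sim(u64 len)
{
    return (len <= 512ULL * 1024ULL) ? 0x100000ULL : 0ULL;
}

/* Satisfy 'pages' pages, halving the chunk size on failure. */
static int alloc_pages_sim(u64 pages, bool force_contig)
{
    u64 pages_left = pages;
    u64 chunk_len = pages * SIM_PAGE_SIZE;
    int i = 0;

    while (pages_left > 0ULL) {
        u64 chunk_addr = 0ULL;

        /*
         * One carve-out is guaranteed contiguous, so needing a
         * second chunk means the overall result is split.
         */
        if (force_contig && i != 0) {
            return -1;
        }

        if (chunk_len > pages_left * SIM_PAGE_SIZE) {
            chunk_len = pages_left * SIM_PAGE_SIZE;
        }

        do {
            chunk_addr = balloc_sim(chunk_len);
            if (chunk_addr == 0ULL) {
                /* Divide by 2 and try again. */
                chunk_len >>= 1;
            }
        } while (chunk_addr == 0ULL && chunk_len >= SIM_PAGE_SIZE);

        if (chunk_addr == 0ULL) {
            return -1;    /* not even one page fits */
        }

        pages_left -= chunk_len / SIM_PAGE_SIZE;
        printf("chunk %d: addr=0x%llx len=%llu\n", i,
               (unsigned long long)chunk_addr,
               (unsigned long long)chunk_len);
        i++;
    }

    return 0;
}

The chunk_len >= SIM_PAGE_SIZE bound guarantees termination: every successful carve-out retires at least one page, and a failure at page granularity aborts the whole allocation, exactly as the "bailing" path above does.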
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c index dae6d34e..a8ed10e7 100644 --- a/drivers/gpu/nvgpu/common/mm/pd_cache.c +++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c | |||
@@ -102,7 +102,7 @@ int nvgpu_pd_cache_init(struct gk20a *g) | |||
102 | } | 102 | } |
103 | 103 | ||
104 | cache = nvgpu_kzalloc(g, sizeof(*cache)); | 104 | cache = nvgpu_kzalloc(g, sizeof(*cache)); |
105 | if (!cache) { | 105 | if (cache == NULL) { |
106 | nvgpu_err(g, "Failed to alloc pd_cache!"); | 106 | nvgpu_err(g, "Failed to alloc pd_cache!"); |
107 | return -ENOMEM; | 107 | return -ENOMEM; |
108 | } | 108 | } |
@@ -132,7 +132,7 @@ void nvgpu_pd_cache_fini(struct gk20a *g) | |||
132 | u32 i; | 132 | u32 i; |
133 | struct nvgpu_pd_cache *cache = g->mm.pd_cache; | 133 | struct nvgpu_pd_cache *cache = g->mm.pd_cache; |
134 | 134 | ||
135 | if (!cache) { | 135 | if (cache == NULL) { |
136 | return; | 136 | return; |
137 | } | 137 | } |
138 | 138 | ||
@@ -159,7 +159,7 @@ int nvgpu_pd_cache_alloc_direct(struct gk20a *g, | |||
159 | pd_dbg(g, "PD-Alloc [D] %u bytes", bytes); | 159 | pd_dbg(g, "PD-Alloc [D] %u bytes", bytes); |
160 | 160 | ||
161 | pd->mem = nvgpu_kzalloc(g, sizeof(*pd->mem)); | 161 | pd->mem = nvgpu_kzalloc(g, sizeof(*pd->mem)); |
162 | if (!pd->mem) { | 162 | if (pd->mem == NULL) { |
163 | nvgpu_err(g, "OOM allocating nvgpu_mem struct!"); | 163 | nvgpu_err(g, "OOM allocating nvgpu_mem struct!"); |
164 | return -ENOMEM; | 164 | return -ENOMEM; |
165 | } | 165 | } |
@@ -205,7 +205,7 @@ static int nvgpu_pd_cache_alloc_new(struct gk20a *g, | |||
205 | pd_dbg(g, "PD-Alloc [C] New: offs=0"); | 205 | pd_dbg(g, "PD-Alloc [C] New: offs=0"); |
206 | 206 | ||
207 | pentry = nvgpu_kzalloc(g, sizeof(*pentry)); | 207 | pentry = nvgpu_kzalloc(g, sizeof(*pentry)); |
208 | if (!pentry) { | 208 | if (pentry == NULL) { |
209 | nvgpu_err(g, "OOM allocating pentry!"); | 209 | nvgpu_err(g, "OOM allocating pentry!"); |
210 | return -ENOMEM; | 210 | return -ENOMEM; |
211 | } | 211 | } |
@@ -313,7 +313,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache, | |||
313 | 313 | ||
314 | pd_dbg(g, "PD-Alloc [C] %u bytes", bytes); | 314 | pd_dbg(g, "PD-Alloc [C] %u bytes", bytes); |
315 | 315 | ||
316 | if (bytes & (bytes - 1U) || | 316 | if ((bytes & (bytes - 1U)) != 0U || |
317 | (bytes >= PAGE_SIZE || | 317 | (bytes >= PAGE_SIZE || |
318 | bytes < NVGPU_PD_CACHE_MIN)) { | 318 | bytes < NVGPU_PD_CACHE_MIN)) { |
319 | pd_dbg(g, "PD-Alloc [C] Invalid (bytes=%u)!", bytes); | 319 | pd_dbg(g, "PD-Alloc [C] Invalid (bytes=%u)!", bytes); |
@@ -321,7 +321,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache, | |||
321 | } | 321 | } |
322 | 322 | ||
323 | pentry = nvgpu_pd_cache_get_partial(cache, bytes); | 323 | pentry = nvgpu_pd_cache_get_partial(cache, bytes); |
324 | if (!pentry) { | 324 | if (pentry == NULL) { |
325 | err = nvgpu_pd_cache_alloc_new(g, cache, pd, bytes); | 325 | err = nvgpu_pd_cache_alloc_new(g, cache, pd, bytes); |
326 | } else { | 326 | } else { |
327 | err = nvgpu_pd_cache_alloc_from_partial(g, cache, pentry, pd); | 327 | err = nvgpu_pd_cache_alloc_from_partial(g, cache, pentry, pd); |
@@ -357,7 +357,7 @@ int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes) | |||
357 | return 0; | 357 | return 0; |
358 | } | 358 | } |
359 | 359 | ||
360 | if (WARN_ON(!g->mm.pd_cache)) { | 360 | if (WARN_ON(g->mm.pd_cache == NULL)) { |
361 | return -ENOMEM; | 361 | return -ENOMEM; |
362 | } | 362 | } |
363 | 363 | ||
@@ -372,7 +372,7 @@ void nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd) | |||
372 | { | 372 | { |
373 | pd_dbg(g, "PD-Free [D] 0x%p", pd->mem); | 373 | pd_dbg(g, "PD-Free [D] 0x%p", pd->mem); |
374 | 374 | ||
375 | if (!pd->mem) { | 375 | if (pd->mem == NULL) { |
376 | return; | 376 | return; |
377 | } | 377 | } |
378 | 378 | ||
@@ -425,7 +425,7 @@ static struct nvgpu_pd_mem_entry *nvgpu_pd_cache_look_up( | |||
425 | 425 | ||
426 | nvgpu_rbtree_search((u64)(uintptr_t)pd->mem, &node, | 426 | nvgpu_rbtree_search((u64)(uintptr_t)pd->mem, &node, |
427 | cache->mem_tree); | 427 | cache->mem_tree); |
428 | if (!node) { | 428 | if (node == NULL) { |
429 | return NULL; | 429 | return NULL; |
430 | } | 430 | } |
431 | 431 | ||
@@ -440,7 +440,7 @@ static void nvgpu_pd_cache_free(struct gk20a *g, struct nvgpu_pd_cache *cache, | |||
440 | pd_dbg(g, "PD-Free [C] 0x%p", pd->mem); | 440 | pd_dbg(g, "PD-Free [C] 0x%p", pd->mem); |
441 | 441 | ||
442 | pentry = nvgpu_pd_cache_look_up(g, cache, pd); | 442 | pentry = nvgpu_pd_cache_look_up(g, cache, pd); |
443 | if (!pentry) { | 443 | if (pentry == NULL) { |
444 | WARN(1, "Attempting to free non-existent pd"); | 444 | WARN(1, "Attempting to free non-existent pd"); |
445 | return; | 445 | return; |
446 | } | 446 | } |
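The pd_cache.c change worth a second look is the power-of-two test: bytes & (bytes - 1U) used to be evaluated for truth directly and now compares against 0U explicitly. The predicate itself is unchanged: a PD allocation is cacheable only when its size is a power of two strictly between the cache minimum and a full page. A standalone sketch of that validity check, with an assumed minimum (the real NVGPU_PD_CACHE_MIN value is not shown in this diff):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t u32;

#define SIM_PD_CACHE_MIN 256U    /* assumed for illustration */
#define SIM_PAGE_SIZE 4096U

/*
 * x & (x - 1) clears the lowest set bit, so the result is zero
 * exactly when at most one bit is set, i.e. x is a power of two
 * (or zero, which the minimum-size bound rejects anyway).
 */
static bool pd_size_valid(u32 bytes)
{
    bool pow2 = (bytes & (bytes - 1U)) == 0U;

    return pow2 && bytes >= SIM_PD_CACHE_MIN && bytes < SIM_PAGE_SIZE;
}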
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c index 17e49969..98bad70b 100644 --- a/drivers/gpu/nvgpu/common/mm/vm.c +++ b/drivers/gpu/nvgpu/common/mm/vm.c | |||
@@ -59,7 +59,7 @@ static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer, | |||
59 | 59 | ||
60 | int vm_aspace_id(struct vm_gk20a *vm) | 60 | int vm_aspace_id(struct vm_gk20a *vm) |
61 | { | 61 | { |
62 | return vm->as_share ? vm->as_share->id : -1; | 62 | return (vm->as_share != NULL) ? vm->as_share->id : -1; |
63 | } | 63 | } |
64 | 64 | ||
65 | /* | 65 | /* |
@@ -112,7 +112,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm, | |||
112 | 112 | ||
113 | nvgpu_pd_cache_free_direct(g, pdb); | 113 | nvgpu_pd_cache_free_direct(g, pdb); |
114 | 114 | ||
115 | if (!pdb->entries) { | 115 | if (pdb->entries == NULL) { |
116 | return; | 116 | return; |
117 | } | 117 | } |
118 | 118 | ||
@@ -153,7 +153,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx) | |||
153 | size = (size + ((u64)page_size - 1U)) & ~((u64)page_size - 1U); | 153 | size = (size + ((u64)page_size - 1U)) & ~((u64)page_size - 1U); |
154 | 154 | ||
155 | addr = nvgpu_alloc_pte(vma, size, page_size); | 155 | addr = nvgpu_alloc_pte(vma, size, page_size); |
156 | if (!addr) { | 156 | if (addr == 0ULL) { |
157 | nvgpu_err(g, "(%s) oom: sz=0x%llx", vma->name, size); | 157 | nvgpu_err(g, "(%s) oom: sz=0x%llx", vma->name, size); |
158 | return 0; | 158 | return 0; |
159 | } | 159 | } |
@@ -200,14 +200,16 @@ void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm, | |||
200 | /* | 200 | /* |
201 | * Determine if the passed address space can support big pages or not. | 201 | * Determine if the passed address space can support big pages or not. |
202 | */ | 202 | */ |
203 | int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size) | 203 | bool nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size) |
204 | { | 204 | { |
205 | u64 mask = ((u64)vm->big_page_size << 10) - 1U; | 205 | u64 mask = ((u64)vm->big_page_size << 10ULL) - 1ULL; |
206 | u64 base_big_page = base & mask; | ||
207 | u64 size_big_page = size & mask; | ||
206 | 208 | ||
207 | if (base & mask || size & mask) { | 209 | if (base_big_page != 0ULL || size_big_page != 0ULL) { |
208 | return 0; | 210 | return false; |
209 | } | 211 | } |
210 | return 1; | 212 | return true; |
211 | } | 213 | } |
212 | 214 | ||
213 | /* | 215 | /* |
@@ -233,12 +235,12 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm) | |||
233 | } | 235 | } |
234 | 236 | ||
235 | sema_sea = nvgpu_semaphore_sea_create(g); | 237 | sema_sea = nvgpu_semaphore_sea_create(g); |
236 | if (!sema_sea) { | 238 | if (sema_sea == NULL) { |
237 | return -ENOMEM; | 239 | return -ENOMEM; |
238 | } | 240 | } |
239 | 241 | ||
240 | err = nvgpu_semaphore_pool_alloc(sema_sea, &vm->sema_pool); | 242 | err = nvgpu_semaphore_pool_alloc(sema_sea, &vm->sema_pool); |
241 | if (err) { | 243 | if (err != 0) { |
242 | return err; | 244 | return err; |
243 | } | 245 | } |
244 | 246 | ||
@@ -254,7 +256,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm) | |||
254 | mm->channel.kernel_size, | 256 | mm->channel.kernel_size, |
255 | 512U * PAGE_SIZE, | 257 | 512U * PAGE_SIZE, |
256 | SZ_4K); | 258 | SZ_4K); |
257 | if (!sema_sea->gpu_va) { | 259 | if (sema_sea->gpu_va == 0ULL) { |
258 | nvgpu_free(&vm->kernel, sema_sea->gpu_va); | 260 | nvgpu_free(&vm->kernel, sema_sea->gpu_va); |
259 | nvgpu_vm_put(vm); | 261 | nvgpu_vm_put(vm); |
260 | return -ENOMEM; | 262 | return -ENOMEM; |
@@ -387,7 +389,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm, | |||
387 | } | 389 | } |
388 | 390 | ||
389 | kernel_vma_flags = (kernel_reserved + low_hole) == aperture_size ? | 391 | kernel_vma_flags = (kernel_reserved + low_hole) == aperture_size ? |
390 | 0U : GPU_ALLOC_GVA_SPACE; | 392 | 0ULL : GPU_ALLOC_GVA_SPACE; |
391 | 393 | ||
392 | /* | 394 | /* |
393 | * A "user" area only makes sense for the GVA spaces. For VMs where | 395 | * A "user" area only makes sense for the GVA spaces. For VMs where |
@@ -579,7 +581,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g, | |||
579 | { | 581 | { |
580 | struct vm_gk20a *vm = nvgpu_kzalloc(g, sizeof(*vm)); | 582 | struct vm_gk20a *vm = nvgpu_kzalloc(g, sizeof(*vm)); |
581 | 583 | ||
582 | if (!vm) { | 584 | if (vm == NULL) { |
583 | return NULL; | 585 | return NULL; |
584 | } | 586 | } |
585 | 587 | ||
@@ -615,7 +617,8 @@ static void __nvgpu_vm_remove(struct vm_gk20a *vm) | |||
615 | } | 617 | } |
616 | } | 618 | } |
617 | 619 | ||
618 | if (nvgpu_mem_is_valid(&g->syncpt_mem) && vm->syncpt_ro_map_gpu_va) { | 620 | if (nvgpu_mem_is_valid(&g->syncpt_mem) && |
621 | vm->syncpt_ro_map_gpu_va != 0ULL) { | ||
619 | nvgpu_gmmu_unmap(vm, &g->syncpt_mem, | 622 | nvgpu_gmmu_unmap(vm, &g->syncpt_mem, |
620 | vm->syncpt_ro_map_gpu_va); | 623 | vm->syncpt_ro_map_gpu_va); |
621 | } | 624 | } |
@@ -701,7 +704,7 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf( | |||
701 | struct nvgpu_rbtree_node *root = vm->mapped_buffers; | 704 | struct nvgpu_rbtree_node *root = vm->mapped_buffers; |
702 | 705 | ||
703 | nvgpu_rbtree_search(addr, &node, root); | 706 | nvgpu_rbtree_search(addr, &node, root); |
704 | if (!node) { | 707 | if (node == NULL) { |
705 | return NULL; | 708 | return NULL; |
706 | } | 709 | } |
707 | 710 | ||
@@ -715,7 +718,7 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_range( | |||
715 | struct nvgpu_rbtree_node *root = vm->mapped_buffers; | 718 | struct nvgpu_rbtree_node *root = vm->mapped_buffers; |
716 | 719 | ||
717 | nvgpu_rbtree_range_search(addr, &node, root); | 720 | nvgpu_rbtree_range_search(addr, &node, root); |
718 | if (!node) { | 721 | if (node == NULL) { |
719 | return NULL; | 722 | return NULL; |
720 | } | 723 | } |
721 | 724 | ||
@@ -729,7 +732,7 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than( | |||
729 | struct nvgpu_rbtree_node *root = vm->mapped_buffers; | 732 | struct nvgpu_rbtree_node *root = vm->mapped_buffers; |
730 | 733 | ||
731 | nvgpu_rbtree_less_than_search(addr, &node, root); | 734 | nvgpu_rbtree_less_than_search(addr, &node, root); |
732 | if (!node) { | 735 | if (node == NULL) { |
733 | return NULL; | 736 | return NULL; |
734 | } | 737 | } |
735 | 738 | ||
@@ -755,7 +758,7 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm, | |||
755 | 758 | ||
756 | buffer_list = nvgpu_big_zalloc(vm->mm->g, sizeof(*buffer_list) * | 759 | buffer_list = nvgpu_big_zalloc(vm->mm->g, sizeof(*buffer_list) * |
757 | vm->num_user_mapped_buffers); | 760 | vm->num_user_mapped_buffers); |
758 | if (!buffer_list) { | 761 | if (buffer_list == NULL) { |
759 | nvgpu_mutex_release(&vm->update_gmmu_lock); | 762 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
760 | return -ENOMEM; | 763 | return -ENOMEM; |
761 | } | 764 | } |
@@ -841,7 +844,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | |||
841 | u8 pte_kind; | 844 | u8 pte_kind; |
842 | 845 | ||
843 | if (vm->userspace_managed && | 846 | if (vm->userspace_managed && |
844 | !(flags & NVGPU_VM_MAP_FIXED_OFFSET)) { | 847 | (flags & NVGPU_VM_MAP_FIXED_OFFSET) == 0U) { |
845 | nvgpu_err(g, | 848 | nvgpu_err(g, |
846 | "non-fixed-offset mapping not available on " | 849 | "non-fixed-offset mapping not available on " |
847 | "userspace managed address spaces"); | 850 | "userspace managed address spaces"); |
@@ -883,7 +886,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | |||
883 | * Generate a new mapping! | 886 | * Generate a new mapping! |
884 | */ | 887 | */ |
885 | mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer)); | 888 | mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer)); |
886 | if (!mapped_buffer) { | 889 | if (mapped_buffer == NULL) { |
887 | nvgpu_warn(g, "oom allocating tracking buffer"); | 890 | nvgpu_warn(g, "oom allocating tracking buffer"); |
888 | return ERR_PTR(-ENOMEM); | 891 | return ERR_PTR(-ENOMEM); |
889 | } | 892 | } |
@@ -895,7 +898,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | |||
895 | binfo.pgsz_idx = nvgpu_vm_get_pte_size(vm, map_addr, | 898 | binfo.pgsz_idx = nvgpu_vm_get_pte_size(vm, map_addr, |
896 | min_t(u64, binfo.size, align)); | 899 | min_t(u64, binfo.size, align)); |
897 | } | 900 | } |
898 | map_size = map_size ? map_size : binfo.size; | 901 | map_size = (map_size != 0ULL) ? map_size : binfo.size; |
899 | map_size = ALIGN(map_size, SZ_4K); | 902 | map_size = ALIGN(map_size, SZ_4K); |
900 | 903 | ||
901 | if ((map_size > binfo.size) || | 904 | if ((map_size > binfo.size) || |
@@ -929,7 +932,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | |||
929 | } | 932 | } |
930 | 933 | ||
931 | if ((binfo.compr_kind != NVGPU_KIND_INVALID) && | 934 | if ((binfo.compr_kind != NVGPU_KIND_INVALID) && |
932 | (flags & NVGPU_VM_MAP_FIXED_OFFSET)) { | 935 | ((flags & NVGPU_VM_MAP_FIXED_OFFSET) != 0U)) { |
933 | /* | 936 | /* |
934 | * Fixed-address compressible mapping is | 937 | * Fixed-address compressible mapping is |
935 | * requested. Make sure we're respecting the alignment | 938 | * requested. Make sure we're respecting the alignment |
@@ -1008,7 +1011,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | |||
1008 | /* | 1011 | /* |
1009 | * Figure out the kind and ctag offset for the GMMU page tables | 1012 | * Figure out the kind and ctag offset for the GMMU page tables |
1010 | */ | 1013 | */ |
1011 | if (binfo.compr_kind != NVGPU_KIND_INVALID && ctag_offset) { | 1014 | if (binfo.compr_kind != NVGPU_KIND_INVALID && ctag_offset != 0U) { |
1012 | /* | 1015 | /* |
1013 | * Adjust the ctag_offset as per the buffer map offset | 1016 | * Adjust the ctag_offset as per the buffer map offset |
1014 | */ | 1017 | */ |
@@ -1054,7 +1057,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | |||
1054 | gk20a_comptags_finish_clear(os_buf, map_addr != 0U); | 1057 | gk20a_comptags_finish_clear(os_buf, map_addr != 0U); |
1055 | } | 1058 | } |
1056 | 1059 | ||
1057 | if (!map_addr) { | 1060 | if (map_addr == 0ULL) { |
1058 | err = -ENOMEM; | 1061 | err = -ENOMEM; |
1059 | goto clean_up; | 1062 | goto clean_up; |
1060 | } | 1063 | } |
@@ -1096,7 +1099,7 @@ clean_up: | |||
1096 | mapped_buffer->pgsz_idx, | 1099 | mapped_buffer->pgsz_idx, |
1097 | mapped_buffer->va_allocated, | 1100 | mapped_buffer->va_allocated, |
1098 | gk20a_mem_flag_none, | 1101 | gk20a_mem_flag_none, |
1099 | mapped_buffer->vm_area ? | 1102 | (mapped_buffer->vm_area != NULL) ? |
1100 | mapped_buffer->vm_area->sparse : false, | 1103 | mapped_buffer->vm_area->sparse : false, |
1101 | NULL); | 1104 | NULL); |
1102 | } | 1105 | } |
@@ -1125,7 +1128,7 @@ static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer, | |||
1125 | mapped_buffer->pgsz_idx, | 1128 | mapped_buffer->pgsz_idx, |
1126 | mapped_buffer->va_allocated, | 1129 | mapped_buffer->va_allocated, |
1127 | gk20a_mem_flag_none, | 1130 | gk20a_mem_flag_none, |
1128 | mapped_buffer->vm_area ? | 1131 | (mapped_buffer->vm_area != NULL) ? |
1129 | mapped_buffer->vm_area->sparse : false, | 1132 | mapped_buffer->vm_area->sparse : false, |
1130 | batch); | 1133 | batch); |
1131 | 1134 | ||
@@ -1185,8 +1188,8 @@ static int nvgpu_vm_unmap_sync_buffer(struct vm_gk20a *vm, | |||
1185 | break; | 1188 | break; |
1186 | } | 1189 | } |
1187 | nvgpu_msleep(10); | 1190 | nvgpu_msleep(10); |
1188 | } while (!nvgpu_timeout_expired_msg(&timeout, | 1191 | } while (nvgpu_timeout_expired_msg(&timeout, |
1189 | "sync-unmap failed on 0x%llx")); | 1192 | "sync-unmap failed on 0x%llx") == 0); |
1190 | 1193 | ||
1191 | if (nvgpu_timeout_expired(&timeout)) { | 1194 | if (nvgpu_timeout_expired(&timeout)) { |
1192 | ret = -ETIMEDOUT; | 1195 | ret = -ETIMEDOUT; |
@@ -1205,7 +1208,7 @@ void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset, | |||
1205 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); | 1208 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
1206 | 1209 | ||
1207 | mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset); | 1210 | mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset); |
1208 | if (!mapped_buffer) { | 1211 | if (mapped_buffer == NULL) { |
1209 | goto done; | 1212 | goto done; |
1210 | } | 1213 | } |
1211 | 1214 | ||
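vm.c carries the only signature change in this part of the diff: nvgpu_big_pages_possible() now returns bool instead of an int used as 0/1, and the two mask tests are hoisted into named locals. The logic is untouched: both base and size must be aligned to the (big_page_size << 10) boundary the hunk computes. A self-contained mirror of the reworked function:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

/*
 * Mirrors the reworked nvgpu_big_pages_possible(): big pages are only
 * usable when base and size are both aligned to the coverage mask
 * derived from the VM's big page size (shifted as in the hunk above).
 */
static bool big_pages_possible(u32 big_page_size, u64 base, u64 size)
{
    u64 mask = ((u64)big_page_size << 10ULL) - 1ULL;
    u64 base_unaligned = base & mask;
    u64 size_unaligned = size & mask;

    return base_unaligned == 0ULL && size_unaligned == 0ULL;
}

The matching prototype update appears in include/nvgpu/vm.h at the end of this diff, so header and implementation move together.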
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c index d096de5d..ac4708af 100644 --- a/drivers/gpu/nvgpu/common/mm/vm_area.c +++ b/drivers/gpu/nvgpu/common/mm/vm_area.c | |||
@@ -66,13 +66,13 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm, | |||
66 | /* Find the space reservation, but it's ok to have none for | 66 | /* Find the space reservation, but it's ok to have none for |
67 | * userspace-managed address spaces */ | 67 | * userspace-managed address spaces */ |
68 | vm_area = nvgpu_vm_area_find(vm, map_addr); | 68 | vm_area = nvgpu_vm_area_find(vm, map_addr); |
69 | if (!vm_area && !vm->userspace_managed) { | 69 | if (vm_area == NULL && !vm->userspace_managed) { |
70 | nvgpu_warn(g, "fixed offset mapping without space allocation"); | 70 | nvgpu_warn(g, "fixed offset mapping without space allocation"); |
71 | return -EINVAL; | 71 | return -EINVAL; |
72 | } | 72 | } |
73 | 73 | ||
74 | /* Mapped area should fit inside va, if there's one */ | 74 | /* Mapped area should fit inside va, if there's one */ |
75 | if (vm_area && map_end > vm_area->addr + vm_area->size) { | 75 | if (vm_area != NULL && map_end > vm_area->addr + vm_area->size) { |
76 | nvgpu_warn(g, "fixed offset mapping size overflows va node"); | 76 | nvgpu_warn(g, "fixed offset mapping size overflows va node"); |
77 | return -EINVAL; | 77 | return -EINVAL; |
78 | } | 78 | } |
@@ -82,7 +82,7 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm, | |||
82 | * that is less than our buffer end */ | 82 | * that is less than our buffer end */ |
83 | buffer = __nvgpu_vm_find_mapped_buf_less_than( | 83 | buffer = __nvgpu_vm_find_mapped_buf_less_than( |
84 | vm, map_addr + map_size); | 84 | vm, map_addr + map_size); |
85 | if (buffer && buffer->addr + buffer->size > map_addr) { | 85 | if (buffer != NULL && buffer->addr + buffer->size > map_addr) { |
86 | nvgpu_warn(g, "overlapping buffer map requested"); | 86 | nvgpu_warn(g, "overlapping buffer map requested"); |
87 | return -EINVAL; | 87 | return -EINVAL; |
88 | } | 88 | } |
@@ -138,7 +138,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, | |||
138 | } | 138 | } |
139 | 139 | ||
140 | vm_area = nvgpu_kzalloc(g, sizeof(*vm_area)); | 140 | vm_area = nvgpu_kzalloc(g, sizeof(*vm_area)); |
141 | if (!vm_area) { | 141 | if (vm_area == NULL) { |
142 | goto clean_up_err; | 142 | goto clean_up_err; |
143 | } | 143 | } |
144 | 144 | ||
@@ -155,7 +155,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, | |||
155 | page_size); | 155 | page_size); |
156 | } | 156 | } |
157 | 157 | ||
158 | if (!vaddr_start) { | 158 | if (vaddr_start == 0ULL) { |
159 | goto clean_up_err; | 159 | goto clean_up_err; |
160 | } | 160 | } |
161 | 161 | ||
@@ -183,7 +183,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, | |||
183 | false, | 183 | false, |
184 | NULL, | 184 | NULL, |
185 | APERTURE_INVALID); | 185 | APERTURE_INVALID); |
186 | if (!map_addr) { | 186 | if (map_addr == 0ULL) { |
187 | nvgpu_mutex_release(&vm->update_gmmu_lock); | 187 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
188 | goto clean_up_err; | 188 | goto clean_up_err; |
189 | } | 189 | } |
@@ -215,7 +215,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr) | |||
215 | 215 | ||
216 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); | 216 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
217 | vm_area = nvgpu_vm_area_find(vm, addr); | 217 | vm_area = nvgpu_vm_area_find(vm, addr); |
218 | if (!vm_area) { | 218 | if (vm_area == NULL) { |
219 | nvgpu_mutex_release(&vm->update_gmmu_lock); | 219 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
220 | return 0; | 220 | return 0; |
221 | } | 221 | } |
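The vm_area.c hunks are pure comparison rewrites, but they sit on a useful invariant: a fixed-offset mapping must have a reservation (unless the VM is userspace managed), must fit inside it, and must not collide with the highest existing buffer that starts below the new mapping's end. That last test is a half-open interval overlap check; a sketch under assumed types (buf_sim stands in for struct nvgpu_mapped_buf):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t u64;

struct buf_sim {    /* stand-in for struct nvgpu_mapped_buf */
    u64 addr;
    u64 size;
};

/*
 * 'buffer' is the highest mapped buffer starting below
 * map_addr + map_size (what __nvgpu_vm_find_mapped_buf_less_than()
 * returns). The candidate range [map_addr, map_addr + map_size)
 * overlaps it exactly when that buffer ends past map_addr.
 */
static bool maps_overlap(const struct buf_sim *buffer, u64 map_addr)
{
    return buffer != NULL && buffer->addr + buffer->size > map_addr;
}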
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c index 0395e463..6d1d5f00 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu.c | |||
@@ -170,8 +170,8 @@ void nvgpu_kill_task_pg_init(struct gk20a *g) | |||
170 | break; | 170 | break; |
171 | } | 171 | } |
172 | nvgpu_udelay(2); | 172 | nvgpu_udelay(2); |
173 | } while (!nvgpu_timeout_expired_msg(&timeout, | 173 | } while (nvgpu_timeout_expired_msg(&timeout, |
174 | "timeout - waiting PMU state machine thread stop")); | 174 | "timeout - waiting PMU state machine thread stop") == 0); |
175 | } | 175 | } |
176 | } | 176 | } |
177 | 177 | ||
@@ -214,7 +214,7 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g) | |||
214 | pmu->mutex_cnt = g->ops.pmu.pmu_mutex_size(); | 214 | pmu->mutex_cnt = g->ops.pmu.pmu_mutex_size(); |
215 | pmu->mutex = nvgpu_kzalloc(g, pmu->mutex_cnt * | 215 | pmu->mutex = nvgpu_kzalloc(g, pmu->mutex_cnt * |
216 | sizeof(struct pmu_mutex)); | 216 | sizeof(struct pmu_mutex)); |
217 | if (!pmu->mutex) { | 217 | if (pmu->mutex == NULL) { |
218 | err = -ENOMEM; | 218 | err = -ENOMEM; |
219 | goto err; | 219 | goto err; |
220 | } | 220 | } |
@@ -226,7 +226,7 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g) | |||
226 | 226 | ||
227 | pmu->seq = nvgpu_kzalloc(g, PMU_MAX_NUM_SEQUENCES * | 227 | pmu->seq = nvgpu_kzalloc(g, PMU_MAX_NUM_SEQUENCES * |
228 | sizeof(struct pmu_sequence)); | 228 | sizeof(struct pmu_sequence)); |
229 | if (!pmu->seq) { | 229 | if (pmu->seq == NULL) { |
230 | err = -ENOMEM; | 230 | err = -ENOMEM; |
231 | goto err_free_mutex; | 231 | goto err_free_mutex; |
232 | } | 232 | } |
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c index bf54e0d6..a94453fb 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c | |||
@@ -1738,12 +1738,12 @@ int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g) | |||
1738 | 1738 | ||
1739 | nvgpu_log_fn(g, " "); | 1739 | nvgpu_log_fn(g, " "); |
1740 | 1740 | ||
1741 | if (pmu->fw) { | 1741 | if (pmu->fw != NULL) { |
1742 | return nvgpu_init_pmu_fw_support(pmu); | 1742 | return nvgpu_init_pmu_fw_support(pmu); |
1743 | } | 1743 | } |
1744 | 1744 | ||
1745 | pmu->fw = nvgpu_request_firmware(g, NVGPU_PMU_NS_UCODE_IMAGE, 0); | 1745 | pmu->fw = nvgpu_request_firmware(g, NVGPU_PMU_NS_UCODE_IMAGE, 0); |
1746 | if (!pmu->fw) { | 1746 | if (pmu->fw == NULL) { |
1747 | nvgpu_err(g, "failed to load pmu ucode!!"); | 1747 | nvgpu_err(g, "failed to load pmu ucode!!"); |
1748 | return err; | 1748 | return err; |
1749 | } | 1749 | } |
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c index 9fe999ae..6f88260f 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c | |||
@@ -221,7 +221,7 @@ invalid_cmd: | |||
221 | "payload in=%p, in_size=%d, in_offset=%d,\n" | 221 | "payload in=%p, in_size=%d, in_offset=%d,\n" |
222 | "payload out=%p, out_size=%d, out_offset=%d", | 222 | "payload out=%p, out_size=%d, out_offset=%d", |
223 | queue_id, cmd->hdr.size, cmd->hdr.unit_id, | 223 | queue_id, cmd->hdr.size, cmd->hdr.unit_id, |
224 | msg, msg ? msg->hdr.unit_id : ~0, | 224 | msg, (msg != NULL) ? msg->hdr.unit_id : ~0, |
225 | &payload->in, payload->in.size, payload->in.offset, | 225 | &payload->in, payload->in.size, payload->in.offset, |
226 | &payload->out, payload->out.size, payload->out.offset); | 226 | &payload->out, payload->out.size, payload->out.offset); |
227 | 227 | ||
@@ -243,7 +243,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, | |||
243 | 243 | ||
244 | do { | 244 | do { |
245 | err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size); | 245 | err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size); |
246 | if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout)) { | 246 | if (err == -EAGAIN && nvgpu_timeout_expired(&timeout) == 0) { |
247 | nvgpu_usleep_range(1000, 2000); | 247 | nvgpu_usleep_range(1000, 2000); |
248 | } else { | 248 | } else { |
249 | break; | 249 | break; |
@@ -273,7 +273,7 @@ static int pmu_cmd_payload_extract_rpc(struct gk20a *g, struct pmu_cmd *cmd, | |||
273 | dmem_alloc_size = payload->rpc.size_rpc + | 273 | dmem_alloc_size = payload->rpc.size_rpc + |
274 | payload->rpc.size_scratch; | 274 | payload->rpc.size_scratch; |
275 | dmem_alloc_offset = nvgpu_alloc(&pmu->dmem, dmem_alloc_size); | 275 | dmem_alloc_offset = nvgpu_alloc(&pmu->dmem, dmem_alloc_size); |
276 | if (!dmem_alloc_offset) { | 276 | if (dmem_alloc_offset == 0U) { |
277 | err = -ENOMEM; | 277 | err = -ENOMEM; |
278 | goto clean_up; | 278 | goto clean_up; |
279 | } | 279 | } |
@@ -312,11 +312,11 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, | |||
312 | 312 | ||
313 | nvgpu_log_fn(g, " "); | 313 | nvgpu_log_fn(g, " "); |
314 | 314 | ||
315 | if (payload) { | 315 | if (payload != NULL) { |
316 | seq->out_payload = payload->out.buf; | 316 | seq->out_payload = payload->out.buf; |
317 | } | 317 | } |
318 | 318 | ||
319 | if (payload && payload->in.offset != 0U) { | 319 | if (payload != NULL && payload->in.offset != 0U) { |
320 | pv->set_pmu_allocation_ptr(pmu, &in, | 320 | pv->set_pmu_allocation_ptr(pmu, &in, |
321 | ((u8 *)&cmd->cmd + payload->in.offset)); | 321 | ((u8 *)&cmd->cmd + payload->in.offset)); |
322 | 322 | ||
@@ -331,14 +331,14 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, | |||
331 | *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) = | 331 | *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) = |
332 | nvgpu_alloc(&pmu->dmem, | 332 | nvgpu_alloc(&pmu->dmem, |
333 | pv->pmu_allocation_get_dmem_size(pmu, in)); | 333 | pv->pmu_allocation_get_dmem_size(pmu, in)); |
334 | if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in))) { | 334 | if (*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) == 0U) { |
335 | goto clean_up; | 335 | goto clean_up; |
336 | } | 336 | } |
337 | 337 | ||
338 | if (payload->in.fb_size != 0x0U) { | 338 | if (payload->in.fb_size != 0x0U) { |
339 | seq->in_mem = nvgpu_kzalloc(g, | 339 | seq->in_mem = nvgpu_kzalloc(g, |
340 | sizeof(struct nvgpu_mem)); | 340 | sizeof(struct nvgpu_mem)); |
341 | if (!seq->in_mem) { | 341 | if (seq->in_mem == NULL) { |
342 | err = -ENOMEM; | 342 | err = -ENOMEM; |
343 | goto clean_up; | 343 | goto clean_up; |
344 | } | 344 | } |
@@ -365,7 +365,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, | |||
365 | pv->pmu_allocation_get_dmem_offset(pmu, in)); | 365 | pv->pmu_allocation_get_dmem_offset(pmu, in)); |
366 | } | 366 | } |
367 | 367 | ||
368 | if (payload && payload->out.offset != 0U) { | 368 | if (payload != NULL && payload->out.offset != 0U) { |
369 | pv->set_pmu_allocation_ptr(pmu, &out, | 369 | pv->set_pmu_allocation_ptr(pmu, &out, |
370 | ((u8 *)&cmd->cmd + payload->out.offset)); | 370 | ((u8 *)&cmd->cmd + payload->out.offset)); |
371 | pv->pmu_allocation_set_dmem_size(pmu, out, | 371 | pv->pmu_allocation_set_dmem_size(pmu, out, |
@@ -376,15 +376,15 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, | |||
376 | nvgpu_alloc(&pmu->dmem, | 376 | nvgpu_alloc(&pmu->dmem, |
377 | pv->pmu_allocation_get_dmem_size(pmu, | 377 | pv->pmu_allocation_get_dmem_size(pmu, |
378 | out)); | 378 | out)); |
379 | if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, | 379 | if (*(pv->pmu_allocation_get_dmem_offset_addr(pmu, |
380 | out))) { | 380 | out)) == 0U) { |
381 | goto clean_up; | 381 | goto clean_up; |
382 | } | 382 | } |
383 | 383 | ||
384 | if (payload->out.fb_size != 0x0U) { | 384 | if (payload->out.fb_size != 0x0U) { |
385 | seq->out_mem = nvgpu_kzalloc(g, | 385 | seq->out_mem = nvgpu_kzalloc(g, |
386 | sizeof(struct nvgpu_mem)); | 386 | sizeof(struct nvgpu_mem)); |
387 | if (!seq->out_mem) { | 387 | if (seq->out_mem == NULL) { |
388 | err = -ENOMEM; | 388 | err = -ENOMEM; |
389 | goto clean_up; | 389 | goto clean_up; |
390 | } | 390 | } |
@@ -439,16 +439,16 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, | |||
439 | 439 | ||
440 | nvgpu_log_fn(g, " "); | 440 | nvgpu_log_fn(g, " "); |
441 | 441 | ||
442 | if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) { | 442 | if (cmd == NULL || seq_desc == NULL || !pmu->pmu_ready) { |
443 | if (!cmd) { | 443 | if (cmd == NULL) { |
444 | nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__); | 444 | nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__); |
445 | } else if (!seq_desc) { | 445 | } else if (seq_desc == NULL) { |
446 | nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__); | 446 | nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__); |
447 | } else { | 447 | } else { |
448 | nvgpu_warn(g, "%s(): PMU is not ready", __func__); | 448 | nvgpu_warn(g, "%s(): PMU is not ready", __func__); |
449 | } | 449 | } |
450 | 450 | ||
451 | WARN_ON(1); | 451 | WARN_ON(true); |
452 | return -EINVAL; | 452 | return -EINVAL; |
453 | } | 453 | } |
454 | 454 | ||
@@ -612,7 +612,7 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg) | |||
612 | err = g->ops.perf.handle_pmu_perf_event(g, | 612 | err = g->ops.perf.handle_pmu_perf_event(g, |
613 | (void *)&msg->msg.perf); | 613 | (void *)&msg->msg.perf); |
614 | } else { | 614 | } else { |
615 | WARN_ON(1); | 615 | WARN_ON(true); |
616 | } | 616 | } |
617 | break; | 617 | break; |
618 | case PMU_UNIT_THERM: | 618 | case PMU_UNIT_THERM: |
@@ -641,7 +641,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, | |||
641 | 641 | ||
642 | err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr, | 642 | err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr, |
643 | PMU_MSG_HDR_SIZE, &bytes_read); | 643 | PMU_MSG_HDR_SIZE, &bytes_read); |
644 | if (err || bytes_read != PMU_MSG_HDR_SIZE) { | 644 | if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) { |
645 | nvgpu_err(g, "fail to read msg from queue %d", queue->id); | 645 | nvgpu_err(g, "fail to read msg from queue %d", queue->id); |
646 | *status = err | -EINVAL; | 646 | *status = err | -EINVAL; |
647 | goto clean_up; | 647 | goto clean_up; |
@@ -657,7 +657,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, | |||
657 | /* read again after rewind */ | 657 | /* read again after rewind */ |
658 | err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr, | 658 | err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr, |
659 | PMU_MSG_HDR_SIZE, &bytes_read); | 659 | PMU_MSG_HDR_SIZE, &bytes_read); |
660 | if (err || bytes_read != PMU_MSG_HDR_SIZE) { | 660 | if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) { |
661 | nvgpu_err(g, | 661 | nvgpu_err(g, |
662 | "fail to read msg from queue %d", queue->id); | 662 | "fail to read msg from queue %d", queue->id); |
663 | *status = err | -EINVAL; | 663 | *status = err | -EINVAL; |
@@ -676,7 +676,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, | |||
676 | read_size = msg->hdr.size - PMU_MSG_HDR_SIZE; | 676 | read_size = msg->hdr.size - PMU_MSG_HDR_SIZE; |
677 | err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->msg, | 677 | err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->msg, |
678 | read_size, &bytes_read); | 678 | read_size, &bytes_read); |
679 | if (err || bytes_read != read_size) { | 679 | if (err != 0 || bytes_read != read_size) { |
680 | nvgpu_err(g, | 680 | nvgpu_err(g, |
681 | "fail to read msg from queue %d", queue->id); | 681 | "fail to read msg from queue %d", queue->id); |
682 | *status = err; | 682 | *status = err; |
@@ -750,7 +750,7 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, | |||
750 | 750 | ||
751 | nvgpu_usleep_range(delay, delay * 2U); | 751 | nvgpu_usleep_range(delay, delay * 2U); |
752 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); | 752 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); |
753 | } while (!nvgpu_timeout_expired(&timeout)); | 753 | } while (nvgpu_timeout_expired(&timeout) == 0); |
754 | 754 | ||
755 | return -ETIMEDOUT; | 755 | return -ETIMEDOUT; |
756 | } | 756 | } |
@@ -887,7 +887,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc, | |||
887 | if (caller_cb == NULL) { | 887 | if (caller_cb == NULL) { |
888 | rpc_payload = nvgpu_kzalloc(g, | 888 | rpc_payload = nvgpu_kzalloc(g, |
889 | sizeof(struct rpc_handler_payload) + size_rpc); | 889 | sizeof(struct rpc_handler_payload) + size_rpc); |
890 | if (!rpc_payload) { | 890 | if (rpc_payload == NULL) { |
891 | status = ENOMEM; | 891 | status = ENOMEM; |
892 | goto exit; | 892 | goto exit; |
893 | } | 893 | } |
@@ -907,7 +907,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc, | |||
907 | } | 907 | } |
908 | rpc_payload = nvgpu_kzalloc(g, | 908 | rpc_payload = nvgpu_kzalloc(g, |
909 | sizeof(struct rpc_handler_payload)); | 909 | sizeof(struct rpc_handler_payload)); |
910 | if (!rpc_payload) { | 910 | if (rpc_payload == NULL) { |
911 | status = ENOMEM; | 911 | status = ENOMEM; |
912 | goto exit; | 912 | goto exit; |
913 | } | 913 | } |
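Several pmu_ipc.c hunks convert !offset into offset == 0U, and the distinction matters here: the PMU DMEM allocator (pmu->dmem, an nvgpu_allocator, as the nvgpu_alloc() calls above show) hands out integer offsets into falcon data memory, so failure is signalled by offset 0 rather than by a NULL pointer, and the explicit comparison makes that visible at the call site. A sketch of the pattern with a toy bump allocator; dmem_sim and its layout are invented for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/*
 * Toy bump allocator over a DMEM-like region. Offset 0 is kept
 * unused so it can serve as the failure sentinel.
 */
struct dmem_sim {
    u32 next;
    u32 size;
};

static u32 dmem_alloc_sim(struct dmem_sim *d, u32 len)
{
    u32 off;

    if (d->next == 0U) {
        d->next = 4U;    /* reserve offset 0 */
    }
    if (len == 0U || d->size - d->next < len) {
        return 0U;       /* 0 means "no space", never "offset 0" */
    }
    off = d->next;
    d->next += len;
    return off;
}

int main(void)
{
    struct dmem_sim d = { .next = 0U, .size = 1024U };
    u32 off = dmem_alloc_sim(&d, 128U);

    if (off == 0U) {     /* explicit test, as in the hunks above */
        return 1;
    }
    printf("allocated at DMEM offset %u\n", off);
    return 0;
}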
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c index a99e86ce..12ab4422 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c | |||
@@ -51,7 +51,7 @@ static u8 get_perfmon_id(struct nvgpu_pmu *pmu) | |||
51 | default: | 51 | default: |
52 | unit_id = PMU_UNIT_INVALID; | 52 | unit_id = PMU_UNIT_INVALID; |
53 | nvgpu_err(g, "no support for %x", ver); | 53 | nvgpu_err(g, "no support for %x", ver); |
54 | WARN_ON(1); | 54 | WARN_ON(true); |
55 | } | 55 | } |
56 | 56 | ||
57 | return unit_id; | 57 | return unit_id; |
@@ -75,11 +75,11 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu) | |||
75 | 75 | ||
76 | g->ops.pmu.pmu_init_perfmon_counter(g); | 76 | g->ops.pmu.pmu_init_perfmon_counter(g); |
77 | 77 | ||
78 | if (!pmu->sample_buffer) { | 78 | if (pmu->sample_buffer == 0U) { |
79 | pmu->sample_buffer = nvgpu_alloc(&pmu->dmem, | 79 | pmu->sample_buffer = nvgpu_alloc(&pmu->dmem, |
80 | 2U * sizeof(u16)); | 80 | 2U * sizeof(u16)); |
81 | } | 81 | } |
82 | if (!pmu->sample_buffer) { | 82 | if (pmu->sample_buffer == 0U) { |
83 | nvgpu_err(g, "failed to allocate perfmon sample buffer"); | 83 | nvgpu_err(g, "failed to allocate perfmon sample buffer"); |
84 | return -ENOMEM; | 84 | return -ENOMEM; |
85 | } | 85 | } |
@@ -240,7 +240,7 @@ int nvgpu_pmu_load_update(struct gk20a *g) | |||
240 | void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles, | 240 | void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles, |
241 | u32 *total_cycles) | 241 | u32 *total_cycles) |
242 | { | 242 | { |
243 | if (!g->power_on || gk20a_busy(g)) { | 243 | if (!g->power_on || gk20a_busy(g) != 0) { |
244 | *busy_cycles = 0; | 244 | *busy_cycles = 0; |
245 | *total_cycles = 0; | 245 | *total_cycles = 0; |
246 | return; | 246 | return; |
@@ -254,7 +254,7 @@ void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles, | |||
254 | 254 | ||
255 | void nvgpu_pmu_reset_load_counters(struct gk20a *g) | 255 | void nvgpu_pmu_reset_load_counters(struct gk20a *g) |
256 | { | 256 | { |
257 | if (!g->power_on || gk20a_busy(g)) { | 257 | if (!g->power_on || gk20a_busy(g) != 0) { |
258 | return; | 258 | return; |
259 | } | 259 | } |
260 | 260 | ||
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c index 0758279d..d2615b1a 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c | |||
@@ -89,9 +89,9 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, | |||
89 | } | 89 | } |
90 | 90 | ||
91 | if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) { | 91 | if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) { |
92 | if (g->ops.pmu.pmu_pg_engines_feature_list && | 92 | if (g->ops.pmu.pmu_pg_engines_feature_list != NULL && |
93 | g->ops.pmu.pmu_pg_engines_feature_list(g, | 93 | g->ops.pmu.pmu_pg_engines_feature_list(g, |
94 | PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != | 94 | PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != |
95 | NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) { | 95 | NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) { |
96 | pmu->initialized = true; | 96 | pmu->initialized = true; |
97 | nvgpu_pmu_state_change(g, PMU_STATE_STARTED, | 97 | nvgpu_pmu_state_change(g, PMU_STATE_STARTED, |
@@ -117,9 +117,9 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg) | |||
117 | u32 status = 0; | 117 | u32 status = 0; |
118 | 118 | ||
119 | if (enable_pg == true) { | 119 | if (enable_pg == true) { |
120 | if (g->ops.pmu.pmu_pg_engines_feature_list && | 120 | if (g->ops.pmu.pmu_pg_engines_feature_list != NULL && |
121 | g->ops.pmu.pmu_pg_engines_feature_list(g, | 121 | g->ops.pmu.pmu_pg_engines_feature_list(g, |
122 | PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != | 122 | PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != |
123 | NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) { | 123 | NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) { |
124 | if (g->ops.pmu.pmu_lpwr_enable_pg) { | 124 | if (g->ops.pmu.pmu_lpwr_enable_pg) { |
125 | status = g->ops.pmu.pmu_lpwr_enable_pg(g, | 125 | status = g->ops.pmu.pmu_lpwr_enable_pg(g, |
@@ -129,9 +129,9 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg) | |||
129 | status = nvgpu_pmu_enable_elpg(g); | 129 | status = nvgpu_pmu_enable_elpg(g); |
130 | } | 130 | } |
131 | } else if (enable_pg == false) { | 131 | } else if (enable_pg == false) { |
132 | if (g->ops.pmu.pmu_pg_engines_feature_list && | 132 | if (g->ops.pmu.pmu_pg_engines_feature_list != NULL && |
133 | g->ops.pmu.pmu_pg_engines_feature_list(g, | 133 | g->ops.pmu.pmu_pg_engines_feature_list(g, |
134 | PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != | 134 | PMU_PG_ELPG_ENGINE_ID_GRAPHICS) != |
135 | NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) { | 135 | NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) { |
136 | if (g->ops.pmu.pmu_lpwr_disable_pg) { | 136 | if (g->ops.pmu.pmu_lpwr_disable_pg) { |
137 | status = g->ops.pmu.pmu_lpwr_disable_pg(g, | 137 | status = g->ops.pmu.pmu_lpwr_disable_pg(g, |
@@ -207,7 +207,7 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g) | |||
207 | nvgpu_warn(g, | 207 | nvgpu_warn(g, |
208 | "%s(): possible elpg refcnt mismatch. elpg refcnt=%d", | 208 | "%s(): possible elpg refcnt mismatch. elpg refcnt=%d", |
209 | __func__, pmu->elpg_refcnt); | 209 | __func__, pmu->elpg_refcnt); |
210 | WARN_ON(1); | 210 | WARN_ON(true); |
211 | } | 211 | } |
212 | 212 | ||
213 | /* do NOT enable elpg until golden ctx is created, | 213 | /* do NOT enable elpg until golden ctx is created, |
@@ -273,7 +273,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g) | |||
273 | nvgpu_warn(g, | 273 | nvgpu_warn(g, |
274 | "%s(): possible elpg refcnt mismatch. elpg refcnt=%d", | 274 | "%s(): possible elpg refcnt mismatch. elpg refcnt=%d", |
275 | __func__, pmu->elpg_refcnt); | 275 | __func__, pmu->elpg_refcnt); |
276 | WARN_ON(1); | 276 | WARN_ON(true); |
277 | ret = 0; | 277 | ret = 0; |
278 | goto exit_unlock; | 278 | goto exit_unlock; |
279 | } | 279 | } |
@@ -481,7 +481,8 @@ int nvgpu_pmu_init_powergating(struct gk20a *g) | |||
481 | pg_engine_id++) { | 481 | pg_engine_id++) { |
482 | 482 | ||
483 | if (BIT(pg_engine_id) & pg_engine_id_list) { | 483 | if (BIT(pg_engine_id) & pg_engine_id_list) { |
484 | if (pmu && pmu->pmu_state == PMU_STATE_INIT_RECEIVED) { | 484 | if (pmu != NULL && |
485 | pmu->pmu_state == PMU_STATE_INIT_RECEIVED) { | ||
485 | nvgpu_pmu_state_change(g, | 486 | nvgpu_pmu_state_change(g, |
486 | PMU_STATE_ELPG_BOOTING, false); | 487 | PMU_STATE_ELPG_BOOTING, false); |
487 | } | 488 | } |
@@ -636,9 +637,9 @@ static void ap_callback_init_and_enable_ctrl( | |||
636 | void *param, u32 seq_desc, u32 status) | 637 | void *param, u32 seq_desc, u32 status) |
637 | { | 638 | { |
638 | /* Define p_ap (i.e pointer to pmu_ap structure) */ | 639 | /* Define p_ap (i.e pointer to pmu_ap structure) */ |
639 | WARN_ON(!msg); | 640 | WARN_ON(msg == NULL); |
640 | 641 | ||
641 | if (!status) { | 642 | if (status == 0U) { |
642 | switch (msg->msg.pg.ap_msg.cmn.msg_id) { | 643 | switch (msg->msg.pg.ap_msg.cmn.msg_id) { |
643 | case PMU_AP_MSG_ID_INIT_ACK: | 644 | case PMU_AP_MSG_ID_INIT_ACK: |
644 | nvgpu_pmu_dbg(g, "reply PMU_AP_CMD_ID_INIT"); | 645 | nvgpu_pmu_dbg(g, "reply PMU_AP_CMD_ID_INIT"); |
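pmu_pg.c's recurring change is the HAL guard: g->ops.pmu.pmu_pg_engines_feature_list is called only after an explicit != NULL test, which is how nvgpu expresses optional per-chip hooks in its ops tables. A compact sketch of that pattern; the struct layout, chip hook, and feature mask are invented for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define SIM_FEATURE_POWER_GATING 0x1U    /* illustrative mask */

struct chip_ops_sim {
    /* Optional per-chip hook; left NULL on chips without it. */
    u32 (*pg_feature_list)(u32 engine_id);
};

static u32 newer_chip_pg_feature_list(u32 engine_id)
{
    (void)engine_id;
    return SIM_FEATURE_POWER_GATING | 0x2U;
}

/* Guard the pointer explicitly before calling through it. */
static bool has_extra_pg_features(const struct chip_ops_sim *ops, u32 engine)
{
    return ops->pg_feature_list != NULL &&
           ops->pg_feature_list(engine) != SIM_FEATURE_POWER_GATING;
}

int main(void)
{
    struct chip_ops_sim with = { .pg_feature_list = newer_chip_pg_feature_list };
    struct chip_ops_sim without = { .pg_feature_list = NULL };

    printf("%d %d\n", has_extra_pg_features(&with, 0U),
           has_extra_pg_features(&without, 0U));
    return 0;
}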
diff --git a/drivers/gpu/nvgpu/include/nvgpu/allocator.h b/drivers/gpu/nvgpu/include/nvgpu/allocator.h index 2bff0efd..d722673d 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/allocator.h +++ b/drivers/gpu/nvgpu/include/nvgpu/allocator.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. | 2 | * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
@@ -83,7 +83,7 @@ struct nvgpu_allocator_ops { | |||
83 | u64 (*base)(struct nvgpu_allocator *allocator); | 83 | u64 (*base)(struct nvgpu_allocator *allocator); |
84 | u64 (*length)(struct nvgpu_allocator *allocator); | 84 | u64 (*length)(struct nvgpu_allocator *allocator); |
85 | u64 (*end)(struct nvgpu_allocator *allocator); | 85 | u64 (*end)(struct nvgpu_allocator *allocator); |
86 | int (*inited)(struct nvgpu_allocator *allocator); | 86 | bool (*inited)(struct nvgpu_allocator *allocator); |
87 | u64 (*space)(struct nvgpu_allocator *allocator); | 87 | u64 (*space)(struct nvgpu_allocator *allocator); |
88 | 88 | ||
89 | /* Destructor. */ | 89 | /* Destructor. */ |
@@ -188,11 +188,11 @@ nvgpu_alloc_carveout_from_co_entry(struct nvgpu_list_node *node) | |||
188 | * pointing to the allocation base (requires GPU_ALLOC_FORCE_CONTIG to be | 188 | * pointing to the allocation base (requires GPU_ALLOC_FORCE_CONTIG to be |
189 | * set as well). | 189 | * set as well). |
190 | */ | 190 | */ |
191 | #define GPU_ALLOC_GVA_SPACE BIT(0) | 191 | #define GPU_ALLOC_GVA_SPACE BIT64(0) |
192 | #define GPU_ALLOC_NO_ALLOC_PAGE BIT(1) | 192 | #define GPU_ALLOC_NO_ALLOC_PAGE BIT64(1) |
193 | #define GPU_ALLOC_4K_VIDMEM_PAGES BIT(2) | 193 | #define GPU_ALLOC_4K_VIDMEM_PAGES BIT64(2) |
194 | #define GPU_ALLOC_FORCE_CONTIG BIT(3) | 194 | #define GPU_ALLOC_FORCE_CONTIG BIT64(3) |
195 | #define GPU_ALLOC_NO_SCATTER_GATHER BIT(4) | 195 | #define GPU_ALLOC_NO_SCATTER_GATHER BIT64(4) |
196 | 196 | ||
197 | static inline void alloc_lock(struct nvgpu_allocator *a) | 197 | static inline void alloc_lock(struct nvgpu_allocator *a) |
198 | { | 198 | { |
@@ -256,7 +256,7 @@ void nvgpu_alloc_release_carveout(struct nvgpu_allocator *a, | |||
256 | u64 nvgpu_alloc_base(struct nvgpu_allocator *a); | 256 | u64 nvgpu_alloc_base(struct nvgpu_allocator *a); |
257 | u64 nvgpu_alloc_length(struct nvgpu_allocator *a); | 257 | u64 nvgpu_alloc_length(struct nvgpu_allocator *a); |
258 | u64 nvgpu_alloc_end(struct nvgpu_allocator *a); | 258 | u64 nvgpu_alloc_end(struct nvgpu_allocator *a); |
259 | u64 nvgpu_alloc_initialized(struct nvgpu_allocator *a); | 259 | bool nvgpu_alloc_initialized(struct nvgpu_allocator *a); |
260 | u64 nvgpu_alloc_space(struct nvgpu_allocator *a); | 260 | u64 nvgpu_alloc_space(struct nvgpu_allocator *a); |
261 | 261 | ||
262 | void nvgpu_alloc_destroy(struct nvgpu_allocator *allocator); | 262 | void nvgpu_alloc_destroy(struct nvgpu_allocator *allocator); |
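The allocator.h hunk moves the GPU_ALLOC_* flags from BIT() to BIT64(). The allocator's flags field is a u64 and the .c hunks in this series compare it against 0ULL, so the flag constants need a guaranteed 64-bit type on every target; plain BIT() expands through unsigned long, which is only 32 bits wide on 32-bit builds. A sketch of why the suffix matters; the *_SIM macros are written out locally for illustration, since nvgpu takes BIT64() from its own headers:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define BIT32_SIM(n) (1UL << (n))     /* 32 bits wide on ILP32 targets */
#define BIT64_SIM(n) (1ULL << (n))    /* always 64 bits wide */

#define SIM_ALLOC_GVA_SPACE    BIT64_SIM(0)
#define SIM_ALLOC_FORCE_CONTIG BIT64_SIM(3)

int main(void)
{
    u64 flags = SIM_ALLOC_GVA_SPACE | SIM_ALLOC_FORCE_CONTIG;

    /*
     * The test stays entirely in u64, matching the '!= 0ULL' checks
     * introduced in the .c files of this diff.
     */
    if ((flags & SIM_ALLOC_FORCE_CONTIG) != 0ULL) {
        printf("contiguous allocation forced\n");
    }
    return 0;
}

For bit positions 0 through 4 the numeric value is the same either way; the 64-bit form removes the implicit widening conversion and stays correct if a flag ever moves past bit 31.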
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h index b47d4ee0..23dac0ac 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/vm.h +++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. | 2 | * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
@@ -220,7 +220,7 @@ void nvgpu_vm_get(struct vm_gk20a *vm); | |||
220 | void nvgpu_vm_put(struct vm_gk20a *vm); | 220 | void nvgpu_vm_put(struct vm_gk20a *vm); |
221 | 221 | ||
222 | int vm_aspace_id(struct vm_gk20a *vm); | 222 | int vm_aspace_id(struct vm_gk20a *vm); |
223 | int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size); | 223 | bool nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size); |
224 | 224 | ||
225 | int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm); | 225 | int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm); |
226 | 226 | ||