author	Amulya <Amurthyreddy@nvidia.com>	2018-08-28 03:04:55 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-19 06:24:12 -0400
commit	941ac9a9d07bedb4062fd0c4d32eb2ef80a42359 (patch)
tree	c53622d96a4c2e7c18693ecf4059d7e403cd7808 /drivers/gpu/nvgpu/common/mm/page_allocator.c
parent	2805f03aa0496502b64ff760f667bfe9d8a27928 (diff)
nvgpu: common: MISRA 10.1 boolean fixes
Fix violations where a variable of type non-boolean is used as a
boolean in gpu/nvgpu/common.

JIRA NVGPU-646

Change-Id: I9773d863b715f83ae1772b75d5373f77244bc8ca
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807132
GVS: Gerrit_Virtual_Submit
Tested-by: Amulya Murthyreddy <amurthyreddy@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
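For context, MISRA C:2012 Rule 10.1 requires that the operands of logical operators and the controlling expressions of if/while be essentially Boolean, so truth-value shortcuts like if (!ptr) or if (flags & MASK) must become explicit comparisons. The following minimal standalone sketch (hypothetical names, not nvgpu code) illustrates the three patterns this commit applies: pointers compared against NULL, unsigned flag masks compared against 0ULL, and predicate functions returning bool instead of int.

/*
 * Minimal standalone sketch of the MISRA C:2012 Rule 10.1 pattern.
 * All names here are hypothetical; only the transformation shape
 * matches the commit below.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FORCE_CONTIG (1ULL << 0)	/* hypothetical flag bit */

struct allocator {
	uint64_t flags;
	void *priv;
};

/* Before: 'static int inited(...)' returning a pointer's truth value. */
static bool inited(const struct allocator *a)
{
	return a->priv != NULL;	/* explicit comparison yields a bool */
}

static void check(const struct allocator *a)
{
	/* Before: if (!a->priv)  ->  After: explicit NULL comparison. */
	if (a->priv == NULL)
		printf("no private data\n");

	/* Before: if (a->flags & FORCE_CONTIG)  ->  After: compare to 0ULL. */
	if ((a->flags & FORCE_CONTIG) != 0ULL)
		printf("contiguous allocation forced\n");

	if (inited(a))
		printf("initialized\n");
}

int main(void)
{
	struct allocator a = { .flags = FORCE_CONTIG, .priv = NULL };

	check(&a);
	return 0;
}

Behavior is unchanged in every case; the rewrite only makes each Boolean conversion explicit, which is why the diff below swaps conditions line for line without altering control flow.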
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/page_allocator.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/page_allocator.c	55
1 file changed, 28 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index c8bc17c7..35c7e120 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -116,7 +116,7 @@ static u64 nvgpu_page_alloc_base(struct nvgpu_allocator *a)
 	return nvgpu_alloc_base(&va->source_allocator);
 }
 
-static int nvgpu_page_alloc_inited(struct nvgpu_allocator *a)
+static bool nvgpu_page_alloc_inited(struct nvgpu_allocator *a)
 {
 	struct nvgpu_page_allocator *va = a->priv;
 
@@ -264,7 +264,7 @@ static struct nvgpu_page_alloc *find_page_alloc(
 	struct nvgpu_rbtree_node *node = NULL;
 
 	nvgpu_rbtree_search(addr, &node, a->allocs);
-	if (!node) {
+	if (node == NULL) {
 		return NULL;
 	}
 
@@ -282,7 +282,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 	struct page_alloc_slab_page *slab_page;
 
 	slab_page = nvgpu_kmem_cache_alloc(a->slab_page_cache);
-	if (!slab_page) {
+	if (slab_page == NULL) {
 		palloc_dbg(a, "OOM: unable to alloc slab_page struct!");
 		return NULL;
 	}
@@ -290,7 +290,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 	memset(slab_page, 0, sizeof(*slab_page));
 
 	slab_page->page_addr = nvgpu_alloc(&a->source_allocator, a->page_size);
-	if (!slab_page->page_addr) {
+	if (slab_page->page_addr == 0ULL) {
 		nvgpu_kmem_cache_free(a->slab_page_cache, slab_page);
 		palloc_dbg(a, "OOM: vidmem is full!");
 		return NULL;
@@ -354,9 +354,9 @@ static int do_slab_alloc(struct nvgpu_page_allocator *a,
 		del_slab_page_from_empty(slab, slab_page);
 	}
 
-	if (!slab_page) {
+	if (slab_page == NULL) {
 		slab_page = alloc_slab_page(a, slab);
-		if (!slab_page) {
+		if (slab_page == NULL) {
 			return -ENOMEM;
 		}
 	}
@@ -423,7 +423,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_slab(
 	slab = &a->slabs[slab_nr];
 
 	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
-	if (!alloc) {
+	if (alloc == NULL) {
 		palloc_dbg(a, "OOM: could not alloc page_alloc struct!");
 		goto fail;
 	}
@@ -431,7 +431,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_slab(
 	alloc->sgt.ops = &page_alloc_sgl_ops;
 
 	sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl));
-	if (!sgl) {
+	if (sgl == NULL) {
 		palloc_dbg(a, "OOM: could not alloc sgl struct!");
 		goto fail;
 	}
@@ -524,7 +524,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
 	int i = 0;
 
 	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
-	if (!alloc) {
+	if (alloc == NULL) {
 		goto fail;
 	}
 
@@ -545,7 +545,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
 	 * requested size. The buddy allocator guarantees any given
 	 * single alloc is contiguous.
 	 */
-	if (a->flags & GPU_ALLOC_FORCE_CONTIG && i != 0) {
+	if ((a->flags & GPU_ALLOC_FORCE_CONTIG) != 0ULL && i != 0) {
 		goto fail_cleanup;
 	}
 
@@ -563,23 +563,23 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
 			chunk_len);
 
 		/* Divide by 2 and try again */
-		if (!chunk_addr) {
+		if (chunk_addr == 0ULL) {
 			palloc_dbg(a, "balloc failed: 0x%llx",
 				chunk_len);
 			chunk_len >>= 1;
 			max_chunk_len = chunk_len;
 		}
-	} while (!chunk_addr && chunk_len >= a->page_size);
+	} while (chunk_addr == 0ULL && chunk_len >= a->page_size);
 
 	chunk_pages = chunk_len >> a->page_shift;
 
-	if (!chunk_addr) {
+	if (chunk_addr == 0ULL) {
 		palloc_dbg(a, "bailing @ 0x%llx", chunk_len);
 		goto fail_cleanup;
 	}
 
 	sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl));
-	if (!sgl) {
+	if (sgl == NULL) {
 		nvgpu_free(&a->source_allocator, chunk_addr);
 		goto fail_cleanup;
 	}
@@ -638,7 +638,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages(
 	pages = ALIGN(len, a->page_size) >> a->page_shift;
 
 	alloc = do_nvgpu_alloc_pages(a, pages);
-	if (!alloc) {
+	if (alloc == NULL) {
 		palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)",
 			pages << a->page_shift, pages);
 		return NULL;
@@ -679,18 +679,18 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *na, u64 len)
 	 * If we want contig pages we have to round up to a power of two. It's
 	 * easier to do that here than in the buddy allocator.
 	 */
-	real_len = a->flags & GPU_ALLOC_FORCE_CONTIG ?
+	real_len = ((a->flags & GPU_ALLOC_FORCE_CONTIG) != 0ULL) ?
 		roundup_pow_of_two(len) : len;
 
 	alloc_lock(na);
-	if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES &&
+	if ((a->flags & GPU_ALLOC_4K_VIDMEM_PAGES) != 0ULL &&
 	    real_len <= (a->page_size / 2U)) {
 		alloc = nvgpu_alloc_slab(a, real_len);
 	} else {
 		alloc = nvgpu_alloc_pages(a, real_len);
 	}
 
-	if (!alloc) {
+	if (alloc == NULL) {
 		alloc_unlock(na);
 		return 0;
 	}
@@ -728,7 +728,7 @@ static void nvgpu_page_free(struct nvgpu_allocator *na, u64 base)
 			((struct nvgpu_page_alloc *)(uintptr_t)base)->base);
 	}
 
-	if (!alloc) {
+	if (alloc == NULL) {
 		palloc_dbg(a, "Hrm, found no alloc?");
 		goto done;
 	}
@@ -760,13 +760,13 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages_fixed(
 
 	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
 	sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl));
-	if (!alloc || !sgl) {
+	if (alloc == NULL || sgl == NULL) {
 		goto fail;
 	}
 
 	alloc->sgt.ops = &page_alloc_sgl_ops;
 	alloc->base = nvgpu_alloc_fixed(&a->source_allocator, base, length, 0);
-	if (!alloc->base) {
+	if (alloc->base == 0ULL) {
 		WARN(1, "nvgpu: failed to fixed alloc pages @ 0x%010llx", base);
 		goto fail;
 	}
@@ -811,7 +811,7 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *na,
 	alloc_lock(na);
 
 	alloc = nvgpu_alloc_pages_fixed(a, base, aligned_len, 0);
-	if (!alloc) {
+	if (alloc == NULL) {
 		alloc_unlock(na);
 		return 0;
 	}
@@ -850,7 +850,7 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *na,
 
 	if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
 		alloc = find_page_alloc(a, base);
-		if (!alloc) {
+		if (alloc == NULL) {
 			goto done;
 		}
 	} else {
@@ -985,7 +985,7 @@ static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a)
 	a->slabs = nvgpu_kcalloc(nvgpu_alloc_to_gpu(a->owner),
 				 nr_slabs,
 				 sizeof(struct page_alloc_slab));
-	if (!a->slabs) {
+	if (a->slabs == NULL) {
 		return -ENOMEM;
 	}
 	a->nr_slabs = nr_slabs;
@@ -1018,7 +1018,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	}
 
 	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_page_allocator));
-	if (!a) {
+	if (a == NULL) {
 		return -ENOMEM;
 	}
 
@@ -1031,7 +1031,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 			sizeof(struct nvgpu_page_alloc));
 	a->slab_page_cache = nvgpu_kmem_cache_create(g,
 			sizeof(struct page_alloc_slab_page));
-	if (!a->alloc_cache || !a->slab_page_cache) {
+	if (a->alloc_cache == NULL || a->slab_page_cache == NULL) {
 		err = -ENOMEM;
 		goto fail;
 	}
@@ -1044,7 +1044,8 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	a->owner = na;
 	a->flags = flags;
 
-	if (flags & GPU_ALLOC_4K_VIDMEM_PAGES && blk_size > SZ_4K) {
+	if ((flags & GPU_ALLOC_4K_VIDMEM_PAGES) != 0ULL &&
+		blk_size > SZ_4K) {
 		err = nvgpu_page_alloc_init_slabs(a);
 		if (err) {
 			goto fail;