path: root/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
author     Amulya <Amurthyreddy@nvidia.com>                     2018-08-28 03:04:55 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-09-19 06:24:12 -0400
commit     941ac9a9d07bedb4062fd0c4d32eb2ef80a42359 (patch)
tree       c53622d96a4c2e7c18693ecf4059d7e403cd7808 /drivers/gpu/nvgpu/common/mm/buddy_allocator.c
parent     2805f03aa0496502b64ff760f667bfe9d8a27928 (diff)
nvgpu: common: MISRA 10.1 boolean fixes
Fix violations where a variable of type non-boolean is used as a
boolean in gpu/nvgpu/common.

JIRA NVGPU-646

Change-Id: I9773d863b715f83ae1772b75d5373f77244bc8ca
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807132
GVS: Gerrit_Virtual_Submit
Tested-by: Amulya Murthyreddy <amurthyreddy@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
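The pattern behind every hunk below is MISRA C:2012 Rule 10.1, which (among other things) requires the controlling expression of if/while and the operands of !, && and || to be of essentially boolean type. The sketch that follows is a minimal, self-contained illustration of the before/after idiom in plain C, not nvgpu code; struct example, example_create() and EXAMPLE_FLAG_A are hypothetical names invented for this example.

/*
 * Minimal sketch of the MISRA C:2012 Rule 10.1 cleanup pattern.
 * Plain C, not nvgpu code: struct example, example_create() and
 * EXAMPLE_FLAG_A are hypothetical names used only for illustration.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define EXAMPLE_FLAG_A (1ULL << 0)	/* hypothetical flag bit */

struct example {
	uint64_t flags;
	bool initialized;	/* previously an int holding 0 or 1 */
};

static struct example *example_create(uint64_t flags)
{
	struct example *e = malloc(sizeof(*e));

	/* Before: if (!e)  --  after: explicit comparison against NULL. */
	if (e == NULL) {
		return NULL;
	}

	e->flags = flags;

	/* Before: if (e->flags & EXAMPLE_FLAG_A)  --  after: compare to 0. */
	if ((e->flags & EXAMPLE_FLAG_A) != 0ULL) {
		/* flag-specific setup would go here */
	}

	/* Before: e->initialized = 1;  --  after: a genuine bool. */
	e->initialized = true;

	return e;
}

int main(void)
{
	struct example *e = example_create(EXAMPLE_FLAG_A);

	return (e != NULL && e->initialized) ? 0 : 1;
}

Applied to the allocator, this is why if (!bud) becomes if (bud == NULL), flag tests against GPU_ALLOC_GVA_SPACE gain an explicit != 0ULL, and fields such as alloc_made and initialized change from 0/1 integers to bool.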
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/buddy_allocator.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/buddy_allocator.c  89
1 file changed, 47 insertions, 42 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index 516e5035..c0d1335e 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -129,7 +129,7 @@ static struct nvgpu_buddy *balloc_new_buddy(struct nvgpu_buddy_allocator *a,
 	struct nvgpu_buddy *new_buddy;
 
 	new_buddy = nvgpu_kmem_cache_alloc(a->buddy_cache);
-	if (!new_buddy) {
+	if (new_buddy == NULL) {
 		return NULL;
 	}
 
@@ -160,7 +160,7 @@ static void balloc_buddy_list_do_add(struct nvgpu_buddy_allocator *a,
 	 * This lets the code that checks if there are available blocks check
 	 * without cycling through the entire list.
 	 */
-	if (a->flags & GPU_ALLOC_GVA_SPACE &&
+	if ((a->flags & GPU_ALLOC_GVA_SPACE) != 0ULL &&
 	    b->pte_size == BALLOC_PTE_SIZE_BIG) {
 		nvgpu_list_add_tail(&b->buddy_entry, list);
 	} else {
@@ -247,7 +247,7 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
 		order = balloc_max_order_in(a, bstart, bend);
 
 		buddy = balloc_new_buddy(a, NULL, bstart, order);
-		if (!buddy) {
+		if (buddy == NULL) {
 			goto cleanup;
 		}
 
@@ -374,7 +374,7 @@ static void balloc_coalesce(struct nvgpu_buddy_allocator *a,
 	 * If both our buddy and I are both not allocated and not split then
 	 * we can coalesce ourselves.
 	 */
-	if (!b->buddy) {
+	if (b->buddy == NULL) {
 		return;
 	}
 	if (buddy_is_alloced(b->buddy) || buddy_is_split(b->buddy)) {
@@ -412,14 +412,14 @@ static int balloc_split_buddy(struct nvgpu_buddy_allocator *a,
 	u64 half;
 
 	left = balloc_new_buddy(a, b, b->start, b->order - 1U);
-	if (!left) {
+	if (left == NULL) {
 		return -ENOMEM;
 	}
 
 	half = (b->end - b->start) / 2U;
 
 	right = balloc_new_buddy(a, b, b->start + half, b->order - 1U);
-	if (!right) {
+	if (right == NULL) {
 		nvgpu_kmem_cache_free(a->buddy_cache, left);
 		return -ENOMEM;
 	}
@@ -448,7 +448,7 @@ static int balloc_split_buddy(struct nvgpu_buddy_allocator *a,
 	 * we can leave the buddies PTE field alone since the PDE block has yet
 	 * to be assigned a PTE size.
 	 */
-	if (a->flags & GPU_ALLOC_GVA_SPACE &&
+	if ((a->flags & GPU_ALLOC_GVA_SPACE) != 0ULL &&
 	    left->order < a->pte_blk_order) {
 		left->pte_size = pte_size;
 		right->pte_size = pte_size;
@@ -492,7 +492,7 @@ static struct nvgpu_buddy *balloc_free_buddy(struct nvgpu_buddy_allocator *a,
 	struct nvgpu_buddy *bud;
 
 	nvgpu_rbtree_search(addr, &node, a->alloced_buddies);
-	if (!node) {
+	if (node == NULL) {
 		return NULL;
 	}
 
@@ -518,7 +518,7 @@ static struct nvgpu_buddy *balloc_find_buddy(struct nvgpu_buddy_allocator *a,
 		return NULL;
 	}
 
-	if (a->flags & GPU_ALLOC_GVA_SPACE &&
+	if ((a->flags & GPU_ALLOC_GVA_SPACE) != 0ULL &&
 	    pte_size == BALLOC_PTE_SIZE_BIG) {
 		bud = nvgpu_list_last_entry(balloc_get_order_list(a, order),
 					    nvgpu_buddy, buddy_entry);
@@ -551,14 +551,15 @@ static u64 balloc_do_alloc(struct nvgpu_buddy_allocator *a,
 	u64 split_order;
 	struct nvgpu_buddy *bud = NULL;
 
-	split_order = order;
-	while (split_order <= a->max_order &&
-	       !(bud = balloc_find_buddy(a, split_order, pte_size))) {
-		split_order++;
+	for (split_order = order; split_order <= a->max_order; split_order++) {
+		bud = balloc_find_buddy(a, split_order, pte_size);
+		if (bud != NULL) {
+			break;
+		}
 	}
 
 	/* Out of memory! */
-	if (!bud) {
+	if (bud == NULL) {
 		return 0;
 	}
 
@@ -582,15 +583,15 @@ static u64 balloc_do_alloc(struct nvgpu_buddy_allocator *a,
  * TODO: Right now this uses the unoptimal approach of going through all
  * outstanding allocations and checking their base/ends. This could be better.
  */
-static int balloc_is_range_free(struct nvgpu_buddy_allocator *a,
+static bool balloc_is_range_free(struct nvgpu_buddy_allocator *a,
 				u64 base, u64 end)
 {
 	struct nvgpu_rbtree_node *node = NULL;
 	struct nvgpu_buddy *bud;
 
 	nvgpu_rbtree_enum_start(0, &node, a->alloced_buddies);
-	if (!node) {
-		return 1; /* No allocs yet. */
+	if (node == NULL) {
+		return true; /* No allocs yet. */
 	}
 
 	bud = nvgpu_buddy_from_rbtree_node(node);
@@ -598,17 +599,17 @@ static int balloc_is_range_free(struct nvgpu_buddy_allocator *a,
 	while (bud->start < end) {
 		if ((bud->start > base && bud->start < end) ||
 		    (bud->end > base && bud->end < end)) {
-			return 0;
+			return false;
 		}
 
 		nvgpu_rbtree_enum_next(&node, node);
-		if (!node) {
+		if (node == NULL) {
 			break;
 		}
 		bud = nvgpu_buddy_from_rbtree_node(node);
 	}
 
-	return 1;
+	return true;
 }
 
 static void balloc_alloc_fixed(struct nvgpu_buddy_allocator *a,
@@ -633,7 +634,7 @@ static struct nvgpu_fixed_alloc *balloc_free_fixed(
 	struct nvgpu_rbtree_node *node = NULL;
 
 	nvgpu_rbtree_search(addr, &node, a->fixed_allocs);
-	if (!node) {
+	if (node == NULL) {
 		return NULL;
 	}
 
@@ -787,7 +788,7 @@ static u64 balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a,
 		bud = balloc_make_fixed_buddy(a,
 			balloc_base_unshift(a, inc_base),
 			align_order, pte_size);
-		if (!bud) {
+		if (bud == NULL) {
 			alloc_dbg(balloc_owner(a),
 				  "Fixed buddy failed: {0x%llx, %llu}!",
 				  balloc_base_unshift(a, inc_base),
@@ -891,7 +892,7 @@ static u64 nvgpu_buddy_balloc_pte(struct nvgpu_allocator *na, u64 len,
 		alloc_dbg(balloc_owner(a), "Alloc failed: no mem!");
 	}
 
-	a->alloc_made = 1;
+	a->alloc_made = true;
 
 	alloc_unlock(na);
 
@@ -930,7 +931,7 @@ static u64 nvgpu_balloc_fixed_buddy_locked(struct nvgpu_allocator *na,
 	}
 
 	falloc = nvgpu_kmalloc(nvgpu_alloc_to_gpu(na), sizeof(*falloc));
-	if (!falloc) {
+	if (falloc == NULL) {
 		goto fail;
 	}
 
@@ -946,7 +947,7 @@
 	}
 
 	ret = balloc_do_alloc_fixed(a, falloc, base, len, pte_size);
-	if (!ret) {
+	if (ret == 0ULL) {
 		alloc_dbg(balloc_owner(a),
 			  "Alloc-fixed failed ?? 0x%llx -> 0x%llx",
 			  base, base + len);
@@ -988,7 +989,7 @@ static u64 nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
 
 	alloc_lock(na);
 	alloc = nvgpu_balloc_fixed_buddy_locked(na, base, len, page_size);
-	a->alloc_made = 1;
+	a->alloc_made = true;
 	alloc_unlock(na);
 
 	return alloc;
@@ -1003,7 +1004,7 @@ static void nvgpu_buddy_bfree(struct nvgpu_allocator *na, u64 addr)
 	struct nvgpu_fixed_alloc *falloc;
 	struct nvgpu_buddy_allocator *a = na->priv;
 
-	if (!addr) {
+	if (addr == 0ULL) {
 		return;
 	}
 
@@ -1020,7 +1021,7 @@ static void nvgpu_buddy_bfree(struct nvgpu_allocator *na, u64 addr)
 	}
 
 	bud = balloc_free_buddy(a, addr);
-	if (!bud) {
+	if (bud == NULL) {
 		goto done;
 	}
 
@@ -1090,7 +1091,7 @@ static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *na,
 	/* Should not be possible to fail... */
 	addr = nvgpu_balloc_fixed_buddy_locked(na, co->base, co->length,
 					       BALLOC_PTE_SIZE_ANY);
-	if (!addr) {
+	if (addr == 0ULL) {
 		err = -ENOMEM;
 		nvgpu_warn(na->g,
 			   "%s: Failed to reserve a valid carveout!",
@@ -1133,10 +1134,10 @@ static u64 nvgpu_buddy_alloc_base(struct nvgpu_allocator *a)
 	return ba->start;
 }
 
-static int nvgpu_buddy_alloc_inited(struct nvgpu_allocator *a)
+static bool nvgpu_buddy_alloc_inited(struct nvgpu_allocator *a)
 {
 	struct nvgpu_buddy_allocator *ba = a->priv;
-	int inited = ba->initialized;
+	bool inited = ba->initialized;
 
 	nvgpu_smp_rmb();
 	return inited;
@@ -1292,12 +1293,15 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	int err;
 	u64 pde_size;
 	struct nvgpu_buddy_allocator *a;
+	bool is_gva_space = (flags & GPU_ALLOC_GVA_SPACE) != 0ULL;
+	bool is_blk_size_pwr_2 = (blk_size & (blk_size - 1ULL)) == 0ULL;
+	u64 base_big_page, size_big_page;
 
 	/* blk_size must be greater than 0 and a power of 2. */
 	if (blk_size == 0U) {
 		return -EINVAL;
 	}
-	if (blk_size & (blk_size - 1U)) {
+	if (!is_blk_size_pwr_2) {
 		return -EINVAL;
 	}
 
@@ -1306,12 +1310,12 @@
 	}
 
 	/* If this is to manage a GVA space we need a VM. */
-	if (flags & GPU_ALLOC_GVA_SPACE && !vm) {
+	if (is_gva_space && vm == NULL) {
 		return -EINVAL;
 	}
 
 	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_buddy_allocator));
-	if (!a) {
+	if (a == NULL) {
 		return -ENOMEM;
 	}
 
@@ -1336,8 +1340,8 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	}
 
 	a->vm = vm;
-	if (flags & GPU_ALLOC_GVA_SPACE) {
-		pde_size = 1ULL << nvgpu_vm_pde_coverage_bit_count(vm);
+	if (is_gva_space) {
+		pde_size = BIT64(nvgpu_vm_pde_coverage_bit_count(vm));
 		a->pte_blk_order = balloc_get_order(a, pde_size);
 	}
 
@@ -1346,9 +1350,10 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	 * must be PDE aligned. If big_pages are not enabled then this
 	 * requirement is not necessary.
 	 */
-	if (flags & GPU_ALLOC_GVA_SPACE && vm->big_pages &&
-	    (base & ((vm->big_page_size << 10) - 1U) ||
-	     size & ((vm->big_page_size << 10) - 1U))) {
+	base_big_page = base & ((vm->big_page_size << 10U) - 1U);
+	size_big_page = size & ((vm->big_page_size << 10U) - 1U);
+	if (is_gva_space && vm->big_pages &&
+	    (base_big_page != 0ULL || size_big_page != 0ULL)) {
 		return -EINVAL;
 	}
 
@@ -1359,7 +1364,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	balloc_compute_max_order(a);
 
 	a->buddy_cache = nvgpu_kmem_cache_create(g, sizeof(struct nvgpu_buddy));
-	if (!a->buddy_cache) {
+	if (a->buddy_cache == NULL) {
 		err = -ENOMEM;
 		goto fail;
 	}
@@ -1373,7 +1378,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	}
 
 	nvgpu_smp_wmb();
-	a->initialized = 1;
+	a->initialized = true;
 
 #ifdef CONFIG_DEBUG_FS
 	nvgpu_init_alloc_debug(g, na);
@@ -1382,7 +1387,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	alloc_dbg(na, " base 0x%llx", a->base);
 	alloc_dbg(na, " size 0x%llx", a->length);
 	alloc_dbg(na, " blk_size 0x%llx", a->blk_size);
-	if (flags & GPU_ALLOC_GVA_SPACE) {
+	if (is_gva_space) {
 		alloc_dbg(balloc_owner(a),
 			  " pde_size 0x%llx",
 			  balloc_order_to_len(a, a->pte_blk_order));