author	Srirangan <smadhavan@nvidia.com>	2018-08-27 01:59:01 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-29 11:59:35 -0400
commit	e3710e5431d8f14f1b8c2812f5c1aeeb7bdaac1c (patch)
tree	8a6498b12e10f6b391d3c5dd7c6ac7b340ca60b4	/drivers/gpu/nvgpu/gp106/acr_gp106.c
parent	2f97e683feed3c3ba3c8722c4f6ab7466bcef0c0 (diff)
gpu: nvgpu: gp106: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix violations caused by single-statement
if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I8493274995ed8de526902dd0ca0808b2972e28aa
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1796806
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
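As a minimal sketch of the pattern being fixed, taken from gp106_alloc_blob_space() in the diff below, MISRA Rule 15.6 turns a single-statement if body into a braced compound statement:

	/* Before: single-statement if body without braces (flagged by MISRA 15.6) */
	if (mem->size)
		return 0;

	/* After: the body is explicitly enclosed in braces */
	if (mem->size) {
		return 0;
	}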
Diffstat (limited to 'drivers/gpu/nvgpu/gp106/acr_gp106.c')
-rw-r--r--	drivers/gpu/nvgpu/gp106/acr_gp106.c	101
1 file changed, 63 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index 817a196e..795ae0d8 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -90,8 +90,9 @@ int gp106_alloc_blob_space(struct gk20a *g,
 	struct wpr_carveout_info wpr_inf;
 	int err;
 
-	if (mem->size)
+	if (mem->size) {
 		return 0;
+	}
 
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 
@@ -102,8 +103,9 @@ int gp106_alloc_blob_space(struct gk20a *g,
 	err = nvgpu_dma_alloc_vid_at(g,
 		wpr_inf.size,
 		&g->acr.wpr_dummy, wpr_inf.wpr_base);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	return nvgpu_dma_alloc_vid_at(g,
 		wpr_inf.size, mem,
@@ -291,8 +293,9 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 	struct nvgpu_firmware *gpccs_sig = NULL;
 	int err;
 
-	if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS))
+	if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
 		return -ENOENT;
+	}
 
 	switch (ver) {
 	case NVGPU_GPUID_GP104:
@@ -415,8 +418,9 @@ static u32 lsfm_discover_and_add_sub_wprs(struct gk20a *g,
 
 	if (size_4K) {
 		pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_sub_wpr));
-		if (pnode == NULL)
+		if (pnode == NULL) {
 			return -ENOMEM;
+		}
 
 		pnode->sub_wpr_header.use_case_id = sub_wpr_index;
 		pnode->sub_wpr_header.size_4K = size_4K;
@@ -460,23 +464,27 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
 	/* Discover all managed falcons*/
 	err = lsfm_discover_ucode_images(g, plsfm);
 	gp106_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
-	if (err)
+	if (err) {
 		goto exit_err;
+	}
 
-	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR))
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
 		lsfm_discover_and_add_sub_wprs(g, plsfm);
+	}
 
 	if (plsfm->managed_flcn_cnt && !g->acr.ucode_blob.cpu_va) {
 		/* Generate WPR requirements*/
 		err = lsf_gen_wpr_requirements(g, plsfm);
-		if (err)
+		if (err) {
 			goto exit_err;
+		}
 
 		/*Alloc memory to hold ucode blob contents*/
 		err = g->ops.pmu.alloc_blob_space(g, plsfm->wpr_size
 			,&g->acr.ucode_blob);
-		if (err)
+		if (err) {
 			goto exit_err;
+		}
 
 		gp106_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
@@ -512,13 +520,15 @@ int lsfm_discover_ucode_images(struct gk20a *g,
 	/* Obtain the PMU ucode image and add it to the list if required*/
 	memset(&ucode_img, 0, sizeof(ucode_img));
 	status = pmu_ucode_details(g, &ucode_img);
-	if (status)
+	if (status) {
 		return status;
+	}
 
 	if (ucode_img.lsf_desc != NULL) {
 		/* The falon_id is formed by grabbing the static base
 		 * falon_id from the image and adding the
-		 * engine-designated falcon instance.*/
+		 * engine-designated falcon instance.
+		 */
 		pmu->pmu_mode |= PMU_SECURE_MODE;
 		falcon_id = ucode_img.lsf_desc->falcon_id +
 			ucode_img.flcn_inst;
@@ -526,8 +536,9 @@ int lsfm_discover_ucode_images(struct gk20a *g,
 		if (!lsfm_falcon_disabled(g, plsfm, falcon_id)) {
 			pmu->falcon_id = falcon_id;
 			if (lsfm_add_ucode_img(g, plsfm, &ucode_img,
-				pmu->falcon_id) == 0)
+				pmu->falcon_id) == 0) {
 				pmu->pmu_mode |= PMU_LSFM_MANAGED;
+			}
 
 			plsfm->managed_flcn_cnt++;
 		} else {
@@ -566,8 +577,9 @@ int lsfm_discover_ucode_images(struct gk20a *g,
 			/* Do not manage non-FB ucode*/
 			if (lsfm_add_ucode_img(g,
 				plsfm, &ucode_img, falcon_id)
-				== 0)
+				== 0) {
 				plsfm->managed_flcn_cnt++;
+			}
 		} else {
 			gp106_dbg_pmu(g, "not managed %d\n",
 				ucode_img.lsf_desc->falcon_id);
@@ -599,18 +611,21 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
 	u64 addr_code, addr_data;
 	u32 addr_args;
 
-	if (p_img->desc == NULL) /*This means its a header based ucode,
-				and so we do not fill BL gen desc structure*/
+	if (p_img->desc == NULL) {
+		/* This means its a header based ucode,
+		 * and so we do not fill BL gen desc structure
+		 */
 		return -EINVAL;
+	}
 	desc = p_img->desc;
 	/*
-	Calculate physical and virtual addresses for various portions of
-	the PMU ucode image
-	Calculate the 32-bit addresses for the application code, application
-	data, and bootloader code. These values are all based on IM_BASE.
-	The 32-bit addresses will be the upper 32-bits of the virtual or
-	physical addresses of each respective segment.
+	 * Calculate physical and virtual addresses for various portions of
+	 * the PMU ucode image
+	 * Calculate the 32-bit addresses for the application code, application
+	 * data, and bootloader code. These values are all based on IM_BASE.
+	 * The 32-bit addresses will be the upper 32-bits of the virtual or
+	 * physical addresses of each respective segment.
 	 */
 	addr_base = p_lsfm->lsb_header.ucode_off;
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += (wpr_inf.wpr_base);
@@ -670,19 +685,22 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	struct pmu_ucode_desc_v1 *desc;
 	u64 addr_code, addr_data;
 
-	if (p_img->desc == NULL) /*This means its a header based ucode,
-				and so we do not fill BL gen desc structure*/
+	if (p_img->desc == NULL) {
+		/* This means its a header based ucode,
+		 * and so we do not fill BL gen desc structure
+		 */
 		return -EINVAL;
+	}
 	desc = p_img->desc;
 
 	/*
-	Calculate physical and virtual addresses for various portions of
-	the PMU ucode image
-	Calculate the 32-bit addresses for the application code, application
-	data, and bootloader code. These values are all based on IM_BASE.
-	The 32-bit addresses will be the upper 32-bits of the virtual or
-	physical addresses of each respective segment.
+	 * Calculate physical and virtual addresses for various portions of
+	 * the PMU ucode image
+	 * Calculate the 32-bit addresses for the application code, application
+	 * data, and bootloader code. These values are all based on IM_BASE.
+	 * The 32-bit addresses will be the upper 32-bits of the virtual or
+	 * physical addresses of each respective segment.
 	 */
 	addr_base = p_lsfm->lsb_header.ucode_off;
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
@@ -728,9 +746,10 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 
 	if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
 		gp106_dbg_pmu(g, "pmu write flcn bl gen desc\n");
-		if (pnode->wpr_header.falcon_id == pmu->falcon_id)
+		if (pnode->wpr_header.falcon_id == pmu->falcon_id) {
 			return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
 				&pnode->bl_gen_desc_size);
+		}
 	}
 
 	/* Failed to find the falcon requested. */
@@ -784,8 +803,9 @@ void lsfm_init_wpr_contents(struct gk20a *g,
 	memset(&last_wpr_hdr, 0, sizeof(struct lsf_wpr_header_v1));
 	i = 0;
 
-	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR))
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
 		lsfm_init_sub_wpr_contents(g, plsfm, ucode);
+	}
 
 	/*
 	 * Walk the managed falcons, flush WPR and LSB headers to FB.
@@ -918,9 +938,10 @@ void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 	u32 full_app_size = 0;
 	u32 data = 0;
 
-	if (pnode->ucode_img.lsf_desc)
+	if (pnode->ucode_img.lsf_desc) {
 		memcpy(&pnode->lsb_header.signature, pnode->ucode_img.lsf_desc,
 			sizeof(struct lsf_ucode_desc_v1));
+	}
 	pnode->lsb_header.ucode_size = pnode->ucode_img.data_size;
 
 	/* The remainder of the LSB depends on the loader usage */
@@ -974,9 +995,10 @@ void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 		pnode->lsb_header.flags = data;
 	}
 
-	if(g->ops.pmu.is_priv_load(falcon_id))
+	if (g->ops.pmu.is_priv_load(falcon_id)) {
 		pnode->lsb_header.flags |=
 			NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE;
+	}
 	}
 }
 
@@ -987,8 +1009,9 @@ int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm,
 	struct lsfm_managed_ucode_img_v2 *pnode;
 
 	pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_managed_ucode_img_v2));
-	if (pnode == NULL)
+	if (pnode == NULL) {
 		return -ENOMEM;
+	}
 
 	/* Keep a copy of the ucode image info locally */
 	memcpy(&pnode->ucode_img, ucode_image, sizeof(struct flcn_ucode_img_v1));
@@ -1043,11 +1066,12 @@ void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm)
 	while (cnt) {
 		mg_ucode_img = plsfm->ucode_img_list;
 		if (mg_ucode_img->ucode_img.lsf_desc->falcon_id ==
-			LSF_FALCON_ID_PMU)
+			LSF_FALCON_ID_PMU) {
 			lsfm_free_ucode_img_res(g, &mg_ucode_img->ucode_img);
-		else
+		} else {
 			lsfm_free_nonpmu_ucode_img_res(g,
 				&mg_ucode_img->ucode_img);
+		}
 		plsfm->ucode_img_list = mg_ucode_img->next;
 		nvgpu_kfree(g, mg_ucode_img);
 		cnt--;
@@ -1279,8 +1303,9 @@ int gp106_bootstrap_hs_flcn(struct gk20a *g)
 			acr->acr_ucode.gpu_va +
 			(acr_ucode_header_t210_load[2]));
 		bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
-	} else
+	} else {
 		acr->acr_dmem_desc->nonwpr_ucode_blob_size = 0;
+	}
 
 	status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
 	if (status != 0) {