path: root/drivers/gpu/nvgpu/gp106
author     Srirangan <smadhavan@nvidia.com>                      2018-08-27 01:59:01 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2018-08-29 11:59:35 -0400
commit     e3710e5431d8f14f1b8c2812f5c1aeeb7bdaac1c (patch)
tree       8a6498b12e10f6b391d3c5dd7c6ac7b340ca60b4 /drivers/gpu/nvgpu/gp106
parent     2f97e683feed3c3ba3c8722c4f6ab7466bcef0c0 (diff)
gpu: nvgpu: gp106: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single statement blocks. Fix errors due to single statement
if blocks without braces, introducing the braces.

JIRA NVGPU-671

Change-Id: I8493274995ed8de526902dd0ca0808b2972e28aa
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1796806
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
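The pattern applied throughout the files below is the same brace fix. A minimal, hypothetical C illustration of the before/after shape (the function and values are invented for this example and do not come from the driver):

/*
 * MISRA Rule-15.6 fix pattern: every single-statement if/else body
 * gains enclosing braces. check_size() is illustrative only.
 */
#include <stdio.h>

static int check_size(unsigned int size)
{
	/* Before the fix this read: "if (size == 0U) return -1;" */
	if (size == 0U) {
		return -1;
	}

	return 0;
}

int main(void)
{
	printf("check_size(0) = %d\n", check_size(0U));
	printf("check_size(4) = %d\n", check_size(4U));
	return 0;
}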
Diffstat (limited to 'drivers/gpu/nvgpu/gp106')
-rw-r--r--  drivers/gpu/nvgpu/gp106/acr_gp106.c      | 101
-rw-r--r--  drivers/gpu/nvgpu/gp106/bios_gp106.c     |  27
-rw-r--r--  drivers/gpu/nvgpu/gp106/clk_arb_gp106.c  |  15
-rw-r--r--  drivers/gpu/nvgpu/gp106/clk_gp106.c      |   9
-rw-r--r--  drivers/gpu/nvgpu/gp106/fifo_gp106.c     |  10
-rw-r--r--  drivers/gpu/nvgpu/gp106/flcn_gp106.c     |   3
-rw-r--r--  drivers/gpu/nvgpu/gp106/gr_gp106.c       |  15
-rw-r--r--  drivers/gpu/nvgpu/gp106/hal_gp106.c      |   9
-rw-r--r--  drivers/gpu/nvgpu/gp106/mclk_gp106.c     |  30
-rw-r--r--  drivers/gpu/nvgpu/gp106/mm_gp106.c       |   3
-rw-r--r--  drivers/gpu/nvgpu/gp106/pmu_gp106.c      |  18
-rw-r--r--  drivers/gpu/nvgpu/gp106/sec2_gp106.c     |   9
12 files changed, 161 insertions, 88 deletions
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index 817a196e..795ae0d8 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -90,8 +90,9 @@ int gp106_alloc_blob_space(struct gk20a *g,
 	struct wpr_carveout_info wpr_inf;
 	int err;
 
-	if (mem->size)
+	if (mem->size) {
 		return 0;
+	}
 
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 
@@ -102,8 +103,9 @@ int gp106_alloc_blob_space(struct gk20a *g,
 		err = nvgpu_dma_alloc_vid_at(g,
 				wpr_inf.size,
 				&g->acr.wpr_dummy, wpr_inf.wpr_base);
-		if (err)
+		if (err) {
 			return err;
+		}
 
 	return nvgpu_dma_alloc_vid_at(g,
 			wpr_inf.size, mem,
@@ -291,8 +293,9 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 	struct nvgpu_firmware *gpccs_sig = NULL;
 	int err;
 
-	if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS))
+	if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
 		return -ENOENT;
+	}
 
 	switch (ver) {
 	case NVGPU_GPUID_GP104:
@@ -415,8 +418,9 @@ static u32 lsfm_discover_and_add_sub_wprs(struct gk20a *g,
 
 	if (size_4K) {
 		pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_sub_wpr));
-		if (pnode == NULL)
+		if (pnode == NULL) {
 			return -ENOMEM;
+		}
 
 		pnode->sub_wpr_header.use_case_id = sub_wpr_index;
 		pnode->sub_wpr_header.size_4K = size_4K;
@@ -460,23 +464,27 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
 	/* Discover all managed falcons*/
 	err = lsfm_discover_ucode_images(g, plsfm);
 	gp106_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
-	if (err)
+	if (err) {
 		goto exit_err;
+	}
 
-	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR))
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
 		lsfm_discover_and_add_sub_wprs(g, plsfm);
+	}
 
 	if (plsfm->managed_flcn_cnt && !g->acr.ucode_blob.cpu_va) {
 		/* Generate WPR requirements*/
 		err = lsf_gen_wpr_requirements(g, plsfm);
-		if (err)
+		if (err) {
 			goto exit_err;
+		}
 
 		/*Alloc memory to hold ucode blob contents*/
 		err = g->ops.pmu.alloc_blob_space(g, plsfm->wpr_size
 			,&g->acr.ucode_blob);
-		if (err)
+		if (err) {
 			goto exit_err;
+		}
 
 		gp106_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
@@ -512,13 +520,15 @@ int lsfm_discover_ucode_images(struct gk20a *g,
 	/* Obtain the PMU ucode image and add it to the list if required*/
 	memset(&ucode_img, 0, sizeof(ucode_img));
 	status = pmu_ucode_details(g, &ucode_img);
-	if (status)
+	if (status) {
 		return status;
+	}
 
 	if (ucode_img.lsf_desc != NULL) {
 		/* The falon_id is formed by grabbing the static base
 		 * falon_id from the image and adding the
-		 * engine-designated falcon instance.*/
+		 * engine-designated falcon instance.
+		 */
 		pmu->pmu_mode |= PMU_SECURE_MODE;
 		falcon_id = ucode_img.lsf_desc->falcon_id +
 			ucode_img.flcn_inst;
@@ -526,8 +536,9 @@ int lsfm_discover_ucode_images(struct gk20a *g,
 		if (!lsfm_falcon_disabled(g, plsfm, falcon_id)) {
 			pmu->falcon_id = falcon_id;
 			if (lsfm_add_ucode_img(g, plsfm, &ucode_img,
-				pmu->falcon_id) == 0)
+				pmu->falcon_id) == 0) {
 				pmu->pmu_mode |= PMU_LSFM_MANAGED;
+			}
 
 			plsfm->managed_flcn_cnt++;
 		} else {
@@ -566,8 +577,9 @@ int lsfm_discover_ucode_images(struct gk20a *g,
 				/* Do not manage non-FB ucode*/
 				if (lsfm_add_ucode_img(g,
 					plsfm, &ucode_img, falcon_id)
-					== 0)
+					== 0) {
 					plsfm->managed_flcn_cnt++;
+				}
 			} else {
 				gp106_dbg_pmu(g, "not managed %d\n",
 					ucode_img.lsf_desc->falcon_id);
@@ -599,18 +611,21 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
 	u64 addr_code, addr_data;
 	u32 addr_args;
 
-	if (p_img->desc == NULL) /*This means its a header based ucode,
-				   and so we do not fill BL gen desc structure*/
+	if (p_img->desc == NULL) {
+		/* This means its a header based ucode,
+		 * and so we do not fill BL gen desc structure
+		 */
 		return -EINVAL;
+	}
 	desc = p_img->desc;
 	/*
-	 Calculate physical and virtual addresses for various portions of
-	 the PMU ucode image
-	 Calculate the 32-bit addresses for the application code, application
-	 data, and bootloader code. These values are all based on IM_BASE.
-	 The 32-bit addresses will be the upper 32-bits of the virtual or
-	 physical addresses of each respective segment.
+	 * Calculate physical and virtual addresses for various portions of
+	 * the PMU ucode image
+	 * Calculate the 32-bit addresses for the application code, application
+	 * data, and bootloader code. These values are all based on IM_BASE.
+	 * The 32-bit addresses will be the upper 32-bits of the virtual or
+	 * physical addresses of each respective segment.
 	 */
 	addr_base = p_lsfm->lsb_header.ucode_off;
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += (wpr_inf.wpr_base);
@@ -670,19 +685,22 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	struct pmu_ucode_desc_v1 *desc;
 	u64 addr_code, addr_data;
 
-	if (p_img->desc == NULL) /*This means its a header based ucode,
-				   and so we do not fill BL gen desc structure*/
+	if (p_img->desc == NULL) {
+		/* This means its a header based ucode,
+		 * and so we do not fill BL gen desc structure
+		 */
 		return -EINVAL;
+	}
 	desc = p_img->desc;
 
 	/*
-	 Calculate physical and virtual addresses for various portions of
-	 the PMU ucode image
-	 Calculate the 32-bit addresses for the application code, application
-	 data, and bootloader code. These values are all based on IM_BASE.
-	 The 32-bit addresses will be the upper 32-bits of the virtual or
-	 physical addresses of each respective segment.
+	 * Calculate physical and virtual addresses for various portions of
+	 * the PMU ucode image
+	 * Calculate the 32-bit addresses for the application code, application
+	 * data, and bootloader code. These values are all based on IM_BASE.
+	 * The 32-bit addresses will be the upper 32-bits of the virtual or
+	 * physical addresses of each respective segment.
 	 */
 	addr_base = p_lsfm->lsb_header.ucode_off;
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
@@ -728,9 +746,10 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 
 	if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
 		gp106_dbg_pmu(g, "pmu write flcn bl gen desc\n");
-		if (pnode->wpr_header.falcon_id == pmu->falcon_id)
+		if (pnode->wpr_header.falcon_id == pmu->falcon_id) {
 			return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
 				&pnode->bl_gen_desc_size);
+		}
 	}
 
 	/* Failed to find the falcon requested. */
@@ -784,8 +803,9 @@ void lsfm_init_wpr_contents(struct gk20a *g,
 	memset(&last_wpr_hdr, 0, sizeof(struct lsf_wpr_header_v1));
 	i = 0;
 
-	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR))
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
 		lsfm_init_sub_wpr_contents(g, plsfm, ucode);
+	}
 
 	/*
 	 * Walk the managed falcons, flush WPR and LSB headers to FB.
@@ -918,9 +938,10 @@ void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 	u32 full_app_size = 0;
 	u32 data = 0;
 
-	if (pnode->ucode_img.lsf_desc)
+	if (pnode->ucode_img.lsf_desc) {
 		memcpy(&pnode->lsb_header.signature, pnode->ucode_img.lsf_desc,
 			sizeof(struct lsf_ucode_desc_v1));
+	}
 	pnode->lsb_header.ucode_size = pnode->ucode_img.data_size;
 
 	/* The remainder of the LSB depends on the loader usage */
@@ -974,9 +995,10 @@ void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 			pnode->lsb_header.flags = data;
 		}
 
-		if(g->ops.pmu.is_priv_load(falcon_id))
+		if (g->ops.pmu.is_priv_load(falcon_id)) {
 			pnode->lsb_header.flags |=
 				NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE;
+		}
 	}
 }
 
@@ -987,8 +1009,9 @@ int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm,
 	struct lsfm_managed_ucode_img_v2 *pnode;
 
 	pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_managed_ucode_img_v2));
-	if (pnode == NULL)
+	if (pnode == NULL) {
 		return -ENOMEM;
+	}
 
 	/* Keep a copy of the ucode image info locally */
 	memcpy(&pnode->ucode_img, ucode_image, sizeof(struct flcn_ucode_img_v1));
@@ -1043,11 +1066,12 @@ void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm)
 	while (cnt) {
 		mg_ucode_img = plsfm->ucode_img_list;
 		if (mg_ucode_img->ucode_img.lsf_desc->falcon_id ==
-				LSF_FALCON_ID_PMU)
+				LSF_FALCON_ID_PMU) {
 			lsfm_free_ucode_img_res(g, &mg_ucode_img->ucode_img);
-		else
+		} else {
 			lsfm_free_nonpmu_ucode_img_res(g,
 				&mg_ucode_img->ucode_img);
+		}
 		plsfm->ucode_img_list = mg_ucode_img->next;
 		nvgpu_kfree(g, mg_ucode_img);
 		cnt--;
@@ -1279,8 +1303,9 @@ int gp106_bootstrap_hs_flcn(struct gk20a *g)
 			acr->acr_ucode.gpu_va +
 			(acr_ucode_header_t210_load[2]));
 		bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
-	} else
+	} else {
 		acr->acr_dmem_desc->nonwpr_ucode_blob_size = 0;
+	}
 
 	status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
 	if (status != 0) {
diff --git a/drivers/gpu/nvgpu/gp106/bios_gp106.c b/drivers/gpu/nvgpu/gp106/bios_gp106.c
index e470fffc..eedb304e 100644
--- a/drivers/gpu/nvgpu/gp106/bios_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/bios_gp106.c
@@ -123,8 +123,9 @@ int gp106_bios_devinit(struct gk20a *g)
 		nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT);
 	} while (!devinit_completed && !nvgpu_timeout_expired(&timeout));
 
-	if (nvgpu_timeout_peek_expired(&timeout))
+	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err = -ETIMEDOUT;
+	}
 
 	nvgpu_flcn_clear_halt_intr_status(g->pmu.flcn,
 		gk20a_get_gr_idle_timeout(g));
@@ -138,8 +139,9 @@ int gp106_bios_preos_wait_for_halt(struct gk20a *g)
 {
 	int err = 0;
 
-	if (nvgpu_flcn_wait_for_halt(g->pmu.flcn, PMU_BOOT_TIMEOUT_MAX / 1000))
+	if (nvgpu_flcn_wait_for_halt(g->pmu.flcn, PMU_BOOT_TIMEOUT_MAX / 1000)) {
 		err = -ETIMEDOUT;
+	}
 
 	return err;
 }
@@ -155,8 +157,9 @@ int gp106_bios_preos(struct gk20a *g)
 		goto out;
 	}
 
-	if (g->ops.bios.preos_reload_check)
+	if (g->ops.bios.preos_reload_check) {
 		g->ops.bios.preos_reload_check(g);
+	}
 
 	upload_code(g, g->bios.preos.bootloader_phys_base,
 		g->bios.preos.bootloader,
@@ -190,17 +193,20 @@ int gp106_bios_init(struct gk20a *g)
 
 	nvgpu_log_fn(g, " ");
 
-	if (g->bios_is_init)
+	if (g->bios_is_init) {
 		return 0;
+	}
 
 	nvgpu_log_info(g, "reading bios from EEPROM");
 	g->bios.size = BIOS_SIZE;
 	g->bios.data = nvgpu_vmalloc(g, BIOS_SIZE);
-	if (!g->bios.data)
+	if (!g->bios.data) {
 		return -ENOMEM;
+	}
 
-	if (g->ops.xve.disable_shadow_rom)
+	if (g->ops.xve.disable_shadow_rom) {
 		g->ops.xve.disable_shadow_rom(g);
+	}
 	for (i = 0; i < g->bios.size/4; i++) {
 		u32 val = be32_to_cpu(gk20a_readl(g, 0x300000 + i*4));
 
@@ -209,12 +215,14 @@ int gp106_bios_init(struct gk20a *g)
 		g->bios.data[(i*4)+2] = (val >> 8) & 0xff;
 		g->bios.data[(i*4)+3] = val & 0xff;
 	}
-	if (g->ops.xve.enable_shadow_rom)
+	if (g->ops.xve.enable_shadow_rom) {
 		g->ops.xve.enable_shadow_rom(g);
+	}
 
 	err = nvgpu_bios_parse_rom(g);
-	if (err)
+	if (err) {
 		goto free_firmware;
+	}
 
 	if (g->bios.vbios_version < g->vbios_min_version) {
 		nvgpu_err(g, "unsupported VBIOS version %08x",
@@ -254,7 +262,8 @@ int gp106_bios_init(struct gk20a *g)
 
 	return 0;
 free_firmware:
-	if (g->bios.data)
+	if (g->bios.data) {
 		nvgpu_vfree(g, g->bios.data);
+	}
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c b/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c
index 5f969e79..ca8015d6 100644
--- a/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c
@@ -55,20 +55,24 @@ int gp106_get_arbiter_clk_range(struct gk20a *g, u32 api_domain,
 
 	p5_info = pstate_get_clk_set_info(g,
 			CTRL_PERF_PSTATE_P5, clkwhich);
-	if (!p5_info)
+	if (!p5_info) {
 		return -EINVAL;
+	}
 
 	p0_info = pstate_get_clk_set_info(g,
 			CTRL_PERF_PSTATE_P0, clkwhich);
-	if (!p0_info)
+	if (!p0_info) {
 		return -EINVAL;
+	}
 
 	limit_min_mhz = p5_info->min_mhz;
 	/* WAR for DVCO min */
-	if (api_domain == CTRL_CLK_DOMAIN_GPC2CLK)
+	if (api_domain == CTRL_CLK_DOMAIN_GPC2CLK) {
 		if ((pfllobjs->max_min_freq_mhz) &&
-			(pfllobjs->max_min_freq_mhz >= limit_min_mhz))
+			(pfllobjs->max_min_freq_mhz >= limit_min_mhz)) {
 			limit_min_mhz = pfllobjs->max_min_freq_mhz + 1;
+		}
+	}
 
 	*min_mhz = limit_min_mhz;
 	*max_mhz = p0_info->max_mhz;
@@ -97,8 +101,9 @@ int gp106_get_arbiter_clk_default(struct gk20a *g, u32 api_domain,
 
 	p0_info = pstate_get_clk_set_info(g,
 			CTRL_PERF_PSTATE_P0, clkwhich);
-	if (!p0_info)
+	if (!p0_info) {
 		return -EINVAL;
+	}
 
 	*default_mhz = p0_info->max_mhz;
 
diff --git a/drivers/gpu/nvgpu/gp106/clk_gp106.c b/drivers/gpu/nvgpu/gp106/clk_gp106.c
index e3633b58..24b07112 100644
--- a/drivers/gpu/nvgpu/gp106/clk_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/clk_gp106.c
@@ -68,8 +68,9 @@ unsigned long gp106_clk_measure_freq(struct gk20a *g, u32 api_domain)
 		}
 	}
 
-	if (!c)
+	if (!c) {
 		return 0;
+	}
 
 	freq_khz = c->is_counter ? c->scale * gp106_get_rate_cntr(g, c) :
 		0; /* TODO: PLL read */
@@ -86,8 +87,9 @@ int gp106_init_clk_support(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_mutex_init(&clk->clk_mutex);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	clk->clk_namemap = (struct namemap_cfg *)
 		nvgpu_kzalloc(g, sizeof(struct namemap_cfg) * NUM_NAMEMAPS);
@@ -185,8 +187,9 @@ static u32 gp106_get_rate_cntr(struct gk20a *g, struct namemap_cfg *c) {
 
 	struct clk_gk20a *clk = &g->clk;
 
-	if (!c || !c->cntr.reg_ctrl_addr || !c->cntr.reg_cntr_addr)
+	if (!c || !c->cntr.reg_ctrl_addr || !c->cntr.reg_cntr_addr) {
 		return 0;
+	}
 
 	nvgpu_mutex_acquire(&clk->clk_mutex);
 
diff --git a/drivers/gpu/nvgpu/gp106/fifo_gp106.c b/drivers/gpu/nvgpu/gp106/fifo_gp106.c
index c1428306..8202426e 100644
--- a/drivers/gpu/nvgpu/gp106/fifo_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/fifo_gp106.c
@@ -85,21 +85,23 @@ static const char * const gp106_gpc_client_descs[] = {
 
 void gp106_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault)
 {
-	if (mmfault->client_id >= ARRAY_SIZE(gp106_gpc_client_descs))
+	if (mmfault->client_id >= ARRAY_SIZE(gp106_gpc_client_descs)) {
 		WARN_ON(mmfault->client_id >=
 				ARRAY_SIZE(gp106_gpc_client_descs));
-	else
+	} else {
 		mmfault->client_id_desc =
 			gp106_gpc_client_descs[mmfault->client_id];
+	}
 }
 
 /* fill in mmu fault client description */
 void gp106_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault)
 {
-	if (mmfault->client_id >= ARRAY_SIZE(gp106_hub_client_descs))
+	if (mmfault->client_id >= ARRAY_SIZE(gp106_hub_client_descs)) {
 		WARN_ON(mmfault->client_id >=
 				ARRAY_SIZE(gp106_hub_client_descs));
-	else
+	} else {
 		mmfault->client_id_desc =
 			gp106_hub_client_descs[mmfault->client_id];
+	}
 }
diff --git a/drivers/gpu/nvgpu/gp106/flcn_gp106.c b/drivers/gpu/nvgpu/gp106/flcn_gp106.c
index 9f542b6a..5959086d 100644
--- a/drivers/gpu/nvgpu/gp106/flcn_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/flcn_gp106.c
@@ -92,7 +92,8 @@ void gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
 	if (flcn->is_falcon_supported) {
 		nvgpu_mutex_init(&flcn->copy_lock);
 		gp106_falcon_ops(flcn);
-	} else
+	} else {
 		nvgpu_info(g, "falcon 0x%x not supported on %s",
 			flcn->flcn_id, g->name);
+	}
 }
diff --git a/drivers/gpu/nvgpu/gp106/gr_gp106.c b/drivers/gpu/nvgpu/gp106/gr_gp106.c
index 233357c8..a3fd8912 100644
--- a/drivers/gpu/nvgpu/gp106/gr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/gr_gp106.c
@@ -129,8 +129,9 @@ void gr_gp106_cb_size_default(struct gk20a *g)
 {
 	struct gr_gk20a *gr = &g->gr;
 
-	if (!gr->attrib_cb_default_size)
+	if (!gr->attrib_cb_default_size) {
 		gr->attrib_cb_default_size = 0x800;
+	}
 	gr->alpha_cb_default_size =
 		gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
 	gr->attrib_cb_gfxp_default_size =
@@ -147,20 +148,24 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
 {
 	int err = 0;
 
-	if (class == PASCAL_B && g->gr.ctx_vars.force_preemption_gfxp)
+	if (class == PASCAL_B && g->gr.ctx_vars.force_preemption_gfxp) {
 		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
+	}
 
 	if (class == PASCAL_COMPUTE_B &&
-			g->gr.ctx_vars.force_preemption_cilp)
+			g->gr.ctx_vars.force_preemption_cilp) {
 		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
+	}
 
 	/* check for invalid combinations */
-	if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
+	if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0)) {
 		return -EINVAL;
+	}
 
 	if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
-		(compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP))
+		(compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP)) {
 		return -EINVAL;
+	}
 
 	/* set preemption modes */
 	switch (graphics_preempt_mode) {
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index f3b5dd87..9c42ac3a 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -224,8 +224,9 @@ static int gp106_init_gpu_characteristics(struct gk20a *g)
 	int err;
 
 	err = gk20a_init_gpu_characteristics(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	__nvgpu_set_enabled(g, NVGPU_SUPPORT_GET_VOLTAGE, true);
 	__nvgpu_set_enabled(g, NVGPU_SUPPORT_GET_CURRENT, true);
@@ -868,8 +869,10 @@ int gp106_init_hal(struct gk20a *g)
 	__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, false);
 
 	/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
-	if (gops->fuse.check_priv_security(g))
-		return -EINVAL; /* Do not boot gpu */
+	if (gops->fuse.check_priv_security(g)) {
+		/* Do not boot gpu */
+		return -EINVAL;
+	}
 
 	g->pmu_lsf_pmu_wpr_init_done = 0;
 	g->bootstrap_owner = LSF_FALCON_ID_SEC2;
diff --git a/drivers/gpu/nvgpu/gp106/mclk_gp106.c b/drivers/gpu/nvgpu/gp106/mclk_gp106.c
index 108eed56..36092a1a 100644
--- a/drivers/gpu/nvgpu/gp106/mclk_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/mclk_gp106.c
@@ -3079,8 +3079,9 @@ static int mclk_get_memclk_table(struct gk20a *g)
 
 		memcpy(&memclock_base_entry, mem_entry_ptr,
 			memclock_table_header.base_entry_size);
-		if (memclock_base_entry.maximum == 0)
+		if (memclock_base_entry.maximum == 0) {
 			continue;
+		}
 
 		script_index = BIOS_GET_FIELD(memclock_base_entry.flags1,
 			VBIOS_MEMORY_CLOCK_BASE_ENTRY_11_FLAGS1_SCRIPT_INDEX);
@@ -3089,8 +3090,9 @@ static int mclk_get_memclk_table(struct gk20a *g)
 			memclock_table_header.script_list_ptr +
 			script_index * sizeof(u32));
 
-		if (!script_ptr)
+		if (!script_ptr) {
 			continue;
+		}
 
 		/* Link and execute shadow scripts */
 
@@ -3107,8 +3109,9 @@ static int mclk_get_memclk_table(struct gk20a *g)
 		for (shadow_idx = 0; shadow_idx <
 				fb_fbpa_fbio_delay_priv_max_v();
 				++shadow_idx) {
-			if (idx_to_ptr_tbl[shadow_idx] == 0)
+			if (idx_to_ptr_tbl[shadow_idx] == 0) {
 				break;
+			}
 		}
 
 		if (shadow_idx > fb_fbpa_fbio_delay_priv_max_v()) {
@@ -3142,14 +3145,16 @@ static int mclk_get_memclk_table(struct gk20a *g)
 			memclock_table_header.cmd_script_list_ptr +
 			cmd_script_index * sizeof(u32));
 
-		if (!cmd_script_ptr)
+		if (!cmd_script_ptr) {
 			continue;
+		}
 
 		/* Link and execute cmd shadow scripts */
 		for (cmd_idx = 0; cmd_idx <= fb_fbpa_fbio_cmd_delay_cmd_priv_max_v();
 			++cmd_idx) {
-			if (cmd_script_ptr == idx_to_cmd_ptr_tbl[cmd_idx])
+			if (cmd_script_ptr == idx_to_cmd_ptr_tbl[cmd_idx]) {
 				break;
+			}
 		}
 
 		/* script has not been executed before */
@@ -3158,8 +3163,9 @@ static int mclk_get_memclk_table(struct gk20a *g)
 			for (cmd_idx = 0; cmd_idx <
 				fb_fbpa_fbio_cmd_delay_cmd_priv_max_v();
 				++cmd_idx) {
-				if (idx_to_cmd_ptr_tbl[cmd_idx] == 0)
+				if (idx_to_cmd_ptr_tbl[cmd_idx] == 0) {
 					break;
+				}
 			}
 
 			if (cmd_idx > fb_fbpa_fbio_cmd_delay_cmd_priv_max_v()) {
@@ -3220,12 +3226,14 @@ int gp106_mclk_init(struct gk20a *g)
 	mclk = &g->clk_pmu.clk_mclk;
 
 	err = nvgpu_mutex_init(&mclk->mclk_lock);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = nvgpu_mutex_init(&mclk->data_lock);
-	if (err)
+	if (err) {
 		goto fail_mclk_mutex;
+	}
 
 	/* FBPA gain WAR */
 	gk20a_writel(g, fb_fbpa_fbio_iref_byte_rx_ctrl_r(), 0x22222222);
@@ -3326,15 +3334,17 @@ int gp106_mclk_change(struct gk20a *g, u16 val)
 
 	nvgpu_mutex_acquire(&mclk->mclk_lock);
 
-	if (!mclk->init)
+	if (!mclk->init) {
 		goto exit_status;
+	}
 
 	speed = (val < mclk->p5_min) ? GP106_MCLK_LOW_SPEED :
 		(val < mclk->p0_min) ? GP106_MCLK_MID_SPEED :
 			GP106_MCLK_HIGH_SPEED;
 
-	if (speed == mclk->speed)
+	if (speed == mclk->speed) {
 		goto exit_status;
+	}
 
 	seq_script_ptr = m->scripts[mclk->speed][speed].addr;
 	seq_script_size = m->scripts[mclk->speed][speed].size;
diff --git a/drivers/gpu/nvgpu/gp106/mm_gp106.c b/drivers/gpu/nvgpu/gp106/mm_gp106.c
index 4a3a4a19..7864880f 100644
--- a/drivers/gpu/nvgpu/gp106/mm_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/mm_gp106.c
@@ -39,8 +39,9 @@ size_t gp106_mm_get_vidmem_size(struct gk20a *g)
 	u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range);
 	size_t bytes = ((size_t)mag << scale) * SZ_1M;
 
-	if (ecc)
+	if (ecc) {
 		bytes = bytes / 16 * 15;
+	}
 
 	return bytes;
 }
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index 45924828..963668c4 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -54,8 +54,9 @@ bool gp106_pmu_is_engine_in_reset(struct gk20a *g)
 	bool status = false;
 
 	reg_reset = gk20a_readl(g, pwr_falcon_engine_r());
-	if (reg_reset == pwr_falcon_engine_reset_true_f())
+	if (reg_reset == pwr_falcon_engine_reset_true_f()) {
 		status = true;
+	}
 
 	return status;
 }
@@ -82,11 +83,13 @@ int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset)
 
 u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
 {
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
 		return NVGPU_PMU_GR_FEATURE_MASK_RPPG;
+	}
 
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
 		return NVGPU_PMU_MS_FEATURE_MASK_ALL;
+	}
 
 	return 0;
 }
@@ -274,11 +277,13 @@ int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 	u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
 
 	/* GM20B PMU supports loading FECS and GPCCS only */
-	if (falconidmask == 0)
+	if (falconidmask == 0) {
 		return -EINVAL;
+	}
 	if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
-		(1 << LSF_FALCON_ID_GPCCS)))
+		(1 << LSF_FALCON_ID_GPCCS))) {
 		return -EINVAL;
+	}
 	g->pmu_lsf_loaded_falcon_id = 0;
 	/* check whether pmu is ready to bootstrap lsf if not wait for it */
 	if (!g->pmu_lsf_pmu_wpr_init_done) {
@@ -296,7 +301,8 @@ int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 	pmu_wait_message_cond(&g->pmu,
 		gk20a_get_gr_idle_timeout(g),
 		&g->pmu_lsf_loaded_falcon_id, falconidmask);
-	if (g->pmu_lsf_loaded_falcon_id != falconidmask)
+	if (g->pmu_lsf_loaded_falcon_id != falconidmask) {
 		return -ETIMEDOUT;
+	}
 	return 0;
 }
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index 6f60fe41..61424bfe 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -41,8 +41,9 @@ int gp106_sec2_clear_halt_interrupt_status(struct gk20a *g,
 {
 	int status = 0;
 
-	if (nvgpu_flcn_clear_halt_intr_status(&g->sec2_flcn, timeout))
+	if (nvgpu_flcn_clear_halt_intr_status(&g->sec2_flcn, timeout)) {
 		status = -EBUSY;
+	}
 
 	return status;
 }
@@ -166,8 +167,9 @@ void init_pmu_setup_hw1(struct gk20a *g)
 	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
 	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
 		pmu, GK20A_PMU_DMAIDX_VIRT);
-	if (g->ops.pmu_ver.config_pmu_cmdline_args_super_surface)
+	if (g->ops.pmu_ver.config_pmu_cmdline_args_super_surface) {
 		g->ops.pmu_ver.config_pmu_cmdline_args_super_surface(pmu);
+	}
 
 	nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
 		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
@@ -222,8 +224,9 @@ int init_sec2_setup_hw1(struct gk20a *g,
 		psec_fbif_transcfg_target_noncoherent_sysmem_f());
 
 	err = bl_bootstrap_sec2(pmu, desc, bl_sz);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	return 0;
 }