Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')
-rw-r--r--	drivers/gpu/nvgpu/gm20b/acr_gm20b.c	106
1 file changed, 66 insertions(+), 40 deletions(-)
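Every hunk below applies the same mechanical rule: an `if`/`else` body that used to be a single unbraced statement is wrapped in braces, and a few comments are reflowed to match. A minimal sketch of the before/after shape of that rule (generic example, not a line taken from this file):

	/* before: unbraced single-statement body */
	if (err)
		return err;

	/* after: the body is always wrapped in braces */
	if (err) {
		return err;
	}

The driver's behaviour is unchanged; only the statement grouping is made explicit.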
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index a9311759..916e7a66 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -238,8 +238,9 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	struct nvgpu_firmware *gpccs_sig;
 	int err;
 
-	if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS))
+	if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
 		return -ENOENT;
+	}
 
 	gpccs_sig = nvgpu_request_firmware(g, T18x_GPCCS_UCODE_SIG, 0);
 	if (!gpccs_sig) {
@@ -381,20 +382,23 @@ int prepare_ucode_blob(struct gk20a *g)
 	/* Discover all managed falcons*/
 	err = lsfm_discover_ucode_images(g, plsfm);
 	gm20b_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
-	if (err)
+	if (err) {
 		goto free_sgt;
+	}
 
 	if (plsfm->managed_flcn_cnt && !g->acr.ucode_blob.cpu_va) {
 		/* Generate WPR requirements*/
 		err = lsf_gen_wpr_requirements(g, plsfm);
-		if (err)
+		if (err) {
 			goto free_sgt;
+		}
 
 		/*Alloc memory to hold ucode blob contents*/
 		err = g->ops.pmu.alloc_blob_space(g, plsfm->wpr_size
 			, &g->acr.ucode_blob);
-		if (err)
+		if (err) {
 			goto free_sgt;
+		}
 
 		gm20b_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
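All three error checks in this prepare_ucode_blob() hunk branch to the same free_sgt label, so adding braces changes layout only, not control flow. For readers unfamiliar with the idiom, a small self-contained sketch of this centralized-cleanup pattern (hypothetical names, not the driver's real helpers):

	#include <errno.h>

	struct ctx { int first_ok; int second_ok; };

	static int alloc_first(struct ctx *c)  { c->first_ok = 1; return 0; }
	static int alloc_second(struct ctx *c) { (void)c; return -ENOMEM; /* simulate failure */ }
	static void release_all(struct ctx *c) { c->first_ok = 0; c->second_ok = 0; }

	static int setup_buffers(struct ctx *c)
	{
		int err;

		err = alloc_first(c);
		if (err) {
			goto cleanup;
		}

		err = alloc_second(c);
		if (err) {
			goto cleanup;
		}

		return 0;

	cleanup:
		/* single exit point: undo whatever succeeded before the failure */
		release_all(c);
		return err;
	}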
@@ -428,8 +432,9 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 	/* Obtain the PMU ucode image and add it to the list if required*/
 	memset(&ucode_img, 0, sizeof(ucode_img));
 	status = pmu_ucode_details(g, &ucode_img);
-	if (status)
+	if (status) {
 		return status;
+	}
 
 	/* The falon_id is formed by grabbing the static base
 	falon_id from the image and adding the
@@ -441,8 +446,9 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 		if (!lsfm_falcon_disabled(g, plsfm, falcon_id)) {
 			pmu->falcon_id = falcon_id;
 			if (lsfm_add_ucode_img(g, plsfm, &ucode_img,
-				pmu->falcon_id) == 0)
+				pmu->falcon_id) == 0) {
 				pmu->pmu_mode |= PMU_LSFM_MANAGED;
+			}
 
 			plsfm->managed_flcn_cnt++;
 		} else {
@@ -480,8 +486,9 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 			/* Do not manage non-FB ucode*/
 			if (lsfm_add_ucode_img(g,
 				plsfm, &ucode_img, falcon_id)
-				== 0)
+				== 0) {
 				plsfm->managed_flcn_cnt++;
+			}
 		} else {
 			gm20b_dbg_pmu(g, "not managed %d\n",
 				ucode_img.lsf_desc->falcon_id);
@@ -513,18 +520,22 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	u64 addr_code, addr_data;
 	u32 addr_args;
 
-	if (p_img->desc == NULL) /*This means its a header based ucode,
-	and so we do not fill BL gen desc structure*/
+	if (p_img->desc == NULL) {
+		/*
+		 * This means its a header based ucode,
+		 * and so we do not fill BL gen desc structure
+		 */
 		return -EINVAL;
+	}
 	desc = p_img->desc;
 	/*
-	Calculate physical and virtual addresses for various portions of
-	the PMU ucode image
-	Calculate the 32-bit addresses for the application code, application
-	data, and bootloader code. These values are all based on IM_BASE.
-	The 32-bit addresses will be the upper 32-bits of the virtual or
-	physical addresses of each respective segment.
+	 * Calculate physical and virtual addresses for various portions of
+	 * the PMU ucode image
+	 * Calculate the 32-bit addresses for the application code, application
+	 * data, and bootloader code. These values are all based on IM_BASE.
+	 * The 32-bit addresses will be the upper 32-bits of the virtual or
+	 * physical addresses of each respective segment.
 	*/
 	addr_base = p_lsfm->lsb_header.ucode_off;
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
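Besides the brace around the early return, this hunk also moves the comment that trailed the `if` condition inside the new braces and rewrites the multi-line address-calculation comment with a leading asterisk on every continuation line. A short sketch of just the comment reflow (generic text, not the original wording):

	/* old form: continuation lines carry no asterisk
	and simply run on until the closing marker
	*/

	/*
	 * new form: lone opening marker, a " * " prefix on each
	 * continuation line, and the closing marker on its own line
	 */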
@@ -584,19 +595,23 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	struct pmu_ucode_desc *desc;
 	u64 addr_code, addr_data;
 
-	if (p_img->desc == NULL) /*This means its a header based ucode,
-	and so we do not fill BL gen desc structure*/
+	if (p_img->desc == NULL) {
+		/*
+		 * This means its a header based ucode,
+		 * and so we do not fill BL gen desc structure
+		 */
 		return -EINVAL;
+	}
 	desc = p_img->desc;
 
 	/*
-	Calculate physical and virtual addresses for various portions of
-	the PMU ucode image
-	Calculate the 32-bit addresses for the application code, application
-	data, and bootloader code. These values are all based on IM_BASE.
-	The 32-bit addresses will be the upper 32-bits of the virtual or
-	physical addresses of each respective segment.
+	 * Calculate physical and virtual addresses for various portions of
+	 * the PMU ucode image
+	 * Calculate the 32-bit addresses for the application code, application
+	 * data, and bootloader code. These values are all based on IM_BASE.
+	 * The 32-bit addresses will be the upper 32-bits of the virtual or
+	 * physical addresses of each respective segment.
 	*/
 	addr_base = p_lsfm->lsb_header.ucode_off;
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
@@ -642,9 +657,10 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 
 	if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
 		gm20b_dbg_pmu(g, "pmu write flcn bl gen desc\n");
-		if (pnode->wpr_header.falcon_id == pmu->falcon_id)
+		if (pnode->wpr_header.falcon_id == pmu->falcon_id) {
 			return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
 				&pnode->bl_gen_desc_size);
+		}
 	}
 
 	/* Failed to find the falcon requested. */
@@ -795,9 +811,10 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 	u32 full_app_size = 0;
 	u32 data = 0;
 
-	if (pnode->ucode_img.lsf_desc)
+	if (pnode->ucode_img.lsf_desc) {
 		memcpy(&pnode->lsb_header.signature, pnode->ucode_img.lsf_desc,
 			sizeof(struct lsf_ucode_desc));
+	}
 	pnode->lsb_header.ucode_size = pnode->ucode_img.data_size;
 
 	/* The remainder of the LSB depends on the loader usage */
@@ -865,8 +882,9 @@ static int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 
 	struct lsfm_managed_ucode_img *pnode;
 	pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_managed_ucode_img));
-	if (pnode == NULL)
+	if (pnode == NULL) {
 		return -ENOMEM;
+	}
 
 	/* Keep a copy of the ucode image info locally */
 	memcpy(&pnode->ucode_img, ucode_image, sizeof(struct flcn_ucode_img));
@@ -919,11 +937,12 @@ static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr *plsfm)
 	while (cnt) {
 		mg_ucode_img = plsfm->ucode_img_list;
 		if (mg_ucode_img->ucode_img.lsf_desc->falcon_id ==
-			LSF_FALCON_ID_PMU)
+			LSF_FALCON_ID_PMU) {
 			lsfm_free_ucode_img_res(g, &mg_ucode_img->ucode_img);
-		else
+		} else {
 			lsfm_free_nonpmu_ucode_img_res(g,
 				&mg_ucode_img->ucode_img);
+		}
 		plsfm->ucode_img_list = mg_ucode_img->next;
 		nvgpu_kfree(g, mg_ucode_img);
 		cnt--;
@@ -1110,8 +1129,9 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 			((acr_ucode_header_t210_load[2]) >> 8);
 		bl_dmem_desc->data_dma_base1 = 0x0;
 		bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
-	} else
+	} else {
 		acr->acr_dmem_desc->nonwpr_ucode_blob_size = 0;
+	}
 	status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
 	if (status != 0) {
 		err = status;
@@ -1274,10 +1294,12 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 	pmu->isr_enabled = true;
 	nvgpu_mutex_release(&pmu->isr_mutex);
 
-	if (g->ops.pmu.setup_apertures)
+	if (g->ops.pmu.setup_apertures) {
 		g->ops.pmu.setup_apertures(g);
-	if (g->ops.pmu.update_lspmu_cmdline_args)
+	}
+	if (g->ops.pmu.update_lspmu_cmdline_args) {
 		g->ops.pmu.update_lspmu_cmdline_args(g);
+	}
 
 	/*disable irqs for hs falcon booting as we will poll for halt*/
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
@@ -1287,8 +1309,9 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 	/*Clearing mailbox register used to reflect capabilities*/
 	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
 	err = bl_bootstrap(pmu, desc, bl_sz);
-	if (err)
+	if (err) {
 		return err;
+	}
 	return 0;
 }
 
@@ -1362,8 +1385,9 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 	 */
 
 	if (g->ops.pmu.falcon_clear_halt_interrupt_status(g,
-			gk20a_get_gr_idle_timeout(g)))
+			gk20a_get_gr_idle_timeout(g))) {
 		goto err_unmap_bl;
+	}
 
 	gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
 		pwr_falcon_mmu_phys_sec_r()));
@@ -1377,12 +1401,13 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 			ACR_COMPLETION_TIMEOUT_MS);
 		if (err == 0) {
 			/* Clear the HALT interrupt */
 			if (g->ops.pmu.falcon_clear_halt_interrupt_status(g,
-				gk20a_get_gr_idle_timeout(g)))
-				goto err_unmap_bl;
-		}
-		else
-			goto err_unmap_bl;
+				gk20a_get_gr_idle_timeout(g))) {
+				goto err_unmap_bl;
+			}
+		} else {
+			goto err_unmap_bl;
+		}
 	}
 	gm20b_dbg_pmu(g, "after waiting for halt, err %x\n", err);
 	gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
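In this pmu_exec_gen_bl() hunk the patch also folds the `else` that previously sat on its own line into the cuddled `} else {` form and braces both goto error paths. A generic sketch of the resulting shape (condition name is illustrative):

	if (err == 0) {
		if (clear_halt_failed) {
			goto err_unmap_bl;
		}
	} else {
		goto err_unmap_bl;
	}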
@@ -1447,8 +1472,9 @@ int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout_ms)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int status = 0;
 
-	if (nvgpu_flcn_clear_halt_intr_status(pmu->flcn, timeout_ms))
+	if (nvgpu_flcn_clear_halt_intr_status(pmu->flcn, timeout_ms)) {
 		status = -EBUSY;
+	}
 
 	return status;
 }