author		Mahantesh Kumbar <mkumbar@nvidia.com>	2018-09-06 11:14:27 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-24 11:11:49 -0400
commit		5d30a5cda37ca349b4d9cb7e1985c7a0849001b6 (patch)
tree		89a37078480d7cec42d9a8c7bc869aae8bb28279 /drivers/gpu/nvgpu/gp106
parent		7465926ccdcdad87c22c788fe04fc11961df53ba (diff)
gpu: nvgpu: ACR code refactor
- Created struct nvgpu_acr to hold the ACR-module-related members in a single struct; they were previously spread across multiple structs such as nvgpu_pmu, pmu_ops & gk20a (see the struct sketch below).
- Created struct hs_flcn_bl to hold the ACR HS bootloader-specific members.
- Created struct hs_acr to hold the ACR-ucode-specific members: the bootloader data (struct hs_flcn_bl), the ACR type & info on the falcon the ACR ucode needs to run on.
- Created ACR ops under struct nvgpu_acr to perform ACR-specific operations; previously the ACR ops were part of the PMU, which forced a dependence on the PMU even when ACR was not executing on the PMU.
- Added the acr_remove_support op, which is called from gk20a_remove_support(); earlier, ACR cleanup was part of the PMU remove_support method.
- Created defines for the ACR types.
- The acr_sw_init() op sets the ACR properties statically for the chip currently in execution & assigns the ops to the functions that chip needs.
- acr_sw_init executes early, because nvgpu_init_mm_support calls an ACR function to allocate blob space (a flow sketch follows the diff below).
- Created ops to fill the bootloader descriptor & to patch the WPR info into the ACR ucode, based on the interfaces used to bootstrap the ACR ucode.
- Created gm20b_bootstrap_hs_acr(), which is now the common HAL for all chips to bootstrap ACR; earlier there were 3 different functions, for gm20b/gp10b, gv11b & all dGPUs, depending on the interface needed.
- Removed duplicate falcon-engine code wherever common falcon code could be used.
- Removed ACR code that depended on the PMU & changed it to use nvgpu_acr.

JIRA NVGPU-1148

Change-Id: I39951d2fc9a0bb7ee6057e0fa06da78045d47590
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1813231
GVS: Gerrit_Virtual_Submit
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
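For orientation, here is a minimal sketch of the data layout this refactor introduces, reconstructed from the usage visible in the diff below. The field types, field ordering and the forward declarations are assumptions made for illustration; they are not the authoritative nvgpu headers.

	#include <stdbool.h>
	#include <stddef.h>

	typedef unsigned int u32;

	/* Opaque nvgpu types, assumed for this sketch */
	struct gk20a;
	struct nvgpu_falcon;
	struct nvgpu_falcon_bl_info;
	struct wpr_carveout_info;
	struct nvgpu_mem { unsigned long long gpu_va; /* ... */ };

	/* ACR HS bootloader-specific members */
	struct hs_flcn_bl {
		const char *bl_fw_name;	/* e.g. HSBIN_ACR_BL_UCODE_IMAGE */
	};

	/* ACR-ucode-specific members */
	struct hs_acr {
		u32 acr_type;			/* e.g. ACR_DEFAULT */
		const char *acr_fw_name;	/* e.g. HSBIN_ACR_UCODE_IMAGE */
		struct hs_flcn_bl acr_hs_bl;	/* HS bootloader data */
		void *ptr_bl_dmem_desc;		/* chip's BL DMEM descriptor */
		u32 bl_dmem_desc_size;
		struct nvgpu_falcon *acr_flcn;	/* falcon running the ACR ucode */
		int (*acr_flcn_setup_hw_and_bl_bootstrap)(struct gk20a *g,
			struct hs_acr *acr_desc,
			struct nvgpu_falcon_bl_info *bl_info);
	};

	/* Single home for ACR state; ops are assigned per chip by acr_sw_init() */
	struct nvgpu_acr {
		struct gk20a *g;
		u32 bootstrap_owner;		/* e.g. LSF_FALCON_ID_SEC2 */
		u32 max_supported_lsfm;
		struct nvgpu_mem ucode_blob;
		struct hs_acr acr;		/* default ACR ucode to boot */

		void (*get_wpr_info)(struct gk20a *g,
			struct wpr_carveout_info *inf);
		int (*alloc_blob_space)(struct gk20a *g, size_t size,
			struct nvgpu_mem *mem);
		int (*bootstrap_hs_acr)(struct gk20a *g, struct nvgpu_acr *acr,
			struct hs_acr *acr_desc);
		int (*patch_wpr_info_to_ucode)(struct gk20a *g,
			struct nvgpu_acr *acr, struct hs_acr *acr_desc,
			bool is_recovery);
		int (*acr_fill_bl_dmem_desc)(struct gk20a *g,
			struct nvgpu_acr *acr, struct hs_acr *acr_desc,
			u32 *acr_ucode_header);
		void (*remove_support)(struct nvgpu_acr *acr);
	};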
Diffstat (limited to 'drivers/gpu/nvgpu/gp106')
-rw-r--r--	drivers/gpu/nvgpu/gp106/acr_gp106.c	133
-rw-r--r--	drivers/gpu/nvgpu/gp106/acr_gp106.h	8
-rw-r--r--	drivers/gpu/nvgpu/gp106/hal_gp106.c	5
-rw-r--r--	drivers/gpu/nvgpu/gp106/sec2_gp106.c	171
-rw-r--r--	drivers/gpu/nvgpu/gp106/sec2_gp106.h	12
5 files changed, 172 insertions(+), 157 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index 7bb099e5..b1150e29 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -93,7 +93,7 @@ int gp106_alloc_blob_space(struct gk20a *g,
 		return 0;
 	}
 
-	g->ops.pmu.get_wpr(g, &wpr_inf);
+	g->acr.get_wpr_info(g, &wpr_inf);
 
 	/*
 	 * Even though this mem_desc wouldn't be used, the wpr region needs to
@@ -456,7 +456,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
 	memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr_v1));
 	gr_gk20a_init_ctxsw_ucode(g);
 
-	g->ops.pmu.get_wpr(g, &wpr_inf);
+	g->acr.get_wpr_info(g, &wpr_inf);
 	gp106_dbg_pmu(g, "wpr carveout base:%llx\n", (wpr_inf.wpr_base));
 	gp106_dbg_pmu(g, "wpr carveout size :%x\n", (u32)wpr_inf.size);
 
@@ -479,7 +479,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
 	}
 
 	/*Alloc memory to hold ucode blob contents*/
-	err = g->ops.pmu.alloc_blob_space(g, plsfm->wpr_size
+	err = g->acr.alloc_blob_space(g, plsfm->wpr_size
 		,&g->acr.ucode_blob);
 	if (err) {
 		goto exit_err;
@@ -557,7 +557,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
 
 	/*0th index is always PMU which is already handled in earlier
 	if condition*/
-	for (i = 1; i < (MAX_SUPPORTED_LSFM); i++) {
+	for (i = 1; i < g->acr.max_supported_lsfm; i++) {
 		memset(&ucode_img, 0, sizeof(ucode_img));
 		if (pmu_acr_supp_ucode_list[i](g, &ucode_img) == 0) {
 			if (ucode_img.lsf_desc != NULL) {
@@ -626,7 +626,7 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
 	 * physical addresses of each respective segment.
 	 */
 	addr_base = p_lsfm->lsb_header.ucode_off;
-	g->ops.pmu.get_wpr(g, &wpr_inf);
+	g->acr.get_wpr_info(g, &wpr_inf);
 	addr_base += (wpr_inf.wpr_base);
 
 	gp106_dbg_pmu(g, "pmu loader cfg addrbase 0x%llx\n", addr_base);
@@ -701,7 +701,7 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	 * physical addresses of each respective segment.
 	 */
 	addr_base = p_lsfm->lsb_header.ucode_off;
-	g->ops.pmu.get_wpr(g, &wpr_inf);
+	g->acr.get_wpr_info(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
 
 	gp106_dbg_pmu(g, "falcon ID %x", p_lsfm->wpr_header.falcon_id);
@@ -1017,7 +1017,7 @@ int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm,
 
 	/* Fill in static WPR header info*/
 	pnode->wpr_header.falcon_id = falcon_id;
-	pnode->wpr_header.bootstrap_owner = g->bootstrap_owner;
+	pnode->wpr_header.bootstrap_owner = g->acr.bootstrap_owner;
 	pnode->wpr_header.status = LSF_IMAGE_STATUS_COPY;
 
 	pnode->wpr_header.lazy_bootstrap =
@@ -1030,6 +1030,7 @@ int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm,
 	pnode->wpr_header.bin_version = pnode->lsb_header.signature.version;
 	pnode->next = plsfm->ucode_img_list;
 	plsfm->ucode_img_list = pnode;
+
 	return 0;
 }
 
@@ -1191,3 +1192,121 @@ int lsf_gen_wpr_requirements(struct gk20a *g,
 	plsfm->wpr_size = wpr_offset;
 	return 0;
 }
+
+int gp106_acr_patch_wpr_info_to_ucode(struct gk20a *g, struct nvgpu_acr *acr,
+	struct hs_acr *acr_desc, bool is_recovery)
+{
+	struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
+	struct acr_fw_header *acr_fw_hdr = NULL;
+	struct bin_hdr *acr_fw_bin_hdr = NULL;
+	struct flcn_acr_desc_v1 *acr_dmem_desc;
+	struct wpr_carveout_info wpr_inf;
+	u32 *acr_ucode_header = NULL;
+	u32 *acr_ucode_data = NULL;
+
+	nvgpu_log_fn(g, " ");
+
+	acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
+	acr_fw_hdr = (struct acr_fw_header *)
+		(acr_fw->data + acr_fw_bin_hdr->header_offset);
+
+	acr_ucode_data = (u32 *)(acr_fw->data + acr_fw_bin_hdr->data_offset);
+	acr_ucode_header = (u32 *)(acr_fw->data + acr_fw_hdr->hdr_offset);
+
+	acr->get_wpr_info(g, &wpr_inf);
+
+	acr_dmem_desc = (struct flcn_acr_desc_v1 *)
+		&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);
+
+	acr_dmem_desc->nonwpr_ucode_blob_start = wpr_inf.nonwpr_base;
+	acr_dmem_desc->nonwpr_ucode_blob_size = wpr_inf.size;
+	acr_dmem_desc->regions.no_regions = 1U;
+	acr_dmem_desc->wpr_offset = 0U;
+
+	acr_dmem_desc->wpr_region_id = 1U;
+	acr_dmem_desc->regions.region_props[0U].region_id = 1U;
+	acr_dmem_desc->regions.region_props[0U].start_addr =
+		(wpr_inf.wpr_base) >> 8U;
+	acr_dmem_desc->regions.region_props[0U].end_addr =
+		((wpr_inf.wpr_base) + wpr_inf.size) >> 8U;
+	acr_dmem_desc->regions.region_props[0U].shadowmMem_startaddress =
+		wpr_inf.nonwpr_base >> 8U;
+
+	return 0;
+}
+
+int gp106_acr_fill_bl_dmem_desc(struct gk20a *g,
+	struct nvgpu_acr *acr, struct hs_acr *acr_desc,
+	u32 *acr_ucode_header)
+{
+	struct nvgpu_mem *acr_ucode_mem = &acr_desc->acr_ucode;
+	struct flcn_bl_dmem_desc_v1 *bl_dmem_desc =
+		&acr_desc->bl_dmem_desc_v1;
+
+	nvgpu_log_fn(g, " ");
+
+	memset(bl_dmem_desc, 0U, sizeof(struct flcn_bl_dmem_desc_v1));
+
+	bl_dmem_desc->signature[0] = 0U;
+	bl_dmem_desc->signature[1] = 0U;
+	bl_dmem_desc->signature[2] = 0U;
+	bl_dmem_desc->signature[3] = 0U;
+	bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
+
+	flcn64_set_dma(&bl_dmem_desc->code_dma_base,
+		acr_ucode_mem->gpu_va);
+
+	bl_dmem_desc->non_sec_code_off = acr_ucode_header[0U];
+	bl_dmem_desc->non_sec_code_size = acr_ucode_header[1U];
+	bl_dmem_desc->sec_code_off = acr_ucode_header[5U];
+	bl_dmem_desc->sec_code_size = acr_ucode_header[6U];
+	bl_dmem_desc->code_entry_point = 0U;
+
+	flcn64_set_dma(&bl_dmem_desc->data_dma_base,
+		acr_ucode_mem->gpu_va + acr_ucode_header[2U]);
+
+	bl_dmem_desc->data_size = acr_ucode_header[3U];
+
+	return 0;
+}
+
+static void nvgpu_gp106_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
+{
+	struct hs_flcn_bl *hs_bl = &hs_acr->acr_hs_bl;
+
+	nvgpu_log_fn(g, " ");
+
+	hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;
+
+	hs_acr->acr_type = ACR_DEFAULT;
+	hs_acr->acr_fw_name = HSBIN_ACR_UCODE_IMAGE;
+
+	hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc_v1;
+	hs_acr->bl_dmem_desc_size = sizeof(struct flcn_bl_dmem_desc_v1);
+
+	hs_acr->acr_flcn = &g->sec2_flcn;
+	hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
+		gp106_sec2_setup_hw_and_bl_bootstrap;
+}
+
+void nvgpu_gp106_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
+{
+	nvgpu_log_fn(g, " ");
+
+	acr->g = g;
+
+	acr->bootstrap_owner = LSF_FALCON_ID_SEC2;
+	acr->max_supported_lsfm = MAX_SUPPORTED_LSFM;
+
+	nvgpu_gp106_acr_default_sw_init(g, &acr->acr);
+
+	acr->get_wpr_info = gp106_wpr_info;
+	acr->alloc_blob_space = gp106_alloc_blob_space;
+	acr->bootstrap_hs_acr = gm20b_bootstrap_hs_acr;
+	acr->patch_wpr_info_to_ucode =
+		gp106_acr_patch_wpr_info_to_ucode;
+	acr->acr_fill_bl_dmem_desc =
+		gp106_acr_fill_bl_dmem_desc;
+
+	acr->remove_support = gm20b_remove_acr_support;
+}
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.h b/drivers/gpu/nvgpu/gp106/acr_gp106.h
index 3fab1509..ad004bf0 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.h
@@ -63,4 +63,12 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
 int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	struct lsfm_managed_ucode_img_v2 *pnode);
+int gp106_acr_fill_bl_dmem_desc(struct gk20a *g,
+	struct nvgpu_acr *acr, struct hs_acr *acr_desc,
+	u32 *acr_ucode_header);
+int gp106_acr_patch_wpr_info_to_ucode(struct gk20a *g, struct nvgpu_acr *acr,
+	struct hs_acr *acr_desc, bool is_recovery);
+void nvgpu_gp106_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);
+
 #endif /* NVGPU_ACR_GP106_H */
+
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index e94bc1ea..048c0a45 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -800,6 +800,9 @@ static const struct gpu_ops gp106_ops = {
 		.read_vin_cal_gain_offset_fuse =
 			gp106_fuse_read_vin_cal_gain_offset_fuse,
 	},
+	.acr = {
+		.acr_sw_init = nvgpu_gp106_acr_sw_init,
+	},
 	.get_litter_value = gp106_get_litter_value,
 	.chip_init_gpu_characteristics = gp106_init_gpu_characteristics,
 };
@@ -855,6 +858,7 @@ int gp106_init_hal(struct gk20a *g)
 	gops->falcon = gp106_ops.falcon;
 	gops->priv_ring = gp106_ops.priv_ring;
 	gops->fuse = gp106_ops.fuse;
+	gops->acr = gp106_ops.acr;
 
 	/* Lone functions */
 	gops->get_litter_value = gp106_ops.get_litter_value;
@@ -875,7 +879,6 @@ int gp106_init_hal(struct gk20a *g)
 	}
 
 	g->pmu_lsf_pmu_wpr_init_done = 0;
-	g->bootstrap_owner = LSF_FALCON_ID_SEC2;
 	gops->clk.split_rail_support = true;
 	gops->clk.support_clk_freq_controller = true;
 	gops->clk.support_pmgr_domain = true;
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index dec35a91..40823b69 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -32,167 +32,61 @@
 #include <nvgpu/hw/gp106/hw_pwr_gp106.h>
 #include <nvgpu/hw/gp106/hw_psec_gp106.h>
 
-int gp106_sec2_clear_halt_interrupt_status(struct gk20a *g,
-	unsigned int timeout)
+int gp106_sec2_reset(struct gk20a *g)
 {
-	int status = 0;
-
-	if (nvgpu_flcn_clear_halt_intr_status(&g->sec2_flcn, timeout)) {
-		status = -EBUSY;
-	}
+	nvgpu_log_fn(g, " ");
 
-	return status;
-}
+	gk20a_writel(g, psec_falcon_engine_r(),
+		pwr_falcon_engine_reset_true_f());
+	nvgpu_udelay(10);
+	gk20a_writel(g, psec_falcon_engine_r(),
+		pwr_falcon_engine_reset_false_f());
 
-int gp106_sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
-{
-	u32 data = 0;
-	int completion = 0;
-
-	completion = nvgpu_flcn_wait_for_halt(&g->sec2_flcn, timeout);
-	if (completion) {
-		nvgpu_err(g, "ACR boot timed out");
-		goto exit;
-	}
-
-	g->acr.capabilities = nvgpu_flcn_mailbox_read(&g->sec2_flcn,
-		FALCON_MAILBOX_1);
-	nvgpu_pmu_dbg(g, "ACR capabilities %x\n", g->acr.capabilities);
-	data = nvgpu_flcn_mailbox_read(&g->sec2_flcn, FALCON_MAILBOX_0);
-	if (data) {
-		nvgpu_err(g, "ACR boot failed, err %x", data);
-		completion = -EAGAIN;
-		goto exit;
-	}
-
-	init_pmu_setup_hw1(g);
-
-exit:
-	if (completion) {
-		nvgpu_kill_task_pg_init(g);
-		nvgpu_pmu_state_change(g, PMU_STATE_OFF, false);
-		nvgpu_flcn_dump_stats(&g->sec2_flcn);
-	}
-
-	return completion;
+	nvgpu_log_fn(g, "done");
+	return 0;
 }
 
-int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
-	void *desc, u32 bl_sz)
+static int sec2_flcn_bl_bootstrap(struct gk20a *g,
+	struct nvgpu_falcon_bl_info *bl_info)
 {
-	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct mm_gk20a *mm = &g->mm;
-	struct nvgpu_falcon_bl_info bl_info;
-	u32 data = 0;
+	u32 data = 0U;
+	int err = 0U;
 
 	nvgpu_log_fn(g, " ");
 
 	/* SEC2 Config */
 	gk20a_writel(g, psec_falcon_itfen_r(),
 		gk20a_readl(g, psec_falcon_itfen_r()) |
 		psec_falcon_itfen_ctxen_enable_f());
 
 	gk20a_writel(g, psec_falcon_nxtctx_r(),
 		pwr_pmu_new_instblk_ptr_f(
-		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
-		pwr_pmu_new_instblk_valid_f(1) |
+		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12U) |
+		pwr_pmu_new_instblk_valid_f(1U) |
 		nvgpu_aperture_mask(g, &mm->pmu.inst_block,
 		pwr_pmu_new_instblk_target_sys_ncoh_f(),
 		pwr_pmu_new_instblk_target_sys_coh_f(),
 		pwr_pmu_new_instblk_target_fb_f()));
 
 	data = gk20a_readl(g, psec_falcon_debug1_r());
 	data |= psec_falcon_debug1_ctxsw_mode_m();
 	gk20a_writel(g, psec_falcon_debug1_r(), data);
 
 	data = gk20a_readl(g, psec_falcon_engctl_r());
-	data |= (1 << 3);
+	data |= (1U << 3U);
 	gk20a_writel(g, psec_falcon_engctl_r(), data);
 
-	bl_info.bl_src = g->acr.hsbl_ucode.cpu_va;
-	bl_info.bl_desc = desc;
-	bl_info.bl_desc_size = sizeof(struct flcn_bl_dmem_desc_v1);
-	bl_info.bl_size = bl_sz;
-	bl_info.bl_start_tag = g->acr.pmu_hsbl_desc->bl_start_tag;
-	nvgpu_flcn_bl_bootstrap(&g->sec2_flcn, &bl_info);
+	err = nvgpu_flcn_bl_bootstrap(&g->sec2_flcn, bl_info);
 
-	return 0;
+	return err;
 }
 
-void init_pmu_setup_hw1(struct gk20a *g)
+int gp106_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
+	struct hs_acr *acr_desc,
+	struct nvgpu_falcon_bl_info *bl_info)
 {
-	struct mm_gk20a *mm = &g->mm;
-	struct nvgpu_pmu *pmu = &g->pmu;
-
-	/* PMU TRANSCFG */
-	/* setup apertures - virtual */
-	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
-		pwr_fbif_transcfg_mem_type_physical_f() |
-		pwr_fbif_transcfg_target_local_fb_f());
-	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT),
-		pwr_fbif_transcfg_mem_type_virtual_f());
-	/* setup apertures - physical */
-	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_VID),
-		pwr_fbif_transcfg_mem_type_physical_f() |
-		pwr_fbif_transcfg_target_local_fb_f());
-	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_COH),
-		pwr_fbif_transcfg_mem_type_physical_f() |
-		pwr_fbif_transcfg_target_coherent_sysmem_f());
-	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_NCOH),
-		pwr_fbif_transcfg_mem_type_physical_f() |
-		pwr_fbif_transcfg_target_noncoherent_sysmem_f());
-
-	/* PMU Config */
-	gk20a_writel(g, pwr_falcon_itfen_r(),
-		gk20a_readl(g, pwr_falcon_itfen_r()) |
-		pwr_falcon_itfen_ctxen_enable_f());
-	gk20a_writel(g, pwr_pmu_new_instblk_r(),
-		pwr_pmu_new_instblk_ptr_f(
-		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
-		pwr_pmu_new_instblk_valid_f(1) |
-		nvgpu_aperture_mask(g, &mm->pmu.inst_block,
-		pwr_pmu_new_instblk_target_sys_ncoh_f(),
-		pwr_pmu_new_instblk_target_sys_coh_f(),
-		pwr_pmu_new_instblk_target_fb_f()));
-
-	/*Copying pmu cmdline args*/
-	g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu, 0);
-	g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode(pmu, 1);
-	g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
-		pmu, GK20A_PMU_TRACE_BUFSIZE);
-	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
-	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
-		pmu, GK20A_PMU_DMAIDX_VIRT);
-	if (g->ops.pmu_ver.config_pmu_cmdline_args_super_surface) {
-		g->ops.pmu_ver.config_pmu_cmdline_args_super_surface(pmu);
-	}
-
-	nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
-		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
-		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
-
-}
-
-int gp106_sec2_reset(struct gk20a *g)
-{
-	nvgpu_log_fn(g, " ");
-
-	gk20a_writel(g, psec_falcon_engine_r(),
-		pwr_falcon_engine_reset_true_f());
-	nvgpu_udelay(10);
-	gk20a_writel(g, psec_falcon_engine_r(),
-		pwr_falcon_engine_reset_false_f());
-
-	nvgpu_log_fn(g, "done");
-	return 0;
-}
-
-int init_sec2_setup_hw1(struct gk20a *g,
-	void *desc, u32 bl_sz)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	int err;
-	u32 data = 0;
+	u32 data = 0U;
 
 	nvgpu_log_fn(g, " ");
 
@@ -219,10 +113,5 @@ int init_sec2_setup_hw1(struct gk20a *g,
 		psec_fbif_transcfg_mem_type_physical_f() |
 		psec_fbif_transcfg_target_noncoherent_sysmem_f());
 
-	err = bl_bootstrap_sec2(pmu, desc, bl_sz);
-	if (err) {
-		return err;
-	}
-
-	return 0;
+	return sec2_flcn_bl_bootstrap(g, bl_info);
 }
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.h b/drivers/gpu/nvgpu/gp106/sec2_gp106.h
index b17028e7..f1cad65a 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.h
@@ -23,14 +23,10 @@
 #ifndef NVGPU_SEC2_GP106_H
 #define NVGPU_SEC2_GP106_H
 
-int gp106_sec2_clear_halt_interrupt_status(struct gk20a *g,
-	unsigned int timeout);
-int gp106_sec2_wait_for_halt(struct gk20a *g, unsigned int timeout);
-int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
-	void *desc, u32 bl_sz);
-void init_pmu_setup_hw1(struct gk20a *g);
-int init_sec2_setup_hw1(struct gk20a *g,
-	void *desc, u32 bl_sz);
 int gp106_sec2_reset(struct gk20a *g);
 
+int gp106_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
+	struct hs_acr *acr_desc,
+	struct nvgpu_falcon_bl_info *bl_info);
+
 #endif /* NVGPU_SEC2_GP106_H */
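Taken together, the gp106 wiring above removes the PMU from the ACR boot path. Below is a hedged sketch of the resulting flow on gp106, matching the struct sketch near the top of this page. The caller function and the wpr_size placeholder are hypothetical; only the g->ops.acr and g->acr calls appear in this commit.

	/* Hypothetical caller, for illustration only */
	static int example_gp106_acr_boot_flow(struct gk20a *g)
	{
		u32 wpr_size = 0U;	/* in practice computed by lsf_gen_wpr_requirements() */
		int err;

		/* HAL init: statically wire the chip's ACR properties and ops;
		 * replaces the old g->bootstrap_owner assignment removed from
		 * gp106_init_hal() above. */
		g->ops.acr.acr_sw_init(g, &g->acr);

		/* MM init: blob space is now allocated through the ACR op
		 * instead of g->ops.pmu.alloc_blob_space(). */
		err = g->acr.alloc_blob_space(g, wpr_size, &g->acr.ucode_blob);
		if (err != 0) {
			return err;
		}

		/* Boot: the common HAL bootstraps the HS ACR on the falcon
		 * chosen by acr_sw_init() -- SEC2 on gp106. */
		return g->acr.bootstrap_hs_acr(g, &g->acr, &g->acr.acr);
	}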