Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c  55
1 file changed, 11 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index c03629fc..77f0653e 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -174,7 +174,6 @@ int prepare_ucode_blob(struct gk20a *g)
 	dma_addr_t iova;
 	u32 status;
 	void *nonwpr_addr;
-	u64 nonwpr_pmu_va;
 	struct ls_flcn_mgr lsfm_l, *plsfm;
 	struct sg_table *sgt_nonwpr;
 	struct mm_gk20a *mm = &g->mm;
@@ -203,32 +202,12 @@ int prepare_ucode_blob(struct gk20a *g)
 			GFP_KERNEL);
 	if (nonwpr_addr == NULL)
 		return -ENOMEM;
-	status = gk20a_get_sgtable(d, &sgt_nonwpr,
-			nonwpr_addr,
-			iova,
-			plsfm->wpr_size);
-	if (status) {
-		gk20a_err(d, "failed allocate sg table for nonwpr\n");
-		status = -ENOMEM;
-		goto err_free_nonwpr_addr;
-	}
 
-	nonwpr_pmu_va = gk20a_gmmu_map(vm, &sgt_nonwpr,
-			plsfm->wpr_size,
-			0, /* flags */
-			gk20a_mem_flag_read_only);
-	if (!nonwpr_pmu_va) {
-		gk20a_err(d, "failed to map pmu ucode memory!!");
-		status = -ENOMEM;
-		goto err_free_nonwpr_sgt;
-	}
 	gm20b_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n",
 		plsfm->managed_flcn_cnt, plsfm->wpr_size);
 	lsfm_init_wpr_contents(g, plsfm, nonwpr_addr);
-	g->acr.ucode_blob_start = nonwpr_pmu_va;
+	g->acr.ucode_blob_start = NV_MC_SMMU_VADDR_TRANSLATE(iova);
 	g->acr.ucode_blob_size = plsfm->wpr_size;
-	gm20b_dbg_pmu("32 bit ucode_start %x, size %d\n",
-			(u32)nonwpr_pmu_va, plsfm->wpr_size);
 	gm20b_dbg_pmu("base reg carveout 2:%x\n",
 		readl(mc + MC_SECURITY_CARVEOUT2_BOM_0));
 	gm20b_dbg_pmu("base reg carveout 3:%x\n",
@@ -238,15 +217,6 @@ int prepare_ucode_blob(struct gk20a *g)
 	}
 	gm20b_dbg_pmu("prepare ucode blob return 0\n");
 	return 0;
-err_free_nonwpr_sgt:
-	gk20a_free_sgtable(&sgt_nonwpr);
-err_free_nonwpr_addr:
-	dma_free_coherent(d, plsfm->wpr_size,
-		nonwpr_addr, iova);
-	nonwpr_addr = NULL;
-	iova = 0;
-	gm20b_dbg_pmu("prepare ucode blob return %x\n", status);
-	return status;
 }
 
 u8 lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr *plsfm,
@@ -271,7 +241,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 	status = pmu_ucode_details(g, &ucode_img);
 	if (status == 0) {
 		if (ucode_img.lsf_desc != NULL) {
-			/* The falonId is formed by grabbing the static base
+			/* The falon_id is formed by grabbing the static base
 			 * falonId from the image and adding the
 			 * engine-designated falcon instance.*/
 			pmu->pmu_mode |= PMU_SECURE_MODE;
@@ -368,7 +338,7 @@ int pmu_populate_loader_cfg(struct gk20a *g,
 	   physical addresses of each respective segment.
 	*/
 	addr_base = lsfm->lsb_header.ucode_off;
-	addr_base += readl(mc + MC_SECURITY_CARVEOUT3_BOM_0);
+	addr_base += readl(mc + MC_SECURITY_CARVEOUT2_BOM_0);
 	gm20b_dbg_pmu("pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
 	/*From linux*/
 	addr_code = u64_lo32((addr_base +
@@ -391,7 +361,7 @@ int pmu_populate_loader_cfg(struct gk20a *g,
 	gm20b_dbg_pmu("addr_args %x\n", addr_args);
 
 	/* Populate the loader_config state*/
-	ldr_cfg->dma_idx = 2;
+	ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE;
 	ldr_cfg->code_dma_base = addr_code;
 	ldr_cfg->code_size_total = desc->app_size;
 	ldr_cfg->code_size_to_load = desc->app_resident_code_size;
@@ -441,7 +411,7 @@ int flcn_populate_bl_dmem_desc(struct gk20a *g,
 	   physical addresses of each respective segment.
 	*/
 	addr_base = lsfm->lsb_header.ucode_off;
-	addr_base += readl(mc + MC_SECURITY_CARVEOUT3_BOM_0);
+	addr_base += readl(mc + MC_SECURITY_CARVEOUT2_BOM_0);
 	gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
 			lsfm->wpr_header.falcon_id);
 	addr_code = u64_lo32((addr_base +
@@ -457,7 +427,7 @@ int flcn_populate_bl_dmem_desc(struct gk20a *g,
 
 	/* Populate the LOADER_CONFIG state */
 	memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc));
-	ldr_cfg->ctx_dma = 0;
+	ldr_cfg->ctx_dma = GK20A_PMU_DMAIDX_UCODE;
 	ldr_cfg->code_dma_base = addr_code;
 	ldr_cfg->non_sec_code_size = desc->app_resident_code_size;
 	ldr_cfg->data_dma_base = addr_data;
@@ -829,7 +799,8 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 	u64 *pacr_ucode_cpuva = NULL, pacr_ucode_pmu_va, *acr_dmem;
 	u32 img_size_in_bytes;
 	struct flcn_bl_dmem_desc bl_dmem_desc;
-	u32 status, start, size;
+	u32 status, size;
+	u64 start;
 	const struct firmware *acr_fw;
 	struct acr_gm20b *acr = &g->acr;
 	u32 *acr_ucode_header_t210_load;
@@ -891,12 +862,7 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 		start;
 	((struct flcn_acr_desc *)acr_dmem)->nonwpr_ucode_blob_size =
 		size;
-	((struct flcn_acr_desc *)acr_dmem)->wpr_region_id = 2;
 	((struct flcn_acr_desc *)acr_dmem)->regions.no_regions = 2;
-	((struct flcn_acr_desc *)acr_dmem)->regions.region_props[0].region_id
-		= 2;
-	((struct flcn_acr_desc *)acr_dmem)->regions.region_props[1].region_id
-		= 3;
 	((struct flcn_acr_desc *)acr_dmem)->wpr_offset = 0;
 
 	for (i = 0; i < (img_size_in_bytes/4); i++) {
@@ -915,7 +881,7 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 	bl_dmem_desc.signature[1] = 0;
 	bl_dmem_desc.signature[2] = 0;
 	bl_dmem_desc.signature[3] = 0;
-	bl_dmem_desc.ctx_dma = GK20A_PMU_DMAIDX_UCODE;
+	bl_dmem_desc.ctx_dma = GK20A_PMU_DMAIDX_VIRT;
 	bl_dmem_desc.code_dma_base =
 		(unsigned int)(((u64)pacr_ucode_pmu_va >> 8));
 	bl_dmem_desc.non_sec_code_off = acr_ucode_header_t210_load[0];
@@ -1064,7 +1030,8 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g, struct flcn_bl_dmem_desc *desc,
 
 	/* setup apertures - virtual */
 	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
-			pwr_fbif_transcfg_mem_type_virtual_f());
+			pwr_fbif_transcfg_mem_type_physical_f() |
+			pwr_fbif_transcfg_target_local_fb_f());
 	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT),
 			pwr_fbif_transcfg_mem_type_virtual_f());
 	/* setup apertures - physical */
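
Note on the g->acr.ucode_blob_start change in the first hunk: a minimal
sketch of the idea, assuming NV_MC_SMMU_VADDR_TRANSLATE() simply sets the
memory controller's SMMU translation-enable bit above the DMA address. The
bit position (34) and the helper below are illustrative assumptions, not
taken from this diff.

#include <linux/types.h>

/* Assumed definition for illustration only: tag an IOVA so the Tegra MC
 * routes it through the SMMU rather than treating it as a raw physical
 * address. The real macro lives in the nvgpu/Tegra headers. */
#define NV_MC_SMMU_VADDR_TRANSLATION_BIT	34
#define NV_MC_SMMU_VADDR_TRANSLATE(iova) \
	((u64)(iova) | (1ULL << NV_MC_SMMU_VADDR_TRANSLATION_BIT))

/* With the gk20a_get_sgtable()/gk20a_gmmu_map() path removed, the ACR
 * descriptor records the SMMU-translated IOVA of the DMA-coherent blob
 * instead of a PMU GMMU virtual address. */
static u64 acr_blob_start(dma_addr_t iova)
{
	return NV_MC_SMMU_VADDR_TRANSLATE(iova);
}

This reading is consistent with the last hunk, where the
GK20A_PMU_DMAIDX_UCODE aperture switches from a virtual target to a
physical local-FB target, while the HS bootloader's bl_dmem_desc.ctx_dma
moves to GK20A_PMU_DMAIDX_VIRT since its code_dma_base still comes from a
GMMU mapping (pacr_ucode_pmu_va).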