Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 47 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index ba47d235..a238c523 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -284,9 +284,17 @@ rel_sig:
 
 int prepare_ucode_blob(struct gk20a *g)
 {
+
 	int err;
 	struct ls_flcn_mgr lsfm_l, *plsfm;
 	struct pmu_gk20a *pmu = &g->pmu;
+	phys_addr_t wpr_addr;
+	u32 wprsize;
+	struct mm_gk20a *mm = &g->mm;
+	struct vm_gk20a *vm = &mm->pmu.vm;
+	struct mc_carveout_info inf;
+	struct sg_table *sgt;
+	struct page *page;
 
 	if (g->acr.ucode_blob.cpu_va) {
 		/*Recovery case, we do not need to form
@@ -304,22 +312,46 @@ int prepare_ucode_blob(struct gk20a *g)
 	gm20b_mm_mmu_vpr_info_fetch(g);
 	gr_gk20a_init_ctxsw_ucode(g);
 
+	mc_get_carveout_info(&inf, NULL, MC_SECURITY_CARVEOUT2);
+	gm20b_dbg_pmu("wpr carveout base:%llx\n", inf.base);
+	wpr_addr = (phys_addr_t)inf.base;
+	gm20b_dbg_pmu("wpr carveout size :%llx\n", inf.size);
+	wprsize = (u32)inf.size;
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		gk20a_err(dev_from_gk20a(g), "failed to allocate memory\n");
+		return -ENOMEM;
+	}
+	err = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g), "failed to allocate sg_table\n");
+		goto free_sgt;
+	}
+	page = phys_to_page(wpr_addr);
+	sg_set_page(sgt->sgl, page, wprsize, 0);
+	/* This bypasses SMMU for WPR during gmmu_map. */
+	sg_dma_address(sgt->sgl) = 0;
+
+	g->pmu.wpr_buf.gpu_va = gk20a_gmmu_map(vm, &sgt, wprsize,
+					0, gk20a_mem_flag_none);
+	gm20b_dbg_pmu("wpr mapped gpu va :%llx\n", g->pmu.wpr_buf.gpu_va);
+
 	/* Discover all managed falcons*/
 	err = lsfm_discover_ucode_images(g, plsfm);
 	gm20b_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
 	if (err)
-		return err;
+		goto free_sgt;
 
 	if (plsfm->managed_flcn_cnt && !g->acr.ucode_blob.cpu_va) {
 		/* Generate WPR requirements*/
 		err = lsf_gen_wpr_requirements(g, plsfm);
 		if (err)
-			return err;
+			goto free_sgt;
 
 		/*Alloc memory to hold ucode blob contents*/
 		err = gk20a_gmmu_alloc(g, plsfm->wpr_size, &g->acr.ucode_blob);
 		if (err)
-			return err;
+			goto free_sgt;
 
 		gm20b_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
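Note: the hunk above wraps the physically contiguous WPR carveout (MC_SECURITY_CARVEOUT2) in a one-entry sg_table so it can be handed to the GMMU mapper, and forces sg_dma_address() to 0 so gk20a_gmmu_map() walks the raw physical pages instead of an SMMU-translated address. Below is a minimal sketch of the same pattern factored into a helper: map_carveout_for_gmmu() is a hypothetical name, the sg_*/kzalloc calls are standard Linux kernel APIs, gk20a_gmmu_map() and gk20a_mem_flag_none are the driver symbols visible in the diff, and the sg_free_table() call is this sketch's addition (the patch itself only kfree()s the wrapper).

#include <linux/scatterlist.h>
#include <linux/slab.h>
/* struct vm_gk20a, gk20a_gmmu_map() and gk20a_mem_flag_none come from the
 * driver's own headers (e.g. gk20a/mm_gk20a.h). */

static u64 map_carveout_for_gmmu(struct vm_gk20a *vm, phys_addr_t base,
				 u32 size)
{
	struct sg_table *sgt;
	u64 gpu_va = 0;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return 0;
	if (sg_alloc_table(sgt, 1, GFP_KERNEL))
		goto free_sgt;

	/* One entry covering the whole physically contiguous carveout. */
	sg_set_page(sgt->sgl, phys_to_page(base), size, 0);
	/* A zero dma_address makes the GMMU mapper use the raw physical
	 * pages, i.e. the mapping bypasses the SMMU. */
	sg_dma_address(sgt->sgl) = 0;

	gpu_va = gk20a_gmmu_map(vm, &sgt, size, 0, gk20a_mem_flag_none);

	/* The GMMU page tables are populated by the map call, so the
	 * scatterlist wrapper is no longer needed (sketch addition). */
	sg_free_table(sgt);
free_sgt:
	kfree(sgt);
	return gpu_va;
}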
@@ -329,7 +361,9 @@ int prepare_ucode_blob(struct gk20a *g)
 	}
 	gm20b_dbg_pmu("prepare ucode blob return 0\n");
 	free_acr_resources(g, plsfm);
-	return 0;
+free_sgt:
+	kfree(sgt);
+	return err;
 }
 
 static u8 lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr *plsfm,
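Note: prepare_ucode_blob() now routes every exit, success included, through the free_sgt label, so the temporary sg_table wrapper is released on all paths while the GMMU mapping it produced stays live. A compact sketch of that single-exit cleanup idiom, with hypothetical stand-in helpers:

#include <linux/slab.h>

static int do_step_one(void) { return 0; }	/* hypothetical */
static int do_step_two(void) { return 0; }	/* hypothetical */

static int single_exit_example(void)
{
	void *tmp;
	int err;

	tmp = kzalloc(16, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;		/* nothing to unwind yet */

	err = do_step_one();
	if (err)
		goto cleanup;
	err = do_step_two();		/* err stays 0 on success */

cleanup:
	kfree(tmp);			/* runs on success and failure alike */
	return err;
}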
@@ -495,7 +529,8 @@ static int pmu_populate_loader_cfg(struct gk20a *g,
 
 static int flcn_populate_bl_dmem_desc(struct gk20a *g,
 	struct lsfm_managed_ucode_img *lsfm,
-	union flcn_bl_generic_desc *p_bl_gen_desc, u32 *p_bl_gen_desc_size)
+	union flcn_bl_generic_desc *p_bl_gen_desc, u32 *p_bl_gen_desc_size,
+	u32 falconid)
 {
 	struct mc_carveout_info inf;
 	struct flcn_ucode_img *p_img = &(lsfm->ucode_img);
@@ -520,7 +555,10 @@ static int flcn_populate_bl_dmem_desc(struct gk20a *g,
 	 */
 	addr_base = lsfm->lsb_header.ucode_off;
 	mc_get_carveout_info(&inf, NULL, MC_SECURITY_CARVEOUT2);
-	addr_base += inf.base;
+	if (falconid == LSF_FALCON_ID_GPCCS)
+		addr_base += g->pmu.wpr_buf.gpu_va;
+	else
+		addr_base += inf.base;
 	gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
 		lsfm->wpr_header.falcon_id);
 	addr_code = u64_lo32((addr_base +
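Note: threading falconid into flcn_populate_bl_dmem_desc() lets the bootloader descriptor choose its DMA base per falcon. For GPCCS the base is now g->pmu.wpr_buf.gpu_va, the SMMU-bypassed GPU virtual address that prepare_ucode_blob() obtained for the WPR region, while every other LS falcon keeps adding the physical carveout base from MC_SECURITY_CARVEOUT2.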
@@ -555,7 +593,8 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
 		gm20b_dbg_pmu("non pmu. write flcn bl gen desc\n");
 		flcn_populate_bl_dmem_desc(g, pnode, &pnode->bl_gen_desc,
-			&pnode->bl_gen_desc_size);
+			&pnode->bl_gen_desc_size,
+			pnode->wpr_header.falcon_id);
 		return 0;
 	}
 
@@ -797,7 +836,7 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 	}
 	if (falcon_id == LSF_FALCON_ID_GPCCS) {
 		pnode->lsb_header.flags |=
-			NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE;
+			NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_FALSE;
 	}
 }
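Note: the flag flip is the companion to the new mapping. Since GPCCS ucode can now be fetched by DMA through the SMMU-bypassed WPR mapping, the ACR no longer needs to force a priv-interface load for it; assuming the usual nvgpu acr.h convention that the _FALSE flag variants are 0, OR-ing NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_FALSE leaves lsb_header.flags unchanged, so GPCCS is treated like the other DMA-loaded LS falcons.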