diff options
author | Mahantesh Kumbar <mkumbar@nvidia.com> | 2017-10-03 07:51:16 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-10-03 16:45:28 -0400 |
commit | bc4182afeb7461e5b211da9a26a796c40395bbfd (patch) | |
tree | f3b66186508dff5a0d02a7485a1c246efc6e581c /drivers | |
parent | 5f16bb575c2a5aff94c366cde32832c58c421c09 (diff) |
gpu: nvgpu: remove GR falcons bootstrap support using VA
- GR falcon bootstrap can be performed using either a physical or a
virtual address, selected via the usevamask flag in the PMU interface
PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS command.
- With this change, the physical address is always used; virtual
address support is removed along with its associated code.
- Removed Linux-specific code used to obtain WPR VA information.
JIRA NVGPU-128
Change-Id: Id58f3ddc4418d61126f2a4eacb50713d278c10a0
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1572468
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 33 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp106/acr_gp106.c | 6 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/pmu_gp10b.c | 6 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/pmu.h | 1 |
4 files changed, 6 insertions, 40 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c index b6941ada..7029b477 100644 --- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c | |||
@@ -350,13 +350,7 @@ int prepare_ucode_blob(struct gk20a *g) | |||
350 | int err; | 350 | int err; |
351 | struct ls_flcn_mgr lsfm_l, *plsfm; | 351 | struct ls_flcn_mgr lsfm_l, *plsfm; |
352 | struct nvgpu_pmu *pmu = &g->pmu; | 352 | struct nvgpu_pmu *pmu = &g->pmu; |
353 | phys_addr_t wpr_addr, wpr_page; | ||
354 | u32 wprsize; | ||
355 | int i; | ||
356 | struct mm_gk20a *mm = &g->mm; | ||
357 | struct vm_gk20a *vm = mm->pmu.vm; | ||
358 | struct wpr_carveout_info wpr_inf; | 353 | struct wpr_carveout_info wpr_inf; |
359 | struct page **pages; | ||
360 | 354 | ||
361 | if (g->acr.ucode_blob.cpu_va) { | 355 | if (g->acr.ucode_blob.cpu_va) { |
362 | /*Recovery case, we do not need to form | 356 | /*Recovery case, we do not need to form |
@@ -375,26 +369,8 @@ int prepare_ucode_blob(struct gk20a *g) | |||
375 | gr_gk20a_init_ctxsw_ucode(g); | 369 | gr_gk20a_init_ctxsw_ucode(g); |
376 | 370 | ||
377 | g->ops.pmu.get_wpr(g, &wpr_inf); | 371 | g->ops.pmu.get_wpr(g, &wpr_inf); |
378 | wpr_addr = (phys_addr_t)wpr_inf.wpr_base; | ||
379 | wprsize = (u32)wpr_inf.size; | ||
380 | gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base); | 372 | gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base); |
381 | gm20b_dbg_pmu("wpr carveout size :%x\n", wprsize); | 373 | gm20b_dbg_pmu("wpr carveout size :%llx\n", wpr_inf.size); |
382 | |||
383 | pages = nvgpu_kmalloc(g, sizeof(struct page *) * (wprsize / PAGE_SIZE)); | ||
384 | if (!pages) | ||
385 | return -ENOMEM; | ||
386 | |||
387 | wpr_page = wpr_addr; | ||
388 | for (i = 0; wpr_page < (wpr_addr + wprsize); i++, wpr_page += PAGE_SIZE) | ||
389 | pages[i] = phys_to_page(wpr_page); | ||
390 | __nvgpu_mem_create_from_pages(g, &g->pmu.wpr_buf, pages, | ||
391 | wprsize / PAGE_SIZE); | ||
392 | nvgpu_kfree(g, pages); | ||
393 | |||
394 | g->pmu.wpr_buf.gpu_va = nvgpu_gmmu_map(vm, &g->pmu.wpr_buf, | ||
395 | wprsize, 0, gk20a_mem_flag_none, | ||
396 | false, APERTURE_SYSMEM); | ||
397 | gm20b_dbg_pmu("wpr mapped gpu va :%llx\n", g->pmu.wpr_buf.gpu_va); | ||
398 | 374 | ||
399 | /* Discover all managed falcons*/ | 375 | /* Discover all managed falcons*/ |
400 | err = lsfm_discover_ucode_images(g, plsfm); | 376 | err = lsfm_discover_ucode_images(g, plsfm); |
@@ -423,7 +399,6 @@ int prepare_ucode_blob(struct gk20a *g) | |||
423 | gm20b_dbg_pmu("prepare ucode blob return 0\n"); | 399 | gm20b_dbg_pmu("prepare ucode blob return 0\n"); |
424 | free_acr_resources(g, plsfm); | 400 | free_acr_resources(g, plsfm); |
425 | free_sgt: | 401 | free_sgt: |
426 | nvgpu_gmmu_unmap(vm, &g->pmu.wpr_buf, g->pmu.wpr_buf.gpu_va); | ||
427 | return err; | 402 | return err; |
428 | } | 403 | } |
429 | 404 | ||
@@ -618,10 +593,8 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g, | |||
618 | */ | 593 | */ |
619 | addr_base = p_lsfm->lsb_header.ucode_off; | 594 | addr_base = p_lsfm->lsb_header.ucode_off; |
620 | g->ops.pmu.get_wpr(g, &wpr_inf); | 595 | g->ops.pmu.get_wpr(g, &wpr_inf); |
621 | if (falconid == LSF_FALCON_ID_GPCCS) | 596 | addr_base += wpr_inf.wpr_base; |
622 | addr_base += g->pmu.wpr_buf.gpu_va; | 597 | |
623 | else | ||
624 | addr_base += wpr_inf.wpr_base; | ||
625 | gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base, | 598 | gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base, |
626 | p_lsfm->wpr_header.falcon_id); | 599 | p_lsfm->wpr_header.falcon_id); |
627 | addr_code = u64_lo32((addr_base + | 600 | addr_code = u64_lo32((addr_base + |
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c index be4d8bbe..c16c058e 100644 --- a/drivers/gpu/nvgpu/gp106/acr_gp106.c +++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c | |||
@@ -623,11 +623,7 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g, | |||
623 | */ | 623 | */ |
624 | addr_base = p_lsfm->lsb_header.ucode_off; | 624 | addr_base = p_lsfm->lsb_header.ucode_off; |
625 | g->ops.pmu.get_wpr(g, &wpr_inf); | 625 | g->ops.pmu.get_wpr(g, &wpr_inf); |
626 | if (falconid == LSF_FALCON_ID_GPCCS && | 626 | addr_base += wpr_inf.wpr_base; |
627 | g->pmu.wpr_buf.aperture == APERTURE_SYSMEM) | ||
628 | addr_base += g->pmu.wpr_buf.gpu_va; | ||
629 | else | ||
630 | addr_base += wpr_inf.wpr_base; | ||
631 | 627 | ||
632 | gp106_dbg_pmu("falcon ID %x", p_lsfm->wpr_header.falcon_id); | 628 | gp106_dbg_pmu("falcon ID %x", p_lsfm->wpr_header.falcon_id); |
633 | gp106_dbg_pmu("gen loader cfg addrbase %llx ", addr_base); | 629 | gp106_dbg_pmu("gen loader cfg addrbase %llx ", addr_base); |
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c index 168aadb0..147cd020 100644 --- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c | |||
@@ -170,10 +170,8 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, | |||
170 | cmd.cmd.acr.boot_falcons.falconidmask = | 170 | cmd.cmd.acr.boot_falcons.falconidmask = |
171 | falconidmask; | 171 | falconidmask; |
172 | cmd.cmd.acr.boot_falcons.usevamask = 0; | 172 | cmd.cmd.acr.boot_falcons.usevamask = 0; |
173 | cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = | 173 | cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0; |
174 | u64_lo32(g->pmu.wpr_buf.gpu_va); | 174 | cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0; |
175 | cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = | ||
176 | u64_hi32(g->pmu.wpr_buf.gpu_va); | ||
177 | gp10b_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", | 175 | gp10b_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", |
178 | falconidmask); | 176 | falconidmask); |
179 | nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, | 177 | nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h index f96e75c9..045bf34c 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h | |||
@@ -287,7 +287,6 @@ struct nvgpu_pmu { | |||
287 | /* TBD: remove this if ZBC seq is fixed */ | 287 | /* TBD: remove this if ZBC seq is fixed */ |
288 | struct nvgpu_mem seq_buf; | 288 | struct nvgpu_mem seq_buf; |
289 | struct nvgpu_mem trace_buf; | 289 | struct nvgpu_mem trace_buf; |
290 | struct nvgpu_mem wpr_buf; | ||
291 | bool buf_loaded; | 290 | bool buf_loaded; |
292 | 291 | ||
293 | struct pmu_sha1_gid gid_info; | 292 | struct pmu_sha1_gid gid_info; |