Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 28 ++++++++--------------------
1 file changed, 8 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 8622f7b4..6679d905 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -22,6 +22,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/nvgpu_common.h>
 #include <nvgpu/kmem.h>
+#include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/acr/nvgpu_acr.h>
 #include <nvgpu/firmware.h>
 
@@ -386,7 +387,6 @@ int prepare_ucode_blob(struct gk20a *g)
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	struct wpr_carveout_info wpr_inf;
-	struct sg_table *sgt;
 	struct page *page;
 
 	if (g->acr.ucode_blob.cpu_va) {
@@ -411,24 +411,11 @@ int prepare_ucode_blob(struct gk20a *g)
 	gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base);
 	gm20b_dbg_pmu("wpr carveout size :%x\n", wprsize);
 
-	sgt = nvgpu_kzalloc(g, sizeof(*sgt));
-	if (!sgt) {
-		nvgpu_err(g, "failed to allocate memory");
-		return -ENOMEM;
-	}
-	err = sg_alloc_table(sgt, 1, GFP_KERNEL);
-	if (err) {
-		nvgpu_err(g, "failed to allocate sg_table");
-		goto free_sgt;
-	}
 	page = phys_to_page(wpr_addr);
-	sg_set_page(sgt->sgl, page, wprsize, 0);
-	/* This bypasses SMMU for WPR during gmmu_map. */
-	sg_dma_address(sgt->sgl) = 0;
-
-	g->pmu.wpr_buf.gpu_va = gk20a_gmmu_map(vm, &sgt, wprsize,
-			0, gk20a_mem_flag_none, false,
-			APERTURE_SYSMEM);
+	__nvgpu_mem_create_from_pages(g, &g->pmu.wpr_buf, &page, 1);
+	g->pmu.wpr_buf.gpu_va = gk20a_gmmu_map(vm, &g->pmu.wpr_buf.priv.sgt,
+			wprsize, 0, gk20a_mem_flag_none,
+			false, APERTURE_SYSMEM);
 	gm20b_dbg_pmu("wpr mapped gpu va :%llx\n", g->pmu.wpr_buf.gpu_va);
 
 	/* Discover all managed falcons*/
@@ -457,8 +444,9 @@ int prepare_ucode_blob(struct gk20a *g)
 	}
 	gm20b_dbg_pmu("prepare ucode blob return 0\n");
 	free_acr_resources(g, plsfm);
 free_sgt:
-	nvgpu_free_sgtable(g, &sgt);
+	gk20a_gmmu_unmap(vm, g->pmu.wpr_buf.gpu_va,
+			g->pmu.wpr_buf.size, gk20a_mem_flag_none);
 	return err;
 }
 
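For context on what the patch deletes: the old prepare_ucode_blob() open-coded a one-entry scatter-gather table covering the physically contiguous WPR carveout and zeroed its DMA address so the GMMU map would bypass the SMMU. Condensed into a standalone helper, the removed path looked roughly like the sketch below. This is a sketch only: wpr_sgt_create() is a hypothetical name, the nvgpu wrappers (nvgpu_kzalloc, nvgpu_err) are replaced with plain kernel equivalents, and phys_to_page() is the arch-provided helper the original code already relied on.

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mm.h>

/*
 * Sketch of the single-entry sg_table setup removed by this patch.
 * WPR is one physically contiguous carveout, so a single scatterlist
 * entry covers the whole region.
 */
static struct sg_table *wpr_sgt_create(phys_addr_t wpr_addr, u32 wprsize)
{
	struct sg_table *sgt;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	/* One entry is enough for a contiguous region. */
	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
		kfree(sgt);
		return NULL;
	}

	sg_set_page(sgt->sgl, phys_to_page(wpr_addr), wprsize, 0);

	/*
	 * A zero DMA address makes the nvgpu GMMU map path fall back to
	 * the page's physical address, i.e. it bypasses the SMMU for WPR.
	 */
	sg_dma_address(sgt->sgl) = 0;

	return sgt;
}

The replacement collapses all of this into __nvgpu_mem_create_from_pages(), which builds the nvgpu_mem and its backing sg_table in one call; gk20a_gmmu_map() then takes the table via g->pmu.wpr_buf.priv.sgt, and the cleanup path under free_sgt: unmaps the buffer instead of freeing a locally owned table.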