summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/nvgpu/boardobj/boardobjgrp.c8
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu.c47
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu_ipc.c12
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.c49
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.h13
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/pmu.h15
6 files changed, 73 insertions, 71 deletions
diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c
index 51c23589..b3e1354d 100644
--- a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c
+++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c
@@ -172,7 +172,7 @@ u32 boardobjgrp_pmucmd_pmuinithandle_impl(struct gk20a *g,
172 if (pcmd->id == BOARDOBJGRP_GRP_CMD_ID_INVALID) 172 if (pcmd->id == BOARDOBJGRP_GRP_CMD_ID_INVALID)
173 goto boardobjgrp_pmucmd_pmuinithandle_exit; 173 goto boardobjgrp_pmucmd_pmuinithandle_exit;
174 174
175 gk20a_pmu_sysmem_surface_alloc(g, sysmem_desc, pcmd->fbsize); 175 nvgpu_pmu_sysmem_surface_alloc(g, sysmem_desc, pcmd->fbsize);
176 /* we only have got sysmem later this will get copied to vidmem 176 /* we only have got sysmem later this will get copied to vidmem
177 surface*/ 177 surface*/
178 pcmd->surf.vidmem_desc.size = 0; 178 pcmd->surf.vidmem_desc.size = 0;
@@ -401,7 +401,7 @@ u32 boardobjgrp_pmuset_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp)
401 * sysmem to vidmem 401 * sysmem to vidmem
402 */ 402 */
403 if (pcmd->surf.vidmem_desc.size == 0) { 403 if (pcmd->surf.vidmem_desc.size == 0) {
404 gk20a_pmu_vidmem_surface_alloc(g, &pcmd->surf.vidmem_desc, 404 nvgpu_pmu_vidmem_surface_alloc(g, &pcmd->surf.vidmem_desc,
405 pcmd->fbsize); 405 pcmd->fbsize);
406 } 406 }
407 nvgpu_mem_wr_n(g, &pcmd->surf.vidmem_desc, 0, pcmd->buf, pcmd->fbsize); 407 nvgpu_mem_wr_n(g, &pcmd->surf.vidmem_desc, 0, pcmd->buf, pcmd->fbsize);
@@ -466,7 +466,7 @@ boardobjgrp_pmugetstatus_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp,
466 * sysmem to vidmem 466 * sysmem to vidmem
467 */ 467 */
468 if (pcmd->surf.vidmem_desc.size == 0) { 468 if (pcmd->surf.vidmem_desc.size == 0) {
469 gk20a_pmu_vidmem_surface_alloc(g, &pcmd->surf.vidmem_desc, 469 nvgpu_pmu_vidmem_surface_alloc(g, &pcmd->surf.vidmem_desc,
470 pcmd->fbsize); 470 pcmd->fbsize);
471 } 471 }
472 472
@@ -726,7 +726,7 @@ static u32 boardobjgrp_pmucmdsend(struct gk20a *g,
726 /* 726 /*
727 * copy vidmem information to boardobj_cmd_grp 727 * copy vidmem information to boardobj_cmd_grp
728 */ 728 */
729 gk20a_pmu_surface_describe(g, &pcmd->surf.vidmem_desc, 729 nvgpu_pmu_surface_describe(g, &pcmd->surf.vidmem_desc,
730 &pgrpcmd->grp.fb); 730 &pgrpcmd->grp.fb);
731 731
732 /* 732 /*
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index 90db07b4..ca532049 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -423,3 +423,50 @@ int nvgpu_pmu_destroy(struct gk20a *g)
423 nvgpu_log_fn(g, "done"); 423 nvgpu_log_fn(g, "done");
424 return 0; 424 return 0;
425} 425}
426
427void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
428 struct flcn_mem_desc_v0 *fb)
429{
430 fb->address.lo = u64_lo32(mem->gpu_va);
431 fb->address.hi = u64_hi32(mem->gpu_va);
432 fb->params = ((u32)mem->size & 0xFFFFFF);
433 fb->params |= (GK20A_PMU_DMAIDX_VIRT << 24);
434}
435
436int nvgpu_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
437 u32 size)
438{
439 struct mm_gk20a *mm = &g->mm;
440 struct vm_gk20a *vm = mm->pmu.vm;
441 int err;
442
443 err = nvgpu_dma_alloc_map_vid(vm, size, mem);
444 if (err) {
445 nvgpu_err(g, "memory allocation failed");
446 return -ENOMEM;
447 }
448
449 return 0;
450}
451
452int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
453 u32 size)
454{
455 struct mm_gk20a *mm = &g->mm;
456 struct vm_gk20a *vm = mm->pmu.vm;
457 int err;
458
459 err = nvgpu_dma_alloc_map_sys(vm, size, mem);
460 if (err) {
461		nvgpu_err(g, "failed to allocate memory");
462 return -ENOMEM;
463 }
464
465 return 0;
466}
467
468void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
469{
470 nvgpu_dma_free(g, mem);
471 memset(mem, 0, sizeof(struct nvgpu_mem));
472}
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 74966f2d..e45a6182 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -552,9 +552,9 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
552 goto clean_up; 552 goto clean_up;
553 } 553 }
554 554
555 gk20a_pmu_vidmem_surface_alloc(g, seq->in_mem, 555 nvgpu_pmu_vidmem_surface_alloc(g, seq->in_mem,
556 payload->in.fb_size); 556 payload->in.fb_size);
557 gk20a_pmu_surface_describe(g, seq->in_mem, 557 nvgpu_pmu_surface_describe(g, seq->in_mem,
558 (struct flcn_mem_desc_v0 *) 558 (struct flcn_mem_desc_v0 *)
559 pv->pmu_allocation_get_fb_addr(pmu, in)); 559 pv->pmu_allocation_get_fb_addr(pmu, in));
560 560
@@ -595,9 +595,9 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
595 err = -ENOMEM; 595 err = -ENOMEM;
596 goto clean_up; 596 goto clean_up;
597 } 597 }
598 gk20a_pmu_vidmem_surface_alloc(g, seq->out_mem, 598 nvgpu_pmu_vidmem_surface_alloc(g, seq->out_mem,
599 payload->out.fb_size); 599 payload->out.fb_size);
600 gk20a_pmu_surface_describe(g, seq->out_mem, 600 nvgpu_pmu_surface_describe(g, seq->out_mem,
601 (struct flcn_mem_desc_v0 *) 601 (struct flcn_mem_desc_v0 *)
602 pv->pmu_allocation_get_fb_addr(pmu, 602 pv->pmu_allocation_get_fb_addr(pmu,
603 out)); 603 out));
@@ -699,7 +699,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
699 pv->pmu_allocation_get_fb_size(pmu, 699 pv->pmu_allocation_get_fb_size(pmu,
700 pv->get_pmu_seq_out_a_ptr(seq))); 700 pv->get_pmu_seq_out_a_ptr(seq)));
701 701
702 gk20a_pmu_surface_free(g, seq->out_mem); 702 nvgpu_pmu_surface_free(g, seq->out_mem);
703 if (seq->out_mem != seq->in_mem) 703 if (seq->out_mem != seq->in_mem)
704 nvgpu_kfree(g, seq->out_mem); 704 nvgpu_kfree(g, seq->out_mem);
705 else 705 else
@@ -712,7 +712,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
712 pv->pmu_allocation_get_fb_size(pmu, 712 pv->pmu_allocation_get_fb_size(pmu,
713 pv->get_pmu_seq_in_a_ptr(seq))); 713 pv->get_pmu_seq_in_a_ptr(seq)));
714 714
715 gk20a_pmu_surface_free(g, seq->in_mem); 715 nvgpu_pmu_surface_free(g, seq->in_mem);
716 nvgpu_kfree(g, seq->in_mem); 716 nvgpu_kfree(g, seq->in_mem);
717 seq->in_mem = NULL; 717 seq->in_mem = NULL;
718 } 718 }
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 03728378..a9457330 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -220,7 +220,7 @@ static void set_pmu_cmdline_args_falctracedmabase_v5(struct nvgpu_pmu *pmu)
220{ 220{
221 struct gk20a *g = gk20a_from_pmu(pmu); 221 struct gk20a *g = gk20a_from_pmu(pmu);
222 222
223 gk20a_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf); 223 nvgpu_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf);
224} 224}
225 225
226static void set_pmu_cmdline_args_falctracedmaidx_v5( 226static void set_pmu_cmdline_args_falctracedmaidx_v5(
@@ -3789,53 +3789,6 @@ void gk20a_pmu_isr(struct gk20a *g)
3789 nvgpu_mutex_release(&pmu->isr_mutex); 3789 nvgpu_mutex_release(&pmu->isr_mutex);
3790} 3790}
3791 3791
3792void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
3793 struct flcn_mem_desc_v0 *fb)
3794{
3795 fb->address.lo = u64_lo32(mem->gpu_va);
3796 fb->address.hi = u64_hi32(mem->gpu_va);
3797 fb->params = ((u32)mem->size & 0xFFFFFF);
3798 fb->params |= (GK20A_PMU_DMAIDX_VIRT << 24);
3799}
3800
3801int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
3802 u32 size)
3803{
3804 struct mm_gk20a *mm = &g->mm;
3805 struct vm_gk20a *vm = mm->pmu.vm;
3806 int err;
3807
3808 err = nvgpu_dma_alloc_map_vid(vm, size, mem);
3809 if (err) {
3810 nvgpu_err(g, "memory allocation failed");
3811 return -ENOMEM;
3812 }
3813
3814 return 0;
3815}
3816
3817int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
3818 u32 size)
3819{
3820 struct mm_gk20a *mm = &g->mm;
3821 struct vm_gk20a *vm = mm->pmu.vm;
3822 int err;
3823
3824 err = nvgpu_dma_alloc_map_sys(vm, size, mem);
3825 if (err) {
3826 nvgpu_err(g, "failed to allocate memory");
3827 return -ENOMEM;
3828 }
3829
3830 return 0;
3831}
3832
3833void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
3834{
3835 nvgpu_dma_free(g, mem);
3836 memset(mem, 0, sizeof(struct nvgpu_mem));
3837}
3838
3839int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg) 3792int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
3840{ 3793{
3841 u32 status = 0; 3794 u32 status = 0;
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index 1c29b380..1d2e20e6 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -57,12 +57,6 @@ struct nvgpu_firmware;
57#define PMU_PGENG_GR_BUFFER_IDX_ZBC (1) 57#define PMU_PGENG_GR_BUFFER_IDX_ZBC (1)
58#define PMU_PGENG_GR_BUFFER_IDX_FECS (2) 58#define PMU_PGENG_GR_BUFFER_IDX_FECS (2)
59 59
60struct pmu_surface {
61 struct nvgpu_mem vidmem_desc;
62 struct nvgpu_mem sysmem_desc;
63 struct flcn_mem_desc_v0 params;
64};
65
66#define PMU_PG_IDLE_THRESHOLD_SIM 1000 60#define PMU_PG_IDLE_THRESHOLD_SIM 1000
67#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM 4000000 61#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM 4000000
68/* TBD: QT or else ? */ 62/* TBD: QT or else ? */
@@ -154,13 +148,6 @@ int gk20a_pmu_reset(struct gk20a *g);
154int pmu_idle(struct nvgpu_pmu *pmu); 148int pmu_idle(struct nvgpu_pmu *pmu);
155int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable); 149int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable);
156 150
157void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem);
158void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
159 struct flcn_mem_desc_v0 *fb);
160int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
161 u32 size);
162int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
163 u32 size);
164bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos); 151bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos);
165 152
166int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu); 153int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
index a4d1e0ee..169d8b98 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
@@ -335,6 +335,12 @@ struct nvgpu_pmu {
335 struct nvgpu_firmware *fw; 335 struct nvgpu_firmware *fw;
336}; 336};
337 337
338struct pmu_surface {
339 struct nvgpu_mem vidmem_desc;
340 struct nvgpu_mem sysmem_desc;
341 struct flcn_mem_desc_v0 params;
342};
343
338/*PG defines used by nvpgu-pmu*/ 344/*PG defines used by nvpgu-pmu*/
339struct pmu_pg_stats_data { 345struct pmu_pg_stats_data {
340 u32 gating_cnt; 346 u32 gating_cnt;
@@ -386,4 +392,13 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g);
386int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id, 392int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
387 struct pmu_pg_stats_data *pg_stat_data); 393 struct pmu_pg_stats_data *pg_stat_data);
388 394
395/* NVGPU-PMU MEM alloc */
396void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem);
397void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
398 struct flcn_mem_desc_v0 *fb);
399int nvgpu_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
400 u32 size);
401int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
402 u32 size);
403
389#endif /* __NVGPU_PMU_H__ */ 404#endif /* __NVGPU_PMU_H__ */