diff options
author | Mahantesh Kumbar <mkumbar@nvidia.com> | 2018-02-13 04:07:18 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-03-08 02:27:49 -0500 |
commit | cc4b9f540f66abc9f60cf9f8e2217ff17349bc77 (patch) | |
tree | 695dca926578d8b02fab2cbf9fb98d3d4733c39f /drivers/gpu/nvgpu/common/pmu | |
parent | 418f31cd91a5c3ca45f0920ed64205def49c8a80 (diff) |
gpu: nvgpu: PMU super surface support
- Added ops "pmu.alloc_super_surface" to create
memory space for pmu super surface
- Defined method nvgpu_pmu_sysmem_surface_alloc()
to allocate pmu super surface memory & assigned
to "pmu.alloc_super_surface" for gv100
- "pmu.alloc_super_surface" set to NULL for gp106
- Memory space of size "struct nv_pmu_super_surface"
is allocated during pmu sw init setup if
"pmu.alloc_super_surface" is not NULL &
freed if an error occurs.
- Added ops "pmu_ver.config_pmu_cmdline_args_super_surface"
to describe PMU super surface details to PMU ucode
as part of pmu command line args command if
"pmu.alloc_super_surface" is not NULL.
- Updated pmu_cmdline_args_v6 to include member
"struct flcn_mem_desc_v0 super_surface"
- Free allocated memory for PMU super surface in
nvgpu_remove_pmu_support() method
- Added "struct nvgpu_mem super_surface_buf" to "nvgpu_pmu" struct
- Created header file "gpmu_super_surf_if.h" to include interface
about the pmu super surface; added "struct nv_pmu_super_surface"
to hold super surface members, along with rsvd[x] dummy space
to keep member offsets in sync with the PMU super surface layout.
Change-Id: I2b28912bf4d86a8cc72884e3b023f21c73fb3503
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1656571
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu')
-rw-r--r-- | drivers/gpu/nvgpu/common/pmu/pmu.c | 30 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/pmu/pmu_fw.c | 14 |
2 files changed, 43 insertions, 1 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c index e96ea090..aaae138c 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu.c | |||
@@ -241,11 +241,19 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g) | |||
241 | 241 | ||
242 | pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE; | 242 | pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE; |
243 | 243 | ||
244 | if (g->ops.pmu.alloc_super_surface) { | ||
245 | err = g->ops.pmu.alloc_super_surface(g, | ||
246 | &pmu->super_surface_buf, | ||
247 | sizeof(struct nv_pmu_super_surface)); | ||
248 | if (err) | ||
249 | goto err_free_seq_buf; | ||
250 | } | ||
251 | |||
244 | err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE, | 252 | err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE, |
245 | &pmu->trace_buf); | 253 | &pmu->trace_buf); |
246 | if (err) { | 254 | if (err) { |
247 | nvgpu_err(g, "failed to allocate pmu trace buffer\n"); | 255 | nvgpu_err(g, "failed to allocate pmu trace buffer\n"); |
248 | goto err_free_seq_buf; | 256 | goto err_free_super_surface; |
249 | } | 257 | } |
250 | 258 | ||
251 | pmu->sw_ready = true; | 259 | pmu->sw_ready = true; |
@@ -253,6 +261,9 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g) | |||
253 | skip_init: | 261 | skip_init: |
254 | nvgpu_log_fn(g, "done"); | 262 | nvgpu_log_fn(g, "done"); |
255 | return 0; | 263 | return 0; |
264 | err_free_super_surface: | ||
265 | if (g->ops.pmu.alloc_super_surface) | ||
266 | nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf); | ||
256 | err_free_seq_buf: | 267 | err_free_seq_buf: |
257 | nvgpu_dma_unmap_free(vm, &pmu->seq_buf); | 268 | nvgpu_dma_unmap_free(vm, &pmu->seq_buf); |
258 | err_free_seq: | 269 | err_free_seq: |
@@ -560,6 +571,23 @@ int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, | |||
560 | return 0; | 571 | return 0; |
561 | } | 572 | } |
562 | 573 | ||
574 | int nvgpu_pmu_super_surface_alloc(struct gk20a *g, | ||
575 | struct nvgpu_mem *mem_surface, u32 size) | ||
576 | { | ||
577 | struct vm_gk20a *vm = g->mm.pmu.vm; | ||
578 | int err = 0; | ||
579 | |||
580 | nvgpu_log_fn(g, " "); | ||
581 | |||
582 | err = nvgpu_dma_alloc_map(vm, size, mem_surface); | ||
583 | if (err) { | ||
584 | nvgpu_err(g, "failed to allocate pmu super surface\n"); | ||
585 | err = -ENOMEM; | ||
586 | } | ||
587 | |||
588 | return err; | ||
589 | } | ||
590 | |||
563 | void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem) | 591 | void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem) |
564 | { | 592 | { |
565 | nvgpu_dma_free(g, mem); | 593 | nvgpu_dma_free(g, mem); |
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c index 549cfdc6..6b565abb 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c | |||
@@ -142,6 +142,16 @@ static void set_pmu_cmdline_args_falctracedmabase_v5(struct nvgpu_pmu *pmu) | |||
142 | nvgpu_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf); | 142 | nvgpu_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf); |
143 | } | 143 | } |
144 | 144 | ||
145 | static void config_pmu_cmdline_args_super_surface_v6(struct nvgpu_pmu *pmu) | ||
146 | { | ||
147 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
148 | |||
149 | if (g->ops.pmu.alloc_super_surface) { | ||
150 | nvgpu_pmu_surface_describe(g, &pmu->super_surface_buf, | ||
151 | &pmu->args_v6.super_surface); | ||
152 | } | ||
153 | } | ||
154 | |||
145 | static void set_pmu_cmdline_args_falctracedmaidx_v5( | 155 | static void set_pmu_cmdline_args_falctracedmaidx_v5( |
146 | struct nvgpu_pmu *pmu, u32 idx) | 156 | struct nvgpu_pmu *pmu, u32 idx) |
147 | { | 157 | { |
@@ -1250,6 +1260,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu) | |||
1250 | set_pmu_cmdline_args_falctracedmabase_v5; | 1260 | set_pmu_cmdline_args_falctracedmabase_v5; |
1251 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | 1261 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = |
1252 | set_pmu_cmdline_args_falctracedmaidx_v5; | 1262 | set_pmu_cmdline_args_falctracedmaidx_v5; |
1263 | g->ops.pmu_ver.config_pmu_cmdline_args_super_surface = | ||
1264 | config_pmu_cmdline_args_super_surface_v6; | ||
1253 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | 1265 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = |
1254 | get_pmu_cmdline_args_ptr_v5; | 1266 | get_pmu_cmdline_args_ptr_v5; |
1255 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | 1267 | g->ops.pmu_ver.get_pmu_allocation_struct_size = |
@@ -1587,6 +1599,8 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu) | |||
1587 | 1599 | ||
1588 | nvgpu_dma_unmap_free(vm, &pmu->seq_buf); | 1600 | nvgpu_dma_unmap_free(vm, &pmu->seq_buf); |
1589 | 1601 | ||
1602 | nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf); | ||
1603 | |||
1590 | nvgpu_mutex_destroy(&pmu->elpg_mutex); | 1604 | nvgpu_mutex_destroy(&pmu->elpg_mutex); |
1591 | nvgpu_mutex_destroy(&pmu->pg_mutex); | 1605 | nvgpu_mutex_destroy(&pmu->pg_mutex); |
1592 | nvgpu_mutex_destroy(&pmu->isr_mutex); | 1606 | nvgpu_mutex_destroy(&pmu->isr_mutex); |