diff options
author | Mahantesh Kumbar <mkumbar@nvidia.com> | 2017-05-12 01:54:31 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-06-13 05:40:15 -0400 |
commit | 8c66aef3bdbfbbeb1d3c3ef3bd6b1bee3ac05411 (patch) | |
tree | 80b8135576c2419887dc18d588c2efd493600ab2 /drivers/gpu/nvgpu/gk20a | |
parent | 69dee6a648ad434b75e1a9c64b022ee45d3ff87b (diff) |
gpu: nvgpu: reorganize PMU FB alloc/free
Moved PMU FB access related code from pmu_gk20a.c to
"drivers/gpu/nvgpu/common/pmu/pmu.c" file
- Prepended with nvgpu_ for global functions & replaced
wherever used.
JIRA NVGPU-56
JIRA NVGPU-94
Change-Id: I42bfd9d216e6b35672a9738f01302d954b32b69e
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1480551
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 49 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | 13 |
2 files changed, 1 insertion, 61 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index 03728378..a9457330 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | |||
@@ -220,7 +220,7 @@ static void set_pmu_cmdline_args_falctracedmabase_v5(struct nvgpu_pmu *pmu) | |||
220 | { | 220 | { |
221 | struct gk20a *g = gk20a_from_pmu(pmu); | 221 | struct gk20a *g = gk20a_from_pmu(pmu); |
222 | 222 | ||
223 | gk20a_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf); | 223 | nvgpu_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf); |
224 | } | 224 | } |
225 | 225 | ||
226 | static void set_pmu_cmdline_args_falctracedmaidx_v5( | 226 | static void set_pmu_cmdline_args_falctracedmaidx_v5( |
@@ -3789,53 +3789,6 @@ void gk20a_pmu_isr(struct gk20a *g) | |||
3789 | nvgpu_mutex_release(&pmu->isr_mutex); | 3789 | nvgpu_mutex_release(&pmu->isr_mutex); |
3790 | } | 3790 | } |
3791 | 3791 | ||
3792 | void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem, | ||
3793 | struct flcn_mem_desc_v0 *fb) | ||
3794 | { | ||
3795 | fb->address.lo = u64_lo32(mem->gpu_va); | ||
3796 | fb->address.hi = u64_hi32(mem->gpu_va); | ||
3797 | fb->params = ((u32)mem->size & 0xFFFFFF); | ||
3798 | fb->params |= (GK20A_PMU_DMAIDX_VIRT << 24); | ||
3799 | } | ||
3800 | |||
3801 | int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, | ||
3802 | u32 size) | ||
3803 | { | ||
3804 | struct mm_gk20a *mm = &g->mm; | ||
3805 | struct vm_gk20a *vm = mm->pmu.vm; | ||
3806 | int err; | ||
3807 | |||
3808 | err = nvgpu_dma_alloc_map_vid(vm, size, mem); | ||
3809 | if (err) { | ||
3810 | nvgpu_err(g, "memory allocation failed"); | ||
3811 | return -ENOMEM; | ||
3812 | } | ||
3813 | |||
3814 | return 0; | ||
3815 | } | ||
3816 | |||
3817 | int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, | ||
3818 | u32 size) | ||
3819 | { | ||
3820 | struct mm_gk20a *mm = &g->mm; | ||
3821 | struct vm_gk20a *vm = mm->pmu.vm; | ||
3822 | int err; | ||
3823 | |||
3824 | err = nvgpu_dma_alloc_map_sys(vm, size, mem); | ||
3825 | if (err) { | ||
3826 | nvgpu_err(g, "failed to allocate memory"); | ||
3827 | return -ENOMEM; | ||
3828 | } | ||
3829 | |||
3830 | return 0; | ||
3831 | } | ||
3832 | |||
3833 | void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem) | ||
3834 | { | ||
3835 | nvgpu_dma_free(g, mem); | ||
3836 | memset(mem, 0, sizeof(struct nvgpu_mem)); | ||
3837 | } | ||
3838 | |||
3839 | int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg) | 3792 | int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg) |
3840 | { | 3793 | { |
3841 | u32 status = 0; | 3794 | u32 status = 0; |
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h index 1c29b380..1d2e20e6 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | |||
@@ -57,12 +57,6 @@ struct nvgpu_firmware; | |||
57 | #define PMU_PGENG_GR_BUFFER_IDX_ZBC (1) | 57 | #define PMU_PGENG_GR_BUFFER_IDX_ZBC (1) |
58 | #define PMU_PGENG_GR_BUFFER_IDX_FECS (2) | 58 | #define PMU_PGENG_GR_BUFFER_IDX_FECS (2) |
59 | 59 | ||
60 | struct pmu_surface { | ||
61 | struct nvgpu_mem vidmem_desc; | ||
62 | struct nvgpu_mem sysmem_desc; | ||
63 | struct flcn_mem_desc_v0 params; | ||
64 | }; | ||
65 | |||
66 | #define PMU_PG_IDLE_THRESHOLD_SIM 1000 | 60 | #define PMU_PG_IDLE_THRESHOLD_SIM 1000 |
67 | #define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM 4000000 | 61 | #define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM 4000000 |
68 | /* TBD: QT or else ? */ | 62 | /* TBD: QT or else ? */ |
@@ -154,13 +148,6 @@ int gk20a_pmu_reset(struct gk20a *g); | |||
154 | int pmu_idle(struct nvgpu_pmu *pmu); | 148 | int pmu_idle(struct nvgpu_pmu *pmu); |
155 | int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable); | 149 | int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable); |
156 | 150 | ||
157 | void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem); | ||
158 | void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem, | ||
159 | struct flcn_mem_desc_v0 *fb); | ||
160 | int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, | ||
161 | u32 size); | ||
162 | int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, | ||
163 | u32 size); | ||
164 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos); | 151 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos); |
165 | 152 | ||
166 | int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu); | 153 | int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu); |