summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2017-03-15 08:09:35 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-03-30 15:36:15 -0400
commite239cad2d97c4337f4bbea5a4928044bae12d6db (patch)
treea1efd2182c4d963e35739643d0443d9129cd30cc /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent24e1c7e0a729158be36d63b821550d206a8a0436 (diff)
gpu: nvgpu: check return value of mutex_init in pmu_gk20a.c
- check return value of nvgpu_mutex_init in pmu_gk20a.c
- add corresponding nvgpu_mutex_destroy calls

Jira NVGPU-13

Change-Id: I646876d9c03be82b46db4733e3ecbd5135ab7798
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1321291
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.c59
1 files changed, 50 insertions, 9 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index eda4167b..3297d376 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -1420,17 +1420,31 @@ static void pg_cmd_eng_buf_load_set_dma_idx_v2(struct pmu_pg_cmd *pg,
1420 pg->eng_buf_load_v2.dma_desc.params |= (value << 24); 1420 pg->eng_buf_load_v2.dma_desc.params |= (value << 24);
1421} 1421}
1422 1422
1423
1424int gk20a_init_pmu(struct pmu_gk20a *pmu) 1423int gk20a_init_pmu(struct pmu_gk20a *pmu)
1425{ 1424{
1426 struct gk20a *g = gk20a_from_pmu(pmu); 1425 struct gk20a *g = gk20a_from_pmu(pmu);
1427 struct pmu_v *pv = &g->ops.pmu_ver; 1426 struct pmu_v *pv = &g->ops.pmu_ver;
1427 int err;
1428
1429 err = nvgpu_mutex_init(&pmu->elpg_mutex);
1430 if (err)
1431 return err;
1432
1433 err = nvgpu_mutex_init(&pmu->pg_mutex);
1434 if (err)
1435 goto fail_elpg;
1436
1437 err = nvgpu_mutex_init(&pmu->isr_mutex);
1438 if (err)
1439 goto fail_pg;
1428 1440
1429 nvgpu_mutex_init(&pmu->elpg_mutex); 1441 err = nvgpu_mutex_init(&pmu->pmu_copy_lock);
1430 nvgpu_mutex_init(&pmu->pg_mutex); 1442 if (err)
1431 nvgpu_mutex_init(&pmu->isr_mutex); 1443 goto fail_isr;
1432 nvgpu_mutex_init(&pmu->pmu_copy_lock); 1444
1433 nvgpu_mutex_init(&pmu->pmu_seq_lock); 1445 err = nvgpu_mutex_init(&pmu->pmu_seq_lock);
1446 if (err)
1447 goto fail_pmu_copy;
1434 1448
1435 pmu->remove_support = gk20a_remove_pmu_support; 1449 pmu->remove_support = gk20a_remove_pmu_support;
1436 1450
@@ -2172,13 +2186,25 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu)
2172 gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)), 2186 gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)),
2173 "PMU code version not supported version: %d\n", 2187 "PMU code version not supported version: %d\n",
2174 pmu->desc->app_version); 2188 pmu->desc->app_version);
2175 return -EINVAL; 2189 err = -EINVAL;
2176 break; 2190 goto fail_pmu_seq;
2177 } 2191 }
2178 pv->set_perfmon_cntr_index(pmu, 3); /* GR & CE2 */ 2192 pv->set_perfmon_cntr_index(pmu, 3); /* GR & CE2 */
2179 pv->set_perfmon_cntr_group_id(pmu, PMU_DOMAIN_GROUP_PSTATE); 2193 pv->set_perfmon_cntr_group_id(pmu, PMU_DOMAIN_GROUP_PSTATE);
2180 2194
2181 return 0; 2195 return 0;
2196
2197fail_pmu_seq:
2198 nvgpu_mutex_destroy(&pmu->pmu_seq_lock);
2199fail_pmu_copy:
2200 nvgpu_mutex_destroy(&pmu->pmu_copy_lock);
2201fail_isr:
2202 nvgpu_mutex_destroy(&pmu->isr_mutex);
2203fail_pg:
2204 nvgpu_mutex_destroy(&pmu->pg_mutex);
2205fail_elpg:
2206 nvgpu_mutex_destroy(&pmu->elpg_mutex);
2207 return err;
2182} 2208}
2183 2209
2184void pmu_copy_from_dmem(struct pmu_gk20a *pmu, 2210void pmu_copy_from_dmem(struct pmu_gk20a *pmu,
@@ -2626,10 +2652,15 @@ static int pmu_queue_init(struct pmu_gk20a *pmu,
2626{ 2652{
2627 struct gk20a *g = gk20a_from_pmu(pmu); 2653 struct gk20a *g = gk20a_from_pmu(pmu);
2628 struct pmu_queue *queue = &pmu->queue[id]; 2654 struct pmu_queue *queue = &pmu->queue[id];
2655 int err;
2656
2657 err = nvgpu_mutex_init(&queue->mutex);
2658 if (err)
2659 return err;
2660
2629 queue->id = id; 2661 queue->id = id;
2630 g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init); 2662 g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init);
2631 queue->mutex_id = id; 2663 queue->mutex_id = id;
2632 nvgpu_mutex_init(&queue->mutex);
2633 2664
2634 gk20a_dbg_pmu("queue %d: index %d, offset 0x%08x, size 0x%08x", 2665 gk20a_dbg_pmu("queue %d: index %d, offset 0x%08x, size 0x%08x",
2635 id, queue->index, queue->offset, queue->size); 2666 id, queue->index, queue->offset, queue->size);
@@ -3077,6 +3108,12 @@ void gk20a_remove_pmu_support(struct pmu_gk20a *pmu)
3077 nvgpu_alloc_destroy(&pmu->dmem); 3108 nvgpu_alloc_destroy(&pmu->dmem);
3078 3109
3079 release_firmware(pmu->fw); 3110 release_firmware(pmu->fw);
3111
3112 nvgpu_mutex_destroy(&pmu->elpg_mutex);
3113 nvgpu_mutex_destroy(&pmu->pg_mutex);
3114 nvgpu_mutex_destroy(&pmu->isr_mutex);
3115 nvgpu_mutex_destroy(&pmu->pmu_copy_lock);
3116 nvgpu_mutex_destroy(&pmu->pmu_seq_lock);
3080} 3117}
3081 3118
3082static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g) 3119static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g)
@@ -5172,6 +5209,7 @@ int gk20a_pmu_destroy(struct gk20a *g)
5172{ 5209{
5173 struct pmu_gk20a *pmu = &g->pmu; 5210 struct pmu_gk20a *pmu = &g->pmu;
5174 struct pmu_pg_stats_data pg_stat_data = { 0 }; 5211 struct pmu_pg_stats_data pg_stat_data = { 0 };
5212 int i;
5175 5213
5176 gk20a_dbg_fn(""); 5214 gk20a_dbg_fn("");
5177 5215
@@ -5196,6 +5234,9 @@ int gk20a_pmu_destroy(struct gk20a *g)
5196 pmu->isr_enabled = false; 5234 pmu->isr_enabled = false;
5197 nvgpu_mutex_release(&pmu->isr_mutex); 5235 nvgpu_mutex_release(&pmu->isr_mutex);
5198 5236
5237 for (i = 0; i < PMU_QUEUE_COUNT; i++)
5238 nvgpu_mutex_destroy(&pmu->queue[i].mutex);
5239
5199 pmu->pmu_state = PMU_STATE_OFF; 5240 pmu->pmu_state = PMU_STATE_OFF;
5200 pmu->pmu_ready = false; 5241 pmu->pmu_ready = false;
5201 pmu->perfmon_ready = false; 5242 pmu->perfmon_ready = false;