From 002d6f147490cd5b3002ce5a564f4276df784ea6 Mon Sep 17 00:00:00 2001
From: Mahantesh Kumbar
Date: Wed, 10 May 2017 17:18:01 +0530
Subject: gpu: nvgpu: PMU IPC reorg support update

- prepend PMU IPC func with nvgpu_ by replacing gk20a_
- updated gv11b HAL methods of queue & mutex to point to
  gk20a HAL methods.

JIRA NVGPU-56

Change-Id: Iade9f5613dbd4bc11515e822ddfda3a1787bfa4f
Signed-off-by: Mahantesh Kumbar
Reviewed-on: http://git-master/r/1479117
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'drivers/gpu/nvgpu/gv11b/fifo_gv11b.c')

diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index b018f3d9..847bf172 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -718,12 +718,12 @@ static int gv11b_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)

 	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);

-	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

 	ret = __locked_fifo_preempt(g, hw_chid, false);

 	if (!mutex_ret)
-		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

 	nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);

@@ -770,12 +770,12 @@ static int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)

 	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);

-	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

 	ret = __locked_fifo_preempt(g, tsgid, true);

 	if (!mutex_ret)
-		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

 	nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);

@@ -798,12 +798,12 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
 				runlist_info[runlist_id].mutex);
 	}

-	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

 	ret = __locked_fifo_preempt_runlists(g, runlists_mask);

 	if (!mutex_ret)
-		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

 	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
 		if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
@@ -858,12 +858,12 @@ static int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,

 	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);

-	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

 	ret = __locked_fifo_preempt_ch_tsg(g, id, id_type, timeout_rc_type);

 	if (!mutex_ret)
-		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

 	nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);

--
cgit v1.2.2