summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
diff options
context:
space:
mode:
authorMahantesh Kumbar <mkumbar@nvidia.com>2017-05-09 06:19:43 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-06-09 14:13:54 -0400
commit40ca7cc573430ca4e21fdec4a44394c09d615846 (patch)
treee4ee884dd8863d9928b34c7b0bf7468f2903c6b1 /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent821d1cab904d055264bc5d62b0c0d5187417ff13 (diff)
gpu: nvgpu: reorganize PMU IPC
- Moved PMU IPC related code to the drivers/gpu/nvgpu/common/pmu/pmu_ipc.c file. Below is the list of items that were moved: seq, mutex, queue, cmd/msg post & process, event handling. NVGPU-56 Change-Id: Ic380faa27de4e5574d5b22500125e86027fd4b5d Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com> Reviewed-on: http://git-master/r/1478167 GVS: Gerrit_Virtual_Submit Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com> Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c20
1 file changed, 10 insertions, 10 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 4b0667c5..00b26cf4 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2629,12 +2629,12 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
2629 for (i = 0; i < g->fifo.max_runlists; i++) 2629 for (i = 0; i < g->fifo.max_runlists; i++)
2630 nvgpu_mutex_acquire(&f->runlist_info[i].mutex); 2630 nvgpu_mutex_acquire(&f->runlist_info[i].mutex);
2631 2631
2632 mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2632 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2633 2633
2634 ret = __locked_fifo_preempt(g, hw_chid, false); 2634 ret = __locked_fifo_preempt(g, hw_chid, false);
2635 2635
2636 if (!mutex_ret) 2636 if (!mutex_ret)
2637 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2637 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2638 2638
2639 for (i = 0; i < g->fifo.max_runlists; i++) 2639 for (i = 0; i < g->fifo.max_runlists; i++)
2640 nvgpu_mutex_release(&f->runlist_info[i].mutex); 2640 nvgpu_mutex_release(&f->runlist_info[i].mutex);
@@ -2656,12 +2656,12 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
2656 for (i = 0; i < g->fifo.max_runlists; i++) 2656 for (i = 0; i < g->fifo.max_runlists; i++)
2657 nvgpu_mutex_acquire(&f->runlist_info[i].mutex); 2657 nvgpu_mutex_acquire(&f->runlist_info[i].mutex);
2658 2658
2659 mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2659 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2660 2660
2661 ret = __locked_fifo_preempt(g, tsgid, true); 2661 ret = __locked_fifo_preempt(g, tsgid, true);
2662 2662
2663 if (!mutex_ret) 2663 if (!mutex_ret)
2664 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2664 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2665 2665
2666 for (i = 0; i < g->fifo.max_runlists; i++) 2666 for (i = 0; i < g->fifo.max_runlists; i++)
2667 nvgpu_mutex_release(&f->runlist_info[i].mutex); 2667 nvgpu_mutex_release(&f->runlist_info[i].mutex);
@@ -2718,12 +2718,12 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
2718 } 2718 }
2719 } 2719 }
2720 2720
2721 mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2721 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2722 2722
2723 gk20a_fifo_sched_disable_rw(g, runlists_mask, runlist_state); 2723 gk20a_fifo_sched_disable_rw(g, runlists_mask, runlist_state);
2724 2724
2725 if (!mutex_ret) 2725 if (!mutex_ret)
2726 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2726 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2727 2727
2728 if (!is_runlist_info_mutex_locked) { 2728 if (!is_runlist_info_mutex_locked) {
2729 gk20a_dbg_info("release runlist_info mutex"); 2729 gk20a_dbg_info("release runlist_info mutex");
@@ -2792,7 +2792,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
2792 fifo_engine_status_engine_busy_v() && !wait_for_idle) 2792 fifo_engine_status_engine_busy_v() && !wait_for_idle)
2793 return -EBUSY; 2793 return -EBUSY;
2794 2794
2795 mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2795 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2796 2796
2797 gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m( 2797 gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
2798 eng_info->runlist_id), RUNLIST_DISABLED, 2798 eng_info->runlist_id), RUNLIST_DISABLED,
@@ -2832,7 +2832,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
2832 2832
2833clean_up: 2833clean_up:
2834 if (!mutex_ret) 2834 if (!mutex_ret)
2835 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2835 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2836 2836
2837 if (err) { 2837 if (err) {
2838 gk20a_dbg_fn("failed"); 2838 gk20a_dbg_fn("failed");
@@ -3300,13 +3300,13 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
3300 3300
3301 nvgpu_mutex_acquire(&runlist->mutex); 3301 nvgpu_mutex_acquire(&runlist->mutex);
3302 3302
3303 mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3303 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
3304 3304
3305 ret = gk20a_fifo_update_runlist_locked(g, runlist_id, hw_chid, add, 3305 ret = gk20a_fifo_update_runlist_locked(g, runlist_id, hw_chid, add,
3306 wait_for_finish); 3306 wait_for_finish);
3307 3307
3308 if (!mutex_ret) 3308 if (!mutex_ret)
3309 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 3309 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
3310 3310
3311 nvgpu_mutex_release(&runlist->mutex); 3311 nvgpu_mutex_release(&runlist->mutex);
3312 return ret; 3312 return ret;