Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
 -rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 50
 1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 4a32194c..c245f4a2 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -477,7 +477,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 		kfree(runlist->active_tsgs);
 		runlist->active_tsgs = NULL;
 
-		mutex_destroy(&runlist->mutex);
+		nvgpu_mutex_destroy(&runlist->mutex);
 
 	}
 	memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
@@ -650,7 +650,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 				goto clean_up_runlist;
 			}
 		}
-		mutex_init(&runlist->mutex);
+		nvgpu_mutex_init(&runlist->mutex);
 
 		/* None of buffers is pinned if this value doesn't change.
 		Otherwise, one of them (cur_buffer) must have been pinned. */
@@ -809,8 +809,8 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 
 	f->g = g;
 
-	mutex_init(&f->intr.isr.mutex);
-	mutex_init(&f->gr_reset_mutex);
+	nvgpu_mutex_init(&f->intr.isr.mutex);
+	nvgpu_mutex_init(&f->gr_reset_mutex);
 	gk20a_init_fifo_pbdma_intr_descs(f); /* just filling in data/tables */
 
 	f->num_channels = g->ops.fifo.get_num_fifos(g);
@@ -846,7 +846,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 	init_runlist(g, f);
 
 	INIT_LIST_HEAD(&f->free_chs);
-	mutex_init(&f->free_chs_mutex);
+	nvgpu_mutex_init(&f->free_chs_mutex);
 
 	if (g->ops.mm.is_bar1_supported(g))
 		err = gk20a_gmmu_alloc_map_sys(&g->mm.bar1.vm,
@@ -871,12 +871,12 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 		gk20a_init_channel_support(g, chid);
 		gk20a_init_tsg_support(g, chid);
 	}
-	mutex_init(&f->tsg_inuse_mutex);
+	nvgpu_mutex_init(&f->tsg_inuse_mutex);
 
 	f->remove_support = gk20a_remove_fifo_support;
 
 	f->deferred_reset_pending = false;
-	mutex_init(&f->deferred_reset_mutex);
+	nvgpu_mutex_init(&f->deferred_reset_mutex);
 
 	f->sw_ready = true;
 
@@ -1224,7 +1224,7 @@ static bool gk20a_fifo_set_ctx_mmu_error(struct gk20a *g,
 	if (!ch)
 		return verbose;
 
-	mutex_lock(&ch->error_notifier_mutex);
+	nvgpu_mutex_acquire(&ch->error_notifier_mutex);
 	if (ch->error_notifier_ref) {
 		u32 err = ch->error_notifier->info32;
 		if (ch->error_notifier->status == 0xffff) {
@@ -1240,7 +1240,7 @@ static bool gk20a_fifo_set_ctx_mmu_error(struct gk20a *g,
 				NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
 		}
 	}
-	mutex_unlock(&ch->error_notifier_mutex);
+	nvgpu_mutex_release(&ch->error_notifier_mutex);
 
 	/* mark channel as faulted */
 	ch->has_timedout = true;
@@ -1309,7 +1309,7 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
 {
 	u32 engine_id, engines;
 
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	gr_gk20a_disable_ctxsw(g);
 
 	if (!g->fifo.deferred_reset_pending)
@@ -1336,7 +1336,7 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
 
 clean_up:
 	gr_gk20a_enable_ctxsw(g);
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 	return 0;
 }
@@ -1487,9 +1487,9 @@ static bool gk20a_fifo_handle_mmu_fault(
 		} else if (engine_id != FIFO_INVAL_ENGINE_ID) {
 			/* if lock is already taken, a reset is taking place
 			so no need to repeat */
-			if (mutex_trylock(&g->fifo.gr_reset_mutex)) {
+			if (nvgpu_mutex_tryacquire(&g->fifo.gr_reset_mutex)) {
 				gk20a_fifo_reset_engine(g, engine_id);
-				mutex_unlock(&g->fifo.gr_reset_mutex);
+				nvgpu_mutex_release(&g->fifo.gr_reset_mutex);
 			}
 		}
 
@@ -1646,7 +1646,7 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose)
 
 	/* stop context switching to prevent engine assignments from
 	   changing until channel is recovered */
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	gr_gk20a_disable_ctxsw(g);
 
 	engines = gk20a_fifo_engines_on_id(g, hw_chid, false);
@@ -1667,7 +1667,7 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose)
 	}
 
 	gr_gk20a_enable_ctxsw(g);
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 }
 
 void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose)
@@ -1676,7 +1676,7 @@ void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose)
 
 	/* stop context switching to prevent engine assignments from
 	   changing until TSG is recovered */
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	gr_gk20a_disable_ctxsw(g);
 
 	engines = gk20a_fifo_engines_on_id(g, tsgid, true);
@@ -1693,7 +1693,7 @@ void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose)
 	}
 
 	gr_gk20a_enable_ctxsw(g);
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 }
 
 void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
@@ -2307,7 +2307,7 @@ void gk20a_fifo_isr(struct gk20a *g)
 	if (g->fifo.sw_ready) {
 		/* note we're not actually in an "isr", but rather
 		 * in a threaded interrupt context... */
-		mutex_lock(&g->fifo.intr.isr.mutex);
+		nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex);
 
 		gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr);
 
@@ -2322,7 +2322,7 @@ void gk20a_fifo_isr(struct gk20a *g)
 		if (unlikely(fifo_intr & error_intr_mask))
 			clear_intr = fifo_error_isr(g, fifo_intr);
 
-		mutex_unlock(&g->fifo.intr.isr.mutex);
+		nvgpu_mutex_release(&g->fifo.intr.isr.mutex);
 	}
 	gk20a_writel(g, fifo_intr_0_r(), clear_intr);
 
@@ -2434,7 +2434,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++)
-		mutex_lock(&f->runlist_info[i].mutex);
+		nvgpu_mutex_acquire(&f->runlist_info[i].mutex);
 
 	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -2444,7 +2444,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	for (i = 0; i < g->fifo.max_runlists; i++)
-		mutex_unlock(&f->runlist_info[i].mutex);
+		nvgpu_mutex_release(&f->runlist_info[i].mutex);
 
 	return ret;
 }
@@ -2461,7 +2461,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++)
-		mutex_lock(&f->runlist_info[i].mutex);
+		nvgpu_mutex_acquire(&f->runlist_info[i].mutex);
 
 	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -2471,7 +2471,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
 	for (i = 0; i < g->fifo.max_runlists; i++)
-		mutex_unlock(&f->runlist_info[i].mutex);
+		nvgpu_mutex_release(&f->runlist_info[i].mutex);
 
 	return ret;
 }
@@ -3046,7 +3046,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
 
 	runlist = &f->runlist_info[runlist_id];
 
-	mutex_lock(&runlist->mutex);
+	nvgpu_mutex_acquire(&runlist->mutex);
 
 	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -3056,7 +3056,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
 	if (!mutex_ret)
 		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	mutex_unlock(&runlist->mutex);
+	nvgpu_mutex_release(&runlist->mutex);
 	return ret;
 }
 
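The change is a mechanical substitution: every raw Linux mutex call in fifo_gk20a.c (mutex_init, mutex_lock, mutex_unlock, mutex_trylock, mutex_destroy) is replaced by the matching nvgpu_mutex_* wrapper, so the fifo code locks through nvgpu's OS-abstraction layer instead of calling <linux/mutex.h> directly. The sketch below is an illustrative guess at one plausible Linux-side shape for that wrapper; the struct nvgpu_mutex layout, the void return types, and the header choice are assumptions made here for illustration, not the actual nvgpu definitions.

/*
 * Illustrative sketch only (assumed layout, not the real nvgpu header):
 * a thin wrapper over the kernel's struct mutex, matching the call
 * sites in the diff above.
 */
#include <linux/mutex.h>

struct nvgpu_mutex {
	struct mutex mutex;	/* backing Linux mutex (assumed) */
};

static inline void nvgpu_mutex_init(struct nvgpu_mutex *m)
{
	mutex_init(&m->mutex);
}

static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *m)
{
	mutex_lock(&m->mutex);
}

static inline void nvgpu_mutex_release(struct nvgpu_mutex *m)
{
	mutex_unlock(&m->mutex);
}

/* returns non-zero when the lock was taken, mirroring mutex_trylock() */
static inline int nvgpu_mutex_tryacquire(struct nvgpu_mutex *m)
{
	return mutex_trylock(&m->mutex);
}

static inline void nvgpu_mutex_destroy(struct nvgpu_mutex *m)
{
	mutex_destroy(&m->mutex);
}

With wrappers of roughly this shape, the substitutions in the diff are drop-in: acquire/release pair one-to-one with lock/unlock, and tryacquire keeps mutex_trylock's convention that a non-zero return means the lock was obtained, which is what the gr_reset_mutex hunk relies on.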