author     Seema Khowala <seemaj@nvidia.com>                        2018-05-07 15:06:08 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>     2018-06-24 12:53:33 -0400
commit     067ddbc4e4df3f1f756f03e7865c369a46f420aa (patch)
tree       c3de983f555152fedc3df2107bbae414bd119351 /drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
parent     3eede64de058fcb1e39d723dd146bcd5d06c6f43 (diff)
gpu: nvgpu: remove timeout_rc_type i/p param
- The is_preempt_pending hal does not need the timeout_rc_type input
  param: for volta, reset_eng_bitmask is saved if the preempt times
  out, while for legacy chips recovery triggers an mmu fault and the
  mmu fault handler takes care of resetting engines.
- For volta, no special input param is needed to differentiate between
  preempt polling in the normal scenario and preempt polling during
  recovery. The recovery path uses the preempt_ch_tsg hal to issue the
  preempt, and this hal does not issue recovery if the preempt times
  out.

Bug 2125776
Bug 2108544
Bug 2105322
Bug 2092051
Bug 2048824
Bug 2043838
Bug 2039587
Bug 2028993
Bug 2029245
Bug 2065990
Bug 1945121
Bug 200401707
Bug 200393631
Bug 200327596

Change-Id: Ie76a18ae0be880cfbeee615859a08179fb974fa8
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1709799
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
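A minimal before/after sketch of the caller-side change (types and the
fifo ops struct come from the nvgpu headers; surrounding recovery logic
is elided):

	/* Before: callers picked a recovery policy via timeout_rc_type. */
	ret = g->ops.fifo.is_preempt_pending(g, id, id_type,
			PREEMPT_TIMEOUT_RC);

	/* After: no policy flag. On a preempt timeout the gv11b poll
	 * routines record the engines needing reset in reset_eng_bitmask,
	 * and the recovery path issues preempts through preempt_ch_tsg,
	 * which never re-triggers recovery itself.
	 */
	ret = g->ops.fifo.is_preempt_pending(g, id, id_type);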
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/fifo_gv11b.c')
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | 31 ++++++++++++++-----------------
1 file changed, 14 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index abea39b6..4917f828 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -391,7 +391,7 @@ u32 gv11b_fifo_get_preempt_timeout(struct gk20a *g)
 }
 
 static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
-		u32 pbdma_id, unsigned int timeout_rc_type)
+		u32 pbdma_id)
 {
 	struct nvgpu_timeout timeout;
 	unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */
@@ -476,8 +476,7 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
 }
 
 static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
-		u32 act_eng_id, u32 *reset_eng_bitmask,
-		unsigned int timeout_rc_type)
+		u32 act_eng_id, u32 *reset_eng_bitmask)
 {
 	struct nvgpu_timeout timeout;
 	unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */
@@ -775,7 +774,7 @@ static int gv11b_fifo_poll_runlist_preempt_pending(struct gk20a *g,
 }
 
 int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
-		unsigned int id_type, unsigned int timeout_rc_type)
+		unsigned int id_type)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	unsigned long runlist_served_pbdmas;
@@ -800,14 +799,13 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 	runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask;
 
 	for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma)
-		ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id,
-				timeout_rc_type);
+		ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id);
+
 	f->runlist_info[runlist_id].reset_eng_bitmask = 0;
 
 	for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines)
 		ret |= gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id,
-			&f->runlist_info[runlist_id].reset_eng_bitmask,
-			timeout_rc_type);
+			&f->runlist_info[runlist_id].reset_eng_bitmask);
 	return ret;
 }
 
@@ -944,7 +942,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
 }
 
 static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
-		unsigned int id_type, unsigned int timeout_rc_type)
+		unsigned int id_type)
 {
 	int ret;
 	struct fifo_gk20a *f = &g->fifo;
@@ -958,18 +956,18 @@ static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
 	gk20a_fifo_issue_preempt(g, id, true);
 
 	/* wait for preempt */
-	ret = g->ops.fifo.is_preempt_pending(g, id, id_type,
-			timeout_rc_type);
+	ret = g->ops.fifo.is_preempt_pending(g, id, id_type);
 
-	if (ret && (timeout_rc_type == PREEMPT_TIMEOUT_RC))
-		gk20a_fifo_preempt_timeout_rc(g, id, id_type);
+	/* No recovery even if preempt timed out since
+	 * this is called from recovery path
+	 */
 
 	return ret;
 }
 
 
 int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
-		unsigned int id_type, unsigned int timeout_rc_type)
+		unsigned int id_type)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	u32 ret = 0;
@@ -995,7 +993,7 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt_ch_tsg(g, id, id_type, timeout_rc_type);
+	ret = __locked_fifo_preempt_ch_tsg(g, id, id_type);
 
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -1068,8 +1066,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 
 	/* Preempt tsg/ch */
 	if (id_type == ID_TYPE_TSG || id_type == ID_TYPE_CHANNEL) {
-		g->ops.fifo.preempt_ch_tsg(g, id, id_type);
-				PREEMPT_TIMEOUT_NORC);
+		g->ops.fifo.preempt_ch_tsg(g, id, id_type);
 	} else {
 		gv11b_fifo_preempt_runlists(g, runlists_mask);
 	}
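
For context, a sketch of how a recovery path might consume the saved
bitmask after the simplified preempt poll. f, runlist_info,
reset_eng_bitmask, max_engines and for_each_set_bit are from this file;
the reset hook named here is hypothetical, standing in for the driver's
engine-reset path:

	/* Illustrative only, not verbatim driver code: each engine whose
	 * context save timed out has its bit set in reset_eng_bitmask, so
	 * recovery can reset exactly those engines instead of keying off a
	 * timeout_rc_type flag.
	 */
	unsigned long bitmask = f->runlist_info[runlist_id].reset_eng_bitmask;
	u32 eng_id;

	for_each_set_bit(eng_id, &bitmask, f->max_engines)
		reset_engine_hypothetical(g, eng_id);	/* hypothetical hook */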