summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorSeema Khowala <seemaj@nvidia.com>2018-05-07 15:06:08 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-06-24 12:53:33 -0400
commit067ddbc4e4df3f1f756f03e7865c369a46f420aa (patch)
treec3de983f555152fedc3df2107bbae414bd119351 /drivers
parent3eede64de058fcb1e39d723dd146bcd5d06c6f43 (diff)
gpu: nvgpu: remove timeout_rc_type input param
- The is_preempt_pending HAL does not need the timeout_rc_type input param: for Volta, reset_eng_bitmask is saved if preempt times out, while for legacy chips, recovery triggers an MMU fault and the MMU fault handler takes care of resetting the engines. - For Volta, no special input param is needed to differentiate between preempt polling during a normal scenario and preempt polling during recovery. The recovery path uses the preempt_ch_tsg HAL to issue preempt; this HAL does not issue recovery if preempt times out. Bug 2125776 Bug 2108544 Bug 2105322 Bug 2092051 Bug 2048824 Bug 2043838 Bug 2039587 Bug 2028993 Bug 2029245 Bug 2065990 Bug 1945121 Bug 200401707 Bug 200393631 Bug 200327596 Change-Id: Ie76a18ae0be880cfbeee615859a08179fb974fa8 Signed-off-by: Seema Khowala <seemaj@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1709799 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c9
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.h7
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a.h4
-rw-r--r--drivers/gpu/nvgpu/gv11b/fifo_gv11b.c31
-rw-r--r--drivers/gpu/nvgpu/gv11b/fifo_gv11b.h5
5 files changed, 24 insertions, 32 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 00119300..c8789c3a 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2704,7 +2704,7 @@ void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg)
2704} 2704}
2705 2705
2706int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, 2706int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
2707 unsigned int id_type, unsigned int timeout_rc_type) 2707 unsigned int id_type)
2708{ 2708{
2709 struct nvgpu_timeout timeout; 2709 struct nvgpu_timeout timeout;
2710 u32 delay = GR_IDLE_CHECK_DEFAULT; 2710 u32 delay = GR_IDLE_CHECK_DEFAULT;
@@ -2777,8 +2777,8 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
2777 id_type = is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL; 2777 id_type = is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL;
2778 2778
2779 /* wait for preempt */ 2779 /* wait for preempt */
2780 ret = g->ops.fifo.is_preempt_pending(g, id, id_type, 2780 ret = g->ops.fifo.is_preempt_pending(g, id, id_type);
2781 PREEMPT_TIMEOUT_RC); 2781
2782 return ret; 2782 return ret;
2783} 2783}
2784 2784
@@ -3448,8 +3448,7 @@ static int __locked_fifo_reschedule_preempt_next(struct channel_gk20a *ch,
3448 gk20a_readl(g, fifo_preempt_r())); 3448 gk20a_readl(g, fifo_preempt_r()));
3449#endif 3449#endif
3450 if (wait_preempt) { 3450 if (wait_preempt) {
3451 g->ops.fifo.is_preempt_pending( 3451 g->ops.fifo.is_preempt_pending(g, preempt_id, preempt_type);
3452 g, preempt_id, preempt_type, PREEMPT_TIMEOUT_RC);
3453 } 3452 }
3454#ifdef TRACEPOINTS_ENABLED 3453#ifdef TRACEPOINTS_ENABLED
3455 trace_gk20a_reschedule_preempted_next(ch->chid); 3454 trace_gk20a_reschedule_preempted_next(ch->chid);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index bccd15f6..d6e759ac 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -50,9 +50,6 @@ enum {
50#define ID_TYPE_TSG 1 50#define ID_TYPE_TSG 1
51#define ID_TYPE_UNKNOWN ((u32)~0) 51#define ID_TYPE_UNKNOWN ((u32)~0)
52 52
53#define PREEMPT_TIMEOUT_RC 1
54#define PREEMPT_TIMEOUT_NORC 0
55
56#define RC_YES 1 53#define RC_YES 1
57#define RC_NO 0 54#define RC_NO 0
58 55
@@ -390,8 +387,8 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a);
390 387
391u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g); 388u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g);
392 389
393int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, unsigned int id_type, 390int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
394 unsigned int timeout_rc_type); 391 unsigned int id_type);
395int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg); 392int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg);
396void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, 393void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
397 unsigned int id_type); 394 unsigned int id_type);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 25146b8b..fac02f68 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -662,9 +662,9 @@ struct gpu_ops {
662 struct ch_state *ch_state); 662 struct ch_state *ch_state);
663 u32 (*intr_0_error_mask)(struct gk20a *g); 663 u32 (*intr_0_error_mask)(struct gk20a *g);
664 int (*is_preempt_pending)(struct gk20a *g, u32 id, 664 int (*is_preempt_pending)(struct gk20a *g, u32 id,
665 unsigned int id_type, unsigned int timeout_rc_type); 665 unsigned int id_type);
666 int (*preempt_ch_tsg)(struct gk20a *g, u32 id, 666 int (*preempt_ch_tsg)(struct gk20a *g, u32 id,
667 unsigned int id_type, unsigned int timeout_rc_type); 667 unsigned int id_type);
668 void (*init_pbdma_intr_descs)(struct fifo_gk20a *f); 668 void (*init_pbdma_intr_descs)(struct fifo_gk20a *f);
669 int (*reset_enable_hw)(struct gk20a *g); 669 int (*reset_enable_hw)(struct gk20a *g);
670 int (*setup_userd)(struct channel_gk20a *c); 670 int (*setup_userd)(struct channel_gk20a *c);
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index abea39b6..4917f828 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -391,7 +391,7 @@ u32 gv11b_fifo_get_preempt_timeout(struct gk20a *g)
391} 391}
392 392
393static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id, 393static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
394 u32 pbdma_id, unsigned int timeout_rc_type) 394 u32 pbdma_id)
395{ 395{
396 struct nvgpu_timeout timeout; 396 struct nvgpu_timeout timeout;
397 unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */ 397 unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */
@@ -476,8 +476,7 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
476} 476}
477 477
478static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id, 478static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
479 u32 act_eng_id, u32 *reset_eng_bitmask, 479 u32 act_eng_id, u32 *reset_eng_bitmask)
480 unsigned int timeout_rc_type)
481{ 480{
482 struct nvgpu_timeout timeout; 481 struct nvgpu_timeout timeout;
483 unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */ 482 unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */
@@ -775,7 +774,7 @@ static int gv11b_fifo_poll_runlist_preempt_pending(struct gk20a *g,
775} 774}
776 775
777int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id, 776int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
778 unsigned int id_type, unsigned int timeout_rc_type) 777 unsigned int id_type)
779{ 778{
780 struct fifo_gk20a *f = &g->fifo; 779 struct fifo_gk20a *f = &g->fifo;
781 unsigned long runlist_served_pbdmas; 780 unsigned long runlist_served_pbdmas;
@@ -800,14 +799,13 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
800 runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask; 799 runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask;
801 800
802 for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) 801 for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma)
803 ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id, 802 ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id);
804 timeout_rc_type); 803
805 f->runlist_info[runlist_id].reset_eng_bitmask = 0; 804 f->runlist_info[runlist_id].reset_eng_bitmask = 0;
806 805
807 for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines) 806 for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines)
808 ret |= gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id, 807 ret |= gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id,
809 &f->runlist_info[runlist_id].reset_eng_bitmask, 808 &f->runlist_info[runlist_id].reset_eng_bitmask);
810 timeout_rc_type);
811 return ret; 809 return ret;
812} 810}
813 811
@@ -944,7 +942,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
944} 942}
945 943
946static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id, 944static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
947 unsigned int id_type, unsigned int timeout_rc_type) 945 unsigned int id_type)
948{ 946{
949 int ret; 947 int ret;
950 struct fifo_gk20a *f = &g->fifo; 948 struct fifo_gk20a *f = &g->fifo;
@@ -958,18 +956,18 @@ static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
958 gk20a_fifo_issue_preempt(g, id, true); 956 gk20a_fifo_issue_preempt(g, id, true);
959 957
960 /* wait for preempt */ 958 /* wait for preempt */
961 ret = g->ops.fifo.is_preempt_pending(g, id, id_type, 959 ret = g->ops.fifo.is_preempt_pending(g, id, id_type);
962 timeout_rc_type);
963 960
964 if (ret && (timeout_rc_type == PREEMPT_TIMEOUT_RC)) 961 /* No recovery even if preempt timed out since
965 gk20a_fifo_preempt_timeout_rc(g, id, id_type); 962 * this is called from recovery path
963 */
966 964
967 return ret; 965 return ret;
968} 966}
969 967
970 968
971int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id, 969int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
972 unsigned int id_type, unsigned int timeout_rc_type) 970 unsigned int id_type)
973{ 971{
974 struct fifo_gk20a *f = &g->fifo; 972 struct fifo_gk20a *f = &g->fifo;
975 u32 ret = 0; 973 u32 ret = 0;
@@ -995,7 +993,7 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
995 993
996 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 994 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
997 995
998 ret = __locked_fifo_preempt_ch_tsg(g, id, id_type, timeout_rc_type); 996 ret = __locked_fifo_preempt_ch_tsg(g, id, id_type);
999 997
1000 if (!mutex_ret) 998 if (!mutex_ret)
1001 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 999 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -1068,8 +1066,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
1068 1066
1069 /* Preempt tsg/ch */ 1067 /* Preempt tsg/ch */
1070 if (id_type == ID_TYPE_TSG || id_type == ID_TYPE_CHANNEL) { 1068 if (id_type == ID_TYPE_TSG || id_type == ID_TYPE_CHANNEL) {
1071 g->ops.fifo.preempt_ch_tsg(g, id, id_type, 1069 g->ops.fifo.preempt_ch_tsg(g, id, id_type);
1072 PREEMPT_TIMEOUT_NORC);
1073 } else { 1070 } else {
1074 gv11b_fifo_preempt_runlists(g, runlists_mask); 1071 gv11b_fifo_preempt_runlists(g, runlists_mask);
1075 } 1072 }
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
index 66efdd3b..3dfc337c 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
@@ -81,12 +81,11 @@ void gv11b_dump_eng_status(struct gk20a *g,
81u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g); 81u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g);
82int gv11b_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next); 82int gv11b_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next);
83int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id, 83int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
84 unsigned int id_type, unsigned int timeout_rc_type); 84 unsigned int id_type);
85int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid); 85int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid);
86int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid); 86int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
87int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg); 87int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg);
88int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id, 88int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id, unsigned int id_type);
89 unsigned int id_type, unsigned int timeout_rc_type);
90void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask, 89void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
91 u32 id, unsigned int id_type, unsigned int rc_type, 90 u32 id, unsigned int id_type, unsigned int rc_type,
92 struct mmu_fault_info *mmfault); 91 struct mmu_fault_info *mmfault);