author	Debarshi Dutta <ddutta@nvidia.com>	2018-11-27 00:35:56 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2019-02-11 11:18:36 -0500
commit	ef9de9e9925573b691d78760e42334ad24c5797f (patch)
tree	068bc5b4bb01de77136ed1da0e5da10d883c164a
parent	5b8ecbc51fe2e94a233c2c42d497b05c2eccdaf5 (diff)
gpu: nvgpu: replace input parameter tsgid with pointer to struct tsg_gk20a
gv11b_fifo_preempt_tsg needs to access the runlist_id of the tsg as well as
pass the tsg pointer to other public functions such as
gk20a_fifo_disable_tsg_sched. This qualifies preempt_tsg to take a pointer
to a struct tsg_gk20a instead of just the tsgid.

Jira NVGPU-1461

Change-Id: I01fbd2370b5746c2a597a0351e0301b0f7d25175
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1959068
(cherry picked from commit 1e78d47f15ff050edbb10a88550012178d353288 in rel-32)
Reviewed-on: https://git-master.nvidia.com/r/2013725
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
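The motivation is easiest to see in caller code. Below is a minimal, self-contained C sketch of the before/after calling convention; the struct and helpers are simplified stand-ins, not the real nvgpu definitions. With the pointer form, the callee can read tsg->runlist_id directly and forward the same pointer to helpers such as gk20a_fifo_disable_tsg_sched, with no extra lookup through f->tsg[tsgid].

/*
 * Minimal, self-contained sketch of the API change. The struct and the
 * helpers below are simplified stand-ins, not the real nvgpu definitions.
 */
#include <stdio.h>

struct tsg_gk20a {
	unsigned int tsgid;
	unsigned int runlist_id;
};

/* Old shape: only the id crosses the call boundary, so the callee must
 * index back into a global tsg table to recover anything else. */
static int preempt_tsg_by_id(struct tsg_gk20a *tsg_table, unsigned int tsgid)
{
	return (int)tsg_table[tsgid].runlist_id;
}

/* Stand-in for gk20a_fifo_disable_tsg_sched(), which takes a pointer. */
static void disable_tsg_sched(struct tsg_gk20a *tsg)
{
	printf("disable sched for tsg %u\n", tsg->tsgid);
}

/* New shape: the pointer carries everything the callee needs and can be
 * forwarded as-is to helpers, with no f->tsg[tsgid] round trip. */
static int preempt_tsg(struct tsg_gk20a *tsg)
{
	disable_tsg_sched(tsg);
	return (int)tsg->runlist_id;
}

int main(void)
{
	struct tsg_gk20a table[2] = { { 0u, 0u }, { 1u, 3u } };

	printf("old: runlist %d\n", preempt_tsg_by_id(table, 1u));
	printf("new: runlist %d\n", preempt_tsg(&table[1]));
	return 0;
}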
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	26
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.h	2
-rw-r--r--	drivers/gpu/nvgpu/gv11b/fifo_gv11b.c	33
-rw-r--r--	drivers/gpu/nvgpu/gv11b/fifo_gv11b.h	2
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/gk20a.h	2
-rw-r--r--	drivers/gpu/nvgpu/os/linux/ioctl_tsg.c	2
-rw-r--r--	drivers/gpu/nvgpu/vgpu/fifo_vgpu.c	6
-rw-r--r--	drivers/gpu/nvgpu/vgpu/fifo_vgpu.h	2
8 files changed, 39 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 9ed78640..a2ebb720 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1562,7 +1562,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
 	g->ops.fifo.disable_tsg(tsg);
 
 	if (preempt) {
-		g->ops.fifo.preempt_tsg(g, tsg->tsgid);
+		g->ops.fifo.preempt_tsg(g, tsg);
 	}
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
@@ -2194,8 +2194,8 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 	/* Disable TSG and examine status before unbinding channel */
 	g->ops.fifo.disable_tsg(tsg);
 
-	err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
-	if (err) {
+	err = g->ops.fifo.preempt_tsg(g, tsg);
+	if (err != 0) {
 		goto fail_enable_tsg;
 	}
 
@@ -3000,7 +3000,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	return ret;
 }
 
-int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
+int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	u32 ret = 0;
@@ -3008,10 +3008,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	u32 mutex_ret = 0;
 	u32 i;
 
-	nvgpu_log_fn(g, "tsgid: %d", tsgid);
-	if (tsgid == FIFO_INVAL_TSG_ID) {
-		return 0;
-	}
+	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++) {
@@ -3020,7 +3017,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt(g, tsgid, true);
+	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
 
 	if (!mutex_ret) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -3033,9 +3030,11 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	if (ret) {
 		if (nvgpu_platform_is_silicon(g)) {
 			nvgpu_err(g, "preempt timed out for tsgid: %u, "
-			"ctxsw timeout will trigger recovery if needed", tsgid);
+			"ctxsw timeout will trigger recovery if needed",
+			tsg->tsgid);
 		} else {
-			gk20a_fifo_preempt_timeout_rc(g, tsgid, true);
+			gk20a_fifo_preempt_timeout_rc(g,
+				tsg->tsgid, ID_TYPE_TSG);
 		}
 	}
 
@@ -3045,9 +3044,10 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
 {
 	int err;
+	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		err = g->ops.fifo.preempt_tsg(ch->g, ch->tsgid);
+	if (tsg != NULL) {
+		err = g->ops.fifo.preempt_tsg(ch->g, tsg);
 	} else {
 		err = g->ops.fifo.preempt_channel(ch->g, ch->chid);
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index f3c1b362..2d1c9cc3 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -231,7 +231,7 @@ void gk20a_fifo_isr(struct gk20a *g);
 u32 gk20a_fifo_nonstall_isr(struct gk20a *g);
 
 int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid);
-int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
+int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
 int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch);
 
 int gk20a_fifo_enable_engine_activity(struct gk20a *g,
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index b0b752af..11ccdd48 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -22,6 +22,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
+#include <nvgpu/bug.h>
 #include <nvgpu/semaphore.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/log.h>
@@ -803,17 +804,22 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid)
 {
 	struct fifo_gk20a *f = &g->fifo;
-	u32 tsgid;
+	struct tsg_gk20a *tsg = NULL;
 
 	if (chid == FIFO_INVAL_CHANNEL_ID) {
 		return 0;
 	}
 
-	tsgid = f->channel[chid].tsgid;
-	nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsgid);
+	tsg = tsg_gk20a_from_ch(&f->channel[chid]);
+
+	if (tsg == NULL) {
+		return 0;
+	}
+
+	nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsg->tsgid);
 
 	/* Preempt tsg. Channel preempt is NOOP */
-	return g->ops.fifo.preempt_tsg(g, tsgid);
+	return g->ops.fifo.preempt_tsg(g, tsg);
 }
 
 /* TSG enable sequence applicable for Volta and onwards */
@@ -837,7 +843,7 @@ int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
 	return 0;
 }
 
-int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
+int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	u32 ret = 0;
@@ -845,12 +851,9 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	u32 mutex_ret = 0;
 	u32 runlist_id;
 
-	nvgpu_log_fn(g, "tsgid: %d", tsgid);
-	if (tsgid == FIFO_INVAL_TSG_ID) {
-		return 0;
-	}
+	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
 
-	runlist_id = f->tsg[tsgid].runlist_id;
+	runlist_id = tsg->runlist_id;
 	nvgpu_log_fn(g, "runlist_id: %d", runlist_id);
 	if (runlist_id == FIFO_INVAL_RUNLIST_ID) {
 		return 0;
@@ -859,27 +862,27 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
 	/* WAR for Bug 2065990 */
-	gk20a_fifo_disable_tsg_sched(g, &f->tsg[tsgid]);
+	gk20a_fifo_disable_tsg_sched(g, tsg);
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt(g, tsgid, true);
+	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
 
 	if (!mutex_ret) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
 
 	/* WAR for Bug 2065990 */
-	gk20a_fifo_enable_tsg_sched(g, &f->tsg[tsgid]);
+	gk20a_fifo_enable_tsg_sched(g, tsg);
 
 	nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
 
 	if (ret) {
 		if (nvgpu_platform_is_silicon(g)) {
 			nvgpu_err(g, "preempt timed out for tsgid: %u, "
-			"ctxsw timeout will trigger recovery if needed", tsgid);
+			"ctxsw timeout will trigger recovery if needed", tsg->tsgid);
 		} else {
-			gk20a_fifo_preempt_timeout_rc(g, tsgid, true);
+			gk20a_fifo_preempt_timeout_rc(g, tsg->tsgid, true);
 		}
 	}
 
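The subtle part of the hunk above is the ordering around the preempt itself: the per-runlist lock is taken first, TSG scheduling is disabled as a workaround (WAR) for Bug 2065990, the PMU FIFO mutex guards the hardware preempt, and everything is released in reverse order. A hedged C sketch of that discipline, using hypothetical stand-in primitives rather than the real nvgpu ones:

/*
 * Sketch of the lock ordering in gv11b_fifo_preempt_tsg(). pthread
 * mutexes stand in for nvgpu's runlist lock and the PMU HW mutex; the
 * real code also tolerates a failed PMU mutex acquire, which this
 * simplified version does not model.
 */
#include <pthread.h>

static pthread_mutex_t runlist_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pmu_fifo_mutex = PTHREAD_MUTEX_INITIALIZER;

static int hw_preempt(unsigned int tsgid) { (void)tsgid; return 0; }
static void tsg_sched_disable(void) { /* WAR for Bug 2065990 */ }
static void tsg_sched_enable(void) { }

static int preempt_tsg_sketch(unsigned int tsgid)
{
	int ret;

	pthread_mutex_lock(&runlist_lock);   /* 1. runlist lock first */
	tsg_sched_disable();                 /* 2. WAR: pause scheduling */

	pthread_mutex_lock(&pmu_fifo_mutex); /* 3. guard the HW preempt */
	ret = hw_preempt(tsgid);
	pthread_mutex_unlock(&pmu_fifo_mutex);

	tsg_sched_enable();                  /* 4. undo the WAR ... */
	pthread_mutex_unlock(&runlist_lock); /* 5. ... release in reverse */

	return ret;
}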
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
index 1703fbdc..7ff42637 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
@@ -82,7 +82,7 @@ int gv11b_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next);
 int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 		unsigned int id_type);
 int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid);
-int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
+int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
 int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg);
 void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 		u32 id, unsigned int id_type, unsigned int rc_type,
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
index 0424e74d..f6aabd82 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
@@ -646,7 +646,7 @@ struct gpu_ops {
 			u32 flags);
 		int (*resetup_ramfc)(struct channel_gk20a *c);
 		int (*preempt_channel)(struct gk20a *g, u32 chid);
-		int (*preempt_tsg)(struct gk20a *g, u32 tsgid);
+		int (*preempt_tsg)(struct gk20a *g, struct tsg_gk20a *tsg);
 		int (*enable_tsg)(struct tsg_gk20a *tsg);
 		int (*disable_tsg)(struct tsg_gk20a *tsg);
 		int (*tsg_verify_channel_status)(struct channel_gk20a *ch);
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c
index 7ebf4291..b0cdf5e5 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c
@@ -699,7 +699,7 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 			return err;
 		}
 		/* preempt TSG */
-		err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
+		err = g->ops.fifo.preempt_tsg(g, tsg);
 		gk20a_idle(g);
 		break;
 	}
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 8821e799..9e3f7867 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -471,7 +471,7 @@ int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	return err;
 }
 
-int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
+int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 {
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_tsg_preempt_params *p =
@@ -482,13 +482,13 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
 	msg.handle = vgpu_get_handle(g);
-	p->tsg_id = tsgid;
+	p->tsg_id = tsg->tsgid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 
 	if (err) {
 		nvgpu_err(g,
-			"preempt tsg %u failed", tsgid);
+			"preempt tsg %u failed", tsg->tsgid);
 	}
 
 	return err;
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.h b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.h
index 20205d3c..8c042033 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.h
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.h
@@ -42,7 +42,7 @@ int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
 		unsigned long acquire_timeout, u32 flags);
 int vgpu_fifo_init_engine_info(struct fifo_gk20a *f);
 int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid);
-int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
+int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
 int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 		u32 chid, bool add, bool wait_for_finish);
 int vgpu_fifo_wait_engine_idle(struct gk20a *g);