path: root/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
author     Deepak Nibade <dnibade@nvidia.com>    2017-09-13 08:41:52 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2017-09-15 15:48:20 -0400
commit     460951ed092aad787bacd0ebb0646b799d3463a1 (patch)
tree       b7fae8084b76106d77a8af6efea470e595175f17 /drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
parent     7d6d0405311337456f50d6fa032963c18d2c9f9f (diff)
gpu: nvgpu: fix TSG enable sequence
Due to a h/w bug in Maxwell and Pascal, we first need to enable all
channels with NEXT and CTX_RELOAD set in a TSG, and only then enable the
rest of the channels.

Add this sequence to gk20a_enable_tsg().

Add new APIs to enable/disable scheduling of a TSG's runlist:
  gk20a_fifo_enable_tsg_sched()
  gk20a_fifo_disable_tsg_sched()

Add new APIs to check if a channel has NEXT or CTX_RELOAD set:
  gk20a_fifo_channel_status_is_next()
  gk20a_fifo_channel_status_is_ctx_reload()

Bug 1739362

Change-Id: I4891cbd7f22ebc1e0bf32c52801002cdc259dbe1
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1560636
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
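For reference, a minimal sketch of what the new channel-status helpers could look like. The real implementations are added in fifo_gk20a.c and are not part of this diff (which is limited to tsg_gk20a.c); the CCSR register accessors used below (gk20a_readl(), ccsr_channel_r(), ccsr_channel_next_v(), ccsr_channel_next_true_v(), ccsr_channel_status_v()) are assumed from nvgpu's generated hw_ccsr headers and may not match the exact implementation.

/*
 * Illustrative sketch only -- not the code added by this change.
 */
bool gk20a_fifo_channel_status_is_next(struct gk20a *g, u32 chid)
{
        u32 channel = gk20a_readl(g, ccsr_channel_r(chid));

        /* true when the channel's CCSR entry has the NEXT bit set */
        return ccsr_channel_next_v(channel) == ccsr_channel_next_true_v();
}

/*
 * gk20a_fifo_channel_status_is_ctx_reload() would similarly decode
 * ccsr_channel_status_v(channel) and report whether the channel is in
 * one of the *_ctx_reload states.
 */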
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/tsg_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | 24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index f3e87a13..eabb98ea 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -29,13 +29,37 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 {
         struct gk20a *g = tsg->g;
         struct channel_gk20a *ch;
+        bool is_next, is_ctx_reload;
 
+        gk20a_fifo_disable_tsg_sched(g, tsg);
+
+        /*
+         * Due to h/w bug that exists in Maxwell and Pascal,
+         * we first need to enable all channels with NEXT and CTX_RELOAD set,
+         * and then rest of the channels should be enabled
+         */
         down_read(&tsg->ch_list_lock);
         nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+                is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
+                is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
+
+                if (is_next || is_ctx_reload)
+                        g->ops.fifo.enable_channel(ch);
+        }
+
+        nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+                is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
+                is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
+
+                if (is_next || is_ctx_reload)
+                        continue;
+
                 g->ops.fifo.enable_channel(ch);
         }
         up_read(&tsg->ch_list_lock);
 
+        gk20a_fifo_enable_tsg_sched(g, tsg);
+
         return 0;
 }
 
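The gk20a_fifo_disable_tsg_sched()/gk20a_fifo_enable_tsg_sched() pair bracketing the loops above is likewise added outside this file. A minimal sketch of what such wrappers could do, assuming the fifo_sched_disable_r() and fifo_sched_disable_runlist_m() accessors from nvgpu's generated hw_fifo headers and ignoring any locking against concurrent runlist updates (the real helpers live in fifo_gk20a.c and may differ):

/*
 * Illustrative sketch only -- not the code added by this change.
 */
void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg)
{
        u32 reg = gk20a_readl(g, fifo_sched_disable_r());

        /* set the per-runlist disable bit so this TSG's runlist is not scheduled */
        reg |= fifo_sched_disable_runlist_m(tsg->runlist_id);
        gk20a_writel(g, fifo_sched_disable_r(), reg);
}

void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg)
{
        u32 reg = gk20a_readl(g, fifo_sched_disable_r());

        /* clear the disable bit so the runlist is scheduled again */
        reg &= ~fifo_sched_disable_runlist_m(tsg->runlist_id);
        gk20a_writel(g, fifo_sched_disable_r(), reg);
}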