summary refs log tree commit diff stats
path: root/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/tsg_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.c  24
1 file changed, 24 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index f3e87a13..eabb98ea 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -29,13 +29,37 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 {
 	struct gk20a *g = tsg->g;
 	struct channel_gk20a *ch;
+	bool is_next, is_ctx_reload;
 
+	gk20a_fifo_disable_tsg_sched(g, tsg);
+
+	/*
+	 * Due to h/w bug that exists in Maxwell and Pascal,
+	 * we first need to enable all channels with NEXT and CTX_RELOAD set,
+	 * and then rest of the channels should be enabled
+	 */
 	down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+		is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
+		is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
+
+		if (is_next || is_ctx_reload)
+			g->ops.fifo.enable_channel(ch);
+	}
+
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+		is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
+		is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
+
+		if (is_next || is_ctx_reload)
+			continue;
+
 		g->ops.fifo.enable_channel(ch);
 	}
 	up_read(&tsg->ch_list_lock);
 
+	gk20a_fifo_enable_tsg_sched(g, tsg);
+
 	return 0;
 }
 