summaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2017-09-14 06:47:48 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-09-15 15:48:21 -0400
commit2b7e8a2c2a5df041c9a434804d0f3f6d9df82737 (patch)
tree126b14a854a75aa926966acfe41f4f5823711cfe /drivers/gpu
parent460951ed092aad787bacd0ebb0646b799d3463a1 (diff)
gpu: nvgpu: fix channel unbind sequence from TSG
We right now remove a channel from the TSG list and disable all the channels in the TSG while removing a channel from the TSG. With this sequence, if any one channel in a TSG is closed, the rest of the channels are set as timed out and cannot be used anymore. We need to fix this sequence as below to allow removing a channel from an active TSG so that the rest of the channels can still be used: - disable all channels of the TSG - preempt the TSG - check if CTX_RELOAD is set, if support is available; if CTX_RELOAD is set on the channel, it should be moved to some other channel - check if FAULTED is set, if support is available - if NEXT is set on the channel, then it means the channel is still active; print out an error in this case for the time being, until properly handled - remove the channel from the runlist - remove the channel from the TSG list - re-enable the rest of the channels in the TSG - clean up the channel (same as regular channels). Add below fifo operations to support checking channel status: g->ops.fifo.tsg_verify_status_ctx_reload and g->ops.fifo.tsg_verify_status_faulted. Define the ops.fifo.tsg_verify_status_ctx_reload operation for gm20b/gp10b/gp106 as gm20b_fifo_tsg_verify_status_ctx_reload(). This API will check if the channel to be released has CTX_RELOAD set; if yes, CTX_RELOAD needs to be moved to some other channel in the TSG. Remove static from channel_gk20a_update_runlist() and export it. Bug 200327095 Change-Id: I0dd4be7c7e0b9b759389ec12c5a148a4b919d3e2 Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1560637 Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com> Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.c26
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.h1
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c56
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.h1
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a.h4
-rw-r--r--drivers/gpu/nvgpu/gk20a/gr_gk20a.c4
-rw-r--r--drivers/gpu/nvgpu/gk20a/gr_gk20a.h2
-rw-r--r--drivers/gpu/nvgpu/gk20a/tsg_gk20a.c15
-rw-r--r--drivers/gpu/nvgpu/gm20b/fifo_gm20b.c28
-rw-r--r--drivers/gpu/nvgpu/gm20b/fifo_gm20b.h1
-rw-r--r--drivers/gpu/nvgpu/gm20b/hal_gm20b.c1
-rw-r--r--drivers/gpu/nvgpu/gp106/hal_gp106.c1
-rw-r--r--drivers/gpu/nvgpu/gp10b/hal_gp10b.c1
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gm20b/hw_ccsr_gm20b.h4
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gp106/hw_ccsr_gp106.h4
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gp10b/hw_ccsr_gp10b.h4
-rw-r--r--drivers/gpu/nvgpu/vgpu/gr_vgpu.c4
17 files changed, 136 insertions, 21 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index d0d5c41f..0b8422a6 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -82,9 +82,6 @@ static void channel_gk20a_joblist_delete(struct channel_gk20a *c,
82static struct channel_gk20a_job *channel_gk20a_joblist_peek( 82static struct channel_gk20a_job *channel_gk20a_joblist_peek(
83 struct channel_gk20a *c); 83 struct channel_gk20a *c);
84 84
85static int channel_gk20a_update_runlist(struct channel_gk20a *c,
86 bool add);
87
88static u32 gk20a_get_channel_watchdog_timeout(struct channel_gk20a *ch); 85static u32 gk20a_get_channel_watchdog_timeout(struct channel_gk20a *ch);
89 86
90static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c, 87static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
@@ -189,7 +186,7 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
189 return 0; 186 return 0;
190} 187}
191 188
192static int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add) 189int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add)
193{ 190{
194 return c->g->ops.fifo.update_runlist(c->g, c->runlist_id, c->chid, add, true); 191 return c->g->ops.fifo.update_runlist(c->g, c->runlist_id, c->chid, add, true);
195} 192}
@@ -459,6 +456,8 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
459 struct dbg_session_gk20a *dbg_s; 456 struct dbg_session_gk20a *dbg_s;
460 struct dbg_session_data *session_data, *tmp_s; 457 struct dbg_session_data *session_data, *tmp_s;
461 struct dbg_session_channel_data *ch_data, *tmp; 458 struct dbg_session_channel_data *ch_data, *tmp;
459 bool was_tsg = false;
460 int err;
462 461
463 gk20a_dbg_fn(""); 462 gk20a_dbg_fn("");
464 463
@@ -467,7 +466,19 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
467 trace_gk20a_free_channel(ch->chid); 466 trace_gk20a_free_channel(ch->chid);
468 467
469 /* abort channel and remove from runlist */ 468 /* abort channel and remove from runlist */
470 gk20a_disable_channel(ch); 469 if (gk20a_is_channel_marked_as_tsg(ch)) {
470 err = g->ops.fifo.tsg_unbind_channel(ch);
471 if (err)
472 nvgpu_err(g, "failed to unbind channel %d from TSG", ch->chid);
473 /*
474 * Channel is not a part of TSG this point onwards
475 * So stash its status and use it whenever necessary
476 * e.g. while releasing gr_ctx in g->ops.gr.free_channel_ctx()
477 */
478 was_tsg = true;
479 } else {
480 gk20a_disable_channel(ch);
481 }
471 482
472 /* wait until there's only our ref to the channel */ 483 /* wait until there's only our ref to the channel */
473 if (!force) 484 if (!force)
@@ -524,7 +535,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
524 g->ops.fecs_trace.unbind_channel(g, ch); 535 g->ops.fecs_trace.unbind_channel(g, ch);
525 536
526 /* release channel ctx */ 537 /* release channel ctx */
527 g->ops.gr.free_channel_ctx(ch); 538 g->ops.gr.free_channel_ctx(ch, was_tsg);
528 539
529 gk20a_gr_flush_channel_tlb(gr); 540 gk20a_gr_flush_channel_tlb(gr);
530 541
@@ -571,9 +582,6 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
571 nvgpu_wait_for_deferred_interrupts(g); 582 nvgpu_wait_for_deferred_interrupts(g);
572 583
573unbind: 584unbind:
574 if (gk20a_is_channel_marked_as_tsg(ch))
575 g->ops.fifo.tsg_unbind_channel(ch);
576
577 g->ops.fifo.unbind_channel(ch); 585 g->ops.fifo.unbind_channel(ch);
578 g->ops.fifo.free_inst(g, ch); 586 g->ops.fifo.free_inst(g, ch);
579 587
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index f022e630..f6ac5780 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -398,6 +398,7 @@ void channel_gk20a_joblist_lock(struct channel_gk20a *c);
398void channel_gk20a_joblist_unlock(struct channel_gk20a *c); 398void channel_gk20a_joblist_unlock(struct channel_gk20a *c);
399bool channel_gk20a_joblist_is_empty(struct channel_gk20a *c); 399bool channel_gk20a_joblist_is_empty(struct channel_gk20a *c);
400 400
401int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add);
401u32 gk20a_channel_get_timeslice(struct channel_gk20a *ch); 402u32 gk20a_channel_get_timeslice(struct channel_gk20a *ch);
402int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g, 403int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
403 int timeslice_period, 404 int timeslice_period,
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 2cc5e4cd..1815c15b 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1899,6 +1899,62 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
1899 return 0; 1899 return 0;
1900} 1900}
1901 1901
1902static int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch)
1903{
1904 struct gk20a *g = ch->g;
1905
1906 if (g->ops.fifo.tsg_verify_status_ctx_reload)
1907 g->ops.fifo.tsg_verify_status_ctx_reload(ch);
1908
1909 if (g->ops.fifo.tsg_verify_status_faulted)
1910 g->ops.fifo.tsg_verify_status_faulted(ch);
1911
1912 if (gk20a_fifo_channel_status_is_next(g, ch->chid))
1913 nvgpu_err(g, "Channel %d to be removed from TSG has NEXT set!",
1914 ch->chid);
1915
1916 return 0;
1917}
1918
1919int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
1920{
1921 struct gk20a *g = ch->g;
1922 struct fifo_gk20a *f = &g->fifo;
1923 struct tsg_gk20a *tsg = &f->tsg[ch->tsgid];
1924 int err;
1925
1926 /* Disable TSG and examine status before unbinding channel */
1927 g->ops.fifo.disable_tsg(tsg);
1928
1929 err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
1930 if (err)
1931 goto fail_enable_tsg;
1932
1933 err = gk20a_fifo_tsg_unbind_channel_verify_status(ch);
1934 if (err)
1935 goto fail_enable_tsg;
1936
1937 /* Channel should be seen as TSG channel while updating runlist */
1938 err = channel_gk20a_update_runlist(ch, false);
1939 if (err)
1940 goto fail_enable_tsg;
1941
1942 /* Remove channel from TSG and re-enable rest of the channels */
1943 down_write(&tsg->ch_list_lock);
1944 nvgpu_list_del(&ch->ch_entry);
1945 up_write(&tsg->ch_list_lock);
1946
1947 g->ops.fifo.enable_tsg(tsg);
1948
1949 gk20a_channel_abort_clean_up(ch);
1950
1951 return 0;
1952
1953fail_enable_tsg:
1954 g->ops.fifo.enable_tsg(tsg);
1955 return err;
1956}
1957
1902u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g, 1958u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g,
1903 int *__id, bool *__is_tsg) 1959 int *__id, bool *__is_tsg)
1904{ 1960{
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 70c70931..92dcc8e6 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -273,6 +273,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
273 u32 err_code, bool verbose); 273 u32 err_code, bool verbose);
274void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id); 274void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id);
275int gk20a_init_fifo_reset_enable_hw(struct gk20a *g); 275int gk20a_init_fifo_reset_enable_hw(struct gk20a *g);
276int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch);
276 277
277void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g, 278void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
278 unsigned long fault_id); 279 unsigned long fault_id);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 4564b6e9..adc630e6 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -212,7 +212,7 @@ struct gpu_ops {
212 int (*load_ctxsw_ucode)(struct gk20a *g); 212 int (*load_ctxsw_ucode)(struct gk20a *g);
213 u32 (*get_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index); 213 u32 (*get_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index);
214 void (*set_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index); 214 void (*set_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index);
215 void (*free_channel_ctx)(struct channel_gk20a *c); 215 void (*free_channel_ctx)(struct channel_gk20a *c, bool is_tsg);
216 int (*alloc_obj_ctx)(struct channel_gk20a *c, 216 int (*alloc_obj_ctx)(struct channel_gk20a *c,
217 struct nvgpu_alloc_obj_ctx_args *args); 217 struct nvgpu_alloc_obj_ctx_args *args);
218 int (*bind_ctxsw_zcull)(struct gk20a *g, struct gr_gk20a *gr, 218 int (*bind_ctxsw_zcull)(struct gk20a *g, struct gr_gk20a *gr,
@@ -477,6 +477,8 @@ struct gpu_ops {
477 int (*preempt_tsg)(struct gk20a *g, u32 tsgid); 477 int (*preempt_tsg)(struct gk20a *g, u32 tsgid);
478 int (*enable_tsg)(struct tsg_gk20a *tsg); 478 int (*enable_tsg)(struct tsg_gk20a *tsg);
479 int (*disable_tsg)(struct tsg_gk20a *tsg); 479 int (*disable_tsg)(struct tsg_gk20a *tsg);
480 void (*tsg_verify_status_ctx_reload)(struct channel_gk20a *ch);
481 void (*tsg_verify_status_faulted)(struct channel_gk20a *ch);
480 int (*reschedule_runlist)(struct gk20a *g, u32 runlist_id); 482 int (*reschedule_runlist)(struct gk20a *g, u32 runlist_id);
481 int (*update_runlist)(struct gk20a *g, u32 runlist_id, 483 int (*update_runlist)(struct gk20a *g, u32 runlist_id,
482 u32 chid, bool add, 484 u32 chid, bool add,
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 27442947..833a3ab9 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -2868,14 +2868,14 @@ static void gr_gk20a_free_channel_pm_ctx(struct channel_gk20a *c)
2868 } 2868 }
2869} 2869}
2870 2870
2871void gk20a_free_channel_ctx(struct channel_gk20a *c) 2871void gk20a_free_channel_ctx(struct channel_gk20a *c, bool is_tsg)
2872{ 2872{
2873 if(c->g->ops.fifo.free_channel_ctx_header) 2873 if(c->g->ops.fifo.free_channel_ctx_header)
2874 c->g->ops.fifo.free_channel_ctx_header(c); 2874 c->g->ops.fifo.free_channel_ctx_header(c);
2875 gr_gk20a_unmap_global_ctx_buffers(c); 2875 gr_gk20a_unmap_global_ctx_buffers(c);
2876 gr_gk20a_free_channel_patch_ctx(c); 2876 gr_gk20a_free_channel_patch_ctx(c);
2877 gr_gk20a_free_channel_pm_ctx(c); 2877 gr_gk20a_free_channel_pm_ctx(c);
2878 if (!gk20a_is_channel_marked_as_tsg(c)) 2878 if (!is_tsg)
2879 gr_gk20a_free_channel_gr_ctx(c); 2879 gr_gk20a_free_channel_gr_ctx(c);
2880 2880
2881 /* zcull_ctx */ 2881 /* zcull_ctx */
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index 42296084..c69d9df9 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -506,7 +506,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
506 struct nvgpu_alloc_obj_ctx_args *args); 506 struct nvgpu_alloc_obj_ctx_args *args);
507int gk20a_free_obj_ctx(struct channel_gk20a *c, 507int gk20a_free_obj_ctx(struct channel_gk20a *c,
508 struct nvgpu_free_obj_ctx_args *args); 508 struct nvgpu_free_obj_ctx_args *args);
509void gk20a_free_channel_ctx(struct channel_gk20a *c); 509void gk20a_free_channel_ctx(struct channel_gk20a *c, bool is_tsg);
510 510
511int gk20a_gr_isr(struct gk20a *g); 511int gk20a_gr_isr(struct gk20a *g);
512int gk20a_gr_nonstall_isr(struct gk20a *g); 512int gk20a_gr_nonstall_isr(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index eabb98ea..f8c8be5e 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -139,17 +139,20 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
139 139
140int gk20a_tsg_unbind_channel(struct channel_gk20a *ch) 140int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
141{ 141{
142 struct fifo_gk20a *f = &ch->g->fifo; 142 struct gk20a *g = ch->g;
143 struct tsg_gk20a *tsg = &f->tsg[ch->tsgid]; 143 struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
144 int err;
144 145
145 down_write(&tsg->ch_list_lock); 146 err = gk20a_fifo_tsg_unbind_channel(ch);
146 nvgpu_list_del(&ch->ch_entry); 147 if (err)
147 up_write(&tsg->ch_list_lock); 148 return err;
148 149
149 nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); 150 nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
150
151 ch->tsgid = NVGPU_INVALID_TSG_ID; 151 ch->tsgid = NVGPU_INVALID_TSG_ID;
152 152
153 gk20a_dbg(gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n",
154 tsg->tsgid, ch->chid);
155
153 return 0; 156 return 0;
154} 157}
155 158
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index 8e913f23..6b462acd 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -183,3 +183,31 @@ void gm20b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f)
183 f->intr.pbdma.restartable_0 = 183 f->intr.pbdma.restartable_0 =
184 pbdma_intr_0_device_pending_f(); 184 pbdma_intr_0_device_pending_f();
185} 185}
186
187static void gm20b_fifo_set_ctx_reload(struct channel_gk20a *ch)
188{
189 struct gk20a *g = ch->g;
190 u32 channel = gk20a_readl(g, ccsr_channel_r(ch->chid));
191
192 gk20a_writel(g, ccsr_channel_r(ch->chid),
193 channel | ccsr_channel_force_ctx_reload_true_f());
194}
195
196void gm20b_fifo_tsg_verify_status_ctx_reload(struct channel_gk20a *ch)
197{
198 struct gk20a *g = ch->g;
199 struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
200 struct channel_gk20a *temp_ch;
201
202 /* If CTX_RELOAD is set on a channel, move it to some other channel */
203 if (gk20a_fifo_channel_status_is_ctx_reload(ch->g, ch->chid)) {
204 down_read(&tsg->ch_list_lock);
205 nvgpu_list_for_each_entry(temp_ch, &tsg->ch_list, channel_gk20a, ch_entry) {
206 if (temp_ch->chid != ch->chid) {
207 gm20b_fifo_set_ctx_reload(temp_ch);
208 break;
209 }
210 }
211 up_read(&tsg->ch_list_lock);
212 }
213}
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.h b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.h
index 1b1b8cc1..f82ae09b 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.h
@@ -25,5 +25,6 @@ void gm20b_device_info_data_parse(struct gk20a *g,
25 u32 table_entry, u32 *inst_id, 25 u32 table_entry, u32 *inst_id,
26 u32 *pri_base, u32 *fault_id); 26 u32 *pri_base, u32 *fault_id);
27void gm20b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f); 27void gm20b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f);
28void gm20b_fifo_tsg_verify_status_ctx_reload(struct channel_gk20a *ch);
28 29
29#endif 30#endif
diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
index 46c1e81f..9ff9fdd7 100644
--- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
@@ -368,6 +368,7 @@ static const struct gpu_ops gm20b_ops = {
368 .preempt_tsg = gk20a_fifo_preempt_tsg, 368 .preempt_tsg = gk20a_fifo_preempt_tsg,
369 .enable_tsg = gk20a_enable_tsg, 369 .enable_tsg = gk20a_enable_tsg,
370 .disable_tsg = gk20a_disable_tsg, 370 .disable_tsg = gk20a_disable_tsg,
371 .tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
371 .update_runlist = gk20a_fifo_update_runlist, 372 .update_runlist = gk20a_fifo_update_runlist,
372 .trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault, 373 .trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
373 .get_mmu_fault_info = gk20a_fifo_get_mmu_fault_info, 374 .get_mmu_fault_info = gk20a_fifo_get_mmu_fault_info,
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index e3fa596a..10b26712 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -428,6 +428,7 @@ static const struct gpu_ops gp106_ops = {
428 .preempt_tsg = gk20a_fifo_preempt_tsg, 428 .preempt_tsg = gk20a_fifo_preempt_tsg,
429 .enable_tsg = gk20a_enable_tsg, 429 .enable_tsg = gk20a_enable_tsg,
430 .disable_tsg = gk20a_disable_tsg, 430 .disable_tsg = gk20a_disable_tsg,
431 .tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
431 .update_runlist = gk20a_fifo_update_runlist, 432 .update_runlist = gk20a_fifo_update_runlist,
432 .trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault, 433 .trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
433 .get_mmu_fault_info = gp10b_fifo_get_mmu_fault_info, 434 .get_mmu_fault_info = gp10b_fifo_get_mmu_fault_info,
diff --git a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
index ffb6fe24..4dae79e1 100644
--- a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
@@ -389,6 +389,7 @@ static const struct gpu_ops gp10b_ops = {
389 .preempt_tsg = gk20a_fifo_preempt_tsg, 389 .preempt_tsg = gk20a_fifo_preempt_tsg,
390 .enable_tsg = gk20a_enable_tsg, 390 .enable_tsg = gk20a_enable_tsg,
391 .disable_tsg = gk20a_disable_tsg, 391 .disable_tsg = gk20a_disable_tsg,
392 .tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
392 .reschedule_runlist = gk20a_fifo_reschedule_runlist, 393 .reschedule_runlist = gk20a_fifo_reschedule_runlist,
393 .update_runlist = gk20a_fifo_update_runlist, 394 .update_runlist = gk20a_fifo_update_runlist,
394 .trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault, 395 .trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gm20b/hw_ccsr_gm20b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gm20b/hw_ccsr_gm20b.h
index 3f5d312f..b00979ff 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/hw/gm20b/hw_ccsr_gm20b.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gm20b/hw_ccsr_gm20b.h
@@ -146,6 +146,10 @@ static inline u32 ccsr_channel_next_true_v(void)
146{ 146{
147 return 0x00000001; 147 return 0x00000001;
148} 148}
149static inline u32 ccsr_channel_force_ctx_reload_true_f(void)
150{
151 return 0x100;
152}
149static inline u32 ccsr_channel_busy_v(u32 r) 153static inline u32 ccsr_channel_busy_v(u32 r)
150{ 154{
151 return (r >> 28) & 0x1; 155 return (r >> 28) & 0x1;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gp106/hw_ccsr_gp106.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gp106/hw_ccsr_gp106.h
index 13bd4251..5e67ede0 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/hw/gp106/hw_ccsr_gp106.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gp106/hw_ccsr_gp106.h
@@ -146,6 +146,10 @@ static inline u32 ccsr_channel_next_true_v(void)
146{ 146{
147 return 0x00000001; 147 return 0x00000001;
148} 148}
149static inline u32 ccsr_channel_force_ctx_reload_true_f(void)
150{
151 return 0x100;
152}
149static inline u32 ccsr_channel_busy_v(u32 r) 153static inline u32 ccsr_channel_busy_v(u32 r)
150{ 154{
151 return (r >> 28) & 0x1; 155 return (r >> 28) & 0x1;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gp10b/hw_ccsr_gp10b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gp10b/hw_ccsr_gp10b.h
index 33c83c80..6cbe44d4 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/hw/gp10b/hw_ccsr_gp10b.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gp10b/hw_ccsr_gp10b.h
@@ -146,6 +146,10 @@ static inline u32 ccsr_channel_next_true_v(void)
146{ 146{
147 return 0x00000001; 147 return 0x00000001;
148} 148}
149static inline u32 ccsr_channel_force_ctx_reload_true_f(void)
150{
151 return 0x100;
152}
149static inline u32 ccsr_channel_busy_v(u32 r) 153static inline u32 ccsr_channel_busy_v(u32 r)
150{ 154{
151 return (r >> 28) & 0x1; 155 return (r >> 28) & 0x1;
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index e3dfb874..bacd6ded 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -418,7 +418,7 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
418 pm_ctx->mem.gpu_va = 0; 418 pm_ctx->mem.gpu_va = 0;
419} 419}
420 420
421static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c) 421static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c, bool is_tsg)
422{ 422{
423 gk20a_dbg_fn(""); 423 gk20a_dbg_fn("");
424 424
@@ -427,7 +427,7 @@ static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c)
427 vgpu_gr_unmap_global_ctx_buffers(c); 427 vgpu_gr_unmap_global_ctx_buffers(c);
428 vgpu_gr_free_channel_patch_ctx(c); 428 vgpu_gr_free_channel_patch_ctx(c);
429 vgpu_gr_free_channel_pm_ctx(c); 429 vgpu_gr_free_channel_pm_ctx(c);
430 if (!gk20a_is_channel_marked_as_tsg(c)) 430 if (!is_tsg)
431 vgpu_gr_free_channel_gr_ctx(c); 431 vgpu_gr_free_channel_gr_ctx(c);
432 432
433 /* zcull_ctx, pm_ctx */ 433 /* zcull_ctx, pm_ctx */