author     Seema Khowala <seemaj@nvidia.com>                    2018-12-13 14:02:11 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2019-02-22 21:59:18 -0500
commit     c9d4df288d51e4776188a25a6a2bb26ddd897a20 (patch)
tree       de70d1fa9da9bd79e783d24db5953c74f5d15fb8 /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent     d975bda39876b288479ef5d72cb0495fe1c85c6b (diff)
gpu: nvgpu: remove code for ch not bound to tsg
- Remove handling for channels that are no longer bound to a tsg, as a
  channel could be referenceable but no longer part of a tsg
- Use tsg_gk20a_from_ch to get pointer to tsg for a given channel
- Clear unhandled gr interrupts

Bug 2429295
JIRA NVGPU-1580

Change-Id: I9da43a2bc9a0282c793b9f301eaf8e8604f91d70
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1972492
(cherry picked from commit 013ca60edd97e7719e389b3048fed9b165277251 in dev-kernel)
Reviewed-on: https://git-master.nvidia.com/r/2018262
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Debarshi Dutta <ddutta@nvidia.com>
Tested-by: Debarshi Dutta <ddutta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
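At its core the patch replaces open-coded gk20a_is_channel_marked_as_tsg() checks plus direct g->fifo.tsg[ch->tsgid] indexing with a single tsg_gk20a_from_ch() lookup that returns NULL when a channel is referenceable but no longer part of a TSG, and callers then branch on that pointer. The stand-alone sketch below only illustrates that pattern; the struct layouts, the tsg_from_ch() helper, and the NVGPU_INVALID_TSG_ID sentinel used here are simplified assumptions for illustration, not the real nvgpu definitions.

/*
 * Minimal, self-contained sketch of the "look up TSG, branch on NULL"
 * pattern adopted by this change. Types and the helper name are
 * simplified stand-ins; only the control flow mirrors the patch.
 */
#include <stddef.h>
#include <stdio.h>

#define NVGPU_INVALID_TSG_ID 0xFFFFFFFFu
#define MAX_TSG 4

struct tsg_gk20a {
	unsigned int tsgid;
};

struct fifo_gk20a {
	struct tsg_gk20a tsg[MAX_TSG];
};

struct channel_gk20a {
	unsigned int chid;
	unsigned int tsgid;        /* NVGPU_INVALID_TSG_ID when unbound */
	struct fifo_gk20a *fifo;
};

/* Analogue of tsg_gk20a_from_ch(): NULL when the channel is not in a TSG. */
static struct tsg_gk20a *tsg_from_ch(struct channel_gk20a *ch)
{
	if (ch->tsgid == NVGPU_INVALID_TSG_ID || ch->tsgid >= MAX_TSG)
		return NULL;
	return &ch->fifo->tsg[ch->tsgid];
}

/* Callers branch on the returned pointer instead of indexing the tsg array. */
static void handle_gr_event(struct channel_gk20a *ch)
{
	struct tsg_gk20a *tsg = tsg_from_ch(ch);

	if (tsg == NULL) {
		printf("chid: %u is not bound to tsg\n", ch->chid);
		return;
	}
	printf("chid: %u -> tsgid: %u\n", ch->chid, tsg->tsgid);
}

int main(void)
{
	struct fifo_gk20a fifo = { .tsg = { { .tsgid = 0 } } };
	struct channel_gk20a bound   = { .chid = 1, .tsgid = 0, .fifo = &fifo };
	struct channel_gk20a unbound = { .chid = 2, .tsgid = NVGPU_INVALID_TSG_ID,
					 .fifo = &fifo };

	handle_gr_event(&bound);    /* resolves to a TSG */
	handle_gr_event(&unbound);  /* logs "not bound to tsg" and bails out */
	return 0;
}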
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 88
1 files changed, 54 insertions, 34 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 58aa233f..46cbfd8c 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -3018,7 +3018,6 @@ static void gr_gk20a_free_channel_pm_ctx(struct gk20a *g,
 int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 {
         struct gk20a *g = c->g;
-        struct fifo_gk20a *f = &g->fifo;
         struct nvgpu_gr_ctx *gr_ctx;
         struct tsg_gk20a *tsg = NULL;
         int err = 0;
@@ -3041,11 +3040,11 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
         }
         c->obj_class = class_num;
 
-        if (!gk20a_is_channel_marked_as_tsg(c)) {
+        tsg = tsg_gk20a_from_ch(c);
+        if (tsg == NULL) {
                 return -EINVAL;
         }
 
-        tsg = &f->tsg[c->tsgid];
         gr_ctx = &tsg->gr_ctx;
 
         if (!nvgpu_mem_is_valid(&gr_ctx->mem)) {
@@ -5213,21 +5212,21 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g,
                 return;
         }
 
-        if (gk20a_is_channel_marked_as_tsg(ch)) {
-                tsg = &g->fifo.tsg[ch->tsgid];
+        tsg = tsg_gk20a_from_ch(ch);
+        if (tsg != NULL) {
                 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
                 nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
                                 channel_gk20a, ch_entry) {
                         if (gk20a_channel_get(ch_tsg)) {
                                 g->ops.fifo.set_error_notifier(ch_tsg,
                                         error_notifier);
                                 gk20a_channel_put(ch_tsg);
                         }
 
                 }
                 nvgpu_rwsem_up_read(&tsg->ch_list_lock);
         } else {
-                g->ops.fifo.set_error_notifier(ch, error_notifier);
+                nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
         }
 }
 
@@ -5394,12 +5393,21 @@ int gk20a_gr_handle_semaphore_pending(struct gk20a *g,
                 struct gr_gk20a_isr_data *isr_data)
 {
         struct channel_gk20a *ch = isr_data->ch;
-        struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
+        struct tsg_gk20a *tsg;
+
+        if (ch == NULL) {
+                return 0;
+        }
 
-        g->ops.fifo.post_event_id(tsg,
-                NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
+        tsg = tsg_gk20a_from_ch(ch);
+        if (tsg != NULL) {
+                g->ops.fifo.post_event_id(tsg,
+                        NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
 
-        nvgpu_cond_broadcast(&ch->semaphore_wq);
+                nvgpu_cond_broadcast(&ch->semaphore_wq);
+        } else {
+                nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
+        }
 
         return 0;
 }
@@ -5434,7 +5442,12 @@ int gk20a_gr_handle_notify_pending(struct gk20a *g,
         u32 buffer_size;
         u32 offset;
         bool exit;
+#endif
+        if (ch == NULL || tsg_gk20a_from_ch(ch) == NULL) {
+                return 0;
+        }
 
+#if defined(CONFIG_GK20A_CYCLE_STATS)
         /* GL will never use payload 0 for cycle state */
         if ((ch->cyclestate.cyclestate_buffer == NULL) || (isr_data->data_lo == 0))
                 return 0;
@@ -5975,7 +5988,7 @@ int gk20a_gr_isr(struct gk20a *g)
         u32 chid;
 
         nvgpu_log_fn(g, " ");
-        nvgpu_log(g, gpu_dbg_intr, "pgraph intr %08x", gr_intr);
+        nvgpu_log(g, gpu_dbg_intr, "pgraph intr 0x%08x", gr_intr);
 
         if (gr_intr == 0U) {
                 return 0;
@@ -6009,11 +6022,13 @@ int gk20a_gr_isr(struct gk20a *g)
         chid = ch != NULL ? ch->chid : FIFO_INVAL_CHANNEL_ID;
 
         if (ch == NULL) {
-                nvgpu_err(g, "ch id is INVALID 0xffffffff");
-        }
-
-        if ((ch != NULL) && gk20a_is_channel_marked_as_tsg(ch)) {
-                tsg = &g->fifo.tsg[ch->tsgid];
+                nvgpu_err(g, "pgraph intr: 0x%08x, chid: INVALID", gr_intr);
+        } else {
+                tsg = tsg_gk20a_from_ch(ch);
+                if (tsg == NULL) {
+                        nvgpu_err(g, "pgraph intr: 0x%08x, chid: %d "
+                                "not bound to tsg", gr_intr, chid);
+                }
         }
 
         nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
@@ -6198,7 +6213,9 @@ int gk20a_gr_isr(struct gk20a *g)
                         nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
                                    "GPC exception pending");
 
-                        fault_ch = isr_data.ch;
+                        if (tsg != NULL) {
+                                fault_ch = isr_data.ch;
+                        }
 
                         /* fault_ch can be NULL */
                         /* check if any gpc has an exception */
@@ -6225,39 +6242,42 @@ int gk20a_gr_isr(struct gk20a *g)
         }
 
         if (need_reset) {
-                if (tsgid != NVGPU_INVALID_TSG_ID) {
+                if (tsg != NULL) {
                         gk20a_fifo_recover(g, gr_engine_id,
                                            tsgid, true, true, true,
                                            RC_TYPE_GR_FAULT);
-                } else if (ch) {
-                        gk20a_fifo_recover(g, gr_engine_id,
-                                           ch->chid, false, true, true,
-                                           RC_TYPE_GR_FAULT);
                 } else {
+                        if (ch != NULL) {
+                                nvgpu_err(g, "chid: %d referenceable but not "
+                                        "bound to tsg", chid);
+                        }
                         gk20a_fifo_recover(g, gr_engine_id,
                                            0, false, false, true,
                                            RC_TYPE_GR_FAULT);
                 }
         }
 
-        if ((gr_intr != 0U) && (ch == NULL)) {
-                /* Clear interrupts for unused channel. This is
-                   probably an interrupt during gk20a_free_channel() */
-                nvgpu_err(g,
-                        "unhandled gr interrupt 0x%08x for unreferenceable channel, clearing",
-                        gr_intr);
+        if (gr_intr != 0U) {
+                /* clear unhandled interrupts */
+                if (ch == NULL) {
+                        /*
+                         * This is probably an interrupt during
+                         * gk20a_free_channel()
+                         */
+                        nvgpu_err(g, "unhandled gr intr 0x%08x for "
+                                "unreferenceable channel, clearing",
+                                gr_intr);
+                } else {
+                        nvgpu_err(g, "unhandled gr intr 0x%08x for chid: %d",
+                                gr_intr, chid);
+                }
                 gk20a_writel(g, gr_intr_r(), gr_intr);
-                gr_intr = 0;
         }
 
         gk20a_writel(g, gr_gpfifo_ctl_r(),
                 grfifo_ctl | gr_gpfifo_ctl_access_f(1) |
                 gr_gpfifo_ctl_semaphore_access_f(1));
 
-        if (gr_intr) {
-                nvgpu_err(g,
-                        "unhandled gr interrupt 0x%08x", gr_intr);
-        }
 
         /* Posting of BPT events should be the last thing in this function */
         if ((global_esr != 0U) && (tsg != NULL)) {