author    Mayank Kaushik <mkaushik@nvidia.com>     2014-05-19 17:00:13 -0400
committer Dan Willemsen <dwillemsen@nvidia.com>    2015-03-18 15:10:53 -0400
commit    04efcaf97ee08a460deee192134ba30402c577be (patch)
tree      ebc478e4bd28030237def8ebaa86bf69e7aa8df7 /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent    87373abc9598bf5c57f429ea246497d3019a6034 (diff)
gpu: nvgpu: Add support for multiple GPC/TPCs
Add support for multiple GPCs/TPCs to the GPC/TPC exception handling
code.

Change-Id: Ifb4b53a016e90cb54c4d985a9e17760f87c6046f
Signed-off-by: Mayank Kaushik <mkaushik@nvidia.com>
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/411660
Reviewed-by: Automatic_Commit_Validation_User
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c  189
1 file changed, 129 insertions(+), 60 deletions(-)
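
Note: the core of this change is that every SM/TPC exception register access,
previously hard-coded to the GPC0/TPC0 register instance, is now indexed by an
offset of proj_gpc_stride_v() * gpc + proj_tpc_in_gpc_stride_v() * tpc added to
the gr_gpc0_tpc0_* base address. A minimal stand-alone sketch of that
addressing pattern follows; the stride values and the helper name are invented
for illustration and are not the real hardware constants.

    /*
     * Illustrative sketch only: a per-GPC/TPC register address is the
     * unit-0 base address plus per-unit strides, as the patch does with
     * proj_gpc_stride_v() and proj_tpc_in_gpc_stride_v(). The stride
     * values and helper name here are hypothetical.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_GPC_STRIDE        0x8000u  /* hypothetical stride */
    #define EXAMPLE_TPC_IN_GPC_STRIDE 0x0800u  /* hypothetical stride */

    /* address of the same register on an arbitrary GPC/TPC pair */
    static uint32_t tpc_reg_addr(uint32_t gpc0_tpc0_base, uint32_t gpc,
                                 uint32_t tpc)
    {
            return gpc0_tpc0_base +
                   EXAMPLE_GPC_STRIDE * gpc +
                   EXAMPLE_TPC_IN_GPC_STRIDE * tpc;
    }

    int main(void)
    {
            uint32_t base = 0x00504610u; /* hypothetical gr_gpc0_tpc0_* register */

            /* the same register, but on GPC1/TPC2 */
            printf("0x%08x\n", (unsigned int)tpc_reg_addr(base, 1, 2));
            return 0;
    }
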
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index a2832a9f..ef7776df 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -4127,13 +4127,16 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
 
 static void gk20a_gr_enable_gpc_exceptions(struct gk20a *g)
 {
-        /* enable tpc exception forwarding */
-        gk20a_writel(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r(),
-                        gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f());
+        struct gr_gk20a *gr = &g->gr;
+        u32 tpc_mask;
+
+        gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(),
+                        gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f());
+
+        tpc_mask =
+                gr_gpcs_gpccs_gpc_exception_en_tpc_f((1 << gr->tpc_count) - 1);
 
-        /* enable gpc exception forwarding */
-        gk20a_writel(g, gr_gpc0_gpccs_gpc_exception_en_r(),
-                        gr_gpc0_gpccs_gpc_exception_en_tpc_0_enabled_f());
+        gk20a_writel(g, gr_gpcs_gpccs_gpc_exception_en_r(), tpc_mask);
 }
 
 
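
Note: with the rewrite above, SM exceptions are enabled through the broadcast
gr_gpcs_tpcs_* register for all TPCs at once, and the GPC-level enable becomes
a mask with one bit per TPC, ((1 << tpc_count) - 1). A tiny stand-alone sketch
of the mask computation, with a hypothetical TPC count:

    /*
     * Sketch of the TPC-enable mask used above: one bit per TPC, so a chip
     * with tpc_count TPCs gets ((1 << tpc_count) - 1). Stand-alone example,
     * not driver code; the count below is made up.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int tpc_count = 2;  /* hypothetical per-chip TPC count */
            unsigned int tpc_mask = (1u << tpc_count) - 1u;

            printf("tpc_count=%u -> tpc_mask=0x%x\n", tpc_count, tpc_mask); /* 0x3 */
            return 0;
    }
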
@@ -4316,7 +4319,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
         g->ops.gr.enable_hww_exceptions(g);
         g->ops.gr.set_hww_esr_report_mask(g);
 
-        /* enable per GPC exceptions */
+        /* enable TPC exceptions per GPC */
         gk20a_gr_enable_gpc_exceptions(g);
 
         /* TBD: ECC for L1/SM */
@@ -5262,26 +5265,35 @@ unlock:
         return chid;
 }
 
-static int gk20a_gr_lock_down_sm(struct gk20a *g, u32 global_esr_mask)
+static int gk20a_gr_lock_down_sm(struct gk20a *g,
+                u32 gpc, u32 tpc, u32 global_esr_mask)
 {
         unsigned long end_jiffies = jiffies +
                 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
         u32 delay = GR_IDLE_CHECK_DEFAULT;
         bool mmu_debug_mode_enabled = g->ops.mm.is_debug_mode_enabled(g);
+        u32 offset =
+                proj_gpc_stride_v() * gpc + proj_tpc_in_gpc_stride_v() * tpc;
         u32 dbgr_control0;
 
-        gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "locking down SM");
+        gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+                        "GPC%d TPC%d: locking down SM", gpc, tpc);
 
         /* assert stop trigger */
-        dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r());
+        dbgr_control0 =
+                gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset);
         dbgr_control0 |= gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_enable_f();
-        gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r(), dbgr_control0);
+        gk20a_writel(g,
+                gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0);
 
         /* wait for the sm to lock down */
         do {
-                u32 global_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_global_esr_r());
-                u32 warp_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_warp_esr_r());
-                u32 dbgr_status0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_status0_r());
+                u32 global_esr = gk20a_readl(g,
+                                gr_gpc0_tpc0_sm_hww_global_esr_r() + offset);
+                u32 warp_esr = gk20a_readl(g,
+                                gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset);
+                u32 dbgr_status0 = gk20a_readl(g,
+                                gr_gpc0_tpc0_sm_dbgr_status0_r() + offset);
                 bool locked_down =
                         (gr_gpc0_tpc0_sm_dbgr_status0_locked_down_v(dbgr_status0) ==
                          gr_gpc0_tpc0_sm_dbgr_status0_locked_down_true_v());
@@ -5291,11 +5303,14 @@ static int gk20a_gr_lock_down_sm(struct gk20a *g, u32 global_esr_mask)
                         ((global_esr & ~global_esr_mask) != 0);
 
                 if (locked_down || !error_pending) {
-                        gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "locked down SM");
+                        gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+                                        "GPC%d TPC%d: locked down SM", gpc, tpc);
 
                         /* de-assert stop trigger */
                         dbgr_control0 &= ~gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_enable_f();
-                        gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r(), dbgr_control0);
+                        gk20a_writel(g,
+                                gr_gpc0_tpc0_sm_dbgr_control0_r() + offset,
+                                dbgr_control0);
 
                         return 0;
                 }
@@ -5303,8 +5318,9 @@ static int gk20a_gr_lock_down_sm(struct gk20a *g, u32 global_esr_mask)
                 /* if an mmu fault is pending and mmu debug mode is not
                  * enabled, the sm will never lock down. */
                 if (!mmu_debug_mode_enabled && gk20a_fifo_mmu_fault_pending(g)) {
-                        gk20a_err(dev_from_gk20a(g), "mmu fault pending, sm will"
-                                        " never lock down!");
+                        gk20a_err(dev_from_gk20a(g),
+                                        "GPC%d TPC%d: mmu fault pending,"
+                                        " sm will never lock down!", gpc, tpc);
                         return -EFAULT;
                 }
 
@@ -5314,7 +5330,9 @@ static int gk20a_gr_lock_down_sm(struct gk20a *g, u32 global_esr_mask)
         } while (time_before(jiffies, end_jiffies)
                         || !tegra_platform_is_silicon());
 
-        gk20a_err(dev_from_gk20a(g), "timed out while trying to lock down SM");
+        gk20a_err(dev_from_gk20a(g),
+                  "GPC%d TPC%d: timed out while trying to lock down SM",
+                  gpc, tpc);
 
         return -EAGAIN;
 }
@@ -5323,7 +5341,9 @@ bool gk20a_gr_sm_debugger_attached(struct gk20a *g)
 {
         u32 dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r());
 
-        /* check if an sm debugger is attached */
+        /* check if an sm debugger is attached.
+         * assumption: all SMs will have debug mode enabled/disabled
+         * uniformly. */
         if (gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_v(dbgr_control0) ==
                         gr_gpc0_tpc0_sm_dbgr_control0_debugger_mode_on_v())
                 return true;
@@ -5331,12 +5351,17 @@ bool gk20a_gr_sm_debugger_attached(struct gk20a *g)
         return false;
 }
 
-static void gk20a_gr_clear_sm_hww(struct gk20a *g, u32 global_esr)
+static void gk20a_gr_clear_sm_hww(struct gk20a *g,
+                u32 gpc, u32 tpc, u32 global_esr)
 {
-        gk20a_writel(g, gr_gpc0_tpc0_sm_hww_global_esr_r(), global_esr);
+        u32 offset = proj_gpc_stride_v() * gpc +
+                     proj_tpc_in_gpc_stride_v() * tpc;
+
+        gk20a_writel(g, gr_gpc0_tpc0_sm_hww_global_esr_r() + offset,
+                        global_esr);
 
         /* clear the warp hww */
-        gk20a_writel(g, gr_gpc0_tpc0_sm_hww_warp_esr_r(),
+        gk20a_writel(g, gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset,
                         gr_gpc0_tpc0_sm_hww_warp_esr_error_none_f());
 }
 
@@ -5346,11 +5371,14 @@ channel_from_hw_chid(struct gk20a *g, u32 hw_chid)
         return g->fifo.channel+hw_chid;
 }
 
-static int gk20a_gr_handle_sm_exception(struct gk20a *g,
-                struct gr_isr_data *isr_data)
+static int gk20a_gr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
+                bool *post_event)
 {
         int ret = 0;
         bool do_warp_sync = false;
+        u32 offset = proj_gpc_stride_v() * gpc +
+                     proj_tpc_in_gpc_stride_v() * tpc;
+
         /* these three interrupts don't require locking down the SM. They can
          * be handled by usermode clients as they aren't fatal. Additionally,
          * usermode clients may wish to allow some warps to execute while others
@@ -5361,75 +5389,112 @@ static int gk20a_gr_handle_sm_exception(struct gk20a *g,
                 gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f();
         u32 global_esr, warp_esr;
         bool sm_debugger_attached = gk20a_gr_sm_debugger_attached(g);
-        struct channel_gk20a *fault_ch;
 
         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
-        global_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_global_esr_r());
-        warp_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_warp_esr_r());
+        global_esr = gk20a_readl(g,
+                        gr_gpc0_tpc0_sm_hww_global_esr_r() + offset);
+        warp_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset);
 
         /* if an sm debugger is attached, disable forwarding of tpc exceptions.
          * the debugger will reenable exceptions after servicing them. */
         if (sm_debugger_attached) {
-                u32 tpc_exception_en = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r());
+                u32 tpc_exception_en = gk20a_readl(g,
+                                gr_gpc0_tpc0_tpccs_tpc_exception_en_r() +
+                                offset);
                 tpc_exception_en &= ~gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f();
-                gk20a_writel(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r(), tpc_exception_en);
+                gk20a_writel(g,
+                             gr_gpc0_tpc0_tpccs_tpc_exception_en_r() + offset,
+                             tpc_exception_en);
                 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM debugger attached");
         }
 
         /* if a debugger is present and an error has occurred, do a warp sync */
-        if (sm_debugger_attached && ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) {
+        if (sm_debugger_attached &&
+            ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) {
                 gk20a_dbg(gpu_dbg_intr, "warp sync needed");
                 do_warp_sync = true;
         }
 
         if (do_warp_sync) {
-                ret = gk20a_gr_lock_down_sm(g, global_mask);
+                ret = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask);
                 if (ret) {
                         gk20a_err(dev_from_gk20a(g), "sm did not lock down!\n");
                         return ret;
                 }
         }
 
-        /* finally, signal any client waiting on an event */
-        fault_ch = channel_from_hw_chid(g, isr_data->chid);
-        if (fault_ch)
-                gk20a_dbg_gpu_post_events(fault_ch);
+        *post_event |= true;
 
         return ret;
 }
 
-static int gk20a_gr_handle_tpc_exception(struct gk20a *g,
-                struct gr_isr_data *isr_data)
+static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
+                bool *post_event)
 {
         int ret = 0;
-        u32 tpc_exception = gk20a_readl(g, gr_gpcs_tpcs_tpccs_tpc_exception_r());
+        u32 offset = proj_gpc_stride_v() * gpc +
+                     proj_tpc_in_gpc_stride_v() * tpc;
+        u32 tpc_exception = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_r()
+                        + offset);
 
         gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "");
 
         /* check if an sm exeption is pending */
-        if (gr_gpcs_tpcs_tpccs_tpc_exception_sm_v(tpc_exception) ==
-                        gr_gpcs_tpcs_tpccs_tpc_exception_sm_pending_v()) {
-                gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM exception pending");
-                ret = gk20a_gr_handle_sm_exception(g, isr_data);
+        if (gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(tpc_exception) ==
+                        gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) {
+                gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+                                "GPC%d TPC%d: SM exception pending", gpc, tpc);
+                ret = gk20a_gr_handle_sm_exception(g, gpc, tpc, post_event);
         }
 
         return ret;
 }
 
-static int gk20a_gr_handle_gpc_exception(struct gk20a *g,
-                struct gr_isr_data *isr_data)
+static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event)
 {
         int ret = 0;
-        u32 gpc_exception = gk20a_readl(g, gr_gpcs_gpccs_gpc_exception_r());
+        u32 gpc_offset, tpc_offset, gpc, tpc;
+        struct gr_gk20a *gr = &g->gr;
+        u32 exception1 = gk20a_readl(g, gr_exception1_r());
+        u32 gpc_exception, global_esr;
 
         gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "");
 
-        /* check if tpc 0 has an exception */
-        if (gr_gpcs_gpccs_gpc_exception_tpc_v(gpc_exception) ==
-                        gr_gpcs_gpccs_gpc_exception_tpc_0_pending_v()) {
-                gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "TPC exception pending");
-                ret = gk20a_gr_handle_tpc_exception(g, isr_data);
+        for (gpc = 0; gpc < gr->gpc_count; gpc++) {
+                if ((exception1 & (1 << gpc)) == 0)
+                        continue;
+
+                gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+                                "GPC%d exception pending", gpc);
+
+                gpc_offset = proj_gpc_stride_v() * gpc;
+
+                gpc_exception = gk20a_readl(g, gr_gpc0_gpccs_gpc_exception_r()
+                                + gpc_offset);
+
+                /* check if any tpc has an exception */
+                for (tpc = 0; tpc < gr->tpc_count; tpc++) {
+                        if ((gr_gpc0_gpccs_gpc_exception_tpc_v(gpc_exception) &
+                                (1 << tpc)) == 0)
+                                continue;
+
+                        gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+                                  "GPC%d: TPC%d exception pending", gpc, tpc);
+
+                        tpc_offset = proj_tpc_in_gpc_stride_v() * tpc;
+
+                        global_esr = gk20a_readl(g,
+                                        gr_gpc0_tpc0_sm_hww_global_esr_r() +
+                                        gpc_offset + tpc_offset);
+
+                        ret = gk20a_gr_handle_tpc_exception(g, gpc, tpc,
+                                        post_event);
+
+                        /* clear the hwws, also causes tpc and gpc
+                         * exceptions to be cleared */
+                        gk20a_gr_clear_sm_hww(g, gpc, tpc, global_esr);
+                }
         }
 
         return ret;
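
Note: the rewritten gk20a_gr_handle_gpc_exception() above no longer checks only
TPC0 on GPC0. It scans gr_exception1_r() for one pending bit per GPC and, inside
each pending GPC, scans that GPC's exception register (via
gr_gpc0_gpccs_gpc_exception_tpc_v()) for one pending bit per TPC. A simplified
stand-alone sketch of that nested scan follows; the register values are
invented, and only the loop/mask structure mirrors the patch.

    /*
     * Simplified sketch of the nested GPC/TPC exception scan. The counts
     * and register contents below are made up for the example.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int gpc_count = 2, tpc_count = 2;
            unsigned int exception1 = 0x2;                /* pretend only GPC1 is pending */
            unsigned int gpc_exception[2] = { 0x0, 0x3 }; /* GPC1: TPC0 and TPC1 pending */
            unsigned int gpc, tpc;

            for (gpc = 0; gpc < gpc_count; gpc++) {
                    if ((exception1 & (1u << gpc)) == 0)
                            continue;
                    for (tpc = 0; tpc < tpc_count; tpc++) {
                            if ((gpc_exception[gpc] & (1u << tpc)) == 0)
                                    continue;
                            /* the real code reads the SM ESRs at the GPC/TPC
                             * offset, calls the TPC handler and clears the HWWs */
                            printf("handle GPC%u TPC%u\n", gpc, tpc);
                    }
            }
            return 0;
    }
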
@@ -5569,8 +5634,7 @@ int gk20a_gr_isr(struct gk20a *g)
 
         /* check if a gpc exception has occurred */
         if (exception & gr_exception_gpc_m() && need_reset == 0) {
-                u32 exception1 = gk20a_readl(g, gr_exception1_r());
-                u32 global_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_global_esr_r());
+                struct channel_gk20a *fault_ch;
 
                 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC exception pending");
 
@@ -5580,12 +5644,17 @@ int gk20a_gr_isr(struct gk20a *g)
                                 "SM debugger not attached, clearing interrupt");
                         need_reset |= -EFAULT;
                 } else {
-                        /* check if gpc 0 has an exception */
-                        if (exception1 & gr_exception1_gpc_0_pending_f())
-                                need_reset |= gk20a_gr_handle_gpc_exception(g, &isr_data);
-                        /* clear the hwws, also causes tpc and gpc
-                         * exceptions to be cleared */
-                        gk20a_gr_clear_sm_hww(g, global_esr);
+                        bool post_event = false;
+
+                        /* check if any gpc has an exception */
+                        need_reset |= gk20a_gr_handle_gpc_exception(g,
+                                        &post_event);
+
+                        /* signal clients waiting on an event */
+                        fault_ch = channel_from_hw_chid(g,
+                                        isr_data.chid);
+                        if (post_event && fault_ch)
+                                gk20a_dbg_gpu_post_events(fault_ch);
                 }
 
                 if (need_reset)
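
Note: event posting also moves out of the per-SM handler. The SM and TPC
handlers now just OR into a post_event flag, and the ISR posts a single
debugger event for the faulting channel once the whole GPC/TPC scan is done.
A stand-alone sketch of that flow, with stand-in types and a hypothetical
channel id:

    /*
     * Sketch of the event-posting change: handlers set a flag, the caller
     * posts once. The types and post function are stand-ins, not the
     * driver's.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static void post_debug_event(int chid)
    {
            printf("post event to channel %d\n", chid);
    }

    static int handle_sm_exception(bool *post_event)
    {
            /* ... service the SM ... */
            *post_event |= true;    /* same idiom as the patch */
            return 0;
    }

    int main(void)
    {
            bool post_event = false;
            int fault_chid = 7;     /* hypothetical faulting channel id */

            handle_sm_exception(&post_event);
            handle_sm_exception(&post_event);

            if (post_event)
                    post_debug_event(fault_chid);   /* posted once, not per SM */
            return 0;
    }
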