author	Deepak Nibade <dnibade@nvidia.com>	2015-12-29 03:23:46 -0500
committer	Sachin Nikam <snikam@nvidia.com>	2016-01-13 01:58:32 -0500
commit	ca76b336b32dce2e57034fbd44a273c539f67a54 (patch)
tree	ac0dadeb96f3c180a7a0bfcc51679858b2dedb95
parent	0ce201e8de6a320b70f1f34d05202650b9b5a046 (diff)
gpu: nvgpu: support preprocessing of SM exceptions
Support preprocessing of SM exceptions if API pointer
pre_process_sm_exception() is defined.
Also, expose some common APIs.

Bug 200156699

Change-Id: I1303642c1c4403c520b62efb6fd83e95eaeb519b
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/925883
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	6
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.h	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.h	10
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	109
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.h	25
-rw-r--r--	drivers/gpu/nvgpu/gm20b/gr_gm20b.c	3
6 files changed, 101 insertions, 56 deletions
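For context, a minimal sketch (not part of this patch) of how a chip-specific gr backend might implement and register the new pre_process_sm_exception() hook. The hook signature, gk20a_gr_clear_sm_hww(), and the gops->gr registration pattern are taken from the diff below; the gr_gxxx_* names and the early-exit policy are hypothetical examples only.

/*
 * Hypothetical chip-specific hook; not part of this change.
 * Runs before the common SM exception handling in gr_gk20a.c.
 */
static int gr_gxxx_pre_process_sm_exception(struct gk20a *g,
		u32 gpc, u32 tpc, u32 global_esr, u32 warp_esr,
		bool sm_debugger_attached,
		struct channel_gk20a *fault_ch,
		bool *early_exit, bool *ignore_debugger)
{
	/* default: fall through to the common gk20a path */
	*early_exit = false;
	*ignore_debugger = false;

	/*
	 * Example policy (assumption): if no debugger is attached and the
	 * warp error register is clean, clear the global error and return
	 * early so the common handler skips warp sync and event posting.
	 */
	if (!sm_debugger_attached && warp_esr == 0) {
		gk20a_gr_clear_sm_hww(g, gpc, tpc, global_esr);
		*early_exit = true;
	}

	return 0;
}

static void gr_gxxx_init_gr_ops(struct gpu_ops *gops)
{
	/* wired the same way gk20a/gm20b wire handle_fecs_error below */
	gops->gr.pre_process_sm_exception = gr_gxxx_pre_process_sm_exception;
}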
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index ca5c0ee6..ce91fd49 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2529,6 +2529,12 @@ int gk20a_fifo_set_channel_priority(
 	return 0;
 }
 
+struct channel_gk20a *gk20a_fifo_channel_from_hw_chid(struct gk20a *g,
+		u32 hw_chid)
+{
+	return g->fifo.channel + hw_chid;
+}
+
 void gk20a_init_fifo(struct gpu_ops *gops)
 {
 	gk20a_init_channel(gops);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 6ba4153b..5305d007 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -3,7 +3,7 @@
  *
  * GK20A graphics fifo (gr host)
  *
- * Copyright (c) 2011-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -194,4 +194,6 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid);
 bool gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
 		struct channel_gk20a *ch);
 
+struct channel_gk20a *gk20a_fifo_channel_from_hw_chid(struct gk20a *g,
+		u32 hw_chid);
 #endif /*__GR_GK20A_H__*/
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index da115fa8..333f8889 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics
  *
- * Copyright (c) 2011-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -193,6 +193,14 @@ struct gpu_ops {
 			struct warpstate *w_state);
 		void (*get_access_map)(struct gk20a *g,
 			u32 **whitelist, int *num_entries);
+		int (*handle_fecs_error)(struct gk20a *g,
+				struct channel_gk20a *ch,
+				struct gr_gk20a_isr_data *isr_data);
+		int (*pre_process_sm_exception)(struct gk20a *g,
+			u32 gpc, u32 tpc, u32 global_esr, u32 warp_esr,
+			bool sm_debugger_attached,
+			struct channel_gk20a *fault_ch,
+			bool *early_exit, bool *ignore_debugger);
 	} gr;
 	const char *name;
 	struct {
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 73adb071..6f4669f2 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics
  *
- * Copyright (c) 2011-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -86,10 +86,6 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 /*elcg init */
 static void gr_gk20a_enable_elcg(struct gk20a *g);
 
-/* sm lock down */
-static int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
-		u32 global_esr_mask, bool check_errors);
-
 void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
 {
 	int i;
@@ -363,10 +359,10 @@ static int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long end_jiffies,
 	return -EAGAIN;
 }
 
-static int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
+int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
 		u32 *mailbox_ret, u32 opc_success,
 		u32 mailbox_ok, u32 opc_fail,
 		u32 mailbox_fail, bool sleepduringwait)
 {
 	unsigned long end_jiffies = jiffies +
 		msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
@@ -4483,17 +4479,6 @@ void gk20a_gr_wait_initialized(struct gk20a *g)
 
 #define NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0
 
-struct gr_isr_data {
-	u32 addr;
-	u32 data_lo;
-	u32 data_hi;
-	u32 curr_ctx;
-	u32 chid;
-	u32 offset;
-	u32 sub_chan;
-	u32 class_num;
-};
-
 void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data)
 {
 	gk20a_dbg_fn("");
@@ -4763,7 +4748,7 @@ fail:
 }
 
 static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
-		struct gr_isr_data *isr_data)
+		struct gr_gk20a_isr_data *isr_data)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = &f->channel[isr_data->chid];
@@ -4776,7 +4761,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
 }
 
 static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
-		struct gr_isr_data *isr_data)
+		struct gr_gk20a_isr_data *isr_data)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = &f->channel[isr_data->chid];
@@ -4790,7 +4775,7 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
 }
 
 static int gk20a_gr_handle_illegal_method(struct gk20a *g,
-		struct gr_isr_data *isr_data)
+		struct gr_gk20a_isr_data *isr_data)
 {
 	int ret = g->ops.gr.handle_sw_method(g, isr_data->addr,
 			isr_data->class_num, isr_data->offset,
@@ -4804,7 +4789,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g,
 }
 
 static int gk20a_gr_handle_illegal_class(struct gk20a *g,
-		struct gr_isr_data *isr_data)
+		struct gr_gk20a_isr_data *isr_data)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = &f->channel[isr_data->chid];
@@ -4817,8 +4802,8 @@ static int gk20a_gr_handle_illegal_class(struct gk20a *g,
 	return -EINVAL;
 }
 
-static int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
-		struct gr_isr_data *isr_data)
+int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
+		struct gr_gk20a_isr_data *isr_data)
 {
 	u32 gr_fecs_intr = gk20a_readl(g, gr_fecs_host_int_status_r());
 
@@ -4840,7 +4825,7 @@ static int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
 }
 
 static int gk20a_gr_handle_class_error(struct gk20a *g,
-		struct gr_isr_data *isr_data)
+		struct gr_gk20a_isr_data *isr_data)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = &f->channel[isr_data->chid];
@@ -4858,7 +4843,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
 }
 
 static int gk20a_gr_handle_firmware_method(struct gk20a *g,
-		struct gr_isr_data *isr_data)
+		struct gr_gk20a_isr_data *isr_data)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = &f->channel[isr_data->chid];
@@ -4875,7 +4860,7 @@ static int gk20a_gr_handle_firmware_method(struct gk20a *g,
 }
 
 static int gk20a_gr_handle_semaphore_pending(struct gk20a *g,
-		struct gr_isr_data *isr_data)
+		struct gr_gk20a_isr_data *isr_data)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = &f->channel[isr_data->chid];
@@ -4908,7 +4893,7 @@ static inline bool is_valid_cyclestats_bar0_offset_gk20a(struct gk20a *g,
 #endif
 
 static int gk20a_gr_handle_notify_pending(struct gk20a *g,
-		struct gr_isr_data *isr_data)
+		struct gr_gk20a_isr_data *isr_data)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = &f->channel[isr_data->chid];
@@ -5160,7 +5145,7 @@ bool gk20a_gr_sm_debugger_attached(struct gk20a *g)
 }
 
 void gk20a_gr_clear_sm_hww(struct gk20a *g,
 		u32 gpc, u32 tpc, u32 global_esr)
 {
 	u32 offset = proj_gpc_stride_v() * gpc +
 		proj_tpc_in_gpc_stride_v() * tpc;
@@ -5173,17 +5158,11 @@ void gk20a_gr_clear_sm_hww(struct gk20a *g,
 		gr_gpc0_tpc0_sm_hww_warp_esr_error_none_f());
 }
 
-static struct channel_gk20a *
-channel_from_hw_chid(struct gk20a *g, u32 hw_chid)
-{
-	return g->fifo.channel+hw_chid;
-}
-
 static int gk20a_gr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
-		bool *post_event)
+		bool *post_event, struct channel_gk20a *fault_ch)
 {
 	int ret = 0;
-	bool do_warp_sync = false;
+	bool do_warp_sync = false, early_exit = false, ignore_debugger = false;
 	u32 offset = proj_gpc_stride_v() * gpc +
 		proj_tpc_in_gpc_stride_v() * tpc;
 
@@ -5204,9 +5183,28 @@ static int gk20a_gr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 		gr_gpc0_tpc0_sm_hww_global_esr_r() + offset);
 	warp_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset);
 
+	if (g->ops.gr.pre_process_sm_exception) {
+		ret = g->ops.gr.pre_process_sm_exception(g, gpc, tpc,
+				global_esr, warp_esr,
+				sm_debugger_attached,
+				fault_ch,
+				&early_exit,
+				&ignore_debugger);
+		if (ret) {
+			gk20a_err(dev_from_gk20a(g), "could not pre-process sm error!\n");
+			return ret;
+		}
+	}
+
+	if (early_exit) {
+		gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+				"returning early, skipping event posting");
+		return ret;
+	}
+
 	/* if an sm debugger is attached, disable forwarding of tpc exceptions.
 	 * the debugger will reenable exceptions after servicing them. */
-	if (sm_debugger_attached) {
+	if (!ignore_debugger && sm_debugger_attached) {
 		u32 tpc_exception_en = gk20a_readl(g,
 				gr_gpc0_tpc0_tpccs_tpc_exception_en_r() +
 				offset);
@@ -5218,7 +5216,7 @@ static int gk20a_gr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 	}
 
 	/* if a debugger is present and an error has occurred, do a warp sync */
-	if (sm_debugger_attached &&
+	if (!ignore_debugger && sm_debugger_attached &&
 	    ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) {
 		gk20a_dbg(gpu_dbg_intr, "warp sync needed");
 		do_warp_sync = true;
@@ -5232,13 +5230,16 @@ static int gk20a_gr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 		}
 	}
 
-	*post_event |= true;
+	if (ignore_debugger)
+		gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "ignore_debugger set, skipping event posting");
+	else
+		*post_event |= true;
 
 	return ret;
 }
 
 static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
-		bool *post_event)
+		bool *post_event, struct channel_gk20a *fault_ch)
 {
 	int ret = 0;
 	u32 offset = proj_gpc_stride_v() * gpc +
@@ -5253,13 +5254,15 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
 	    gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) {
 		gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
 			"GPC%d TPC%d: SM exception pending", gpc, tpc);
-		ret = gk20a_gr_handle_sm_exception(g, gpc, tpc, post_event);
+		ret = gk20a_gr_handle_sm_exception(g, gpc, tpc,
+				post_event, fault_ch);
 	}
 
 	return ret;
 }
 
-static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event)
+static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event,
+		struct channel_gk20a *fault_ch)
 {
 	int ret = 0;
 	u32 gpc_offset, tpc_offset, gpc, tpc;
@@ -5297,7 +5300,7 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event)
 				gpc_offset + tpc_offset);
 
 		ret = gk20a_gr_handle_tpc_exception(g, gpc, tpc,
-				post_event);
+				post_event, fault_ch);
 
 		/* clear the hwws, also causes tpc and gpc
 		 * exceptions to be cleared */
@@ -5311,7 +5314,7 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event)
 int gk20a_gr_isr(struct gk20a *g)
 {
 	struct device *dev = dev_from_gk20a(g);
-	struct gr_isr_data isr_data;
+	struct gr_gk20a_isr_data isr_data;
 	u32 grfifo_ctl;
 	u32 obj_table;
 	int need_reset = 0;
@@ -5404,7 +5407,7 @@ int gk20a_gr_isr(struct gk20a *g)
 	}
 
 	if (gr_intr & gr_intr_fecs_error_pending_f()) {
-		need_reset |= gk20a_gr_handle_fecs_error(g, ch, &isr_data);
+		need_reset |= g->ops.gr.handle_fecs_error(g, ch, &isr_data);
 		gk20a_writel(g, gr_intr_r(),
 			gr_intr_fecs_error_reset_f());
 		gr_intr &= ~gr_intr_fecs_error_pending_f();
@@ -5460,13 +5463,14 @@ int gk20a_gr_isr(struct gk20a *g)
 		} else {
 			bool post_event = false;
 
+			fault_ch = gk20a_fifo_channel_from_hw_chid(g,
+					isr_data.chid);
+
 			/* check if any gpc has an exception */
 			need_reset |= gk20a_gr_handle_gpc_exception(g,
-					&post_event);
+					&post_event, fault_ch);
 
 			/* signal clients waiting on an event */
-			fault_ch = channel_from_hw_chid(g,
-					isr_data.chid);
 			if (post_event && fault_ch)
 				gk20a_dbg_gpu_post_events(fault_ch);
 		}
@@ -6960,7 +6964,7 @@ static u32 gr_gk20a_get_tpc_num(u32 addr)
 	return 0;
 }
 
-static int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
+int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
 		u32 global_esr_mask, bool check_errors)
 {
 	bool locked_down;
@@ -7430,4 +7434,5 @@ void gk20a_init_gr_ops(struct gpu_ops *gops)
 	gops->gr.set_sm_debug_mode = gr_gk20a_set_sm_debug_mode;
 	gops->gr.bpt_reg_info = gr_gk20a_bpt_reg_info;
 	gops->gr.get_access_map = gr_gk20a_get_access_map;
+	gops->gr.handle_fecs_error = gk20a_gr_handle_fecs_error;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index 51b87ac8..9fc45ec0 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics Engine
  *
- * Copyright (c) 2011-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -175,6 +175,17 @@ struct gk20a_cs_snapshot_client;
 struct gk20a_cs_snapshot;
 #endif
 
+struct gr_gk20a_isr_data {
+	u32 addr;
+	u32 data_lo;
+	u32 data_hi;
+	u32 curr_ctx;
+	u32 chid;
+	u32 offset;
+	u32 sub_chan;
+	u32 class_num;
+};
+
 struct gr_gk20a {
 	struct gk20a *g;
 	struct {
@@ -533,4 +544,16 @@ static inline void gr_gk20a_free_cyclestats_snapshot_data(struct gk20a *g)
 }
 #endif
 
+
+int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
+		struct gr_gk20a_isr_data *isr_data);
+int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
+		u32 global_esr_mask, bool check_errors);
+void gk20a_gr_clear_sm_hww(struct gk20a *g,
+		u32 gpc, u32 tpc, u32 global_esr);
+int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
+		u32 *mailbox_ret, u32 opc_success,
+		u32 mailbox_ok, u32 opc_fail,
+		u32 mailbox_fail, bool sleepduringwait);
+
 #endif /*__GR_GK20A_H__*/
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index fcc3ed10..309aaf95 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -1,7 +1,7 @@
 /*
  * GM20B GPC MMU
  *
- * Copyright (c) 2011-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1226,4 +1226,5 @@ void gm20b_init_gr(struct gpu_ops *gops)
 	gops->gr.enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs;
 	gops->gr.bpt_reg_info = gr_gm20b_bpt_reg_info;
 	gops->gr.get_access_map = gr_gm20b_get_access_map;
+	gops->gr.handle_fecs_error = gk20a_gr_handle_fecs_error;
 }