author	Terje Bergstrom <tbergstrom@nvidia.com>	2014-04-09 06:33:31 -0400
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-03-18 15:09:06 -0400
commit	3df84a13d142b20f63970b9523143dd0e46c2ff9 (patch)
tree	262f8b11454349ea46a28ca808a43d673f934743 /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent	542f729aa9ea9c5eb845c35d855e3925f45ab24f (diff)
gpu: nvgpu: Make trigger mmu fault GPU specific

Add an abstraction for triggering a fake MMU fault, with a gk20a
implementation. Also add recovery to the FE hardware warning exception
to make testing easier.

Bug 1495967

Change-Id: I6703cff37900a4c4592023423f9c0b31a8928db2
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
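The hook introduced here lives in the per-GPU ops table and is filled in at HAL init time. A minimal sketch of that shape, inferred from the calls in the diff below (the trigger_mmu_fault member, its signature, and gk20a_init_fifo come from this change; the surrounding struct layout is assumed for illustration):

	struct gk20a;

	/* Sketch only: real gpu_ops carries many more sub-structs and
	 * members than shown here. */
	struct gpu_ops {
		struct {
			/* Force a fake MMU fault on each engine whose bit
			 * is set in engine_ids, to kick off recovery. */
			void (*trigger_mmu_fault)(struct gk20a *g,
					unsigned long engine_ids);
		} fifo;
	};

Common recovery code then calls g->ops.fifo.trigger_mmu_fault(g, engine_ids) instead of writing fifo_trigger_mmu_fault_r() directly, so each chip can substitute its own fault-injection sequence.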
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	| 76
1 file changed, 45 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 5575b995..f1987ed5 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1092,43 +1092,15 @@ static void gk20a_fifo_get_faulty_channel(struct gk20a *g, int engine_id,
 		fifo_engine_status_id_v(status);
 }
 
-void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
-		bool verbose)
+static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g,
+		unsigned long engine_ids)
 {
 	unsigned long end_jiffies = jiffies +
 		msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
 	unsigned long delay = GR_IDLE_CHECK_DEFAULT;
-	unsigned long engine_id, i;
-	unsigned long _engine_ids = __engine_ids;
-	unsigned long engine_ids = 0;
+	unsigned long engine_id;
 	int ret;
 
-	if (verbose)
-		gk20a_debug_dump(g->dev);
-
-	/* store faulted engines in advance */
-	g->fifo.mmu_fault_engines = 0;
-	for_each_set_bit(engine_id, &_engine_ids, 32) {
-		bool ref_type_ch;
-		int ref_chid;
-		gk20a_fifo_get_faulty_channel(g, engine_id, &ref_chid,
-				&ref_type_ch);
-
-		/* Reset *all* engines that use the
-		 * same channel as faulty engine */
-		for (i = 0; i < g->fifo.max_engines; i++) {
-			bool type_ch;
-			u32 chid;
-			gk20a_fifo_get_faulty_channel(g, i, &chid, &type_ch);
-			if (ref_type_ch == type_ch && ref_chid == chid) {
-				engine_ids |= BIT(i);
-				g->fifo.mmu_fault_engines |=
-					BIT(gk20a_engine_id_to_mmu_id(i));
-			}
-		}
-
-	}
-
 	/* trigger faults for all bad engines */
 	for_each_set_bit(engine_id, &engine_ids, 32) {
 		if (engine_id > g->fifo.max_engines) {
@@ -1164,6 +1136,42 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
 	gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), 0);
 }
 
+void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
+		bool verbose)
+{
+	unsigned long engine_id, i;
+	unsigned long _engine_ids = __engine_ids;
+	unsigned long engine_ids = 0;
+
+	if (verbose)
+		gk20a_debug_dump(g->dev);
+
+	/* store faulted engines in advance */
+	g->fifo.mmu_fault_engines = 0;
+	for_each_set_bit(engine_id, &_engine_ids, 32) {
+		bool ref_type_ch;
+		int ref_chid;
+		gk20a_fifo_get_faulty_channel(g, engine_id, &ref_chid,
+				&ref_type_ch);
+
+		/* Reset *all* engines that use the
+		 * same channel as faulty engine */
+		for (i = 0; i < g->fifo.max_engines; i++) {
+			bool type_ch;
+			u32 chid;
+			gk20a_fifo_get_faulty_channel(g, i, &chid, &type_ch);
+			if (ref_type_ch == type_ch && ref_chid == chid) {
+				engine_ids |= BIT(i);
+				g->fifo.mmu_fault_engines |=
+					BIT(gk20a_engine_id_to_mmu_id(i));
+			}
+		}
+
+	}
+
+	g->ops.fifo.trigger_mmu_fault(g, engine_ids);
+}
+
 
 static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
 {
@@ -1834,3 +1842,9 @@ bool gk20a_fifo_mmu_fault_pending(struct gk20a *g)
 	else
 		return false;
 }
+
+void gk20a_init_fifo(struct gpu_ops *gops)
+{
+	gk20a_init_channel(gops);
+	gops->fifo.trigger_mmu_fault = gk20a_fifo_trigger_mmu_fault;
+}
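Because gk20a_fifo_recover() now dispatches through the op, a later chip can supply its own fault-injection sequence without touching the common recovery path. A hypothetical sketch of such an override (the gm20b names below are assumed for illustration and are not part of this change):

	/* Hypothetical per-chip override; only the registration pattern
	 * mirrors the gk20a_init_fifo() added in this commit. */
	static void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
			unsigned long engine_ids)
	{
		/* chip-specific fake-fault register sequence would go here */
	}

	void gm20b_init_fifo(struct gpu_ops *gops)
	{
		gops->fifo.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault;
	}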