diff options
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c     | 100
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.h     |   2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/hw_pbdma_gk20a.h |   4
3 files changed, 58 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index db1ccd72..f36a3eb6 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1145,6 +1145,51 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g, | |||
1145 | gk20a_writel(g, fifo_intr_en_0_r(), 0x7FFFFFFF); | 1145 | gk20a_writel(g, fifo_intr_en_0_r(), 0x7FFFFFFF); |
1146 | } | 1146 | } |
1147 | 1147 | ||
1148 | u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 hw_chid) | ||
1149 | { | ||
1150 | int i; | ||
1151 | u32 engines = 0; | ||
1152 | |||
1153 | for (i = 0; i < g->fifo.max_engines; i++) { | ||
1154 | u32 status = gk20a_readl(g, fifo_engine_status_r(i)); | ||
1155 | u32 ctx_status = | ||
1156 | fifo_engine_status_ctx_status_v(status); | ||
1157 | bool type_ch = fifo_pbdma_status_id_type_v(status) == | ||
1158 | fifo_pbdma_status_id_type_chid_v(); | ||
1159 | bool busy = fifo_engine_status_engine_v(status) == | ||
1160 | fifo_engine_status_engine_busy_v(); | ||
1161 | u32 id = (ctx_status == | ||
1162 | fifo_engine_status_ctx_status_ctxsw_load_v()) ? | ||
1163 | fifo_engine_status_next_id_v(status) : | ||
1164 | fifo_engine_status_id_v(status); | ||
1165 | |||
1166 | if (type_ch && busy && id == hw_chid) | ||
1167 | engines |= BIT(i); | ||
1168 | } | ||
1169 | |||
1170 | return engines; | ||
1171 | } | ||
1172 | |||
1173 | void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose) | ||
1174 | { | ||
1175 | u32 engines = gk20a_fifo_engines_on_ch(g, hw_chid); | ||
1176 | if (engines) | ||
1177 | gk20a_fifo_recover(g, engines, verbose); | ||
1178 | else { | ||
1179 | int i; | ||
1180 | struct channel_gk20a *ch = | ||
1181 | g->fifo.channel + hw_chid; | ||
1182 | |||
1183 | gk20a_disable_channel_no_update(ch); | ||
1184 | for (i = 0; i < g->fifo.max_runlists; i++) | ||
1185 | gk20a_fifo_update_runlist(g, i, | ||
1186 | hw_chid, false, false); | ||
1187 | |||
1188 | if (gk20a_fifo_set_ctx_mmu_error(g, ch)) | ||
1189 | gk20a_debug_dump(g->dev); | ||
1190 | } | ||
1191 | } | ||
1192 | |||
1148 | void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, | 1193 | void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, |
1149 | bool verbose) | 1194 | bool verbose) |
1150 | { | 1195 | { |
@@ -1326,7 +1371,6 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) | |||
1326 | return handled; | 1371 | return handled; |
1327 | } | 1372 | } |
1328 | 1373 | ||
1329 | |||
1330 | static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev, | 1374 | static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev, |
1331 | struct gk20a *g, | 1375 | struct gk20a *g, |
1332 | struct fifo_gk20a *f, | 1376 | struct fifo_gk20a *f, |
@@ -1345,9 +1389,11 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev, | |||
1345 | if ((f->intr.pbdma.device_fatal_0 | | 1389 | if ((f->intr.pbdma.device_fatal_0 | |
1346 | f->intr.pbdma.channel_fatal_0 | | 1390 | f->intr.pbdma.channel_fatal_0 | |
1347 | f->intr.pbdma.restartable_0) & pbdma_intr_0) { | 1391 | f->intr.pbdma.restartable_0) & pbdma_intr_0) { |
1348 | dev_err(dev, "pbdma_intr_0(%d):0x%08x PBH: %08x M0: %08x", | 1392 | gk20a_err(dev_from_gk20a(g), |
1393 | "pbdma_intr_0(%d):0x%08x PBH: %08x SHADOW: %08x M0: %08x", | ||
1349 | pbdma_id, pbdma_intr_0, | 1394 | pbdma_id, pbdma_intr_0, |
1350 | gk20a_readl(g, pbdma_pb_header_r(pbdma_id)), | 1395 | gk20a_readl(g, pbdma_pb_header_r(pbdma_id)), |
1396 | gk20a_readl(g, pbdma_hdr_shadow_r(pbdma_id)), | ||
1351 | gk20a_readl(g, pbdma_method0_r(pbdma_id))); | 1397 | gk20a_readl(g, pbdma_method0_r(pbdma_id))); |
1352 | reset = true; | 1398 | reset = true; |
1353 | handled |= ((f->intr.pbdma.device_fatal_0 | | 1399 | handled |= ((f->intr.pbdma.device_fatal_0 | |
@@ -1371,32 +1417,10 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev, | |||
1371 | if (reset) { | 1417 | if (reset) { |
1372 | /* Remove the channel from runlist */ | 1418 | /* Remove the channel from runlist */ |
1373 | u32 status = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id)); | 1419 | u32 status = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id)); |
1420 | u32 hw_chid = fifo_pbdma_status_id_v(status); | ||
1374 | if (fifo_pbdma_status_id_type_v(status) | 1421 | if (fifo_pbdma_status_id_type_v(status) |
1375 | == fifo_pbdma_status_id_type_chid_v()) { | 1422 | == fifo_pbdma_status_id_type_chid_v()) { |
1376 | struct channel_gk20a *ch = g->fifo.channel + | 1423 | gk20a_fifo_recover_ch(g, hw_chid, true); |
1377 | fifo_pbdma_status_id_v(status); | ||
1378 | struct fifo_runlist_info_gk20a *runlist = | ||
1379 | g->fifo.runlist_info; | ||
1380 | int i; | ||
1381 | bool verbose; | ||
1382 | |||
1383 | /* disable the channel from hw and increment | ||
1384 | * syncpoints */ | ||
1385 | gk20a_disable_channel_no_update(ch); | ||
1386 | |||
1387 | /* remove the channel from runlist */ | ||
1388 | clear_bit(ch->hw_chid, | ||
1389 | runlist->active_channels); | ||
1390 | ch->has_timedout = true; | ||
1391 | |||
1392 | /* Recreate the runlist */ | ||
1393 | for (i = 0; i < g->fifo.max_runlists; i++) | ||
1394 | gk20a_fifo_update_runlist(g, | ||
1395 | 0, ~0, false, false); | ||
1396 | |||
1397 | verbose = gk20a_fifo_set_ctx_mmu_error(g, ch); | ||
1398 | if (verbose) | ||
1399 | gk20a_debug_dump(g->dev); | ||
1400 | } | 1424 | } |
1401 | } | 1425 | } |
1402 | 1426 | ||
@@ -1523,34 +1547,14 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid) | |||
1523 | !tegra_platform_is_silicon()); | 1547 | !tegra_platform_is_silicon()); |
1524 | 1548 | ||
1525 | if (ret) { | 1549 | if (ret) { |
1526 | int i; | 1550 | struct channel_gk20a *ch = &g->fifo.channel[hw_chid]; |
1527 | u32 engines = 0; | ||
1528 | struct fifo_gk20a *f = &g->fifo; | ||
1529 | struct channel_gk20a *ch = &f->channel[hw_chid]; | ||
1530 | 1551 | ||
1531 | gk20a_err(dev_from_gk20a(g), "preempt channel %d timeout\n", | 1552 | gk20a_err(dev_from_gk20a(g), "preempt channel %d timeout\n", |
1532 | hw_chid); | 1553 | hw_chid); |
1533 | 1554 | ||
1534 | /* forcefully reset all busy engines using this channel */ | ||
1535 | for (i = 0; i < g->fifo.max_engines; i++) { | ||
1536 | u32 status = gk20a_readl(g, fifo_engine_status_r(i)); | ||
1537 | u32 ctx_status = | ||
1538 | fifo_engine_status_ctx_status_v(status); | ||
1539 | bool type_ch = fifo_pbdma_status_id_type_v(status) == | ||
1540 | fifo_pbdma_status_id_type_chid_v(); | ||
1541 | bool busy = fifo_engine_status_engine_v(status) == | ||
1542 | fifo_engine_status_engine_busy_v(); | ||
1543 | u32 id = (ctx_status == | ||
1544 | fifo_engine_status_ctx_status_ctxsw_load_v()) ? | ||
1545 | fifo_engine_status_next_id_v(status) : | ||
1546 | fifo_engine_status_id_v(status); | ||
1547 | |||
1548 | if (type_ch && busy && id == hw_chid) | ||
1549 | engines |= BIT(i); | ||
1550 | } | ||
1551 | gk20a_set_error_notifier(ch, | 1555 | gk20a_set_error_notifier(ch, |
1552 | NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | 1556 | NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); |
1553 | gk20a_fifo_recover(g, engines, true); | 1557 | gk20a_fifo_recover_ch(g, hw_chid, true); |
1554 | } | 1558 | } |
1555 | 1559 | ||
1556 | /* re-enable elpg or release pmu mutex */ | 1560 | /* re-enable elpg or release pmu mutex */ |
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 078ae8f0..8a4e0a8f 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -149,6 +149,7 @@ int gk20a_fifo_enable_engine_activity(struct gk20a *g, | |||
149 | int gk20a_fifo_disable_engine_activity(struct gk20a *g, | 149 | int gk20a_fifo_disable_engine_activity(struct gk20a *g, |
150 | struct fifo_engine_info_gk20a *eng_info, | 150 | struct fifo_engine_info_gk20a *eng_info, |
151 | bool wait_for_idle); | 151 | bool wait_for_idle); |
152 | u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 hw_chid); | ||
152 | 153 | ||
153 | int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 hw_chid, | 154 | int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 hw_chid, |
154 | bool add, bool wait_for_finish); | 155 | bool add, bool wait_for_finish); |
@@ -157,6 +158,7 @@ int gk20a_fifo_suspend(struct gk20a *g); | |||
157 | 158 | ||
158 | bool gk20a_fifo_mmu_fault_pending(struct gk20a *g); | 159 | bool gk20a_fifo_mmu_fault_pending(struct gk20a *g); |
159 | void gk20a_fifo_recover(struct gk20a *g, u32 engine_ids, bool verbose); | 160 | void gk20a_fifo_recover(struct gk20a *g, u32 engine_ids, bool verbose); |
161 | void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose); | ||
160 | int gk20a_init_fifo_reset_enable_hw(struct gk20a *g); | 162 | int gk20a_init_fifo_reset_enable_hw(struct gk20a *g); |
161 | void gk20a_init_fifo(struct gpu_ops *gops); | 163 | void gk20a_init_fifo(struct gpu_ops *gops); |
162 | 164 | ||
diff --git a/drivers/gpu/nvgpu/gk20a/hw_pbdma_gk20a.h b/drivers/gpu/nvgpu/gk20a/hw_pbdma_gk20a.h
index 60e83122..0d749c20 100644
--- a/drivers/gpu/nvgpu/gk20a/hw_pbdma_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/hw_pbdma_gk20a.h
@@ -174,6 +174,10 @@ static inline u32 pbdma_pb_header_type_inc_f(void) | |||
174 | { | 174 | { |
175 | return 0x20000000; | 175 | return 0x20000000; |
176 | } | 176 | } |
177 | static inline u32 pbdma_hdr_shadow_r(u32 i) | ||
178 | { | ||
179 | return 0x00040118 + i*8192; | ||
180 | } | ||
177 | static inline u32 pbdma_subdevice_r(u32 i) | 181 | static inline u32 pbdma_subdevice_r(u32 i) |
178 | { | 182 | { |
179 | return 0x00040094 + i*8192; | 183 | return 0x00040094 + i*8192; |