author	Deepak Nibade <dnibade@nvidia.com>	2014-09-10 07:04:32 -0400
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-03-18 15:11:17 -0400
commit	1c7dcfdeef3b0672317fca947cb2097e97c623a9 (patch)
tree	f5e9c6a843d3e0079f1a124d2b9ffc21bb7585b5 /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent	2f232348e686f92a6a34e5fa7b98884d4b48313b (diff)
gpu: nvgpu: use TSG recover API
Use the TSG-specific API gk20a_fifo_recover_tsg() in the following cases:
- IOCTL_CHANNEL_FORCE_RESET: when force-resetting a channel that is part of a TSG, reset all channels in the TSG
- PBDMA interrupt handling: when resetting on a PBDMA interrupt, if the channel is part of a TSG, recover the entire TSG
- TSG preempt failure: when a TSG preempt times out, use the TSG recover API

Use the preempt_tsg() API to preempt when a channel is part of a TSG.

Add the two generic APIs below, which take care of preempting/recovering either a channel or a TSG as required:
gk20a_fifo_preempt()
gk20a_fifo_force_reset_ch()

Bug 1470692

Change-Id: I8d46e252af79136be85a9a2accf8b51bd924ca8c
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/497875
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	66
1 file changed, 62 insertions, 4 deletions
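Both wrappers added by this change follow the same dispatch shape: check TSG membership, then call the TSG-wide or single-channel variant. A minimal caller sketch for orientation (example_teardown() and its force_reset flag are illustrative only, not part of this patch; the gk20a_* functions are the ones added in the diff below):

/* Illustrative caller: with the new wrappers the caller no longer
 * branches on TSG membership itself. */
static int example_teardown(struct gk20a *g, struct channel_gk20a *ch,
		bool force_reset)
{
	if (force_reset)
		/* notifies all channels in ch's TSG (or just ch), then recovers */
		return gk20a_fifo_force_reset_ch(ch, true);

	/* preempts ch's whole TSG if it is in one, else just ch */
	return gk20a_fifo_preempt(g, ch);
}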
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 3f35e7b2..617245b8 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1291,6 +1291,29 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
 	g->ops.fifo.trigger_mmu_fault(g, engine_ids);
 }
 
+int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose)
+{
+	struct tsg_gk20a *tsg = NULL;
+	struct channel_gk20a *ch_tsg = NULL;
+
+	if (gk20a_is_channel_marked_as_tsg(ch)) {
+		tsg = &ch->g->fifo.tsg[ch->hw_chid];
+
+		mutex_lock(&tsg->ch_list_lock);
+		list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
+			gk20a_set_error_notifier(ch_tsg,
+				NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR);
+		}
+		mutex_unlock(&tsg->ch_list_lock);
+		gk20a_fifo_recover_tsg(ch->g, ch->tsgid, verbose);
+	} else {
+		gk20a_set_error_notifier(ch,
+			NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR);
+		gk20a_fifo_recover_ch(ch->g, ch->hw_chid, verbose);
+	}
+
+	return 0;
+}
 
 static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
 {
@@ -1482,13 +1505,26 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
 	if (reset) {
 		/* Remove the channel from runlist */
 		u32 status = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id));
-		u32 hw_chid = fifo_pbdma_status_id_v(status);
+		u32 id = fifo_pbdma_status_id_v(status);
 		if (fifo_pbdma_status_id_type_v(status)
 				== fifo_pbdma_status_id_type_chid_v()) {
-			struct channel_gk20a *ch = &f->channel[hw_chid];
+			struct channel_gk20a *ch = &f->channel[id];
+
 			gk20a_set_error_notifier(ch,
 				NVHOST_CHANNEL_PBDMA_ERROR);
-			gk20a_fifo_recover_ch(g, hw_chid, true);
+			gk20a_fifo_recover_ch(g, id, true);
+		} else if (fifo_pbdma_status_id_type_v(status)
+				== fifo_pbdma_status_id_type_tsgid_v()) {
+			struct tsg_gk20a *tsg = &f->tsg[id];
+			struct channel_gk20a *ch = NULL;
+
+			mutex_lock(&tsg->ch_list_lock);
+			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+				gk20a_set_error_notifier(ch,
+					NVHOST_CHANNEL_PBDMA_ERROR);
+			}
+			mutex_unlock(&tsg->ch_list_lock);
+			gk20a_fifo_recover_tsg(g, id, true);
 		}
 	}
 
@@ -1606,9 +1642,19 @@ static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
 
 	if (ret) {
 		if (is_tsg) {
-			/* TODO: recovery for TSG */
+			struct tsg_gk20a *tsg = &g->fifo.tsg[id];
+			struct channel_gk20a *ch = NULL;
+
 			gk20a_err(dev_from_gk20a(g),
 				"preempt TSG %d timeout\n", id);
+
+			mutex_lock(&tsg->ch_list_lock);
+			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+				gk20a_set_error_notifier(ch,
+					NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
+			}
+			mutex_unlock(&tsg->ch_list_lock);
+			gk20a_fifo_recover_tsg(g, id, true);
 		} else {
 			struct channel_gk20a *ch = &g->fifo.channel[id];
 
@@ -1678,6 +1724,18 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	return ret;
 }
 
+int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
+{
+	int err;
+
+	if (gk20a_is_channel_marked_as_tsg(ch))
+		err = gk20a_fifo_preempt_tsg(ch->g, ch->tsgid);
+	else
+		err = gk20a_fifo_preempt_channel(ch->g, ch->hw_chid);
+
+	return err;
+}
+
 int gk20a_fifo_enable_engine_activity(struct gk20a *g,
 		struct fifo_engine_info_gk20a *eng_info)
 {
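All three recovery paths in this change share one idiom: set an error notifier on every channel of the TSG while holding ch_list_lock, then recover the whole TSG. A condensed sketch of that idiom as a helper (the name example_notify_and_recover_tsg is hypothetical; the patch open-codes the pattern in each hunk):

/* Hypothetical helper condensing the pattern repeated above in the
 * force-reset, PBDMA-interrupt, and preempt-timeout paths: notify
 * every channel in the TSG under the list lock, then recover. */
static void example_notify_and_recover_tsg(struct gk20a *g,
		struct tsg_gk20a *tsg, u32 tsgid, u32 error, bool verbose)
{
	struct channel_gk20a *ch = NULL;

	mutex_lock(&tsg->ch_list_lock);
	list_for_each_entry(ch, &tsg->ch_list, ch_entry)
		gk20a_set_error_notifier(ch, error);
	mutex_unlock(&tsg->ch_list_lock);

	gk20a_fifo_recover_tsg(g, tsgid, verbose);
}

Note that each hunk releases ch_list_lock before calling gk20a_fifo_recover_tsg(), so the sketch does the same.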