summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2014-09-10 07:04:32 -0400
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:11:17 -0400
commit1c7dcfdeef3b0672317fca947cb2097e97c623a9 (patch)
treef5e9c6a843d3e0079f1a124d2b9ffc21bb7585b5 /drivers
parent2f232348e686f92a6a34e5fa7b98884d4b48313b (diff)
gpu: nvgpu: use TSG recover API
Use TSG-specific API gk20a_fifo_recover_tsg() in the following cases:
- IOCTL_CHANNEL_FORCE_RESET: to force-reset a channel in a TSG, reset all of its channels
- pbdma intr handling while resetting: if the channel is part of a TSG, recover the entire TSG
- TSG preempt failure: when a TSG preempt times out, use the TSG recover API
Use the preempt_tsg() API to preempt if the channel is part of a TSG.
Add the two generic APIs below, which take care of preempting/recovering either a channel or a TSG as required:
gk20a_fifo_preempt()
gk20a_fifo_force_reset_ch()
Bug 1470692
Change-Id: I8d46e252af79136be85a9a2accf8b51bd924ca8c
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/497875
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.c9
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c66
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.h2
3 files changed, 66 insertions, 11 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 7a3132c9..1e71c1c7 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -2293,8 +2293,6 @@ long gk20a_channel_ioctl(struct file *filp,
2293 gk20a_idle(dev); 2293 gk20a_idle(dev);
2294 break; 2294 break;
2295 case NVHOST_IOCTL_CHANNEL_PREEMPT: 2295 case NVHOST_IOCTL_CHANNEL_PREEMPT:
2296 if (gk20a_is_channel_marked_as_tsg(ch))
2297 return -EINVAL;
2298 err = gk20a_busy(dev); 2296 err = gk20a_busy(dev);
2299 if (err) { 2297 if (err) {
2300 dev_err(&dev->dev, 2298 dev_err(&dev->dev,
@@ -2302,8 +2300,7 @@ long gk20a_channel_ioctl(struct file *filp,
2302 __func__, cmd); 2300 __func__, cmd);
2303 return err; 2301 return err;
2304 } 2302 }
2305 /* preempt channel */ 2303 err = gk20a_fifo_preempt(ch->g, ch);
2306 err = gk20a_fifo_preempt_channel(ch->g, ch->hw_chid);
2307 gk20a_idle(dev); 2304 gk20a_idle(dev);
2308 break; 2305 break;
2309 case NVHOST_IOCTL_CHANNEL_FORCE_RESET: 2306 case NVHOST_IOCTL_CHANNEL_FORCE_RESET:
@@ -2314,9 +2311,7 @@ long gk20a_channel_ioctl(struct file *filp,
2314 __func__, cmd); 2311 __func__, cmd);
2315 return err; 2312 return err;
2316 } 2313 }
2317 gk20a_set_error_notifier(ch, 2314 err = gk20a_fifo_force_reset_ch(ch, true);
2318 NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR);
2319 gk20a_fifo_recover_ch(ch->g, ch->hw_chid, true);
2320 gk20a_idle(dev); 2315 gk20a_idle(dev);
2321 break; 2316 break;
2322 default: 2317 default:
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 3f35e7b2..617245b8 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1291,6 +1291,29 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
1291 g->ops.fifo.trigger_mmu_fault(g, engine_ids); 1291 g->ops.fifo.trigger_mmu_fault(g, engine_ids);
1292} 1292}
1293 1293
1294int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose)
1295{
1296 struct tsg_gk20a *tsg = NULL;
1297 struct channel_gk20a *ch_tsg = NULL;
1298
1299 if (gk20a_is_channel_marked_as_tsg(ch)) {
1300 tsg = &ch->g->fifo.tsg[ch->hw_chid];
1301
1302 mutex_lock(&tsg->ch_list_lock);
1303 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
1304 gk20a_set_error_notifier(ch_tsg,
1305 NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR);
1306 }
1307 mutex_unlock(&tsg->ch_list_lock);
1308 gk20a_fifo_recover_tsg(ch->g, ch->tsgid, verbose);
1309 } else {
1310 gk20a_set_error_notifier(ch,
1311 NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR);
1312 gk20a_fifo_recover_ch(ch->g, ch->hw_chid, verbose);
1313 }
1314
1315 return 0;
1316}
1294 1317
1295static bool gk20a_fifo_handle_sched_error(struct gk20a *g) 1318static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
1296{ 1319{
@@ -1482,13 +1505,26 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
1482 if (reset) { 1505 if (reset) {
1483 /* Remove the channel from runlist */ 1506 /* Remove the channel from runlist */
1484 u32 status = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id)); 1507 u32 status = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id));
1485 u32 hw_chid = fifo_pbdma_status_id_v(status); 1508 u32 id = fifo_pbdma_status_id_v(status);
1486 if (fifo_pbdma_status_id_type_v(status) 1509 if (fifo_pbdma_status_id_type_v(status)
1487 == fifo_pbdma_status_id_type_chid_v()) { 1510 == fifo_pbdma_status_id_type_chid_v()) {
1488 struct channel_gk20a *ch = &f->channel[hw_chid]; 1511 struct channel_gk20a *ch = &f->channel[id];
1512
1489 gk20a_set_error_notifier(ch, 1513 gk20a_set_error_notifier(ch,
1490 NVHOST_CHANNEL_PBDMA_ERROR); 1514 NVHOST_CHANNEL_PBDMA_ERROR);
1491 gk20a_fifo_recover_ch(g, hw_chid, true); 1515 gk20a_fifo_recover_ch(g, id, true);
1516 } else if (fifo_pbdma_status_id_type_v(status)
1517 == fifo_pbdma_status_id_type_tsgid_v()) {
1518 struct tsg_gk20a *tsg = &f->tsg[id];
1519 struct channel_gk20a *ch = NULL;
1520
1521 mutex_lock(&tsg->ch_list_lock);
1522 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
1523 gk20a_set_error_notifier(ch,
1524 NVHOST_CHANNEL_PBDMA_ERROR);
1525 }
1526 mutex_unlock(&tsg->ch_list_lock);
1527 gk20a_fifo_recover_tsg(g, id, true);
1492 } 1528 }
1493 } 1529 }
1494 1530
@@ -1606,9 +1642,19 @@ static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
1606 1642
1607 if (ret) { 1643 if (ret) {
1608 if (is_tsg) { 1644 if (is_tsg) {
1609 /* TODO: recovery for TSG */ 1645 struct tsg_gk20a *tsg = &g->fifo.tsg[id];
1646 struct channel_gk20a *ch = NULL;
1647
1610 gk20a_err(dev_from_gk20a(g), 1648 gk20a_err(dev_from_gk20a(g),
1611 "preempt TSG %d timeout\n", id); 1649 "preempt TSG %d timeout\n", id);
1650
1651 mutex_lock(&tsg->ch_list_lock);
1652 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
1653 gk20a_set_error_notifier(ch,
1654 NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
1655 }
1656 mutex_unlock(&tsg->ch_list_lock);
1657 gk20a_fifo_recover_tsg(g, id, true);
1612 } else { 1658 } else {
1613 struct channel_gk20a *ch = &g->fifo.channel[id]; 1659 struct channel_gk20a *ch = &g->fifo.channel[id];
1614 1660
@@ -1678,6 +1724,18 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
1678 return ret; 1724 return ret;
1679} 1725}
1680 1726
1727int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
1728{
1729 int err;
1730
1731 if (gk20a_is_channel_marked_as_tsg(ch))
1732 err = gk20a_fifo_preempt_tsg(ch->g, ch->tsgid);
1733 else
1734 err = gk20a_fifo_preempt_channel(ch->g, ch->hw_chid);
1735
1736 return err;
1737}
1738
1681int gk20a_fifo_enable_engine_activity(struct gk20a *g, 1739int gk20a_fifo_enable_engine_activity(struct gk20a *g,
1682 struct fifo_engine_info_gk20a *eng_info) 1740 struct fifo_engine_info_gk20a *eng_info)
1683{ 1741{
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index e7560e27..103f00a1 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -147,6 +147,7 @@ void gk20a_fifo_nonstall_isr(struct gk20a *g);
147 147
148int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid); 148int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid);
149int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid); 149int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
150int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch);
150 151
151int gk20a_fifo_enable_engine_activity(struct gk20a *g, 152int gk20a_fifo_enable_engine_activity(struct gk20a *g,
152 struct fifo_engine_info_gk20a *eng_info); 153 struct fifo_engine_info_gk20a *eng_info);
@@ -164,6 +165,7 @@ bool gk20a_fifo_mmu_fault_pending(struct gk20a *g);
164void gk20a_fifo_recover(struct gk20a *g, u32 engine_ids, bool verbose); 165void gk20a_fifo_recover(struct gk20a *g, u32 engine_ids, bool verbose);
165void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose); 166void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose);
166void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose); 167void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose);
168int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose);
167int gk20a_init_fifo_reset_enable_hw(struct gk20a *g); 169int gk20a_init_fifo_reset_enable_hw(struct gk20a *g);
168void gk20a_init_fifo(struct gpu_ops *gops); 170void gk20a_init_fifo(struct gpu_ops *gops);
169 171