author		Seema Khowala <seemaj@nvidia.com>	2018-12-13 14:02:11 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2019-02-22 21:59:18 -0500
commit		c9d4df288d51e4776188a25a6a2bb26ddd897a20 (patch)
tree		de70d1fa9da9bd79e783d24db5953c74f5d15fb8 /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent		d975bda39876b288479ef5d72cb0495fe1c85c6b (diff)
gpu: nvgpu: remove code for ch not bound to tsg
- Remove handling for channels that are no longer bound to a tsg,
  since a channel can still be referenceable while no longer being
  part of a tsg
- Use tsg_gk20a_from_ch to get a pointer to the tsg for a given channel
- Clear unhandled gr interrupts
Bug 2429295
JIRA NVGPU-1580
Change-Id: I9da43a2bc9a0282c793b9f301eaf8e8604f91d70
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1972492
(cherry picked from commit 013ca60edd97e7719e389b3048fed9b165277251
in dev-kernel)
Reviewed-on: https://git-master.nvidia.com/r/2018262
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Debarshi Dutta <ddutta@nvidia.com>
Tested-by: Debarshi Dutta <ddutta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
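
In short, both gk20a_fifo_deferred_reset() and gk20a_fifo_force_reset_ch() now resolve the TSG up front and only log an error when the channel is unbound, instead of falling back to per-channel handling. A minimal sketch of the idiom, condensed from the diff below (tsg_gk20a_from_ch() returns NULL for a channel that is not bound to a TSG):

	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);

	if (tsg != NULL) {
		/* bound channel: operate at TSG granularity */
		engines = gk20a_fifo_engines_on_id(g, tsg->tsgid, true);
	} else {
		/* unbound channel: report it, no per-channel fallback */
		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
	}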
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	64
1 file changed, 22 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index d4e386bd..f12c78f8 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1580,7 +1580,8 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
 
 int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
 {
-	u32 engine_id, engines;
+	unsigned long engine_id, engines = 0U;
+	struct tsg_gk20a *tsg;
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	gr_gk20a_disable_ctxsw(g);
@@ -1589,12 +1590,14 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
 		goto clean_up;
 	}
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		engines = gk20a_fifo_engines_on_id(g, ch->tsgid, true);
+	tsg = tsg_gk20a_from_ch(ch);
+	if (tsg != NULL) {
+		engines = gk20a_fifo_engines_on_id(g, tsg->tsgid, true);
 	} else {
-		engines = gk20a_fifo_engines_on_id(g, ch->chid, false);
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
 	}
-	if (!engines) {
+
+	if (engines == 0U) {
 		goto clean_up;
 	}
 
@@ -1750,16 +1753,18 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 		} else if (type == fifo_engine_status_id_type_chid_v()) {
 			ch = &g->fifo.channel[id];
 			refch = gk20a_channel_get(ch);
+			if (refch != NULL) {
+				tsg = tsg_gk20a_from_ch(refch);
+			}
 		}
 	} else {
 		/* read channel based on instruction pointer */
 		ch = gk20a_refch_from_inst_ptr(g,
 				mmfault_info.inst_ptr);
 		refch = ch;
-	}
-
-	if (ch && gk20a_is_channel_marked_as_tsg(ch)) {
-		tsg = &g->fifo.tsg[ch->tsgid];
+		if (refch != NULL) {
+			tsg = tsg_gk20a_from_ch(refch);
+		}
 	}
 
 	/* check if engine reset should be deferred */
@@ -1786,16 +1791,10 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 	}
 
 #ifdef CONFIG_GK20A_CTXSW_TRACE
-	/*
-	 * For non fake mmu fault, both tsg and ch pointers
-	 * could be valid. Check tsg first.
-	 */
-	if (tsg)
+	if (tsg) {
 		gk20a_ctxsw_trace_tsg_reset(g, tsg);
-	else if (ch)
-		gk20a_ctxsw_trace_channel_reset(g, ch);
+	}
 #endif
-
 	/*
 	 * Disable the channel/TSG from hw and increment syncpoints.
 	 */
@@ -1815,26 +1814,10 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 		if (refch) {
 			gk20a_channel_put(ch);
 		}
-	} else if (ch) {
-		if (refch) {
-			if (g->fifo.deferred_reset_pending) {
-				g->ops.fifo.disable_channel(ch);
-			} else {
-				if (!fake_fault) {
-					gk20a_fifo_set_ctx_mmu_error_ch(
-						g, refch);
-				}
-
-				verbose = gk20a_fifo_error_ch(g,
-						refch);
-				gk20a_channel_abort(ch, false);
-			}
-			gk20a_channel_put(ch);
-		} else {
-			nvgpu_err(g,
-				"mmu error in freed channel %d",
-				ch->chid);
-		}
+	} else if (refch != NULL) {
+		nvgpu_err(g, "mmu error in unbound channel %d",
+				ch->chid);
+		gk20a_channel_put(ch);
 	} else if (mmfault_info.inst_ptr ==
 			nvgpu_inst_block_addr(g, &g->mm.bar1.inst_block)) {
 		nvgpu_err(g, "mmu fault from bar1");
@@ -2116,7 +2099,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
 			rc_type, NULL);
 }
 
-/* force reset channel and tsg (if it's part of one) */
+/* force reset channel and tsg */
 int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 			u32 err_code, bool verbose)
 {
@@ -2126,7 +2109,6 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
 
 	if (tsg != NULL) {
-
 		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
 		nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
@@ -2142,9 +2124,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 		gk20a_fifo_recover_tsg(g, tsg, verbose,
 				RC_TYPE_FORCE_RESET);
 	} else {
-		g->ops.fifo.set_error_notifier(ch, err_code);
-		gk20a_fifo_recover_ch(g, ch, verbose,
-				RC_TYPE_FORCE_RESET);
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
 	}
 
 	return 0;