Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--  drivers/gpu/nvgpu/gv11b/gr_gv11b.c  223
1 file changed, 6 insertions, 217 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
index 7993e071..41892746 100644
--- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -32,6 +32,8 @@
 
 #include "gm20b/gr_gm20b.h"
 
+#include "gp10b/gr_gp10b.h"
+
 #include "gv11b/gr_gv11b.h"
 #include "gv11b/mm_gv11b.h"
 #include "gv11b/subctx_gv11b.h"
@@ -1622,145 +1624,6 @@ static void gr_gv11b_get_access_map(struct gk20a *g,
         *num_entries = ARRAY_SIZE(wl_addr_gv11b);
 }
 
-static int gr_gv11b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a *fault_ch)
-{
-        int ret = 0;
-
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
-
-        ret = gk20a_disable_channel_tsg(g, fault_ch);
-        if (ret) {
-                nvgpu_err(g, "CILP: failed to disable channel/TSG!");
-                return ret;
-        }
-
-        ret = g->ops.fifo.update_runlist(g, fault_ch->runlist_id, ~0, true, false);
-        if (ret) {
-                nvgpu_err(g, "CILP: failed to restart runlist 0!");
-                return ret;
-        }
-
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist");
-
-        if (gk20a_is_channel_marked_as_tsg(fault_ch)) {
-                gk20a_fifo_issue_preempt(g, fault_ch->tsgid, true);
-                nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
-                                "CILP: preempted the channel/tsg");
-        } else {
-                nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
-                                "CILP: tsgid is invalid, cannot preempt");
-                WARN_ON(1); /* only TSG can be preempted */
-        }
-
-        return ret;
-}
-
-static int gr_gv11b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk20a *fault_ch)
-{
-        int ret;
-        struct gr_ctx_desc *gr_ctx = fault_ch->ch_ctx.gr_ctx;
-
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
-
-        if (!gr_ctx)
-                return -EINVAL;
-
-        if (gr_ctx->t18x.cilp_preempt_pending) {
-                gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
-                                "CILP is already pending for chid %d",
-                                fault_ch->hw_chid);
-                return 0;
-        }
-
-        /* get ctx_id from the ucode image */
-        if (!gr_ctx->t18x.ctx_id_valid) {
-                gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
-                                "CILP: looking up ctx id");
-                ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->t18x.ctx_id);
-                if (ret) {
-                        nvgpu_err(g, "CILP: error looking up ctx id!");
-                        return ret;
-                }
-                gr_ctx->t18x.ctx_id_valid = true;
-        }
-
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
-                        "CILP: ctx id is 0x%x", gr_ctx->t18x.ctx_id);
-
-        /* send ucode method to set ctxsw interrupt */
-        ret = gr_gk20a_submit_fecs_sideband_method_op(g,
-                        (struct fecs_method_op_gk20a) {
-                        .method.data = gr_ctx->t18x.ctx_id,
-                        .method.addr =
-                        gr_fecs_method_push_adr_configure_interrupt_completion_option_v(),
-                        .mailbox = {
-                        .id = 1 /* sideband */, .data = 0,
-                        .clr = ~0, .ret = NULL,
-                        .ok = gr_fecs_ctxsw_mailbox_value_pass_v(),
-                        .fail = 0},
-                        .cond.ok = GR_IS_UCODE_OP_EQUAL,
-                        .cond.fail = GR_IS_UCODE_OP_SKIP});
-
-        if (ret) {
-                nvgpu_err(g, "CILP: failed to enable ctxsw interrupt!");
-                return ret;
-        }
-
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
-                        "CILP: enabled ctxsw completion interrupt");
-
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
-                        "CILP: disabling channel %d",
-                        fault_ch->hw_chid);
-
-        ret = gr_gv11b_disable_channel_or_tsg(g, fault_ch);
-        if (ret) {
-                nvgpu_err(g, "CILP: failed to disable channel!!");
-                return ret;
-        }
-
-        /* set cilp_preempt_pending = true and record the channel */
-        gr_ctx->t18x.cilp_preempt_pending = true;
-        g->gr.t18x.cilp_preempt_pending_chid = fault_ch->hw_chid;
-
-        if (gk20a_is_channel_marked_as_tsg(fault_ch)) {
-                struct tsg_gk20a *tsg = &g->fifo.tsg[fault_ch->tsgid];
-
-                gk20a_tsg_event_id_post_event(tsg,
-                                NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED);
-        } else {
-                gk20a_channel_event_id_post_event(fault_ch,
-                                NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED);
-        }
-
-        return 0;
-}
-
-static int gr_gv11b_clear_cilp_preempt_pending(struct gk20a *g,
-                struct channel_gk20a *fault_ch)
-{
-        struct gr_ctx_desc *gr_ctx = fault_ch->ch_ctx.gr_ctx;
-
-        gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
-
-        if (!gr_ctx)
-                return -EINVAL;
-
-        /* The ucode is self-clearing, so all we need to do here is
-           to clear cilp_preempt_pending. */
-        if (!gr_ctx->t18x.cilp_preempt_pending) {
-                gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
-                                "CILP is already cleared for chid %d\n",
-                                fault_ch->hw_chid);
-                return 0;
-        }
-
-        gr_ctx->t18x.cilp_preempt_pending = false;
-        g->gr.t18x.cilp_preempt_pending_chid = -1;
-
-        return 0;
-}
-
 /* @brief pre-process work on the SM exceptions to determine if we clear them or not.
  *
  * On Pascal, if we are in CILP preemtion mode, preempt the channel and handle errors with special processing
@@ -1827,7 +1690,7 @@ static int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
                                 gpc, tpc);
 
                 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
-                ret = gr_gv11b_set_cilp_preempt_pending(g, fault_ch);
+                ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
                 if (ret) {
                         nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
                         return ret;
@@ -1858,31 +1721,6 @@ static int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
         return 0;
 }
 
-static int gr_gv11b_get_cilp_preempt_pending_chid(struct gk20a *g, int *__chid)
-{
-        struct gr_ctx_desc *gr_ctx;
-        struct channel_gk20a *ch;
-        int chid;
-        int ret = -EINVAL;
-
-        chid = g->gr.t18x.cilp_preempt_pending_chid;
-
-        ch = gk20a_channel_get(gk20a_fifo_channel_from_hw_chid(g, chid));
-        if (!ch)
-                return ret;
-
-        gr_ctx = ch->ch_ctx.gr_ctx;
-
-        if (gr_ctx->t18x.cilp_preempt_pending) {
-                *__chid = chid;
-                ret = 0;
-        }
-
-        gk20a_channel_put(ch);
-
-        return ret;
-}
-
 static void gr_gv11b_handle_fecs_ecc_error(struct gk20a *g, u32 intr)
 {
         u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
@@ -1971,65 +1809,16 @@ static int gr_gv11b_handle_fecs_error(struct gk20a *g,
                 struct gr_gk20a_isr_data *isr_data)
 {
         u32 gr_fecs_intr = gk20a_readl(g, gr_fecs_host_int_status_r());
-        struct channel_gk20a *ch;
-        int chid = -1;
-        int ret = 0;
+        int ret;
 
         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
 
-        /*
-         * INTR1 (bit 1 of the HOST_INT_STATUS_CTXSW_INTR)
-         * indicates that a CILP ctxsw save has finished
-         */
-        if (gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) {
-                gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
-                                "CILP: ctxsw save completed!\n");
-
-                /* now clear the interrupt */
-                gk20a_writel(g, gr_fecs_host_int_clear_r(),
-                                gr_fecs_host_int_clear_ctxsw_intr1_clear_f());
-
-                ret = gr_gv11b_get_cilp_preempt_pending_chid(g, &chid);
-                if (ret)
-                        goto clean_up;
-
-                ch = gk20a_channel_get(
-                                gk20a_fifo_channel_from_hw_chid(g, chid));
-                if (!ch)
-                        goto clean_up;
-
-
-                /* set preempt_pending to false */
-                ret = gr_gv11b_clear_cilp_preempt_pending(g, ch);
-                if (ret) {
-                        nvgpu_err(g, "CILP: error while unsetting CILP preempt pending!");
-                        gk20a_channel_put(ch);
-                        goto clean_up;
-                }
-
-                if (gk20a_gr_sm_debugger_attached(g)) {
-                        gk20a_dbg_gpu_post_events(ch);
-
-                        if (gk20a_is_channel_marked_as_tsg(ch)) {
-                                struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
-
-                                gk20a_tsg_event_id_post_event(tsg,
-                                        NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE);
-                        } else {
-                                gk20a_channel_event_id_post_event(ch,
-                                        NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE);
-                        }
-                }
-
-                gk20a_channel_put(ch);
-        }
+        ret = gr_gp10b_handle_fecs_error(g, __ch, isr_data);
 
         /* Handle ECC errors */
         gr_gv11b_handle_fecs_ecc_error(g, gr_fecs_intr);
 
-clean_up:
-        /* handle any remaining interrupts */
-        return gk20a_gr_handle_fecs_error(g, __ch, isr_data);
+        return ret;
 }
 
 static u32 gv11b_mask_hww_warp_esr(u32 hww_warp_esr)