diff options
author | seshendra Gadagottu <sgadagottu@nvidia.com> | 2016-08-22 16:20:05 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2016-09-12 13:46:37 -0400 |
commit | 51b5ec852096c0eeb1eaca48ae132d7bf9ac7a9d (patch) | |
tree | 0c182e08ae521ccc449ebdd6abdc0180ed3c98df /drivers/gpu/nvgpu/gv11b/gr_gv11b.c | |
parent | 2c6652f182d84dc7ec4218576b65ad582f05d4a6 (diff) |
gpu: nvgpu: gv11b: hw header update
Updated hw headers to CL#37001916. Some of the
important changes include a new doorbell user-mode
mechanism and a new runlist structure.
Bug 1735765
Change-Id: Icf01156dd3e7d94466f553ffc53267e4043e1188
Signed-off-by: seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: http://git-master/r/1205888
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/gr_gv11b.c')
-rw-r--r-- | drivers/gpu/nvgpu/gv11b/gr_gv11b.c | 63 |
1 file changed, 31 insertions, 32 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c index 9d0b4ade..088ec040 100644 --- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c | |||
@@ -72,16 +72,16 @@ static int gr_gv11b_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
72 | gr_gk20a_handle_sm_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr); | 72 | gr_gk20a_handle_sm_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr); |
73 | 73 | ||
74 | /* Check for LRF ECC errors. */ | 74 | /* Check for LRF ECC errors. */ |
75 | lrf_ecc_status = gk20a_readl(g, | 75 | lrf_ecc_status = gk20a_readl(g, |
76 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r() + offset); | 76 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r() + offset); |
77 | if ( (lrf_ecc_status & | 77 | if ((lrf_ecc_status & |
78 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp0_pending_f()) || | 78 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp0_pending_f()) || |
79 | (lrf_ecc_status & | 79 | (lrf_ecc_status & |
80 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp1_pending_f()) || | 80 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp1_pending_f()) || |
81 | (lrf_ecc_status & | 81 | (lrf_ecc_status & |
82 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp2_pending_f()) || | 82 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp2_pending_f()) || |
83 | (lrf_ecc_status & | 83 | (lrf_ecc_status & |
84 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp3_pending_f()) ) { | 84 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_single_err_detected_qrfdp3_pending_f())) { |
85 | 85 | ||
86 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, | 86 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, |
87 | "Single bit error detected in SM LRF!"); | 87 | "Single bit error detected in SM LRF!"); |
@@ -93,14 +93,14 @@ static int gr_gv11b_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
93 | gr_pri_gpc0_tpc0_sm_lrf_ecc_single_err_count_r() + offset, | 93 | gr_pri_gpc0_tpc0_sm_lrf_ecc_single_err_count_r() + offset, |
94 | 0); | 94 | 0); |
95 | } | 95 | } |
96 | if ( (lrf_ecc_status & | 96 | if ((lrf_ecc_status & |
97 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp0_pending_f()) || | 97 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp0_pending_f()) || |
98 | (lrf_ecc_status & | 98 | (lrf_ecc_status & |
99 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp1_pending_f()) || | 99 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp1_pending_f()) || |
100 | (lrf_ecc_status & | 100 | (lrf_ecc_status & |
101 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp2_pending_f()) || | 101 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp2_pending_f()) || |
102 | (lrf_ecc_status & | 102 | (lrf_ecc_status & |
103 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp3_pending_f()) ) { | 103 | gr_pri_gpc0_tpc0_sm_lrf_ecc_status_double_err_detected_qrfdp3_pending_f())) { |
104 | 104 | ||
105 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, | 105 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, |
106 | "Double bit error detected in SM LRF!"); | 106 | "Double bit error detected in SM LRF!"); |
@@ -109,14 +109,13 @@ static int gr_gv11b_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
109 | gk20a_readl(g, | 109 | gk20a_readl(g, |
110 | gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset); | 110 | gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset); |
111 | gk20a_writel(g, | 111 | gk20a_writel(g, |
112 | gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset, | 112 | gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset, 0); |
113 | 0); | ||
114 | } | 113 | } |
115 | gk20a_writel(g, gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r() + offset, | 114 | gk20a_writel(g, gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r() + offset, |
116 | lrf_ecc_status); | 115 | lrf_ecc_status); |
117 | 116 | ||
118 | /* Check for SHM ECC errors. */ | 117 | /* Check for SHM ECC errors. */ |
119 | shm_ecc_status = gk20a_readl(g, | 118 | shm_ecc_status = gk20a_readl(g, |
120 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_r() + offset); | 119 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_r() + offset); |
121 | if ((shm_ecc_status & | 120 | if ((shm_ecc_status & |
122 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_corrected_shm0_pending_f()) || | 121 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_corrected_shm0_pending_f()) || |
@@ -125,7 +124,7 @@ static int gr_gv11b_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
125 | (shm_ecc_status & | 124 | (shm_ecc_status & |
126 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm0_pending_f()) || | 125 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm0_pending_f()) || |
127 | (shm_ecc_status & | 126 | (shm_ecc_status & |
128 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) ) { | 127 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f())) { |
129 | u32 ecc_stats_reg_val; | 128 | u32 ecc_stats_reg_val; |
130 | 129 | ||
131 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, | 130 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, |
@@ -144,10 +143,10 @@ static int gr_gv11b_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
144 | gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_r() + offset, | 143 | gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_r() + offset, |
145 | ecc_stats_reg_val); | 144 | ecc_stats_reg_val); |
146 | } | 145 | } |
147 | if ( (shm_ecc_status & | 146 | if ((shm_ecc_status & |
148 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm0_pending_f()) || | 147 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm0_pending_f()) || |
149 | (shm_ecc_status & | 148 | (shm_ecc_status & |
150 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) ) { | 149 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f())) { |
151 | u32 ecc_stats_reg_val; | 150 | u32 ecc_stats_reg_val; |
152 | 151 | ||
153 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, | 152 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, |
@@ -1133,8 +1132,8 @@ static int gr_gv11b_dump_gr_status_regs(struct gk20a *g, | |||
1133 | gk20a_readl(g, gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r())); | 1132 | gk20a_readl(g, gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r())); |
1134 | gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_FS: 0x%x\n", | 1133 | gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_FS: 0x%x\n", |
1135 | gk20a_readl(g, gr_cwd_fs_r())); | 1134 | gk20a_readl(g, gr_cwd_fs_r())); |
1136 | gk20a_debug_output(o, "NV_PGRAPH_PRI_FE_TPC_FS: 0x%x\n", | 1135 | gk20a_debug_output(o, "NV_PGRAPH_PRI_FE_TPC_FS(0): 0x%x\n", |
1137 | gk20a_readl(g, gr_fe_tpc_fs_r())); | 1136 | gk20a_readl(g, gr_fe_tpc_fs_r(0))); |
1138 | gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_GPC_TPC_ID: 0x%x\n", | 1137 | gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_GPC_TPC_ID: 0x%x\n", |
1139 | gk20a_readl(g, gr_cwd_gpc_tpc_id_r(0))); | 1138 | gk20a_readl(g, gr_cwd_gpc_tpc_id_r(0))); |
1140 | gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_SM_ID(0): 0x%x\n", | 1139 | gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_SM_ID(0): 0x%x\n", |
@@ -1184,7 +1183,7 @@ static int gr_gv11b_dump_gr_status_regs(struct gk20a *g, | |||
1184 | 1183 | ||
1185 | static bool gr_activity_empty_or_preempted(u32 val) | 1184 | static bool gr_activity_empty_or_preempted(u32 val) |
1186 | { | 1185 | { |
1187 | while(val) { | 1186 | while (val) { |
1188 | u32 v = val & 7; | 1187 | u32 v = val & 7; |
1189 | if (v != gr_activity_4_gpc0_empty_v() && | 1188 | if (v != gr_activity_4_gpc0_empty_v() && |
1190 | v != gr_activity_4_gpc0_preempted_v()) | 1189 | v != gr_activity_4_gpc0_preempted_v()) |
@@ -1542,16 +1541,16 @@ static int gr_gv11b_pre_process_sm_exception(struct gk20a *g, | |||
1542 | gpc, tpc, global_esr); | 1541 | gpc, tpc, global_esr); |
1543 | 1542 | ||
1544 | if (cilp_enabled && sm_debugger_attached) { | 1543 | if (cilp_enabled && sm_debugger_attached) { |
1545 | if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) | 1544 | if (global_esr & gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f()) |
1546 | gk20a_writel(g, gr_gpc0_tpc0_sm_hww_global_esr_r() + offset, | 1545 | gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset, |
1547 | gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()); | 1546 | gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f()); |
1548 | 1547 | ||
1549 | if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f()) | 1548 | if (global_esr & gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f()) |
1550 | gk20a_writel(g, gr_gpc0_tpc0_sm_hww_global_esr_r() + offset, | 1549 | gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset, |
1551 | gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f()); | 1550 | gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f()); |
1552 | 1551 | ||
1553 | global_mask = gr_gpcs_tpcs_sm_hww_global_esr_multiple_warp_errors_pending_f() | | 1552 | global_mask = gr_gpcs_tpcs_sm0_hww_global_esr_multiple_warp_errors_pending_f() | |
1554 | gr_gpcs_tpcs_sm_hww_global_esr_bpt_pause_pending_f(); | 1553 | gr_gpcs_tpcs_sm0_hww_global_esr_bpt_pause_pending_f(); |
1555 | 1554 | ||
1556 | if (warp_esr != 0 || (global_esr & global_mask) != 0) { | 1555 | if (warp_esr != 0 || (global_esr & global_mask) != 0) { |
1557 | *ignore_debugger = true; | 1556 | *ignore_debugger = true; |
@@ -1575,7 +1574,7 @@ static int gr_gv11b_pre_process_sm_exception(struct gk20a *g, | |||
1575 | } | 1574 | } |
1576 | 1575 | ||
1577 | /* reset the HWW errors after locking down */ | 1576 | /* reset the HWW errors after locking down */ |
1578 | global_esr_copy = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_global_esr_r() + offset); | 1577 | global_esr_copy = gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset); |
1579 | gk20a_gr_clear_sm_hww(g, gpc, tpc, global_esr_copy); | 1578 | gk20a_gr_clear_sm_hww(g, gpc, tpc, global_esr_copy); |
1580 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 1579 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, |
1581 | "CILP: HWWs cleared for gpc %d tpc %d\n", | 1580 | "CILP: HWWs cleared for gpc %d tpc %d\n", |
@@ -1588,15 +1587,15 @@ static int gr_gv11b_pre_process_sm_exception(struct gk20a *g, | |||
1588 | return ret; | 1587 | return ret; |
1589 | } | 1588 | } |
1590 | 1589 | ||
1591 | dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); | 1590 | dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset); |
1592 | if (dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) { | 1591 | if (dbgr_control0 & gr_gpcs_tpcs_sm0_dbgr_control0_single_step_mode_enable_f()) { |
1593 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 1592 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, |
1594 | "CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n", | 1593 | "CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n", |
1595 | gpc, tpc); | 1594 | gpc, tpc); |
1596 | dbgr_control0 = set_field(dbgr_control0, | 1595 | dbgr_control0 = set_field(dbgr_control0, |
1597 | gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_m(), | 1596 | gr_gpcs_tpcs_sm0_dbgr_control0_single_step_mode_m(), |
1598 | gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_disable_f()); | 1597 | gr_gpcs_tpcs_sm0_dbgr_control0_single_step_mode_disable_f()); |
1599 | gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0); | 1598 | gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0); |
1600 | } | 1599 | } |
1601 | 1600 | ||
1602 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 1601 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, |
@@ -1703,10 +1702,10 @@ clean_up: | |||
1703 | 1702 | ||
1704 | static u32 gv11b_mask_hww_warp_esr(u32 hww_warp_esr) | 1703 | static u32 gv11b_mask_hww_warp_esr(u32 hww_warp_esr) |
1705 | { | 1704 | { |
1706 | if (!(hww_warp_esr & gr_gpc0_tpc0_sm_hww_warp_esr_addr_valid_m())) | 1705 | if (!(hww_warp_esr & gr_gpc0_tpc0_sm0_hww_warp_esr_wrap_id_m())) |
1707 | hww_warp_esr = set_field(hww_warp_esr, | 1706 | hww_warp_esr = set_field(hww_warp_esr, |
1708 | gr_gpc0_tpc0_sm_hww_warp_esr_addr_error_type_m(), | 1707 | gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_m(), |
1709 | gr_gpc0_tpc0_sm_hww_warp_esr_addr_error_type_none_f()); | 1708 | gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_none_f()); |
1710 | 1709 | ||
1711 | return hww_warp_esr; | 1710 | return hww_warp_esr; |
1712 | } | 1711 | } |