author    Deepak Nibade <dnibade@nvidia.com>    2018-09-12 08:29:52 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-09-20 13:52:22 -0400
commit    d29300017aa1cd8ef1d2daef4383224bf00aff37 (patch)
tree      92d4e912d71e908cae8719a5784676ae069b2982 /drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
parent    46477494b2f5d566a0c133746af00a3da4ee6b90 (diff)
gpu: nvgpu: move lts intr servicing to separate functions
We currently service LTS interrupts as part of the LTC interrupt service routine.

Separate out the LTS interrupt handling into per-chip functions, e.g. gp10b_ltc_lts_isr() for gp10b and gv11b_ltc_lts_isr() for gv11b.

gv11b_ltc_lts_isr() now calls gp10b_ltc_lts_isr() to service legacy LTS interrupts, instead of calling gp10b_ltc_isr() directly.

Bug 2216662
Jira NVGPU-767

Change-Id: Ia8499feca83f67ac455cee311edf32390acb83b8
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1821430
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
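For illustration only (not part of this diff, which touches just the gp10b file): a minimal sketch of the delegation the message describes, assuming gv11b_ltc_lts_isr() takes the same (g, ltc, slice) parameters as the new gp10b routine; the gv11b-specific handling is elided.

/*
 * Illustrative sketch, not the actual gv11b implementation.
 * With LTS servicing factored out per slice, the gv11b handler can do
 * its chip-specific work and then delegate the legacy LTS interrupts
 * to the gp10b per-slice routine, rather than invoking the whole
 * gp10b_ltc_isr() loop over every LTC and slice.
 */
void gv11b_ltc_lts_isr(struct gk20a *g,
			unsigned int ltc, unsigned int slice)
{
	/* ... gv11b-specific LTS interrupt handling elided ... */

	/* service legacy LTS interrupts via the gp10b per-slice ISR */
	gp10b_ltc_lts_isr(g, ltc, slice);
}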
Diffstat (limited to 'drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c | 109
1 file changed, 59 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
index 9aabf543..4d11b44b 100644
--- a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
@@ -233,13 +233,68 @@ out:
 	return err;
 }
 
-void gp10b_ltc_isr(struct gk20a *g)
+void gp10b_ltc_lts_isr(struct gk20a *g,
+			unsigned int ltc, unsigned int slice)
 {
-	u32 mc_intr, ltc_intr;
-	unsigned int ltc, slice;
+	u32 offset;
+	u32 ltc_intr;
 	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
 	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
 
+	offset = ltc_stride * ltc + lts_stride * slice;
+	ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + offset);
+
+	/* Detect and handle ECC errors */
+	if (ltc_intr &
+		ltc_ltcs_ltss_intr_ecc_sec_error_pending_f()) {
+		u32 ecc_stats_reg_val;
+
+		nvgpu_err(g,
+			"Single bit error detected in GPU L2!");
+
+		ecc_stats_reg_val =
+			gk20a_readl(g,
+				ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
+		g->ecc.ltc.ecc_sec_count[ltc][slice].counter +=
+			ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(ecc_stats_reg_val);
+		ecc_stats_reg_val &=
+			~(ltc_ltc0_lts0_dstg_ecc_report_sec_count_m());
+		nvgpu_writel_check(g,
+			ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
+			ecc_stats_reg_val);
+		g->ops.mm.l2_flush(g, true);
+	}
+	if (ltc_intr &
+		ltc_ltcs_ltss_intr_ecc_ded_error_pending_f()) {
+		u32 ecc_stats_reg_val;
+
+		nvgpu_err(g,
+			"Double bit error detected in GPU L2!");
+
+		ecc_stats_reg_val =
+			gk20a_readl(g,
+				ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
+		g->ecc.ltc.ecc_ded_count[ltc][slice].counter +=
+			ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(ecc_stats_reg_val);
+		ecc_stats_reg_val &=
+			~(ltc_ltc0_lts0_dstg_ecc_report_ded_count_m());
+		nvgpu_writel_check(g,
+			ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
+			ecc_stats_reg_val);
+	}
+
+	nvgpu_err(g, "ltc%d, slice %d: %08x",
+		ltc, slice, ltc_intr);
+	nvgpu_writel_check(g, ltc_ltc0_lts0_intr_r() +
+		ltc_stride * ltc + lts_stride * slice,
+		ltc_intr);
+}
+
+void gp10b_ltc_isr(struct gk20a *g)
+{
+	u32 mc_intr;
+	unsigned int ltc, slice;
+
 	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
 	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
@@ -247,53 +302,7 @@ void gp10b_ltc_isr(struct gk20a *g)
 			continue;
 		}
 		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
-			u32 offset = ltc_stride * ltc + lts_stride * slice;
-			ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + offset);
-
-			/* Detect and handle ECC errors */
-			if (ltc_intr &
-				ltc_ltcs_ltss_intr_ecc_sec_error_pending_f()) {
-				u32 ecc_stats_reg_val;
-
-				nvgpu_err(g,
-					"Single bit error detected in GPU L2!");
-
-				ecc_stats_reg_val =
-					gk20a_readl(g,
-						ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
-				g->ecc.ltc.ecc_sec_count[ltc][slice].counter +=
-					ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(ecc_stats_reg_val);
-				ecc_stats_reg_val &=
-					~(ltc_ltc0_lts0_dstg_ecc_report_sec_count_m());
-				nvgpu_writel_check(g,
-					ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
-					ecc_stats_reg_val);
-				g->ops.mm.l2_flush(g, true);
-			}
-			if (ltc_intr &
-				ltc_ltcs_ltss_intr_ecc_ded_error_pending_f()) {
-				u32 ecc_stats_reg_val;
-
-				nvgpu_err(g,
-					"Double bit error detected in GPU L2!");
-
-				ecc_stats_reg_val =
-					gk20a_readl(g,
-						ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
-				g->ecc.ltc.ecc_ded_count[ltc][slice].counter +=
-					ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(ecc_stats_reg_val);
-				ecc_stats_reg_val &=
-					~(ltc_ltc0_lts0_dstg_ecc_report_ded_count_m());
-				nvgpu_writel_check(g,
-					ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
-					ecc_stats_reg_val);
-			}
-
-			nvgpu_err(g, "ltc%d, slice %d: %08x",
-				ltc, slice, ltc_intr);
-			nvgpu_writel_check(g, ltc_ltc0_lts0_intr_r() +
-				ltc_stride * ltc + lts_stride * slice,
-				ltc_intr);
+			gp10b_ltc_lts_isr(g, ltc, slice);
 		}
 	}
 }