diff options
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/nvgpu/common/fuse/fuse_gm20b.c           |  5
-rw-r--r-- | drivers/gpu/nvgpu/common/fuse/fuse_gp10b.c           |  5
-rw-r--r-- | drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c             | 45
-rw-r--r-- | drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c             | 34
-rw-r--r-- | drivers/gpu/nvgpu/common/priv_ring/priv_ring_gp10b.c | 33
5 files changed, 79 insertions, 43 deletions
diff --git a/drivers/gpu/nvgpu/common/fuse/fuse_gm20b.c b/drivers/gpu/nvgpu/common/fuse/fuse_gm20b.c
index c790e297..bb99e644 100644
--- a/drivers/gpu/nvgpu/common/fuse/fuse_gm20b.c
+++ b/drivers/gpu/nvgpu/common/fuse/fuse_gm20b.c
@@ -63,16 +63,17 @@ int gm20b_fuse_check_priv_security(struct gk20a *g)
63 | GCPLEX_CONFIG_WPR_ENABLED_MASK) && | 63 | GCPLEX_CONFIG_WPR_ENABLED_MASK) && |
64 | !(gcplex_config & | 64 | !(gcplex_config & |
65 | GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK)) { | 65 | GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK)) { |
66 | if (gk20a_readl(g, fuse_opt_sec_debug_en_r())) | 66 | if (gk20a_readl(g, fuse_opt_sec_debug_en_r())) { |
67 | nvgpu_log(g, gpu_dbg_info, | 67 | nvgpu_log(g, gpu_dbg_info, |
68 | "gcplex_config = 0x%08x, " | 68 | "gcplex_config = 0x%08x, " |
69 | "secure mode: ACR debug", | 69 | "secure mode: ACR debug", |
70 | gcplex_config); | 70 | gcplex_config); |
71 | else | 71 | } else { |
72 | nvgpu_log(g, gpu_dbg_info, | 72 | nvgpu_log(g, gpu_dbg_info, |
73 | "gcplex_config = 0x%08x, " | 73 | "gcplex_config = 0x%08x, " |
74 | "secure mode: ACR non debug", | 74 | "secure mode: ACR non debug", |
75 | gcplex_config); | 75 | gcplex_config); |
76 | } | ||
76 | } else { | 77 | } else { |
77 | nvgpu_err(g, "gcplex_config = 0x%08x " | 78 | nvgpu_err(g, "gcplex_config = 0x%08x " |
78 | "invalid wpr_enabled/vpr_auto_fetch_disable " | 79 | "invalid wpr_enabled/vpr_auto_fetch_disable " |
diff --git a/drivers/gpu/nvgpu/common/fuse/fuse_gp10b.c b/drivers/gpu/nvgpu/common/fuse/fuse_gp10b.c
index 3a26e1b9..97570f9c 100644
--- a/drivers/gpu/nvgpu/common/fuse/fuse_gp10b.c
+++ b/drivers/gpu/nvgpu/common/fuse/fuse_gp10b.c
@@ -63,16 +63,17 @@ int gp10b_fuse_check_priv_security(struct gk20a *g)
63 | GCPLEX_CONFIG_WPR_ENABLED_MASK) && | 63 | GCPLEX_CONFIG_WPR_ENABLED_MASK) && |
64 | !(gcplex_config & | 64 | !(gcplex_config & |
65 | GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK)) { | 65 | GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK)) { |
66 | if (gk20a_readl(g, fuse_opt_sec_debug_en_r())) | 66 | if (gk20a_readl(g, fuse_opt_sec_debug_en_r())) { |
67 | nvgpu_log(g, gpu_dbg_info, | 67 | nvgpu_log(g, gpu_dbg_info, |
68 | "gcplex_config = 0x%08x, " | 68 | "gcplex_config = 0x%08x, " |
69 | "secure mode: ACR debug", | 69 | "secure mode: ACR debug", |
70 | gcplex_config); | 70 | gcplex_config); |
71 | else | 71 | } else { |
72 | nvgpu_log(g, gpu_dbg_info, | 72 | nvgpu_log(g, gpu_dbg_info, |
73 | "gcplex_config = 0x%08x, " | 73 | "gcplex_config = 0x%08x, " |
74 | "secure mode: ACR non debug", | 74 | "secure mode: ACR non debug", |
75 | gcplex_config); | 75 | gcplex_config); |
76 | } | ||
76 | 77 | ||
77 | } else { | 78 | } else { |
78 | nvgpu_err(g, "gcplex_config = 0x%08x " | 79 | nvgpu_err(g, "gcplex_config = 0x%08x " |
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c
index 28d63e82..623b0935 100644
--- a/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c
@@ -61,11 +61,13 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
61 | 61 | ||
62 | nvgpu_log_fn(g, " "); | 62 | nvgpu_log_fn(g, " "); |
63 | 63 | ||
64 | if (max_comptag_lines == 0U) | 64 | if (max_comptag_lines == 0U) { |
65 | return 0; | 65 | return 0; |
66 | } | ||
66 | 67 | ||
67 | if (max_comptag_lines > hw_max_comptag_lines) | 68 | if (max_comptag_lines > hw_max_comptag_lines) { |
68 | max_comptag_lines = hw_max_comptag_lines; | 69 | max_comptag_lines = hw_max_comptag_lines; |
70 | } | ||
69 | 71 | ||
70 | compbit_backing_size = | 72 | compbit_backing_size = |
71 | DIV_ROUND_UP(max_comptag_lines, comptags_per_cacheline) * | 73 | DIV_ROUND_UP(max_comptag_lines, comptags_per_cacheline) * |
@@ -82,8 +84,9 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
82 | (compbit_backing_size * comptags_per_cacheline) / | 84 | (compbit_backing_size * comptags_per_cacheline) / |
83 | (gr->cacheline_size * gr->slices_per_ltc * g->ltc_count); | 85 | (gr->cacheline_size * gr->slices_per_ltc * g->ltc_count); |
84 | 86 | ||
85 | if (max_comptag_lines > hw_max_comptag_lines) | 87 | if (max_comptag_lines > hw_max_comptag_lines) { |
86 | max_comptag_lines = hw_max_comptag_lines; | 88 | max_comptag_lines = hw_max_comptag_lines; |
89 | } | ||
87 | 90 | ||
88 | nvgpu_log_info(g, "compbit backing store size : %d", | 91 | nvgpu_log_info(g, "compbit backing store size : %d", |
89 | compbit_backing_size); | 92 | compbit_backing_size); |
@@ -91,12 +94,14 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
91 | max_comptag_lines); | 94 | max_comptag_lines); |
92 | 95 | ||
93 | err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); | 96 | err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); |
94 | if (err) | 97 | if (err) { |
95 | return err; | 98 | return err; |
99 | } | ||
96 | 100 | ||
97 | err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines); | 101 | err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines); |
98 | if (err) | 102 | if (err) { |
99 | return err; | 103 | return err; |
104 | } | ||
100 | 105 | ||
101 | gr->max_comptag_lines = max_comptag_lines; | 106 | gr->max_comptag_lines = max_comptag_lines; |
102 | gr->comptags_per_cacheline = comptags_per_cacheline; | 107 | gr->comptags_per_cacheline = comptags_per_cacheline; |
@@ -121,8 +126,9 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
121 | 126 | ||
122 | trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); | 127 | trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); |
123 | 128 | ||
124 | if (gr->compbit_store.mem.size == 0) | 129 | if (gr->compbit_store.mem.size == 0) { |
125 | return 0; | 130 | return 0; |
131 | } | ||
126 | 132 | ||
127 | while (1) { | 133 | while (1) { |
128 | const u32 iter_max = min(min + max_lines - 1, max); | 134 | const u32 iter_max = min(min + max_lines - 1, max); |
@@ -168,8 +174,9 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
168 | NVGPU_TIMER_RETRY_TIMER); | 174 | NVGPU_TIMER_RETRY_TIMER); |
169 | do { | 175 | do { |
170 | val = gk20a_readl(g, ctrl1); | 176 | val = gk20a_readl(g, ctrl1); |
171 | if (!(val & hw_op)) | 177 | if (!(val & hw_op)) { |
172 | break; | 178 | break; |
179 | } | ||
173 | nvgpu_udelay(5); | 180 | nvgpu_udelay(5); |
174 | } while (!nvgpu_timeout_expired(&timeout)); | 181 | } while (!nvgpu_timeout_expired(&timeout)); |
175 | 182 | ||
@@ -182,8 +189,9 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
182 | } | 189 | } |
183 | 190 | ||
184 | /* are we done? */ | 191 | /* are we done? */ |
185 | if (full_cache_op || iter_max == max) | 192 | if (full_cache_op || iter_max == max) { |
186 | break; | 193 | break; |
194 | } | ||
187 | 195 | ||
188 | /* note: iter_max is inclusive upper bound */ | 196 | /* note: iter_max is inclusive upper bound */ |
189 | min = iter_max + 1; | 197 | min = iter_max + 1; |
@@ -240,8 +248,9 @@ void gm20b_ltc_isr(struct gk20a *g)
240 | mc_intr = gk20a_readl(g, mc_intr_ltc_r()); | 248 | mc_intr = gk20a_readl(g, mc_intr_ltc_r()); |
241 | nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr); | 249 | nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr); |
242 | for (ltc = 0; ltc < g->ltc_count; ltc++) { | 250 | for (ltc = 0; ltc < g->ltc_count; ltc++) { |
243 | if ((mc_intr & 1U << ltc) == 0) | 251 | if ((mc_intr & 1U << ltc) == 0) { |
244 | continue; | 252 | continue; |
253 | } | ||
245 | for (slice = 0; slice < g->gr.slices_per_ltc; slice++) { | 254 | for (slice = 0; slice < g->gr.slices_per_ltc; slice++) { |
246 | ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + | 255 | ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + |
247 | ltc_stride * ltc + | 256 | ltc_stride * ltc + |
@@ -433,12 +442,13 @@ void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
433 | u64 compbit_store_iova; | 442 | u64 compbit_store_iova; |
434 | u64 compbit_base_post_divide64; | 443 | u64 compbit_base_post_divide64; |
435 | 444 | ||
436 | if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) | 445 | if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { |
437 | compbit_store_iova = nvgpu_mem_get_phys_addr(g, | 446 | compbit_store_iova = nvgpu_mem_get_phys_addr(g, |
438 | &gr->compbit_store.mem); | 447 | &gr->compbit_store.mem); |
439 | else | 448 | } else { |
440 | compbit_store_iova = nvgpu_mem_get_addr(g, | 449 | compbit_store_iova = nvgpu_mem_get_addr(g, |
441 | &gr->compbit_store.mem); | 450 | &gr->compbit_store.mem); |
451 | } | ||
442 | 452 | ||
443 | compbit_base_post_divide64 = compbit_store_iova >> | 453 | compbit_base_post_divide64 = compbit_store_iova >> |
444 | ltc_ltcs_ltss_cbc_base_alignment_shift_v(); | 454 | ltc_ltcs_ltss_cbc_base_alignment_shift_v(); |
@@ -449,13 +459,15 @@ void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
449 | compbit_base_post_multiply64 = ((u64)compbit_base_post_divide * | 459 | compbit_base_post_multiply64 = ((u64)compbit_base_post_divide * |
450 | g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v(); | 460 | g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v(); |
451 | 461 | ||
452 | if (compbit_base_post_multiply64 < compbit_store_iova) | 462 | if (compbit_base_post_multiply64 < compbit_store_iova) { |
453 | compbit_base_post_divide++; | 463 | compbit_base_post_divide++; |
464 | } | ||
454 | 465 | ||
455 | /* Bug 1477079 indicates sw adjustment on the posted divided base. */ | 466 | /* Bug 1477079 indicates sw adjustment on the posted divided base. */ |
456 | if (g->ops.ltc.cbc_fix_config) | 467 | if (g->ops.ltc.cbc_fix_config) { |
457 | compbit_base_post_divide = | 468 | compbit_base_post_divide = |
458 | g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide); | 469 | g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide); |
470 | } | ||
459 | 471 | ||
460 | gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(), | 472 | gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(), |
461 | compbit_base_post_divide); | 473 | compbit_base_post_divide); |
@@ -478,12 +490,13 @@ void gm20b_ltc_set_enabled(struct gk20a *g, bool enabled)
478 | u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); | 490 | u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); |
479 | u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); | 491 | u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); |
480 | 492 | ||
481 | if (enabled) | 493 | if (enabled) { |
482 | /* bypass disabled (normal caching ops)*/ | 494 | /* bypass disabled (normal caching ops) */ |
483 | reg &= ~reg_f; | 495 | reg &= ~reg_f; |
484 | else | 496 | } else { |
485 | /* bypass enabled (no caching) */ | 497 | /* bypass enabled (no caching) */ |
486 | reg |= reg_f; | 498 | reg |= reg_f; |
499 | } | ||
487 | 500 | ||
488 | gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); | 501 | gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); |
489 | } | 502 | } |
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
index 5d0a8850..b72346ee 100644
--- a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
@@ -83,15 +83,18 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
83 | 83 | ||
84 | nvgpu_log_fn(g, " "); | 84 | nvgpu_log_fn(g, " "); |
85 | 85 | ||
86 | if (max_comptag_lines == 0U) | 86 | if (max_comptag_lines == 0U) { |
87 | return 0; | 87 | return 0; |
88 | } | ||
88 | 89 | ||
89 | /* Already initialized */ | 90 | /* Already initialized */ |
90 | if (gr->max_comptag_lines) | 91 | if (gr->max_comptag_lines) { |
91 | return 0; | 92 | return 0; |
93 | } | ||
92 | 94 | ||
93 | if (max_comptag_lines > hw_max_comptag_lines) | 95 | if (max_comptag_lines > hw_max_comptag_lines) { |
94 | max_comptag_lines = hw_max_comptag_lines; | 96 | max_comptag_lines = hw_max_comptag_lines; |
97 | } | ||
95 | 98 | ||
96 | compbit_backing_size = | 99 | compbit_backing_size = |
97 | roundup(max_comptag_lines * gobs_per_comptagline_per_slice, | 100 | roundup(max_comptag_lines * gobs_per_comptagline_per_slice, |
@@ -115,12 +118,14 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
115 | gobs_per_comptagline_per_slice); | 118 | gobs_per_comptagline_per_slice); |
116 | 119 | ||
117 | err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); | 120 | err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); |
118 | if (err) | 121 | if (err) { |
119 | return err; | 122 | return err; |
123 | } | ||
120 | 124 | ||
121 | err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines); | 125 | err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines); |
122 | if (err) | 126 | if (err) { |
123 | return err; | 127 | return err; |
128 | } | ||
124 | 129 | ||
125 | gr->max_comptag_lines = max_comptag_lines; | 130 | gr->max_comptag_lines = max_comptag_lines; |
126 | gr->comptags_per_cacheline = comptags_per_cacheline; | 131 | gr->comptags_per_cacheline = comptags_per_cacheline; |
@@ -146,8 +151,9 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
146 | 151 | ||
147 | trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); | 152 | trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); |
148 | 153 | ||
149 | if (gr->compbit_store.mem.size == 0U) | 154 | if (gr->compbit_store.mem.size == 0U) { |
150 | return 0; | 155 | return 0; |
156 | } | ||
151 | 157 | ||
152 | while (1) { | 158 | while (1) { |
153 | const u32 iter_max = min(min + max_lines - 1, max); | 159 | const u32 iter_max = min(min + max_lines - 1, max); |
@@ -195,8 +201,9 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
195 | NVGPU_TIMER_RETRY_TIMER); | 201 | NVGPU_TIMER_RETRY_TIMER); |
196 | do { | 202 | do { |
197 | val = gk20a_readl(g, ctrl1); | 203 | val = gk20a_readl(g, ctrl1); |
198 | if (!(val & hw_op)) | 204 | if (!(val & hw_op)) { |
199 | break; | 205 | break; |
206 | } | ||
200 | nvgpu_udelay(5); | 207 | nvgpu_udelay(5); |
201 | } while (!nvgpu_timeout_expired(&timeout)); | 208 | } while (!nvgpu_timeout_expired(&timeout)); |
202 | 209 | ||
@@ -209,8 +216,9 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
209 | } | 216 | } |
210 | 217 | ||
211 | /* are we done? */ | 218 | /* are we done? */ |
212 | if (full_cache_op || iter_max == max) | 219 | if (full_cache_op || iter_max == max) { |
213 | break; | 220 | break; |
221 | } | ||
214 | 222 | ||
215 | /* note: iter_max is inclusive upper bound */ | 223 | /* note: iter_max is inclusive upper bound */ |
216 | min = iter_max + 1; | 224 | min = iter_max + 1; |
@@ -234,8 +242,9 @@ void gp10b_ltc_isr(struct gk20a *g)
234 | mc_intr = gk20a_readl(g, mc_intr_ltc_r()); | 242 | mc_intr = gk20a_readl(g, mc_intr_ltc_r()); |
235 | nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr); | 243 | nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr); |
236 | for (ltc = 0; ltc < g->ltc_count; ltc++) { | 244 | for (ltc = 0; ltc < g->ltc_count; ltc++) { |
237 | if ((mc_intr & 1U << ltc) == 0) | 245 | if ((mc_intr & 1U << ltc) == 0) { |
238 | continue; | 246 | continue; |
247 | } | ||
239 | for (slice = 0; slice < g->gr.slices_per_ltc; slice++) { | 248 | for (slice = 0; slice < g->gr.slices_per_ltc; slice++) { |
240 | u32 offset = ltc_stride * ltc + lts_stride * slice; | 249 | u32 offset = ltc_stride * ltc + lts_stride * slice; |
241 | ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + offset); | 250 | ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + offset); |
@@ -310,12 +319,13 @@ void gp10b_ltc_set_enabled(struct gk20a *g, bool enabled)
310 | u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); | 319 | u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); |
311 | u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); | 320 | u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); |
312 | 321 | ||
313 | if (enabled) | 322 | if (enabled) { |
314 | /* bypass disabled (normal caching ops)*/ | 323 | /* bypass disabled (normal caching ops) */ |
315 | reg &= ~reg_f; | 324 | reg &= ~reg_f; |
316 | else | 325 | } else { |
317 | /* bypass enabled (no caching) */ | 326 | /* bypass enabled (no caching) */ |
318 | reg |= reg_f; | 327 | reg |= reg_f; |
328 | } | ||
319 | 329 | ||
320 | nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); | 330 | nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); |
321 | } | 331 | } |
diff --git a/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gp10b.c b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gp10b.c
index 04525375..53141c9a 100644
--- a/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gp10b.c
+++ b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gp10b.c
@@ -75,24 +75,28 @@ void gp10b_priv_ring_decode_error_code(struct gk20a *g,
75 | 75 | ||
76 | if (error_code == 0xBADF1000) { | 76 | if (error_code == 0xBADF1000) { |
77 | if (error_type_index < | 77 | if (error_type_index < |
78 | ARRAY_SIZE(error_type_badf1xyy)) | 78 | ARRAY_SIZE(error_type_badf1xyy)) { |
79 | nvgpu_err(g, "%s", | 79 | nvgpu_err(g, "%s", |
80 | error_type_badf1xyy[error_type_index]); | 80 | error_type_badf1xyy[error_type_index]); |
81 | } | ||
81 | } else if (error_code == 0xBADF2000) { | 82 | } else if (error_code == 0xBADF2000) { |
82 | if (error_type_index < | 83 | if (error_type_index < |
83 | ARRAY_SIZE(error_type_badf2xyy)) | 84 | ARRAY_SIZE(error_type_badf2xyy)) { |
84 | nvgpu_err(g, "%s", | 85 | nvgpu_err(g, "%s", |
85 | error_type_badf2xyy[error_type_index]); | 86 | error_type_badf2xyy[error_type_index]); |
87 | } | ||
86 | } else if (error_code == 0xBADF3000) { | 88 | } else if (error_code == 0xBADF3000) { |
87 | if (error_type_index < | 89 | if (error_type_index < |
88 | ARRAY_SIZE(error_type_badf3xyy)) | 90 | ARRAY_SIZE(error_type_badf3xyy)) { |
89 | nvgpu_err(g, "%s", | 91 | nvgpu_err(g, "%s", |
90 | error_type_badf3xyy[error_type_index]); | 92 | error_type_badf3xyy[error_type_index]); |
93 | } | ||
91 | } else if (error_code == 0xBADF5000) { | 94 | } else if (error_code == 0xBADF5000) { |
92 | if (error_type_index < | 95 | if (error_type_index < |
93 | ARRAY_SIZE(error_type_badf5xyy)) | 96 | ARRAY_SIZE(error_type_badf5xyy)) { |
94 | nvgpu_err(g, "%s", | 97 | nvgpu_err(g, "%s", |
95 | error_type_badf5xyy[error_type_index]); | 98 | error_type_badf5xyy[error_type_index]); |
99 | } | ||
96 | } | 100 | } |
97 | } | 101 | } |
98 | 102 | ||
@@ -117,15 +121,18 @@ void gp10b_priv_ring_isr(struct gk20a *g)
117 | nvgpu_err(g, "ringmaster intr status0: 0x%08x," | 121 | nvgpu_err(g, "ringmaster intr status0: 0x%08x," |
118 | "status1: 0x%08x", status0, status1); | 122 | "status1: 0x%08x", status0, status1); |
119 | 123 | ||
120 | if (pri_ringmaster_intr_status0_ring_start_conn_fault_v(status0) != 0) | 124 | if (pri_ringmaster_intr_status0_ring_start_conn_fault_v(status0) != 0) { |
121 | nvgpu_err(g, | 125 | nvgpu_err(g, |
122 | "BUG: connectivity problem on the startup sequence"); | 126 | "BUG: connectivity problem on the startup sequence"); |
127 | } | ||
123 | 128 | ||
124 | if (pri_ringmaster_intr_status0_disconnect_fault_v(status0) != 0) | 129 | if (pri_ringmaster_intr_status0_disconnect_fault_v(status0) != 0) { |
125 | nvgpu_err(g, "ring disconnected"); | 130 | nvgpu_err(g, "ring disconnected"); |
131 | } | ||
126 | 132 | ||
127 | if (pri_ringmaster_intr_status0_overflow_fault_v(status0) != 0) | 133 | if (pri_ringmaster_intr_status0_overflow_fault_v(status0) != 0) { |
128 | nvgpu_err(g, "ring overflowed"); | 134 | nvgpu_err(g, "ring overflowed"); |
135 | } | ||
129 | 136 | ||
130 | if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) { | 137 | if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) { |
131 | error_info = | 138 | error_info = |
@@ -141,8 +148,9 @@ void gp10b_priv_ring_isr(struct gk20a *g)
141 | pri_ringstation_sys_priv_error_info_subid_v(error_info), | 148 | pri_ringstation_sys_priv_error_info_subid_v(error_info), |
142 | pri_ringstation_sys_priv_error_info_priv_level_v(error_info), | 149 | pri_ringstation_sys_priv_error_info_priv_level_v(error_info), |
143 | error_code); | 150 | error_code); |
144 | if (g->ops.priv_ring.decode_error_code) | 151 | if (g->ops.priv_ring.decode_error_code) { |
145 | g->ops.priv_ring.decode_error_code(g, error_code); | 152 | g->ops.priv_ring.decode_error_code(g, error_code); |
153 | } | ||
146 | } | 154 | } |
147 | 155 | ||
148 | if (status1) { | 156 | if (status1) { |
@@ -167,13 +175,15 @@ void gp10b_priv_ring_isr(struct gk20a *g)
167 | pri_ringstation_gpc_gpc0_priv_error_info_priv_level_v(error_info), | 175 | pri_ringstation_gpc_gpc0_priv_error_info_priv_level_v(error_info), |
168 | error_code); | 176 | error_code); |
169 | 177 | ||
170 | if (g->ops.priv_ring.decode_error_code) | 178 | if (g->ops.priv_ring.decode_error_code) { |
171 | g->ops.priv_ring.decode_error_code(g, | 179 | g->ops.priv_ring.decode_error_code(g, |
172 | error_code); | 180 | error_code); |
181 | } | ||
173 | 182 | ||
174 | status1 = status1 & (~(BIT(gpc))); | 183 | status1 = status1 & (~(BIT(gpc))); |
175 | if (!status1) | 184 | if (!status1) { |
176 | break; | 185 | break; |
186 | } | ||
177 | } | 187 | } |
178 | } | 188 | } |
179 | } | 189 | } |
@@ -193,6 +203,7 @@ void gp10b_priv_ring_isr(struct gk20a *g)
193 | retry--; | 203 | retry--; |
194 | } | 204 | } |
195 | 205 | ||
196 | if (retry == 0) | 206 | if (retry == 0) { |
197 | nvgpu_err(g, "priv ringmaster intr ack failed"); | 207 | nvgpu_err(g, "priv ringmaster intr ack failed"); |
208 | } | ||
198 | } | 209 | } |