author	Terje Bergstrom <tbergstrom@nvidia.com>	2018-03-28 17:56:11 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-04-03 20:05:19 -0400
commit	e7cc24eb9b78e1cdd0f321123e64261c95018e73 (patch)
tree	4de1be975c745257afe152cfaac8dceeece61018	/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
parent	b49ee3fe2b07ac9f75d0fa4d496b8eceddaad9ce (diff)
gpu: nvgpu: Correct sign qualifiers for LTC code
The constants used in the LTC code lack a qualifier indicating whether they are signed or unsigned. Add the qualifiers for the LTC code and for the ZBC-related constant used in the LTC code.

Change-Id: Id80078722f8a4f50eb53370146437bebb72a3ffc
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1683859
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
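Background on the change: appending the U suffix to an integer constant gives it an unsigned type, so the constant's signedness matches the u32 variables it is combined with, which is what MISRA-style sign-qualifier rules ask for. The stand-alone sketch below is illustrative only; it is not part of this commit, and the variable names are made up. It shows the same pattern the diff applies to max_comptag_lines.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

int main(void)
{
	u32 max_size = 4096U;	/* memory size in MB, held in a u32 */

	/* Without a suffix the literal 3 has type (signed) int, even
	 * though it only ever operates on unsigned data. */
	u32 lines_before = max_size << 3;

	/* With the U suffix the literal is unsigned, so the constant's
	 * signedness is explicit and matches the left operand. */
	u32 lines_after = max_size << 3U;

	/* The computed value is identical in both cases; the commit only
	 * makes the sign qualifier of each constant explicit. */
	printf("%u %u\n", lines_before, lines_after);
	return 0;
}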
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/ltc_gm20b.c')
-rw-r--r--	drivers/gpu/nvgpu/gm20b/ltc_gm20b.c	28
1 files changed, 14 insertions, 14 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
index 6ec9aec5..0d9fd560 100644
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
@@ -43,7 +43,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 	/* max memory size (MB) to cover */
 	u32 max_size = gr->max_comptag_mem;
 	/* one tag line covers 128KB */
-	u32 max_comptag_lines = max_size << 3;
+	u32 max_comptag_lines = max_size << 3U;
 
 	u32 hw_max_comptag_lines =
 		ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v();
@@ -53,7 +53,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 	u32 comptags_per_cacheline =
 		ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param);
 	u32 cacheline_size =
-		512 << ltc_ltcs_ltss_cbc_param_cache_line_size_v(cbc_param);
+		512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(cbc_param);
 	u32 slices_per_ltc =
 		ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(cbc_param);
 
@@ -63,7 +63,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 
 	gk20a_dbg_fn("");
 
-	if (max_comptag_lines == 0)
+	if (max_comptag_lines == 0U)
 		return 0;
 
 	if (max_comptag_lines > hw_max_comptag_lines)
@@ -113,12 +113,12 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 	struct gr_gk20a *gr = &g->gr;
 	struct nvgpu_timeout timeout;
 	int err = 0;
-	u32 ltc, slice, ctrl1, val, hw_op = 0;
+	u32 ltc, slice, ctrl1, val, hw_op = 0U;
 	u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(
 		gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
 	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
 	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-	const u32 max_lines = 16384;
+	const u32 max_lines = 16384U;
 
 	gk20a_dbg_fn("");
 
@@ -237,7 +237,7 @@ void gm20b_ltc_isr(struct gk20a *g)
 	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
 	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1 << ltc) == 0)
+		if ((mc_intr & 1U << ltc) == 0)
 			continue;
 		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
 			ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() +
@@ -256,7 +256,7 @@ void gm20b_ltc_isr(struct gk20a *g)
 u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
 {
 	u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
-	if (val == 2) {
+	if (val == 2U) {
 		return base * 2;
 	} else if (val != 1) {
 		nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
@@ -359,24 +359,24 @@ int gm20b_determine_L2_size_bytes(struct gk20a *g)
 
 	active_sets_value = ltc_ltc0_lts0_tstg_cfg1_active_sets_v(tmp);
 	if (active_sets_value == ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v()) {
-		sets = 64;
+		sets = 64U;
 	} else if (active_sets_value ==
 		ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v()) {
-		sets = 32;
+		sets = 32U;
 	} else if (active_sets_value ==
 		ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
-		sets = 16;
+		sets = 16U;
 	} else {
 		nvgpu_err(g, "Unknown constant %u for active sets",
 			(unsigned)active_sets_value);
-		sets = 0;
+		sets = 0U;
 	}
 
 	active_ltcs = g->gr.num_fbps;
 
 	/* chip-specific values */
-	lts_per_ltc = 2;
-	bytes_per_line = 128;
+	lts_per_ltc = 2U;
+	bytes_per_line = 128U;
 	cache_size = active_ltcs * lts_per_ltc * ways * sets * bytes_per_line;
 
 	return cache_size;
@@ -424,7 +424,7 @@ void gm20b_ltc_set_zbc_depth_entry(struct gk20a *g,
 void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 {
 	u32 max_size = gr->max_comptag_mem;
-	u32 max_comptag_lines = max_size << 3;
+	u32 max_comptag_lines = max_size << 3U;
 
 	u32 compbit_base_post_divide;
 	u64 compbit_base_post_multiply64;