author		Terje Bergstrom <tbergstrom@nvidia.com>	2018-03-28 17:56:11 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-04-03 20:05:19 -0400
commit		e7cc24eb9b78e1cdd0f321123e64261c95018e73 (patch)
tree		4de1be975c745257afe152cfaac8dceeece61018
parent		b49ee3fe2b07ac9f75d0fa4d496b8eceddaad9ce (diff)
gpu: nvgpu: Correct sign qualifiers for LTC code
Constants used in the LTC code are missing the qualifier that indicates
whether they are signed or unsigned. Add the qualifiers for the LTC code
and for the ZBC-related constant used in the LTC code.

Change-Id: Id80078722f8a4f50eb53370146437bebb72a3ffc
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1683859
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
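For readers unfamiliar with why such one-character changes matter: in C, an unsuffixed decimal constant such as 3 or 16384 has type int, so an expression like max_size << 3 mixes an unsigned u32 with a signed operand. The generated code is identical, but static checkers built around an essential-type model (MISRA C-style rules are the usual motivation for this kind of cleanup, though the message above does not name a standard) flag the signed/unsigned mix. Below is a minimal sketch, not code from this patch, using the same MB-to-comptag-lines shift as the gm20b hunk further down; the u32 typedef is assumed so the snippet compiles standalone:

#include <stdint.h>

typedef uint32_t u32;	/* kernel-style alias, assumed for this sketch */

/* One comptag line covers 128KB, so a size in MB maps to lines via << 3. */
static u32 max_comptag_lines(u32 max_size_mb)
{
	u32 with_signed_shift = max_size_mb << 3;	/* 3 has type int: flagged */
	u32 with_unsigned_shift = max_size_mb << 3U;	/* 3U is unsigned: clean */

	/* Both evaluate to the same value; only the constant's type differs. */
	return with_signed_shift == with_unsigned_shift ? with_unsigned_shift : 0U;
}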
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.h	2
-rw-r--r--	drivers/gpu/nvgpu/gm20b/ltc_gm20b.c	28
-rw-r--r--	drivers/gpu/nvgpu/gp10b/ltc_gp10b.c	14
-rw-r--r--	drivers/gpu/nvgpu/gv11b/ltc_gv11b.c	12
4 files changed, 28 insertions, 28 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index 54833028..a80116b7 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -174,7 +174,7 @@ struct gr_zcull_info {
 
 #define GK20A_ZBC_COLOR_VALUE_SIZE 4 /* RGBA */
 
-#define GK20A_STARTOF_ZBC_TABLE 1 /* index zero reserved to indicate "not ZBCd" */
+#define GK20A_STARTOF_ZBC_TABLE 1U /* index zero reserved to indicate "not ZBCd" */
 #define GK20A_SIZEOF_ZBC_TABLE 16 /* match ltcs_ltss_dstg_zbc_index_address width (4) */
 #define GK20A_ZBC_TABLE_SIZE (16 - 1)
 
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
index 6ec9aec5..0d9fd560 100644
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
@@ -43,7 +43,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 	/* max memory size (MB) to cover */
 	u32 max_size = gr->max_comptag_mem;
 	/* one tag line covers 128KB */
-	u32 max_comptag_lines = max_size << 3;
+	u32 max_comptag_lines = max_size << 3U;
 
 	u32 hw_max_comptag_lines =
 		ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v();
@@ -53,7 +53,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 	u32 comptags_per_cacheline =
 		ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param);
 	u32 cacheline_size =
-		512 << ltc_ltcs_ltss_cbc_param_cache_line_size_v(cbc_param);
+		512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(cbc_param);
 	u32 slices_per_ltc =
 		ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(cbc_param);
 
@@ -63,7 +63,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 
 	gk20a_dbg_fn("");
 
-	if (max_comptag_lines == 0)
+	if (max_comptag_lines == 0U)
 		return 0;
 
 	if (max_comptag_lines > hw_max_comptag_lines)
@@ -113,12 +113,12 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 	struct gr_gk20a *gr = &g->gr;
 	struct nvgpu_timeout timeout;
 	int err = 0;
-	u32 ltc, slice, ctrl1, val, hw_op = 0;
+	u32 ltc, slice, ctrl1, val, hw_op = 0U;
 	u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(
 		gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
 	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
 	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-	const u32 max_lines = 16384;
+	const u32 max_lines = 16384U;
 
 	gk20a_dbg_fn("");
 
@@ -237,7 +237,7 @@ void gm20b_ltc_isr(struct gk20a *g)
 	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
 	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1 << ltc) == 0)
+		if ((mc_intr & 1U << ltc) == 0)
 			continue;
 		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
 			ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() +
@@ -256,7 +256,7 @@ void gm20b_ltc_isr(struct gk20a *g)
 u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
 {
 	u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
-	if (val == 2) {
+	if (val == 2U) {
 		return base * 2;
 	} else if (val != 1) {
 		nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
@@ -359,24 +359,24 @@ int gm20b_determine_L2_size_bytes(struct gk20a *g)
 
 	active_sets_value = ltc_ltc0_lts0_tstg_cfg1_active_sets_v(tmp);
 	if (active_sets_value == ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v()) {
-		sets = 64;
+		sets = 64U;
 	} else if (active_sets_value ==
 			ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v()) {
-		sets = 32;
+		sets = 32U;
 	} else if (active_sets_value ==
 			ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
-		sets = 16;
+		sets = 16U;
 	} else {
 		nvgpu_err(g, "Unknown constant %u for active sets",
 			(unsigned)active_sets_value);
-		sets = 0;
+		sets = 0U;
 	}
 
 	active_ltcs = g->gr.num_fbps;
 
 	/* chip-specific values */
-	lts_per_ltc = 2;
-	bytes_per_line = 128;
+	lts_per_ltc = 2U;
+	bytes_per_line = 128U;
 	cache_size = active_ltcs * lts_per_ltc * ways * sets * bytes_per_line;
 
 	return cache_size;
@@ -424,7 +424,7 @@ void gm20b_ltc_set_zbc_depth_entry(struct gk20a *g,
 void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 {
 	u32 max_size = gr->max_comptag_mem;
-	u32 max_comptag_lines = max_size << 3;
+	u32 max_comptag_lines = max_size << 3U;
 
 	u32 compbit_base_post_divide;
 	u64 compbit_base_post_multiply64;
diff --git a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
index d52a10ad..d6634b14 100644
--- a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
@@ -61,7 +61,7 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 	/* max memory size (MB) to cover */
 	u32 max_size = gr->max_comptag_mem;
 	/* one tag line covers 64KB */
-	u32 max_comptag_lines = max_size << 4;
+	u32 max_comptag_lines = max_size << 4U;
 
 	u32 hw_max_comptag_lines =
 		ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v();
@@ -71,7 +71,7 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 	u32 comptags_per_cacheline =
 		ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param);
 	u32 cacheline_size =
-		512 << ltc_ltcs_ltss_cbc_param_cache_line_size_v(cbc_param);
+		512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(cbc_param);
 	u32 slices_per_ltc =
 		ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(cbc_param);
 	u32 cbc_param2 =
@@ -85,7 +85,7 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 
 	gk20a_dbg_fn("");
 
-	if (max_comptag_lines == 0)
+	if (max_comptag_lines == 0U)
 		return 0;
 
 	/* Already initialized */
@@ -138,18 +138,18 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 	struct gr_gk20a *gr = &g->gr;
 	struct nvgpu_timeout timeout;
 	int err = 0;
-	u32 ltc, slice, ctrl1, val, hw_op = 0;
+	u32 ltc, slice, ctrl1, val, hw_op = 0U;
 	u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(
 		gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
 	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
 	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-	const u32 max_lines = 16384;
+	const u32 max_lines = 16384U;
 
 	nvgpu_log_fn(g, " ");
 
 	trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
 
-	if (gr->compbit_store.mem.size == 0)
+	if (gr->compbit_store.mem.size == 0U)
 		return 0;
 
 	while (1) {
@@ -235,7 +235,7 @@ void gp10b_ltc_isr(struct gk20a *g)
 	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
 	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1 << ltc) == 0)
+		if ((mc_intr & 1U << ltc) == 0)
 			continue;
 		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
 			u32 offset = ltc_stride * ltc + lts_stride * slice;
diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
index 9bfefa9e..9f6d176e 100644
--- a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
@@ -96,9 +96,9 @@ void gv11b_ltc_isr(struct gk20a *g)
 
 	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
 	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1 << ltc) == 0)
+		if ((mc_intr & 1U << ltc) == 0)
 			continue;
-		ltc_corrected = ltc_uncorrected = 0;
+		ltc_corrected = ltc_uncorrected = 0U;
 
 		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
 			u32 offset = ltc_stride * ltc + lts_stride * slice;
@@ -133,10 +133,10 @@ void gv11b_ltc_isr(struct gk20a *g)
 				ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m();
 
 			/* clear the interrupt */
-			if ((corrected_delta > 0) || corrected_overflow) {
+			if ((corrected_delta > 0U) || corrected_overflow) {
 				gk20a_writel(g, ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset, 0);
 			}
-			if ((uncorrected_delta > 0) || uncorrected_overflow) {
+			if ((uncorrected_delta > 0U) || uncorrected_overflow) {
 				gk20a_writel(g,
 					ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset, 0);
 			}
@@ -146,9 +146,9 @@ void gv11b_ltc_isr(struct gk20a *g)
 
 			/* update counters per slice */
 			if (corrected_overflow)
-				corrected_delta += (0x1UL << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
+				corrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
 			if (uncorrected_overflow)
-				uncorrected_delta += (0x1UL << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
+				uncorrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
 
 			ltc_corrected += corrected_delta;
 			ltc_uncorrected += uncorrected_delta;