author     Terje Bergstrom <tbergstrom@nvidia.com>              2018-08-13 15:58:18 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-08-16 13:14:40 -0400
commit     974d541623929fa2622d27d5d338a5b63596794b (patch)
tree       f47a540bf07efd7f6cda68f49d3675c2462d731a /drivers/gpu/nvgpu/common/ltc
parent     1e7f229e5d92078f772d4f81893b23504cd847a8 (diff)
gpu: nvgpu: Move ltc HAL to common
Move implementation of ltc HAL to common/ltc.

JIRA NVGPU-956

Change-Id: Id78d74e8612d7dacfb8d322d491abecd798e42b5
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1798461
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/ltc')
-rw-r--r--  drivers/gpu/nvgpu/common/ltc/ltc.c          54
-rw-r--r--  drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c   572
-rw-r--r--  drivers/gpu/nvgpu/common/ltc/ltc_gm20b.h    66
-rw-r--r--  drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c   320
-rw-r--r--  drivers/gpu/nvgpu/common/ltc/ltc_gp10b.h    35
-rw-r--r--  drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c   207
-rw-r--r--  drivers/gpu/nvgpu/common/ltc/ltc_gv11b.h    34
7 files changed, 1288 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc.c b/drivers/gpu/nvgpu/common/ltc/ltc.c
new file mode 100644
index 00000000..1beb1974
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/ltc/ltc.c
@@ -0,0 +1,54 @@
1/*
2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/ltc.h>
24#include <nvgpu/dma.h>
25#include <nvgpu/nvgpu_mem.h>
26
27#include "gk20a/gk20a.h"
28#include "gk20a/gr_gk20a.h"
29
30int nvgpu_init_ltc_support(struct gk20a *g)
31{
32 nvgpu_spinlock_init(&g->ltc_enabled_lock);
33
34 g->mm.ltc_enabled_current = true;
35 g->mm.ltc_enabled_target = true;
36
37 if (g->ops.ltc.init_fs_state)
38 g->ops.ltc.init_fs_state(g);
39
40 return 0;
41}
42
43void nvgpu_ltc_sync_enabled(struct gk20a *g)
44{
45 if (!g->ops.ltc.set_enabled)
46 return;
47
48 nvgpu_spinlock_acquire(&g->ltc_enabled_lock);
49 if (g->mm.ltc_enabled_current != g->mm.ltc_enabled_target) {
50 g->ops.ltc.set_enabled(g, g->mm.ltc_enabled_target);
51 g->mm.ltc_enabled_current = g->mm.ltc_enabled_target;
52 }
53 nvgpu_spinlock_release(&g->ltc_enabled_lock);
54}
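nvgpu_init_ltc_support() and nvgpu_ltc_sync_enabled() above split the L2 enable state into a target value (what callers request) and a current value (what the hardware was last programmed to); the HAL set_enabled hook is only invoked, under ltc_enabled_lock, when the two differ. Below is a minimal user-space sketch of that pattern, with a plain pthread mutex standing in for the nvgpu spinlock and a hypothetical set_enabled_hw() standing in for g->ops.ltc.set_enabled:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the target/current pattern in nvgpu_ltc_sync_enabled(). */
struct ltc_state {
	pthread_mutex_t lock;   /* stands in for g->ltc_enabled_lock */
	bool enabled_current;   /* what the hardware is set to now   */
	bool enabled_target;    /* what callers have asked for       */
};

/* Hypothetical stand-in for g->ops.ltc.set_enabled(). */
static void set_enabled_hw(bool enabled)
{
	printf("L2 %s\n", enabled ? "caching enabled" : "bypass (caching off)");
}

static void ltc_sync_enabled(struct ltc_state *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->enabled_current != s->enabled_target) {
		set_enabled_hw(s->enabled_target);
		s->enabled_current = s->enabled_target;
	}
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct ltc_state s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.enabled_current = true,
		.enabled_target = true,
	};

	s.enabled_target = false;  /* request L2 off ...            */
	ltc_sync_enabled(&s);      /* ... applied once, here        */
	ltc_sync_enabled(&s);      /* no-op: current equals target  */
	return 0;
}

The split lets callers flip the target from any context while the actual register write is deferred to the next sync point.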
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c
new file mode 100644
index 00000000..28d63e82
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c
@@ -0,0 +1,572 @@
1/*
2 * GM20B L2
3 *
4 * Copyright (c) 2014-2018 NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <trace/events/gk20a.h>
26
27#include <nvgpu/timers.h>
28#include <nvgpu/enabled.h>
29#include <nvgpu/bug.h>
30#include <nvgpu/ltc.h>
31#include <nvgpu/io.h>
32#include <nvgpu/utils.h>
33
34#include <nvgpu/hw/gm20b/hw_mc_gm20b.h>
35#include <nvgpu/hw/gm20b/hw_ltc_gm20b.h>
36#include <nvgpu/hw/gm20b/hw_top_gm20b.h>
37#include <nvgpu/hw/gm20b/hw_pri_ringmaster_gm20b.h>
38
39#include "gk20a/gk20a.h"
40
41#include "ltc_gm20b.h"
42
43int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
44{
45 /* max memory size (MB) to cover */
46 u32 max_size = gr->max_comptag_mem;
47 /* one tag line covers 128KB */
48 u32 max_comptag_lines = max_size << 3U;
49
50 u32 hw_max_comptag_lines =
51 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v();
52
53 u32 cbc_param =
54 gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
55 u32 comptags_per_cacheline =
56 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param);
57
58 u32 compbit_backing_size;
59
60 int err;
61
62 nvgpu_log_fn(g, " ");
63
64 if (max_comptag_lines == 0U)
65 return 0;
66
67 if (max_comptag_lines > hw_max_comptag_lines)
68 max_comptag_lines = hw_max_comptag_lines;
69
70 compbit_backing_size =
71 DIV_ROUND_UP(max_comptag_lines, comptags_per_cacheline) *
72 gr->cacheline_size * gr->slices_per_ltc * g->ltc_count;
73
74 /* aligned to 2KB * ltc_count */
75 compbit_backing_size +=
76 g->ltc_count << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
77
78 /* must be a multiple of 64KB */
79 compbit_backing_size = roundup(compbit_backing_size, 64*1024);
80
81 max_comptag_lines =
82 (compbit_backing_size * comptags_per_cacheline) /
83 (gr->cacheline_size * gr->slices_per_ltc * g->ltc_count);
84
85 if (max_comptag_lines > hw_max_comptag_lines)
86 max_comptag_lines = hw_max_comptag_lines;
87
88 nvgpu_log_info(g, "compbit backing store size : %d",
89 compbit_backing_size);
90 nvgpu_log_info(g, "max comptag lines : %d",
91 max_comptag_lines);
92
93 err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
94 if (err)
95 return err;
96
97 err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines);
98 if (err)
99 return err;
100
101 gr->max_comptag_lines = max_comptag_lines;
102 gr->comptags_per_cacheline = comptags_per_cacheline;
103
104 return 0;
105}
106
107int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
108 u32 min, u32 max)
109{
110 struct gr_gk20a *gr = &g->gr;
111 struct nvgpu_timeout timeout;
112 int err = 0;
113 u32 ltc, slice, ctrl1, val, hw_op = 0U;
114 u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(
115 gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
116 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
117 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
118 const u32 max_lines = 16384U;
119
120 nvgpu_log_fn(g, " ");
121
122 trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
123
124 if (gr->compbit_store.mem.size == 0)
125 return 0;
126
127 while (1) {
128 const u32 iter_max = min(min + max_lines - 1, max);
129 bool full_cache_op = true;
130
131 nvgpu_mutex_acquire(&g->mm.l2_op_lock);
132
133 nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max);
134
135 if (op == gk20a_cbc_op_clear) {
136 gk20a_writel(
137 g, ltc_ltcs_ltss_cbc_ctrl2_r(),
138 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(
139 min));
140 gk20a_writel(
141 g, ltc_ltcs_ltss_cbc_ctrl3_r(),
142 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(
143 iter_max));
144 hw_op = ltc_ltcs_ltss_cbc_ctrl1_clear_active_f();
145 full_cache_op = false;
146 } else if (op == gk20a_cbc_op_clean) {
147 /* this is full-cache op */
148 hw_op = ltc_ltcs_ltss_cbc_ctrl1_clean_active_f();
149 } else if (op == gk20a_cbc_op_invalidate) {
150 /* this is full-cache op */
151 hw_op = ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f();
152 } else {
153 nvgpu_err(g, "Unknown op: %u", (unsigned)op);
154 err = -EINVAL;
155 goto out;
156 }
157 gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl1_r(),
158 gk20a_readl(g,
159 ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op);
160
161 for (ltc = 0; ltc < g->ltc_count; ltc++) {
162 for (slice = 0; slice < slices_per_ltc; slice++) {
163
164 ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
165 ltc * ltc_stride + slice * lts_stride;
166
167 nvgpu_timeout_init(g, &timeout, 2000,
168 NVGPU_TIMER_RETRY_TIMER);
169 do {
170 val = gk20a_readl(g, ctrl1);
171 if (!(val & hw_op))
172 break;
173 nvgpu_udelay(5);
174 } while (!nvgpu_timeout_expired(&timeout));
175
176 if (nvgpu_timeout_peek_expired(&timeout)) {
177 nvgpu_err(g, "comp tag clear timeout");
178 err = -EBUSY;
179 goto out;
180 }
181 }
182 }
183
184 /* are we done? */
185 if (full_cache_op || iter_max == max)
186 break;
187
188 /* note: iter_max is inclusive upper bound */
189 min = iter_max + 1;
190
191 /* give a chance for higher-priority threads to progress */
192 nvgpu_mutex_release(&g->mm.l2_op_lock);
193 }
194out:
195 trace_gk20a_ltc_cbc_ctrl_done(g->name);
196 nvgpu_mutex_release(&g->mm.l2_op_lock);
197 return err;
198}
199
200void gm20b_ltc_init_fs_state(struct gk20a *g)
201{
202 struct gr_gk20a *gr = &g->gr;
203 u32 reg;
204
205 nvgpu_log_info(g, "initialize gm20b l2");
206
207 g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
208 g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r());
209 nvgpu_log_info(g, "%d ltcs out of %d", g->ltc_count, g->max_ltc_count);
210
211 reg = gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
212 	gr->slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(reg);
213 gr->cacheline_size =
214 512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg);
215
216 gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(),
217 g->ltc_count);
218 gk20a_writel(g, ltc_ltcs_misc_ltc_num_active_ltcs_r(),
219 g->ltc_count);
220
221 gk20a_writel(g, ltc_ltcs_ltss_dstg_cfg0_r(),
222 gk20a_readl(g, ltc_ltc0_lts0_dstg_cfg0_r()) |
223 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m());
224
225 /* Disable LTC interrupts */
226 reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
227 reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m();
228 reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
229 reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_m();
230 gk20a_writel(g, ltc_ltcs_ltss_intr_r(), reg);
231}
232
233void gm20b_ltc_isr(struct gk20a *g)
234{
235 u32 mc_intr, ltc_intr;
236 unsigned int ltc, slice;
237 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
238 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
239
240 mc_intr = gk20a_readl(g, mc_intr_ltc_r());
241 nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
242 for (ltc = 0; ltc < g->ltc_count; ltc++) {
243 if ((mc_intr & 1U << ltc) == 0)
244 continue;
245 for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
246 ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() +
247 ltc_stride * ltc +
248 lts_stride * slice);
249 nvgpu_err(g, "ltc%d, slice %d: %08x",
250 ltc, slice, ltc_intr);
251 gk20a_writel(g, ltc_ltc0_lts0_intr_r() +
252 ltc_stride * ltc +
253 lts_stride * slice,
254 ltc_intr);
255 }
256 }
257}
258
259u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
260{
261 u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
262 if (val == 2U) {
263 return base * 2;
264 } else if (val != 1) {
265 nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
266 }
267
268 return base;
269}
270
271/*
272 * Performs a full flush of the L2 cache.
273 */
274void gm20b_flush_ltc(struct gk20a *g)
275{
276 struct nvgpu_timeout timeout;
277 unsigned int ltc;
278 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
279
280 /* Clean... */
281 nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_cmgmt1_r(),
282 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f() |
283 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f() |
284 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f() |
285 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f() |
286 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f() |
287 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f());
288
289 /* Wait on each LTC individually. */
290 for (ltc = 0; ltc < g->ltc_count; ltc++) {
291 u32 op_pending;
292
293 /*
294 * Use 5ms - this should be sufficient time to flush the cache.
295 * On tegra, rough EMC BW available for old tegra chips (newer
296 * chips are strictly faster) can be estimated as follows:
297 *
298 * Lowest reasonable EMC clock speed will be around 102MHz on
299 * t124 for display enabled boards and generally fixed to max
300 * for non-display boards (since they are generally plugged in).
301 *
302 * Thus, the available BW is 64b * 2 * 102MHz = 1.3GB/s. Of that
303 * BW the GPU will likely get about half (display and overhead/
304 * utilization inefficiency eating the rest) so 650MB/s at
305 * worst. Assuming at most 1MB of GPU L2 cache (less for most
306 * chips) worst case is we take 1MB/650MB/s = 1.5ms.
307 *
308 * So 5ms timeout here should be more than sufficient.
309 */
310 nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
311
312 do {
313 int cmgmt1 = ltc_ltc0_ltss_tstg_cmgmt1_r() +
314 ltc * ltc_stride;
315 op_pending = gk20a_readl(g, cmgmt1);
316 } while ((op_pending &
317 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f()) &&
318 !nvgpu_timeout_expired_msg(&timeout,
319 "L2 flush timeout!"));
320 }
321
322 /* And invalidate. */
323 nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_cmgmt0_r(),
324 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f() |
325 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f() |
326 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f() |
327 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f() |
328 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f());
329
330 /* Wait on each LTC individually. */
331 for (ltc = 0; ltc < g->ltc_count; ltc++) {
332 u32 op_pending;
333
334 /* Again, 5ms. */
335 nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
336
337 do {
338 int cmgmt0 = ltc_ltc0_ltss_tstg_cmgmt0_r() +
339 ltc * ltc_stride;
340 op_pending = gk20a_readl(g, cmgmt0);
341 } while ((op_pending &
342 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f()) &&
343 !nvgpu_timeout_expired_msg(&timeout,
344 "L2 flush timeout!"));
345 }
346}
347
348int gm20b_determine_L2_size_bytes(struct gk20a *g)
349{
350 u32 lts_per_ltc;
351 u32 ways;
352 u32 sets;
353 u32 bytes_per_line;
354 u32 active_ltcs;
355 u32 cache_size;
356
357 u32 tmp;
358 u32 active_sets_value;
359
360 tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_cfg1_r());
361 ways = hweight32(ltc_ltc0_lts0_tstg_cfg1_active_ways_v(tmp));
362
363 active_sets_value = ltc_ltc0_lts0_tstg_cfg1_active_sets_v(tmp);
364 if (active_sets_value == ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v()) {
365 sets = 64U;
366 } else if (active_sets_value ==
367 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v()) {
368 sets = 32U;
369 } else if (active_sets_value ==
370 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
371 sets = 16U;
372 } else {
373 nvgpu_err(g, "Unknown constant %u for active sets",
374 (unsigned)active_sets_value);
375 sets = 0U;
376 }
377
378 active_ltcs = g->gr.num_fbps;
379
380 /* chip-specific values */
381 lts_per_ltc = 2U;
382 bytes_per_line = 128U;
383 cache_size = active_ltcs * lts_per_ltc * ways * sets * bytes_per_line;
384
385 return cache_size;
386}
387
388/*
389 * Sets the ZBC color for the passed index.
390 */
391void gm20b_ltc_set_zbc_color_entry(struct gk20a *g,
392 struct zbc_entry *color_val,
393 u32 index)
394{
395 u32 i;
396 u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
397
398 nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
399 ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
400
401 for (i = 0;
402 i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++) {
403 nvgpu_writel_check(g,
404 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i),
405 color_val->color_l2[i]);
406 }
407}
408
409/*
410 * Sets the ZBC depth for the passed index.
411 */
412void gm20b_ltc_set_zbc_depth_entry(struct gk20a *g,
413 struct zbc_entry *depth_val,
414 u32 index)
415{
416 u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
417
418 nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
419 ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
420
421 nvgpu_writel_check(g,
422 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(),
423 depth_val->depth);
424}
425
426void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
427{
428 u32 max_size = gr->max_comptag_mem;
429 u32 max_comptag_lines = max_size << 3U;
430
431 u32 compbit_base_post_divide;
432 u64 compbit_base_post_multiply64;
433 u64 compbit_store_iova;
434 u64 compbit_base_post_divide64;
435
436 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
437 compbit_store_iova = nvgpu_mem_get_phys_addr(g,
438 &gr->compbit_store.mem);
439 else
440 compbit_store_iova = nvgpu_mem_get_addr(g,
441 &gr->compbit_store.mem);
442
443 compbit_base_post_divide64 = compbit_store_iova >>
444 ltc_ltcs_ltss_cbc_base_alignment_shift_v();
445
446 do_div(compbit_base_post_divide64, g->ltc_count);
447 compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);
448
449 compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
450 g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
451
452 if (compbit_base_post_multiply64 < compbit_store_iova)
453 compbit_base_post_divide++;
454
455 /* Bug 1477079 indicates sw adjustment on the posted divided base. */
456 if (g->ops.ltc.cbc_fix_config)
457 compbit_base_post_divide =
458 g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);
459
460 gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
461 compbit_base_post_divide);
462
463 nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
464 "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
465 (u32)(compbit_store_iova >> 32),
466 (u32)(compbit_store_iova & 0xffffffff),
467 compbit_base_post_divide);
468
469 gr->compbit_store.base_hw = compbit_base_post_divide;
470
471 g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
472 0, max_comptag_lines - 1);
473
474}
475
476void gm20b_ltc_set_enabled(struct gk20a *g, bool enabled)
477{
478 u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();
479 u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());
480
481 if (enabled)
482 /* bypass disabled (normal caching ops)*/
483 reg &= ~reg_f;
484 else
485 /* bypass enabled (no caching) */
486 reg |= reg_f;
487
488 gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg);
489}
490
491/*
492 * LTC pri addressing
493 */
494bool gm20b_ltc_pri_is_ltc_addr(struct gk20a *g, u32 addr)
495{
496 return ((addr >= ltc_pltcg_base_v()) && (addr < ltc_pltcg_extent_v()));
497}
498
499bool gm20b_ltc_is_ltcs_ltss_addr(struct gk20a *g, u32 addr)
500{
501 u32 ltc_shared_base = ltc_ltcs_ltss_v();
502 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
503
504 return (addr >= ltc_shared_base) &&
505 (addr < (ltc_shared_base + lts_stride));
506}
507
508bool gm20b_ltc_is_ltcn_ltss_addr(struct gk20a *g, u32 addr)
509{
510 u32 lts_shared_base = ltc_ltc0_ltss_v();
511 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
512 u32 addr_mask = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE) - 1;
513 u32 base_offset = lts_shared_base & addr_mask;
514 u32 end_offset = base_offset + lts_stride;
515
516 return (!gm20b_ltc_is_ltcs_ltss_addr(g, addr)) &&
517 ((addr & addr_mask) >= base_offset) &&
518 ((addr & addr_mask) < end_offset);
519}
520
521static void gm20b_ltc_update_ltc_lts_addr(struct gk20a *g, u32 addr, u32 ltc_num,
522 u32 *priv_addr_table,
523 u32 *priv_addr_table_index)
524{
525 u32 num_ltc_slices = g->ops.gr.get_max_lts_per_ltc(g);
526 u32 index = *priv_addr_table_index;
527 u32 lts_num;
528 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
529 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
530
531 for (lts_num = 0; lts_num < num_ltc_slices; lts_num++) {
532 priv_addr_table[index++] = ltc_ltc0_lts0_v() +
533 ltc_num * ltc_stride +
534 lts_num * lts_stride +
535 (addr & (lts_stride - 1));
536 }
537
538 *priv_addr_table_index = index;
539}
540
541void gm20b_ltc_split_lts_broadcast_addr(struct gk20a *g, u32 addr,
542 u32 *priv_addr_table,
543 u32 *priv_addr_table_index)
544{
545 u32 num_ltc = g->ltc_count;
546 u32 i, start, ltc_num = 0;
547 u32 pltcg_base = ltc_pltcg_base_v();
548 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
549
550 for (i = 0; i < num_ltc; i++) {
551 start = pltcg_base + i * ltc_stride;
552 if ((addr >= start) && (addr < (start + ltc_stride))) {
553 ltc_num = i;
554 break;
555 }
556 }
557 gm20b_ltc_update_ltc_lts_addr(g, addr, ltc_num, priv_addr_table,
558 priv_addr_table_index);
559}
560
561void gm20b_ltc_split_ltc_broadcast_addr(struct gk20a *g, u32 addr,
562 u32 *priv_addr_table,
563 u32 *priv_addr_table_index)
564{
565 u32 num_ltc = g->ltc_count;
566 u32 ltc_num;
567
568 for (ltc_num = 0; ltc_num < num_ltc; ltc_num++) {
569 gm20b_ltc_update_ltc_lts_addr(g, addr, ltc_num,
570 priv_addr_table, priv_addr_table_index);
571 }
572}
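gm20b_ltc_init_comptags() above sizes the compbit backing store from gr->max_comptag_mem: one comptag line covers 128KB, so the megabyte count shifted left by 3 gives the line count; the byte size is then derived per cacheline/slice/LTC, padded by 2KB per LTC for base alignment, and rounded up to a 64KB multiple. A standalone sketch of that arithmetic follows, with illustrative values in place of the cbc_param register fields:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define ROUNDUP(x, y)       (DIV_ROUND_UP(x, y) * (y))

int main(void)
{
	/* Illustrative values only; the driver reads these from
	 * ltc_ltcs_ltss_cbc_param_r(). */
	uint32_t max_comptag_mem = 1024;        /* MB of memory to cover   */
	uint32_t comptags_per_cacheline = 4;
	uint32_t cacheline_size = 2048;         /* example compbit cacheline */
	uint32_t slices_per_ltc = 2;
	uint32_t ltc_count = 2;
	uint32_t alignment_shift = 11;          /* 2KB alignment per LTC   */

	/* one tag line covers 128KB => MB * 1024KB / 128KB = MB << 3 */
	uint32_t max_comptag_lines = max_comptag_mem << 3;

	uint32_t backing =
		DIV_ROUND_UP(max_comptag_lines, comptags_per_cacheline) *
		cacheline_size * slices_per_ltc * ltc_count;

	backing += ltc_count << alignment_shift;  /* 2KB * ltc_count pad  */
	backing = ROUNDUP(backing, 64 * 1024);    /* must be 64KB aligned */

	printf("comptag lines: %u, backing store: %u bytes\n",
	       (unsigned)max_comptag_lines, (unsigned)backing);
	return 0;
}

With these example values, covering 1GB needs 8192 comptag lines and roughly 16MB of backing store; the real numbers depend entirely on the register fields read at init time.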
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.h b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.h
new file mode 100644
index 00000000..cc92c70a
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.h
@@ -0,0 +1,66 @@
1/*
2 * GM20B L2
3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef NVGPU_LTC_GM20B
26#define NVGPU_LTC_GM20B
27
28#include <nvgpu/types.h>
29
30struct gk20a;
31struct gr_gk20a;
32struct gpu_ops;
33struct zbc_entry;
34enum gk20a_cbc_op;
35
36int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr);
37int gm20b_determine_L2_size_bytes(struct gk20a *g);
38void gm20b_ltc_set_zbc_color_entry(struct gk20a *g,
39 struct zbc_entry *color_val,
40 u32 index);
41void gm20b_ltc_set_zbc_depth_entry(struct gk20a *g,
42 struct zbc_entry *depth_val,
43 u32 index);
44void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr);
45void gm20b_ltc_set_enabled(struct gk20a *g, bool enabled);
46void gm20b_ltc_init_fs_state(struct gk20a *g);
47int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
48 u32 min, u32 max);
49void gm20b_ltc_isr(struct gk20a *g);
50u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base);
51void gm20b_flush_ltc(struct gk20a *g);
52int gm20b_ltc_alloc_phys_cbc(struct gk20a *g,
53 size_t compbit_backing_size);
54int gm20b_ltc_alloc_virt_cbc(struct gk20a *g,
55 size_t compbit_backing_size);
56bool gm20b_ltc_pri_is_ltc_addr(struct gk20a *g, u32 addr);
57bool gm20b_ltc_is_ltcs_ltss_addr(struct gk20a *g, u32 addr);
58bool gm20b_ltc_is_ltcn_ltss_addr(struct gk20a *g, u32 addr);
59void gm20b_ltc_split_lts_broadcast_addr(struct gk20a *g, u32 addr,
60 u32 *priv_addr_table,
61 u32 *priv_addr_table_index);
62void gm20b_ltc_split_ltc_broadcast_addr(struct gk20a *g, u32 addr,
63 u32 *priv_addr_table,
64 u32 *priv_addr_table_index);
65
66#endif
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
new file mode 100644
index 00000000..eb262add
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
@@ -0,0 +1,320 @@
1/*
2 * GP10B L2
3 *
4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <trace/events/gk20a.h>
26
27#include <nvgpu/ltc.h>
28#include <nvgpu/log.h>
29#include <nvgpu/enabled.h>
30#include <nvgpu/io.h>
31
32#include <nvgpu/hw/gp10b/hw_mc_gp10b.h>
33#include <nvgpu/hw/gp10b/hw_ltc_gp10b.h>
34
35#include "gk20a/gk20a.h"
36
37#include "ltc_gm20b.h"
38#include "ltc_gp10b.h"
39
40int gp10b_determine_L2_size_bytes(struct gk20a *g)
41{
42 u32 tmp;
43 int ret;
44
45 nvgpu_log_fn(g, " ");
46
47 tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r());
48
49 ret = g->ltc_count *
50 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 *
51 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp);
52
53 nvgpu_log(g, gpu_dbg_info, "L2 size: %d\n", ret);
54
55 nvgpu_log_fn(g, "done");
56
57 return ret;
58}
59
60int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
61{
62 /* max memory size (MB) to cover */
63 u32 max_size = gr->max_comptag_mem;
64 /* one tag line covers 64KB */
65 u32 max_comptag_lines = max_size << 4U;
66
67 u32 hw_max_comptag_lines =
68 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v();
69
70 u32 cbc_param =
71 gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
72 u32 comptags_per_cacheline =
73 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param);
74 u32 cbc_param2 =
75 gk20a_readl(g, ltc_ltcs_ltss_cbc_param2_r());
76 u32 gobs_per_comptagline_per_slice =
77 ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(cbc_param2);
78
79 u32 compbit_backing_size;
80
81 int err;
82
83 nvgpu_log_fn(g, " ");
84
85 if (max_comptag_lines == 0U)
86 return 0;
87
88 /* Already initialized */
89 if (gr->max_comptag_lines)
90 return 0;
91
92 if (max_comptag_lines > hw_max_comptag_lines)
93 max_comptag_lines = hw_max_comptag_lines;
94
95 compbit_backing_size =
96 roundup(max_comptag_lines * gobs_per_comptagline_per_slice,
97 gr->cacheline_size);
98 compbit_backing_size = roundup(
99 compbit_backing_size * gr->slices_per_ltc * g->ltc_count,
100 g->ops.fb.compressible_page_size(g));
101
102 /* aligned to 2KB * ltc_count */
103 compbit_backing_size +=
104 g->ltc_count << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
105
106 /* must be a multiple of 64KB */
107 compbit_backing_size = roundup(compbit_backing_size, 64*1024);
108
109 nvgpu_log_info(g, "compbit backing store size : %d",
110 compbit_backing_size);
111 nvgpu_log_info(g, "max comptag lines : %d",
112 max_comptag_lines);
113 nvgpu_log_info(g, "gobs_per_comptagline_per_slice: %d",
114 gobs_per_comptagline_per_slice);
115
116 err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
117 if (err)
118 return err;
119
120 err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines);
121 if (err)
122 return err;
123
124 gr->max_comptag_lines = max_comptag_lines;
125 gr->comptags_per_cacheline = comptags_per_cacheline;
126 gr->gobs_per_comptagline_per_slice = gobs_per_comptagline_per_slice;
127
128 return 0;
129}
130
131int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
132 u32 min, u32 max)
133{
134 struct gr_gk20a *gr = &g->gr;
135 struct nvgpu_timeout timeout;
136 int err = 0;
137 u32 ltc, slice, ctrl1, val, hw_op = 0U;
138 u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(
139 gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
140 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
141 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
142 const u32 max_lines = 16384U;
143
144 nvgpu_log_fn(g, " ");
145
146 trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
147
148 if (gr->compbit_store.mem.size == 0U)
149 return 0;
150
151 while (1) {
152 const u32 iter_max = min(min + max_lines - 1, max);
153 bool full_cache_op = true;
154
155 nvgpu_mutex_acquire(&g->mm.l2_op_lock);
156
157 nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max);
158
159 if (op == gk20a_cbc_op_clear) {
160 nvgpu_writel_check(
161 g, ltc_ltcs_ltss_cbc_ctrl2_r(),
162 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(
163 min));
164
165 nvgpu_writel_check(
166 g, ltc_ltcs_ltss_cbc_ctrl3_r(),
167 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(
168 iter_max));
169
170 hw_op = ltc_ltcs_ltss_cbc_ctrl1_clear_active_f();
171 full_cache_op = false;
172 } else if (op == gk20a_cbc_op_clean) {
173 /* this is full-cache op */
174 hw_op = ltc_ltcs_ltss_cbc_ctrl1_clean_active_f();
175 } else if (op == gk20a_cbc_op_invalidate) {
176 /* this is full-cache op */
177 hw_op = ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f();
178 } else {
179 nvgpu_err(g, "Unknown op: %u", (unsigned)op);
180 err = -EINVAL;
181 goto out;
182 }
183 gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl1_r(),
184 gk20a_readl(g,
185 ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op);
186
187 for (ltc = 0; ltc < g->ltc_count; ltc++) {
188 for (slice = 0; slice < slices_per_ltc; slice++) {
189
190 ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
191 ltc * ltc_stride + slice * lts_stride;
192
193 nvgpu_timeout_init(g, &timeout, 2000,
194 NVGPU_TIMER_RETRY_TIMER);
195 do {
196 val = gk20a_readl(g, ctrl1);
197 if (!(val & hw_op))
198 break;
199 nvgpu_udelay(5);
200 } while (!nvgpu_timeout_expired(&timeout));
201
202 if (nvgpu_timeout_peek_expired(&timeout)) {
203 nvgpu_err(g, "comp tag clear timeout");
204 err = -EBUSY;
205 goto out;
206 }
207 }
208 }
209
210 /* are we done? */
211 if (full_cache_op || iter_max == max)
212 break;
213
214 /* note: iter_max is inclusive upper bound */
215 min = iter_max + 1;
216
217 /* give a chance for higher-priority threads to progress */
218 nvgpu_mutex_release(&g->mm.l2_op_lock);
219 }
220out:
221 trace_gk20a_ltc_cbc_ctrl_done(g->name);
222 nvgpu_mutex_release(&g->mm.l2_op_lock);
223 return err;
224}
225
226void gp10b_ltc_isr(struct gk20a *g)
227{
228 u32 mc_intr, ltc_intr;
229 unsigned int ltc, slice;
230 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
231 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
232
233 mc_intr = gk20a_readl(g, mc_intr_ltc_r());
234 nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
235 for (ltc = 0; ltc < g->ltc_count; ltc++) {
236 if ((mc_intr & 1U << ltc) == 0)
237 continue;
238 for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
239 u32 offset = ltc_stride * ltc + lts_stride * slice;
240 ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + offset);
241
242 /* Detect and handle ECC errors */
243 if (ltc_intr &
244 ltc_ltcs_ltss_intr_ecc_sec_error_pending_f()) {
245 u32 ecc_stats_reg_val;
246
247 nvgpu_err(g,
248 "Single bit error detected in GPU L2!");
249
250 ecc_stats_reg_val =
251 gk20a_readl(g,
252 ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
253 g->ecc.ltc.ecc_sec_count[ltc][slice].counter +=
254 ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(ecc_stats_reg_val);
255 ecc_stats_reg_val &=
256 ~(ltc_ltc0_lts0_dstg_ecc_report_sec_count_m());
257 nvgpu_writel_check(g,
258 ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
259 ecc_stats_reg_val);
260 g->ops.mm.l2_flush(g, true);
261 }
262 if (ltc_intr &
263 ltc_ltcs_ltss_intr_ecc_ded_error_pending_f()) {
264 u32 ecc_stats_reg_val;
265
266 nvgpu_err(g,
267 "Double bit error detected in GPU L2!");
268
269 ecc_stats_reg_val =
270 gk20a_readl(g,
271 ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
272 g->ecc.ltc.ecc_ded_count[ltc][slice].counter +=
273 ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(ecc_stats_reg_val);
274 ecc_stats_reg_val &=
275 ~(ltc_ltc0_lts0_dstg_ecc_report_ded_count_m());
276 nvgpu_writel_check(g,
277 ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
278 ecc_stats_reg_val);
279 }
280
281 nvgpu_err(g, "ltc%d, slice %d: %08x",
282 ltc, slice, ltc_intr);
283 nvgpu_writel_check(g, ltc_ltc0_lts0_intr_r() +
284 ltc_stride * ltc + lts_stride * slice,
285 ltc_intr);
286 }
287 }
288}
289
290void gp10b_ltc_init_fs_state(struct gk20a *g)
291{
292 u32 ltc_intr;
293
294 gm20b_ltc_init_fs_state(g);
295
296 gk20a_writel(g, ltc_ltca_g_axi_pctrl_r(),
297 ltc_ltca_g_axi_pctrl_user_sid_f(g->ltc_streamid));
298
299 /* Enable ECC interrupts */
300 ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
301 ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() |
302 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f();
303 gk20a_writel(g, ltc_ltcs_ltss_intr_r(),
304 ltc_intr);
305}
306
307void gp10b_ltc_set_enabled(struct gk20a *g, bool enabled)
308{
309 u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();
310 u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());
311
312 if (enabled)
313 /* bypass disabled (normal caching ops)*/
314 reg &= ~reg_f;
315 else
316 /* bypass enabled (no caching) */
317 reg |= reg_f;
318
319 nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg);
320}
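gp10b_ltc_init_comptags() differs from the GM20B version in two ways: a comptag line covers 64KB (so max_comptag_mem << 4 lines), and the backing-store size is built from gobs_per_comptagline_per_slice (read from cbc_param2) before being rounded to the compressible page size. A small sketch of that sizing with illustrative values; compressible_page_size here is an assumed 64KB stand-in for g->ops.fb.compressible_page_size(g):

#include <stdio.h>
#include <stdint.h>

#define ROUNDUP(x, y)  ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	/* Illustrative values; the driver reads them from cbc_param/cbc_param2. */
	uint32_t max_comptag_mem = 1024;              /* MB to cover            */
	uint32_t gobs_per_comptagline_per_slice = 4;
	uint32_t cacheline_size = 2048;
	uint32_t slices_per_ltc = 2;
	uint32_t ltc_count = 2;
	uint32_t compressible_page_size = 64 * 1024;  /* assumed stand-in value */

	/* one tag line covers 64KB => MB * 1024KB / 64KB = MB << 4 */
	uint32_t max_comptag_lines = max_comptag_mem << 4;

	uint32_t backing = ROUNDUP(max_comptag_lines * gobs_per_comptagline_per_slice,
				   cacheline_size);
	backing = ROUNDUP(backing * slices_per_ltc * ltc_count,
			  compressible_page_size);
	backing += ltc_count << 11;                   /* 2KB * ltc_count pad    */
	backing = ROUNDUP(backing, 64 * 1024);        /* 64KB multiple          */

	printf("comptag lines: %u, backing store: %u bytes\n",
	       (unsigned)max_comptag_lines, (unsigned)backing);
	return 0;
}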
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.h b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.h
new file mode 100644
index 00000000..c1a2bf64
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef LTC_GP10B_H
24#define LTC_GP10B_H
25struct gpu_ops;
26
27void gp10b_ltc_isr(struct gk20a *g);
28
29int gp10b_determine_L2_size_bytes(struct gk20a *g);
30int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr);
31void gp10b_ltc_init_fs_state(struct gk20a *g);
32int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
33 u32 min, u32 max);
34void gp10b_ltc_set_enabled(struct gk20a *g, bool enabled);
35#endif
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
new file mode 100644
index 00000000..98306079
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
@@ -0,0 +1,207 @@
1/*
2 * GV11B LTC
3 *
4 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <nvgpu/io.h>
26#include "gk20a/gk20a.h"
27
28#include "ltc_gp10b.h"
29#include "ltc_gv11b.h"
30
31#include <nvgpu/hw/gv11b/hw_ltc_gv11b.h>
32#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
33#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
34#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
35
36#include <nvgpu/utils.h>
37
38/*
39 * Sets the ZBC stencil for the passed index.
40 */
41void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g,
42 struct zbc_entry *stencil_val,
43 u32 index)
44{
45 u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
46
47 nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
48 ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
49
50 nvgpu_writel_check(g,
51 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(),
52 stencil_val->depth);
53}
54
55void gv11b_ltc_init_fs_state(struct gk20a *g)
56{
57 struct gr_gk20a *gr = &g->gr;
58 u32 ltc_intr;
59 u32 reg;
60
61 nvgpu_log_info(g, "initialize gv11b l2");
62
63 g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
64 g->ltc_count = g->ops.priv_ring.enum_ltc(g);
65 nvgpu_log_info(g, "%u ltcs out of %u", g->ltc_count, g->max_ltc_count);
66
67 reg = gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
68 	gr->slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(reg);
69 gr->cacheline_size =
70 512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg);
71
72 /* Disable LTC interrupts */
73 reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
74 reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m();
75 reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
76 nvgpu_writel_check(g, ltc_ltcs_ltss_intr_r(), reg);
77
78 if (g->ops.ltc.intr_en_illegal_compstat)
79 g->ops.ltc.intr_en_illegal_compstat(g,
80 g->ltc_intr_en_illegal_compstat);
81
82 /* Enable ECC interrupts */
83 ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
84 ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() |
85 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f();
86 nvgpu_writel_check(g, ltc_ltcs_ltss_intr_r(),
87 ltc_intr);
88}
89
90void gv11b_ltc_intr_en_illegal_compstat(struct gk20a *g, bool enable)
91{
92 u32 val;
93
94 	/* disable/enable illegal_compstat interrupt */
95 val = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
96 if (enable)
97 val = set_field(val,
98 ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
99 ltc_ltcs_ltss_intr_en_illegal_compstat_enabled_f());
100 else
101 val = set_field(val,
102 ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
103 ltc_ltcs_ltss_intr_en_illegal_compstat_disabled_f());
104 gk20a_writel(g, ltc_ltcs_ltss_intr_r(), val);
105}
106
107
108void gv11b_ltc_isr(struct gk20a *g)
109{
110 u32 mc_intr, ltc_intr3;
111 unsigned int ltc, slice;
112 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
113 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
114 u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
115 u32 corrected_delta, uncorrected_delta;
116 u32 corrected_overflow, uncorrected_overflow;
117
118 mc_intr = gk20a_readl(g, mc_intr_ltc_r());
119 for (ltc = 0; ltc < g->ltc_count; ltc++) {
120 if ((mc_intr & 1U << ltc) == 0)
121 continue;
122
123 for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
124 u32 offset = ltc_stride * ltc + lts_stride * slice;
125 ltc_intr3 = gk20a_readl(g, ltc_ltc0_lts0_intr3_r() +
126 offset);
127
128 /* Detect and handle ECC PARITY errors */
129
130 if (ltc_intr3 &
131 (ltc_ltcs_ltss_intr3_ecc_uncorrected_m() |
132 ltc_ltcs_ltss_intr3_ecc_corrected_m())) {
133
134 ecc_status = gk20a_readl(g,
135 ltc_ltc0_lts0_l2_cache_ecc_status_r() +
136 offset);
137 ecc_addr = gk20a_readl(g,
138 ltc_ltc0_lts0_l2_cache_ecc_address_r() +
139 offset);
140 corrected_cnt = gk20a_readl(g,
141 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset);
142 uncorrected_cnt = gk20a_readl(g,
143 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset);
144
145 corrected_delta =
146 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(corrected_cnt);
147 uncorrected_delta =
148 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_v(uncorrected_cnt);
149 corrected_overflow = ecc_status &
150 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_m();
151
152 uncorrected_overflow = ecc_status &
153 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m();
154
155 /* clear the interrupt */
156 if ((corrected_delta > 0U) || corrected_overflow) {
157 nvgpu_writel_check(g,
158 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset, 0);
159 }
160 if ((uncorrected_delta > 0U) || uncorrected_overflow) {
161 nvgpu_writel_check(g,
162 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset, 0);
163 }
164
165 nvgpu_writel_check(g,
166 ltc_ltc0_lts0_l2_cache_ecc_status_r() + offset,
167 ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f());
168
169 /* update counters per slice */
170 if (corrected_overflow)
171 corrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
172 if (uncorrected_overflow)
173 uncorrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
174
175 g->ecc.ltc.ecc_sec_count[ltc][slice].counter += corrected_delta;
176 g->ecc.ltc.ecc_ded_count[ltc][slice].counter += uncorrected_delta;
177 nvgpu_log(g, gpu_dbg_intr,
178 "ltc:%d lts: %d cache ecc interrupt intr: 0x%x", ltc, slice, ltc_intr3);
179
180 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m())
181 nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected");
182 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m())
183 nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected");
184 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m())
185 nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected");
186 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m())
187 nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected");
188 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m())
189 nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected");
190 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m())
191 nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected");
192
193 if (corrected_overflow || uncorrected_overflow)
194 nvgpu_info(g, "ecc counter overflow!");
195
196 nvgpu_log(g, gpu_dbg_intr,
197 "ecc error address: 0x%x", ecc_addr);
198
199 }
200
201 }
202
203 }
204
205 /* fallback to other interrupts */
206 gp10b_ltc_isr(g);
207}
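gv11b_ltc_isr() above folds hardware ECC counters into the software totals: the per-interrupt delta is the counter's total field, and when the status register reports a counter overflow, one full counter range (1 << total_s) is added back before accumulating. A minimal sketch of that accumulation follows; the 16-bit counter width is an assumption for illustration, where the driver uses the generated _total_s() field width:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Assumed counter width for illustration; the driver uses
 * ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s(). */
#define ECC_COUNT_BITS 16U

static uint64_t accumulate_ecc(uint64_t total, uint32_t hw_count, bool overflowed)
{
	uint32_t delta = hw_count;

	/* If the hardware counter wrapped, account for one full counter range. */
	if (overflowed)
		delta += 1U << ECC_COUNT_BITS;

	return total + delta;
}

int main(void)
{
	uint64_t total = 0;

	total = accumulate_ecc(total, 3, false);  /* 3 corrected errors           */
	total = accumulate_ecc(total, 5, true);   /* counter wrapped once, then 5 */
	printf("corrected ECC total: %llu\n", (unsigned long long)total);
	return 0;
}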
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.h b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.h
new file mode 100644
index 00000000..9d33b9fb
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef LTC_GV11B_H
24#define LTC_GV11B_H
25struct gk20a;
26
27void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g,
28 struct zbc_entry *stencil_val,
29 u32 index);
30void gv11b_ltc_init_fs_state(struct gk20a *g);
31void gv11b_ltc_intr_en_illegal_compstat(struct gk20a *g, bool enable);
32void gv11b_ltc_isr(struct gk20a *g);
33
34#endif