Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/ltc_gk20a.c')
-rw-r--r-- drivers/gpu/nvgpu/gk20a/ltc_gk20a.c | 323 ---------------------------------
 1 file changed, 0 insertions(+), 323 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
index 9220685a..a543a0d3 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
@@ -16,19 +16,10 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <trace/events/gk20a.h>
-#include <nvgpu/timers.h>
-#include <nvgpu/log.h>
-#include <nvgpu/bug.h>
-#include <nvgpu/enabled.h>
 #include <nvgpu/dma.h>
 
 #include "gk20a.h"
 #include "gr_gk20a.h"
-#include "ltc_gk20a.h"
-
-#include <nvgpu/hw/gk20a/hw_ltc_gk20a.h>
-
 
 /* Non HW reg dependent stuff: */
 
@@ -49,317 +40,3 @@ int gk20a_ltc_alloc_virt_cbc(struct gk20a *g, size_t compbit_backing_size)
                 compbit_backing_size,
                 &gr->compbit_store.mem);
 }
-
-/* HW reg dependent stuff: */
-int gk20a_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
-{
-        /* max memory size (MB) to cover */
-        u32 max_size = gr->max_comptag_mem;
-        /* one tag line covers 128KB */
-        u32 max_comptag_lines = max_size << 3;
-
-        u32 hw_max_comptag_lines =
-                ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v();
-
-        u32 cbc_param =
-                gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
-        u32 comptags_per_cacheline =
-                ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param);
-        u32 slices_per_fbp =
-                ltc_ltcs_ltss_cbc_param_slices_per_fbp_v(cbc_param);
-        u32 cacheline_size =
-                512 << ltc_ltcs_ltss_cbc_param_cache_line_size_v(cbc_param);
-
-        u32 compbit_backing_size;
-
-        int err;
-
-        gk20a_dbg_fn("");
-
-        if (max_comptag_lines == 0)
-                return 0;
-
-        if (max_comptag_lines > hw_max_comptag_lines)
-                max_comptag_lines = hw_max_comptag_lines;
-
-        /* no hybrid fb */
-        compbit_backing_size =
-                DIV_ROUND_UP(max_comptag_lines, comptags_per_cacheline) *
-                cacheline_size * slices_per_fbp * gr->num_fbps;
-
-        /* aligned to 2KB * num_fbps */
-        compbit_backing_size +=
-                gr->num_fbps << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
-
-        /* must be a multiple of 64KB */
-        compbit_backing_size = roundup(compbit_backing_size, 64*1024);
-
-        max_comptag_lines =
-                (compbit_backing_size * comptags_per_cacheline) /
-                (cacheline_size * slices_per_fbp * gr->num_fbps);
-
-        if (max_comptag_lines > hw_max_comptag_lines)
-                max_comptag_lines = hw_max_comptag_lines;
-
-        gk20a_dbg_info("compbit backing store size : %d",
-                compbit_backing_size);
-        gk20a_dbg_info("max comptag lines : %d",
-                max_comptag_lines);
-
-        if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
-                err = gk20a_ltc_alloc_phys_cbc(g, compbit_backing_size);
-        else
-                err = gk20a_ltc_alloc_virt_cbc(g, compbit_backing_size);
-
-        if (err)
-                return err;
-
-        err = gk20a_comptag_allocator_init(&gr->comp_tags, max_comptag_lines);
-        if (err)
-                return err;
-
-        gr->comptags_per_cacheline = comptags_per_cacheline;
-        gr->slices_per_ltc = slices_per_fbp / g->ltc_count;
-        gr->cacheline_size = cacheline_size;
-
-        return 0;
-}
-
-int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
-                       u32 min, u32 max)
-{
-        int err = 0;
-        struct gr_gk20a *gr = &g->gr;
-        u32 fbp, slice, ctrl1, val, hw_op = 0;
-        u32 slices_per_fbp =
-                ltc_ltcs_ltss_cbc_param_slices_per_fbp_v(
-                        gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
-        u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-        u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-
-        gk20a_dbg_fn("");
-
-        trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
-
-        if (gr->compbit_store.mem.size == 0)
-                return 0;
-
-        nvgpu_mutex_acquire(&g->mm.l2_op_lock);
-
-        if (op == gk20a_cbc_op_clear) {
-                gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl2_r(),
-                        ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(min));
-                gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl3_r(),
-                        ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(max));
-                hw_op = ltc_ltcs_ltss_cbc_ctrl1_clear_active_f();
-        } else if (op == gk20a_cbc_op_clean) {
-                hw_op = ltc_ltcs_ltss_cbc_ctrl1_clean_active_f();
-        } else if (op == gk20a_cbc_op_invalidate) {
-                hw_op = ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f();
-        } else {
-                BUG_ON(1);
-        }
-
-        gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl1_r(),
-                gk20a_readl(g, ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op);
-
-        for (fbp = 0; fbp < gr->num_fbps; fbp++) {
-                struct nvgpu_timeout timeout;
-
-                nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
-                for (slice = 0; slice < slices_per_fbp; slice++) {
-
-
-                        ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
-                                fbp * ltc_stride +
-                                slice * lts_stride;
-
-                        do {
-                                val = gk20a_readl(g, ctrl1);
-                                if (!(val & hw_op))
-                                        break;
-                                nvgpu_udelay(5);
-
-                        } while (!nvgpu_timeout_expired(&timeout));
-
-                        if (nvgpu_timeout_peek_expired(&timeout)) {
-                                nvgpu_err(g, "comp tag clear timeout");
-                                err = -EBUSY;
-                                goto out;
-                        }
-                }
-        }
-out:
-        trace_gk20a_ltc_cbc_ctrl_done(g->name);
-        nvgpu_mutex_release(&g->mm.l2_op_lock);
-        return err;
-}
-
-
-void gk20a_ltc_init_fs_state(struct gk20a *g)
-{
-        gk20a_dbg_info("initialize gk20a L2");
-
-        g->max_ltc_count = g->ltc_count = 1;
-}
-
-void gk20a_ltc_isr(struct gk20a *g)
-{
-        u32 intr;
-
-        intr = gk20a_readl(g, ltc_ltc0_ltss_intr_r());
-        nvgpu_err(g, "ltc: %08x", intr);
-        gk20a_writel(g, ltc_ltc0_ltss_intr_r(), intr);
-}
-
-int gk20a_determine_L2_size_bytes(struct gk20a *g)
-{
-        u32 lts_per_ltc;
-        u32 ways;
-        u32 sets;
-        u32 bytes_per_line;
-        u32 active_ltcs;
-        u32 cache_size;
-
-        u32 tmp;
-        u32 active_sets_value;
-
-        tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_cfg1_r());
-        ways = hweight32(ltc_ltc0_lts0_tstg_cfg1_active_ways_v(tmp));
-
-        active_sets_value = ltc_ltc0_lts0_tstg_cfg1_active_sets_v(tmp);
-        if (active_sets_value == ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v()) {
-                sets = 64;
-        } else if (active_sets_value ==
-                 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v()) {
-                sets = 32;
-        } else if (active_sets_value ==
-                 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
-                sets = 16;
-        } else {
-                nvgpu_err(g,
-                        "Unknown constant %u for active sets",
-                        (unsigned)active_sets_value);
-                sets = 0;
-        }
-
-        active_ltcs = g->gr.num_fbps;
-
-        /* chip-specific values */
-        lts_per_ltc = 1;
-        bytes_per_line = 128;
-        cache_size = active_ltcs * lts_per_ltc * ways * sets * bytes_per_line;
-
-        return cache_size;
-}
-
-/*
- * Sets the ZBC color for the passed index.
- */
-void gk20a_ltc_set_zbc_color_entry(struct gk20a *g,
-                                   struct zbc_entry *color_val,
-                                   u32 index)
-{
-        u32 i;
-        u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
-
-        gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
-                ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
-
-        for (i = 0;
-             i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++) {
-                gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i),
-                        color_val->color_l2[i]);
-        }
-        gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r());
-}
-
-/*
- * Sets the ZBC depth for the passed index.
- */
-void gk20a_ltc_set_zbc_depth_entry(struct gk20a *g,
-                                   struct zbc_entry *depth_val,
-                                   u32 index)
-{
-        u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
-
-        gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
-                ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
-
-        gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(),
-                depth_val->depth);
-
-        gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r());
-}
-
-void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
-{
-        u32 max_size = gr->max_comptag_mem;
-        u32 max_comptag_lines = max_size << 3;
-
-        u32 compbit_base_post_divide;
-        u64 compbit_base_post_multiply64;
-        u64 compbit_store_iova;
-        u64 compbit_base_post_divide64;
-
-        if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
-                compbit_store_iova = gk20a_mem_phys(&gr->compbit_store.mem);
-        else
-                compbit_store_iova = g->ops.mm.get_iova_addr(g,
-                                gr->compbit_store.mem.priv.sgt->sgl, 0);
-
-        compbit_base_post_divide64 = compbit_store_iova >>
-                ltc_ltcs_ltss_cbc_base_alignment_shift_v();
-
-        do_div(compbit_base_post_divide64, g->ltc_count);
-        compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);
-
-        compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
-                g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
-
-        if (compbit_base_post_multiply64 < compbit_store_iova)
-                compbit_base_post_divide++;
-
-        /* Bug 1477079 indicates sw adjustment on the posted divided base. */
-        if (g->ops.ltc.cbc_fix_config)
-                compbit_base_post_divide =
-                        g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);
-
-        gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
-                compbit_base_post_divide);
-
-        gk20a_dbg(gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
-                "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
-                (u32)(compbit_store_iova >> 32),
-                (u32)(compbit_store_iova & 0xffffffff),
-                compbit_base_post_divide);
-
-        gr->compbit_store.base_hw = compbit_base_post_divide;
-
-        g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
-                        0, max_comptag_lines - 1);
-
-}
-
-#ifdef CONFIG_DEBUG_FS
-void gk20a_ltc_sync_debugfs(struct gk20a *g)
-{
-        u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();
-
-        nvgpu_spinlock_acquire(&g->debugfs_lock);
-        if (g->mm.ltc_enabled != g->mm.ltc_enabled_debug) {
-                u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());
-
-                if (g->mm.ltc_enabled_debug)
-                        /* bypass disabled (normal caching ops) */
-                        reg &= ~reg_f;
-                else
-                        /* bypass enabled (no caching) */
-                        reg |= reg_f;
-
-                gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg);
-                g->mm.ltc_enabled = g->mm.ltc_enabled_debug;
-        }
-        nvgpu_spinlock_release(&g->debugfs_lock);
-}
-#endif
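
Notes on the deleted code follow. The sizing in gk20a_ltc_init_comptags() is one forward computation (comptag lines to backing-store bytes) and one inverse (bytes back to lines after rounding). A minimal standalone sketch of the same arithmetic; the parameter values here are made up, where the driver reads them from ltc_ltcs_ltss_cbc_param_r():

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define ROUND_UP(x, y)      (DIV_ROUND_UP(x, y) * (y))

int main(void)
{
        /* Made-up stand-ins for the cbc_param fields. */
        uint32_t max_size_mb = 1024;         /* gr->max_comptag_mem */
        uint32_t comptags_per_cacheline = 4; /* hypothetical */
        uint32_t slices_per_fbp = 2;         /* hypothetical */
        uint32_t cacheline_size = 512 << 2;  /* 512 << cache_line_size */
        uint32_t num_fbps = 1;
        uint32_t align_shift = 11;           /* 2 KB base alignment */

        /* One comptag line covers 128 KB, so MB -> lines is << 3. */
        uint32_t lines = max_size_mb << 3;

        /* Forward: bytes of backing store needed for 'lines' tag lines. */
        uint32_t backing =
                DIV_ROUND_UP(lines, comptags_per_cacheline) *
                cacheline_size * slices_per_fbp * num_fbps;
        backing += num_fbps << align_shift;      /* 2 KB * num_fbps pad */
        backing = ROUND_UP(backing, 64 * 1024);  /* 64 KB multiple */

        /* Inverse: lines the rounded-up store actually covers. */
        lines = (backing * comptags_per_cacheline) /
                (cacheline_size * slices_per_fbp * num_fbps);

        printf("backing store: %u bytes, comptag lines: %u\n",
               backing, lines);
        return 0;
}

With these inputs the store rounds up from 8,390,656 to 8,454,144 bytes, which in turn covers 8256 tag lines rather than the initial 8192.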
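The per-slice wait in gk20a_ltc_cbc_ctrl() is a poll-until-clear bounded by a timeout. A self-contained sketch of the same shape, with a toy register model standing in for gk20a_readl()/nvgpu_udelay() and a plain retry budget in place of the driver's nvgpu_timeout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy register model so the sketch runs: the "hardware" stays busy
 * for a few reads, then goes idle. */
static unsigned int nreads;
static uint32_t read_reg(uint32_t offset)
{
        (void)offset;
        return nreads++ < 3 ? 0x1u : 0x0u;
}

static void udelay(unsigned int us) { (void)us; } /* delay stub */

/* Poll until the op's active bit clears, or the retry budget runs out. */
static bool wait_cbc_idle(uint32_t ctrl1, uint32_t hw_op,
                          unsigned int max_retries)
{
        unsigned int i;

        for (i = 0; i < max_retries; i++) {
                if (!(read_reg(ctrl1) & hw_op))
                        return true;    /* operation drained */
                udelay(5);
        }
        return false;                   /* caller reports -EBUSY */
}

int main(void)
{
        printf("idle: %d\n", wait_cbc_idle(0 /* placeholder offset */,
                                           0x1, 200));
        return 0;
}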
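gk20a_determine_L2_size_bytes() is a straight product of active LTCs, LTS per LTC, ways, sets and line size. A runnable sketch with assumed inputs (the 16-way mask and 64 sets are examples, not real gk20a register reads; lts_per_ltc = 1 and 128-byte lines are stated in the code above, and popcount32 stands in for the kernel's hweight32()):

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for the kernel's hweight32(). */
static unsigned int popcount32(uint32_t v)
{
        unsigned int n = 0;

        for (; v; v >>= 1)
                n += v & 1u;
        return n;
}

int main(void)
{
        uint32_t active_ways_mask = 0xffff; /* 16 ways enabled (assumed) */
        uint32_t sets = 64;                 /* the "all sets" case */
        uint32_t active_ltcs = 1;           /* g->gr.num_fbps on gk20a */
        uint32_t lts_per_ltc = 1;           /* chip-specific, per the code */
        uint32_t bytes_per_line = 128;      /* likewise */

        uint32_t cache_size = active_ltcs * lts_per_ltc *
                popcount32(active_ways_mask) * sets * bytes_per_line;

        printf("L2 size: %u bytes\n", cache_size);
        return 0;
}

Here that is 1 * 1 * 16 * 64 * 128 = 131072 bytes, i.e. a 128 KB L2.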
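Finally, the base programming in gk20a_ltc_init_cbc() divides the alignment-shifted store address by the LTC count, re-multiplies, and bumps the result when the product falls short: a ceiling division. A sketch of just that step; the function name and example IOVA are mine, and the driver itself uses do_div() and u64_lo32():

#include <stdint.h>
#include <stdio.h>

/* Ceiling division of the alignment-shifted IOVA across the LTCs,
 * mirroring the divide / re-multiply / compare / bump sequence. */
static uint32_t cbc_base_post_divide(uint64_t store_iova,
                                     unsigned int align_shift,
                                     uint32_t ltc_count)
{
        uint32_t base = (uint32_t)((store_iova >> align_shift) / ltc_count);

        /* If re-multiplying falls short of the real address, round up. */
        if (((uint64_t)base * ltc_count) << align_shift < store_iova)
                base++;
        return base;
}

int main(void)
{
        /* 2 KB alignment shift (11); the IOVA is a made-up example. */
        printf("cbc_base: 0x%x\n",
               cbc_base_post_divide(0x12345800ULL, 11, 1));
        return 0;
}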