aboutsummaryrefslogtreecommitdiffstats
path: root/include/gk20a/gr_ctx_gk20a.c
diff options
context:
space:
mode:
Diffstat (limited to 'include/gk20a/gr_ctx_gk20a.c')
-rw-r--r--include/gk20a/gr_ctx_gk20a.c486
1 files changed, 486 insertions, 0 deletions
diff --git a/include/gk20a/gr_ctx_gk20a.c b/include/gk20a/gr_ctx_gk20a.c
new file mode 100644
index 0000000..8b9ac32
--- /dev/null
+++ b/include/gk20a/gr_ctx_gk20a.c
@@ -0,0 +1,486 @@
1/*
2 * GK20A Graphics Context
3 *
4 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <nvgpu/nvgpu_common.h>
26#include <nvgpu/kmem.h>
27#include <nvgpu/log.h>
28#include <nvgpu/firmware.h>
29#include <nvgpu/enabled.h>
30#include <nvgpu/io.h>
31
32#include "gk20a.h"
33#include "gr_ctx_gk20a.h"
34
35#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
36
37static int gr_gk20a_alloc_load_netlist_u32(struct gk20a *g, u32 *src, u32 len,
38 struct u32_list_gk20a *u32_list)
39{
40 u32_list->count = (len + sizeof(u32) - 1) / sizeof(u32);
41 if (!alloc_u32_list_gk20a(g, u32_list)) {
42 return -ENOMEM;
43 }
44
45 memcpy(u32_list->l, src, len);
46
47 return 0;
48}
49
50static int gr_gk20a_alloc_load_netlist_av(struct gk20a *g, u32 *src, u32 len,
51 struct av_list_gk20a *av_list)
52{
53 av_list->count = len / sizeof(struct av_gk20a);
54 if (!alloc_av_list_gk20a(g, av_list)) {
55 return -ENOMEM;
56 }
57
58 memcpy(av_list->l, src, len);
59
60 return 0;
61}
62
63static int gr_gk20a_alloc_load_netlist_av64(struct gk20a *g, u32 *src, u32 len,
64 struct av64_list_gk20a *av64_list)
65{
66 av64_list->count = len / sizeof(struct av64_gk20a);
67 if (!alloc_av64_list_gk20a(g, av64_list)) {
68 return -ENOMEM;
69 }
70
71 memcpy(av64_list->l, src, len);
72
73 return 0;
74}
75
76static int gr_gk20a_alloc_load_netlist_aiv(struct gk20a *g, u32 *src, u32 len,
77 struct aiv_list_gk20a *aiv_list)
78{
79 aiv_list->count = len / sizeof(struct aiv_gk20a);
80 if (!alloc_aiv_list_gk20a(g, aiv_list)) {
81 return -ENOMEM;
82 }
83
84 memcpy(aiv_list->l, src, len);
85
86 return 0;
87}
88
/*
 * Populate g->gr.ctx_vars by loading and parsing a netlist firmware image.
 *
 * Each recognized region of the image is copied into the matching list in
 * g->gr.ctx_vars (FECS/GPCCS ucode, SW bundle/method/ctx-load sequences,
 * and the various ctxsw register lists) via the alloc_load helpers above.
 *
 * If the chip defines a single fixed netlist (is_fw_defined()), only that
 * image is tried. Otherwise the slots NETLIST_SLOT_A..MAX_NETLIST are
 * probed in order until an image whose MAJORV region matches the major
 * revision id read back from FECS hardware is found.
 *
 * Returns 0 on success; on failure, the last recorded error (-ENOENT when
 * no candidate was usable, or e.g. -ENOMEM from a failed region load).
 */
static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
{
	struct nvgpu_firmware *netlist_fw;
	struct netlist_image *netlist = NULL;
	char name[MAX_NETLIST_NAME];
	/* netlist_num is parsed from the image but only logged here. */
	u32 i, major_v = ~0, major_v_hw, netlist_num;
	int net, max, err = -ENOENT;

	nvgpu_log_fn(g, " ");

	if (g->ops.gr_ctx.is_fw_defined()) {
		/* Single fixed image: no HW version probing. */
		net = NETLIST_FINAL;
		max = 0;
		major_v_hw = ~0;
		g->gr.ctx_vars.dynamic = false;
	} else {
		/* Probe netlist slots against the FECS major revision. */
		net = NETLIST_SLOT_A;
		max = MAX_NETLIST;
		major_v_hw = gk20a_readl(g,
			gr_fecs_ctx_state_store_major_rev_id_r());
		g->gr.ctx_vars.dynamic = true;
	}

	for (; net < max; net++) {
		if (g->ops.gr_ctx.get_netlist_name(g, net, name) != 0) {
			nvgpu_warn(g, "invalid netlist index %d", net);
			continue;
		}

		netlist_fw = nvgpu_request_firmware(g, name, 0);
		if (!netlist_fw) {
			nvgpu_warn(g, "failed to load netlist %s", name);
			continue;
		}

		netlist = (struct netlist_image *)netlist_fw->data;

		/* Walk every region and copy the ones we recognize. */
		for (i = 0; i < netlist->header.regions; i++) {
			u32 *src = (u32 *)((u8 *)netlist + netlist->regions[i].data_offset);
			u32 size = netlist->regions[i].data_size;

			switch (netlist->regions[i].region_id) {
			case NETLIST_REGIONID_FECS_UCODE_DATA:
				nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_DATA");
				err = gr_gk20a_alloc_load_netlist_u32(g,
					src, size, &g->gr.ctx_vars.ucode.fecs.data);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_FECS_UCODE_INST:
				nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_INST");
				err = gr_gk20a_alloc_load_netlist_u32(g,
					src, size, &g->gr.ctx_vars.ucode.fecs.inst);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_GPCCS_UCODE_DATA:
				nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_DATA");
				err = gr_gk20a_alloc_load_netlist_u32(g,
					src, size, &g->gr.ctx_vars.ucode.gpccs.data);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_GPCCS_UCODE_INST:
				nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_INST");
				err = gr_gk20a_alloc_load_netlist_u32(g,
					src, size, &g->gr.ctx_vars.ucode.gpccs.inst);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_SW_BUNDLE_INIT:
				nvgpu_log_info(g, "NETLIST_REGIONID_SW_BUNDLE_INIT");
				err = gr_gk20a_alloc_load_netlist_av(g,
					src, size, &g->gr.ctx_vars.sw_bundle_init);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_SW_METHOD_INIT:
				nvgpu_log_info(g, "NETLIST_REGIONID_SW_METHOD_INIT");
				err = gr_gk20a_alloc_load_netlist_av(g,
					src, size, &g->gr.ctx_vars.sw_method_init);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_SW_CTX_LOAD:
				nvgpu_log_info(g, "NETLIST_REGIONID_SW_CTX_LOAD");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.sw_ctx_load);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_SW_NON_CTX_LOAD:
				nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_LOAD");
				err = gr_gk20a_alloc_load_netlist_av(g,
					src, size, &g->gr.ctx_vars.sw_non_ctx_load);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_SWVEIDBUNDLEINIT:
				nvgpu_log_info(g,
					"NETLIST_REGIONID_SW_VEID_BUNDLE_INIT");
				err = gr_gk20a_alloc_load_netlist_av(g,
					src, size,
					&g->gr.ctx_vars.sw_veid_bundle_init);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_SYS:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_SYS");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.sys);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_GPC:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_GPC");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.gpc);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_TPC:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_TPC");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.tpc);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_ZCULL_GPC:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ZCULL_GPC");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_PPC:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PPC");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.ppc);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_PM_SYS:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_SYS");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_PM_GPC:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_GPC");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_PM_TPC:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_TPC");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_BUFFER_SIZE:
				/* Scalar region: first word is the value. */
				g->gr.ctx_vars.buffer_size = *src;
				nvgpu_log_info(g, "NETLIST_REGIONID_BUFFER_SIZE : %d",
					g->gr.ctx_vars.buffer_size);
				break;
			case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX:
				g->gr.ctx_vars.regs_base_index = *src;
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %u",
					g->gr.ctx_vars.regs_base_index);
				break;
			case NETLIST_REGIONID_MAJORV:
				/* Compared against major_v_hw after the scan. */
				major_v = *src;
				nvgpu_log_info(g, "NETLIST_REGIONID_MAJORV : %d",
					major_v);
				break;
			case NETLIST_REGIONID_NETLIST_NUM:
				netlist_num = *src;
				nvgpu_log_info(g, "NETLIST_REGIONID_NETLIST_NUM : %d",
					netlist_num);
				break;
			case NETLIST_REGIONID_CTXREG_PMPPC:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMPPC");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_NVPERF_CTXREG_SYS:
				nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_SYS");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_NVPERF_FBP_CTXREGS:
				nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_CTXREGS");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.fbp);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_NVPERF_CTXREG_GPC:
				nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_GPC");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_NVPERF_FBP_ROUTER:
				nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_ROUTER");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_NVPERF_GPC_ROUTER:
				nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_GPC_ROUTER");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_PMLTC:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMLTC");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_PMFBPA:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMFBPA");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_NVPERF_SYS_ROUTER:
				nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_SYS_ROUTER");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_NVPERF_PMA:
				nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_PMA");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_PMROP:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMROP");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_PMUCGPC:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMUCGPC");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_CTXREG_ETPC:
				nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ETPC");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size, &g->gr.ctx_vars.ctxsw_regs.etpc);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_SW_BUNDLE64_INIT:
				nvgpu_log_info(g, "NETLIST_REGIONID_SW_BUNDLE64_INIT");
				err = gr_gk20a_alloc_load_netlist_av64(g,
					src, size,
					&g->gr.ctx_vars.sw_bundle64_init);
				if (err) {
					goto clean_up;
				}
				break;
			case NETLIST_REGIONID_NVPERF_PMCAU:
				nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_PMCAU");
				err = gr_gk20a_alloc_load_netlist_aiv(g,
					src, size,
					&g->gr.ctx_vars.ctxsw_regs.pm_cau);
				if (err) {
					goto clean_up;
				}
				break;

			default:
				/* NOTE(review): logs the region index i, not
				 * the region_id — TODO confirm intended. */
				nvgpu_log_info(g, "unrecognized region %d skipped", i);
				break;
			}
		}

		/* In probing mode, reject images whose major version does
		 * not match the FECS hardware revision. */
		if (net != NETLIST_FINAL && major_v != major_v_hw) {
			nvgpu_log_info(g, "skip %s: major_v 0x%08x doesn't match hw 0x%08x",
				name, major_v, major_v_hw);
			goto clean_up;
		}

		g->gr.ctx_vars.valid = true;
		g->gr.netlist = net;

		nvgpu_release_firmware(g, netlist_fw);
		nvgpu_log_fn(g, "done");
		goto done;

clean_up:
		/*
		 * Free everything that may have been loaded from this
		 * candidate image, then fall through to try the next slot.
		 *
		 * NOTE(review): the freed .l pointers are not reset to
		 * NULL; if a later candidate reaches clean_up before
		 * re-allocating a given list, that list is freed again
		 * through a stale pointer — TODO confirm.
		 */
		g->gr.ctx_vars.valid = false;
		nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.inst.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.data.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.inst.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.data.l);
		nvgpu_kfree(g, g->gr.ctx_vars.sw_bundle_init.l);
		nvgpu_kfree(g, g->gr.ctx_vars.sw_method_init.l);
		nvgpu_kfree(g, g->gr.ctx_vars.sw_ctx_load.l);
		nvgpu_kfree(g, g->gr.ctx_vars.sw_non_ctx_load.l);
		nvgpu_kfree(g, g->gr.ctx_vars.sw_veid_bundle_init.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.sys.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.gpc.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.tpc.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.zcull_gpc.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.ppc.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_sys.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_gpc.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_tpc.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ppc.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_sys.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.fbp.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_gpc.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.fbp_router.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.gpc_router.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ltc.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_fbpa.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_sys_router.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_pma.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_rop.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ucgpc.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.etpc.l);
		nvgpu_kfree(g, g->gr.ctx_vars.sw_bundle64_init.l);
		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_cau.l);
		nvgpu_release_firmware(g, netlist_fw);
		err = -ENOENT;
	}

done:
	if (g->gr.ctx_vars.valid) {
		nvgpu_log_info(g, "netlist image %s loaded", name);
		return 0;
	} else {
		nvgpu_err(g, "failed to load netlist image!!");
		return err;
	}
}
478
479int gr_gk20a_init_ctx_vars(struct gk20a *g, struct gr_gk20a *gr)
480{
481 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
482 return gr_gk20a_init_ctx_vars_sim(g, gr);
483 } else {
484 return gr_gk20a_init_ctx_vars_fw(g, gr);
485 }
486}