path: root/drivers/gpu/nvgpu/common/vbios
author      Terje Bergstrom <tbergstrom@nvidia.com>    2017-02-09 11:17:47 -0500
committer   mobile promotions <svcmobile_promotions@nvidia.com>    2017-02-17 16:46:32 -0500
commit      53465def649b813987ca0d4a7ced744305204b82 (patch)
tree        cdff16681cb0442de3b1a8bd151b2a38c0bc5311 /drivers/gpu/nvgpu/common/vbios
parent      29a79e6b80c6a0da489d8b0a470c86e2fec9c355 (diff)
gpu: nvgpu: Generalize BIOS code

Most of the BIOS parsing code is not specific to any particular GPU. Move
most of the code into generic files, and leave only the chip-specific parts
that deal with microcontroller boot in chip-specific files.

As most of the parsing is generic, it does not need to be called via HALs,
so remove the HALs and turn the calls into direct function calls. All
definitions meant to be used outside the BIOS code itself are now in
<nvgpu/bios.h>.

Change-Id: Id48e94c74511d6e95645e90e5bba5c12ef8da45d
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1302222
GVS: Gerrit_Virtual_Submit
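To make the HAL removal concrete, a minimal hypothetical call-site sketch (not part of this patch; the old per-chip hook name is assumed for illustration, only nvgpu_bios_parse_rom() comes from the new header):

        /* before (assumed hook name): parsing reached through a chip-specific HAL */
        err = g->ops.bios_init(g);

        /* after: the generic parser declared in <nvgpu/bios.h> is called directly */
        err = nvgpu_bios_parse_rom(g);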
Diffstat (limited to 'drivers/gpu/nvgpu/common/vbios')
-rw-r--r--   drivers/gpu/nvgpu/common/vbios/bios.c   |   748
1 file changed, 748 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/vbios/bios.c b/drivers/gpu/nvgpu/common/vbios/bios.c
new file mode 100644
index 00000000..c31f9a29
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/vbios/bios.c
@@ -0,0 +1,748 @@
/*
 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <nvgpu/bios.h>
#include <nvgpu/hw/gp106/hw_gc6_gp106.h>

#include "gk20a/gk20a.h"

#define BIT_HEADER_ID 0xb8ff
#define BIT_HEADER_SIGNATURE 0x00544942
#define PCI_EXP_ROM_SIG 0xaa55
#define PCI_EXP_ROM_SIG_NV 0x4e56

#define INIT_DONE 0x71
#define INIT_RESUME 0x72
#define INIT_CONDITION 0x75
#define INIT_XMEMSEL_ZM_NV_REG_ARRAY 0x8f

struct condition_entry {
        u32 cond_addr;
        u32 cond_mask;
        u32 cond_compare;
} __packed;

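/*
 * Little-endian helpers for pulling 16-bit and 32-bit values out of the
 * raw VBIOS image in g->bios.data.
 */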
static u16 nvgpu_bios_rdu16(struct gk20a *g, int offset)
{
        u16 val = (g->bios.data[offset+1] << 8) + g->bios.data[offset];
        return val;
}

static u32 nvgpu_bios_rdu32(struct gk20a *g, int offset)
{
        u32 val = (g->bios.data[offset+3] << 24) +
                  (g->bios.data[offset+2] << 16) +
                  (g->bios.data[offset+1] << 8) +
                  g->bios.data[offset];
        return val;
}

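/* Layout of the BIOS Information Table (BIT) header found in the image. */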
struct bit {
        u16 id;
        u32 signature;
        u16 bcd_version;
        u8 header_size;
        u8 token_size;
        u8 token_entries;
        u8 header_checksum;
} __packed;

#define TOKEN_ID_BIOSDATA 0x42
#define TOKEN_ID_NVINIT_PTRS 0x49
#define TOKEN_ID_FALCON_DATA 0x70
#define TOKEN_ID_PERF_PTRS 0x50
#define TOKEN_ID_CLOCK_PTRS 0x43
#define TOKEN_ID_VIRT_PTRS 0x56
#define TOKEN_ID_MEMORY_PTRS 0x4D


union memory_ptrs {
        struct {
                u8 rsvd0[2];
                u8 mem_strap_data_count;
                u16 mem_strap_xlat_tbl_ptr;
                u8 rsvd1[8];
        } v1 __packed;
        struct {
                u8 mem_strap_data_count;
                u16 mem_strap_xlat_tbl_ptr;
                u8 rsvd[14];
        } v2 __packed;
};

struct biosdata {
        u32 version;
        u8 oem_version;
        u8 checksum;
        u16 int15callbackspost;
        u16 int16callbackssystem;
        u16 boardid;
        u16 framecount;
        u8 biosmoddate[8];
} __packed;

struct nvinit_ptrs {
        u16 initscript_table_ptr;
        u16 macro_index_table_ptr;
        u16 macro_table_ptr;
        u16 condition_table_ptr;
        u16 io_condition_table_ptr;
        u16 io_flag_condition_table_ptr;
        u16 init_function_table_ptr;
        u16 vbios_private_table_ptr;
        u16 data_arrays_table_ptr;
        u16 pcie_settings_script_ptr;
        u16 devinit_tables_ptr;
        u16 devinit_tables_size;
        u16 bootscripts_ptr;
        u16 bootscripts_size;
        u16 nvlink_config_data_ptr;
} __packed;

struct falcon_data_v2 {
        u32 falcon_ucode_table_ptr;
} __packed;

struct falcon_ucode_table_hdr_v1 {
        u8 version;
        u8 header_size;
        u8 entry_size;
        u8 entry_count;
        u8 desc_version;
        u8 desc_size;
} __packed;

struct falcon_ucode_table_entry_v1 {
        u8 application_id;
        u8 target_id;
        u32 desc_ptr;
} __packed;

#define TARGET_ID_PMU 0x01
#define APPLICATION_ID_DEVINIT 0x04
#define APPLICATION_ID_PRE_OS 0x01

struct falcon_ucode_desc_v1 {
        union {
                u32 v_desc;
                u32 stored_size;
        } hdr_size;
        u32 uncompressed_size;
        u32 virtual_entry;
        u32 interface_offset;
        u32 imem_phys_base;
        u32 imem_load_size;
        u32 imem_virt_base;
        u32 imem_sec_base;
        u32 imem_sec_size;
        u32 dmem_offset;
        u32 dmem_phys_base;
        u32 dmem_load_size;
} __packed;

struct application_interface_table_hdr_v1 {
        u8 version;
        u8 header_size;
        u8 entry_size;
        u8 entry_count;
} __packed;

struct application_interface_entry_v1 {
        u32 id;
        u32 dmem_offset;
} __packed;

#define APPINFO_ID_DEVINIT 0x01

struct devinit_engine_interface {
        u32 field0;
        u32 field1;
        u32 tables_phys_base;
        u32 tables_virt_base;
        u32 script_phys_base;
        u32 script_virt_base;
        u32 script_virt_entry;
        u16 script_size;
        u8 memory_strap_count;
        u8 reserved;
        u32 memory_information_table_virt_base;
        u32 empty_script_virt_base;
        u32 cond_table_virt_base;
        u32 io_cond_table_virt_base;
        u32 data_arrays_table_virt_base;
        u32 gpio_assignment_table_virt_base;
} __packed;

struct pci_exp_rom {
        u16 sig;
        u8 reserved[0x16];
        u16 pci_data_struct_ptr;
        u32 size_of_block;
} __packed;

struct pci_data_struct {
        u32 sig;
        u16 vendor_id;
        u16 device_id;
        u16 device_list_ptr;
        u16 pci_data_struct_len;
        u8 pci_data_struct_rev;
        u8 class_code[3];
        u16 image_len;
        u16 vendor_rom_rev;
        u8 code_type;
        u8 last_image;
        u16 max_runtime_image_len;
} __packed;

struct pci_ext_data_struct {
        u32 sig;
        u16 nv_pci_data_ext_rev;
        u16 nv_pci_data_ext_len;
        u16 sub_image_len;
        u8 priv_last_image;
        u8 flags;
} __packed;

static void nvgpu_bios_parse_bit(struct gk20a *g, int offset);

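/*
 * Walk the PCI expansion ROM images at the start of the VBIOS: validate the
 * ROM signature of each image, record where the expansion ROM image begins,
 * and then scan the data for the BIT header, which is handed to
 * nvgpu_bios_parse_bit().
 */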
int nvgpu_bios_parse_rom(struct gk20a *g)
{
        int offset = 0;
        int last = 0;
        bool found = false;
        unsigned int i;

        while (!last) {
                struct pci_exp_rom *pci_rom;
                struct pci_data_struct *pci_data;
                struct pci_ext_data_struct *pci_ext_data;

                pci_rom = (struct pci_exp_rom *)&g->bios.data[offset];
                gk20a_dbg_fn("pci rom sig %04x ptr %04x block %x",
                                pci_rom->sig, pci_rom->pci_data_struct_ptr,
                                pci_rom->size_of_block);

                if (pci_rom->sig != PCI_EXP_ROM_SIG &&
                    pci_rom->sig != PCI_EXP_ROM_SIG_NV) {
                        gk20a_err(g->dev, "invalid VBIOS signature");
                        return -EINVAL;
                }

                pci_data =
                        (struct pci_data_struct *)
                        &g->bios.data[offset + pci_rom->pci_data_struct_ptr];
                gk20a_dbg_fn("pci data sig %08x len %d image len %x type %x last %d max %08x",
                                pci_data->sig, pci_data->pci_data_struct_len,
                                pci_data->image_len, pci_data->code_type,
                                pci_data->last_image,
                                pci_data->max_runtime_image_len);

                if (pci_data->code_type == 0x3) {
                        pci_ext_data = (struct pci_ext_data_struct *)
                                &g->bios.data[(offset +
                                               pci_rom->pci_data_struct_ptr +
                                               pci_data->pci_data_struct_len +
                                               0xf)
                                              & ~0xf];
                        gk20a_dbg_fn("pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x",
                                        pci_ext_data->sig,
                                        pci_ext_data->nv_pci_data_ext_rev,
                                        pci_ext_data->nv_pci_data_ext_len,
                                        pci_ext_data->sub_image_len,
                                        pci_ext_data->priv_last_image,
                                        pci_ext_data->flags);

                        gk20a_dbg_fn("expansion rom offset %x",
                                        pci_data->image_len * 512);
                        g->bios.expansion_rom_offset =
                                pci_data->image_len * 512;
                        offset += pci_ext_data->sub_image_len * 512;
                        last = pci_ext_data->priv_last_image;
                } else {
                        offset += pci_data->image_len * 512;
                        last = pci_data->last_image;
                }
        }

        gk20a_dbg_info("read bios");
        for (i = 0; i < g->bios.size - 6; i++) {
                if (nvgpu_bios_rdu16(g, i) == BIT_HEADER_ID &&
                    nvgpu_bios_rdu32(g, i+2) == BIT_HEADER_SIGNATURE) {
                        nvgpu_bios_parse_bit(g, i);
                        found = true;
                }
        }

        if (!found)
                return -EINVAL;
        else
                return 0;
}

static void nvgpu_bios_parse_biosdata(struct gk20a *g, int offset)
{
        struct biosdata biosdata;

        memcpy(&biosdata, &g->bios.data[offset], sizeof(biosdata));
        gk20a_dbg_fn("bios version %x, oem version %x",
                        biosdata.version,
                        biosdata.oem_version);

        g->gpu_characteristics.vbios_version = biosdata.version;
        g->gpu_characteristics.vbios_oem_version = biosdata.oem_version;
}

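/*
 * Cache the devinit table/bootscript locations and the condition table
 * pointer from the NVINIT_PTRS token for later script execution.
 */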
static void nvgpu_bios_parse_nvinit_ptrs(struct gk20a *g, int offset)
{
        struct nvinit_ptrs nvinit_ptrs;

        memcpy(&nvinit_ptrs, &g->bios.data[offset], sizeof(nvinit_ptrs));
        gk20a_dbg_fn("devinit ptr %x size %d", nvinit_ptrs.devinit_tables_ptr,
                        nvinit_ptrs.devinit_tables_size);
        gk20a_dbg_fn("bootscripts ptr %x size %d", nvinit_ptrs.bootscripts_ptr,
                        nvinit_ptrs.bootscripts_size);

        g->bios.devinit_tables = &g->bios.data[nvinit_ptrs.devinit_tables_ptr];
        g->bios.devinit_tables_size = nvinit_ptrs.devinit_tables_size;
        g->bios.bootscripts = &g->bios.data[nvinit_ptrs.bootscripts_ptr];
        g->bios.bootscripts_size = nvinit_ptrs.bootscripts_size;
        g->bios.condition_table_ptr = nvinit_ptrs.condition_table_ptr;
}

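/*
 * The MEMORY_PTRS token exists in two layouts; pick the right one by version
 * and save the memory strap count and the strap translation table pointer.
 */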
static void nvgpu_bios_parse_memory_ptrs(struct gk20a *g, int offset, u8 version)
{
        union memory_ptrs memory_ptrs;

        if ((version < 1) || (version > 2))
                return;

        memcpy(&memory_ptrs, &g->bios.data[offset], sizeof(memory_ptrs));

        g->bios.mem_strap_data_count = (version > 1) ? memory_ptrs.v2.mem_strap_data_count :
                memory_ptrs.v1.mem_strap_data_count;
        g->bios.mem_strap_xlat_tbl_ptr = (version > 1) ? memory_ptrs.v2.mem_strap_xlat_tbl_ptr :
                memory_ptrs.v1.mem_strap_xlat_tbl_ptr;
}

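/*
 * The devinit application interface lives in the falcon DMEM image; remember
 * the physical bases of the devinit tables and script.
 */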
static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, int dmem_offset)
{
        struct devinit_engine_interface interface;

        memcpy(&interface, &g->bios.devinit.dmem[dmem_offset], sizeof(interface));
        gk20a_dbg_fn("devinit tables phys %x script phys %x size %d",
                        interface.tables_phys_base,
                        interface.script_phys_base,
                        interface.script_size);

        g->bios.devinit_tables_phys_base = interface.tables_phys_base;
        g->bios.devinit_script_phys_base = interface.script_phys_base;
}

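/*
 * Scan the application interface table and hand the devinit entry's DMEM
 * offset to nvgpu_bios_parse_devinit_appinfo().
 */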
static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
{
        struct application_interface_table_hdr_v1 hdr;
        int i;

        memcpy(&hdr, &g->bios.data[offset], sizeof(hdr));

        gk20a_dbg_fn("appInfoHdr ver %d size %d entrySize %d entryCount %d",
                        hdr.version, hdr.header_size,
                        hdr.entry_size, hdr.entry_count);

        if (hdr.version != 1)
                return 0;

        offset += sizeof(hdr);
        for (i = 0; i < hdr.entry_count; i++) {
                struct application_interface_entry_v1 entry;

                memcpy(&entry, &g->bios.data[offset], sizeof(entry));

                gk20a_dbg_fn("appInfo id %d dmem_offset %d",
                                entry.id, entry.dmem_offset);

                if (entry.id == APPINFO_ID_DEVINIT)
                        nvgpu_bios_parse_devinit_appinfo(g, entry.dmem_offset);

                offset += hdr.entry_size;
        }

        return 0;
}

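/*
 * Fill in a struct nvgpu_bios_ucode from a v1 falcon ucode descriptor: the
 * bootloader, secure ucode and DMEM portions are located relative to the
 * descriptor, then the application interface table is parsed. Compressed
 * images (stored size != uncompressed size) are rejected.
 */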
static int nvgpu_bios_parse_falcon_ucode_desc(struct gk20a *g,
                struct nvgpu_bios_ucode *ucode, int offset)
{
        struct falcon_ucode_desc_v1 desc;

        memcpy(&desc, &g->bios.data[offset], sizeof(desc));
        gk20a_dbg_info("falcon ucode desc stored size %d uncompressed size %d",
                        desc.hdr_size.stored_size, desc.uncompressed_size);
        gk20a_dbg_info("falcon ucode desc virtualEntry %x, interfaceOffset %x",
                        desc.virtual_entry, desc.interface_offset);
        gk20a_dbg_info("falcon ucode IMEM phys base %x, load size %x virt base %x sec base %x sec size %x",
                        desc.imem_phys_base, desc.imem_load_size,
                        desc.imem_virt_base, desc.imem_sec_base,
                        desc.imem_sec_size);
        gk20a_dbg_info("falcon ucode DMEM offset %d phys base %x, load size %d",
                        desc.dmem_offset, desc.dmem_phys_base,
                        desc.dmem_load_size);

        if (desc.hdr_size.stored_size != desc.uncompressed_size) {
                gk20a_dbg_info("does not match");
                return -EINVAL;
        }

        ucode->code_entry_point = desc.virtual_entry;
        ucode->bootloader = &g->bios.data[offset] + sizeof(desc);
        ucode->bootloader_phys_base = desc.imem_phys_base;
        ucode->bootloader_size = desc.imem_load_size - desc.imem_sec_size;
        ucode->ucode = ucode->bootloader + ucode->bootloader_size;
        ucode->phys_base = ucode->bootloader_phys_base + ucode->bootloader_size;
        ucode->size = desc.imem_sec_size;
        ucode->dmem = ucode->bootloader + desc.dmem_offset;
        ucode->dmem_phys_base = desc.dmem_phys_base;
        ucode->dmem_size = desc.dmem_load_size;

        return nvgpu_bios_parse_appinfo_table(g,
                        offset + sizeof(desc) +
                        desc.dmem_offset + desc.interface_offset);
}

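/*
 * Iterate the falcon ucode table and parse the PMU devinit and pre-OS
 * descriptors; if a descriptor pointer does not parse relative to the start
 * of the image, retry it relative to the expansion ROM offset.
 */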
static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
{
        struct falcon_ucode_table_hdr_v1 hdr;
        int i;

        memcpy(&hdr, &g->bios.data[offset], sizeof(hdr));
        gk20a_dbg_fn("falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d",
                        hdr.version, hdr.header_size,
                        hdr.entry_size, hdr.entry_count,
                        hdr.desc_version, hdr.desc_size);

        if (hdr.version != 1)
                return -EINVAL;

        offset += hdr.header_size;

        for (i = 0; i < hdr.entry_count; i++) {
                struct falcon_ucode_table_entry_v1 entry;

                memcpy(&entry, &g->bios.data[offset], sizeof(entry));

                gk20a_dbg_fn("falcon ucode table entry appid %x targetId %x descPtr %x",
                                entry.application_id, entry.target_id,
                                entry.desc_ptr);

                if (entry.target_id == TARGET_ID_PMU &&
                    entry.application_id == APPLICATION_ID_DEVINIT) {
                        int err;

                        err = nvgpu_bios_parse_falcon_ucode_desc(g,
                                        &g->bios.devinit, entry.desc_ptr);
                        if (err)
                                err = nvgpu_bios_parse_falcon_ucode_desc(g,
                                                &g->bios.devinit,
                                                entry.desc_ptr +
                                                g->bios.expansion_rom_offset);

                        if (err)
                                gk20a_err(dev_from_gk20a(g),
                                          "could not parse devinit ucode desc");
                } else if (entry.target_id == TARGET_ID_PMU &&
                           entry.application_id == APPLICATION_ID_PRE_OS) {
                        int err;

                        err = nvgpu_bios_parse_falcon_ucode_desc(g,
                                        &g->bios.preos, entry.desc_ptr);
                        if (err)
                                err = nvgpu_bios_parse_falcon_ucode_desc(g,
                                                &g->bios.preos,
                                                entry.desc_ptr +
                                                g->bios.expansion_rom_offset);

                        if (err)
                                gk20a_err(dev_from_gk20a(g),
                                          "could not parse preos ucode desc");
                }

                offset += hdr.entry_size;
        }

        return 0;
}

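/*
 * A v2 FALCON_DATA token just points at the falcon ucode table; the same
 * expansion-ROM fallback used for the descriptors applies here too.
 */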
static void nvgpu_bios_parse_falcon_data_v2(struct gk20a *g, int offset)
{
        struct falcon_data_v2 falcon_data;
        int err;

        memcpy(&falcon_data, &g->bios.data[offset], sizeof(falcon_data));
        gk20a_dbg_fn("falcon ucode table ptr %x",
                        falcon_data.falcon_ucode_table_ptr);
        err = nvgpu_bios_parse_falcon_ucode_table(g,
                        falcon_data.falcon_ucode_table_ptr);
        if (err)
                err = nvgpu_bios_parse_falcon_ucode_table(g,
                                falcon_data.falcon_ucode_table_ptr +
                                g->bios.expansion_rom_offset);

        if (err)
                gk20a_err(dev_from_gk20a(g),
                          "could not parse falcon ucode table");
}

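/*
 * Resolve a perf/clock/virt table pointer from a BIT token. VIRT_PTRS
 * entries are 16 bits wide, the others 32 bits; offsets that do not fit in
 * 16 bits are taken relative to the expansion ROM image.
 */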
void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
                struct bit_token *ptoken, u8 table_id)
{
        u32 perf_table_id_offset = 0;
        u8 *perf_table_ptr = NULL;
        u8 data_size = 4;

        if (ptoken != NULL) {

                if (ptoken->token_id == TOKEN_ID_VIRT_PTRS) {
                        perf_table_id_offset = *((u16 *)&g->bios.data[
                                        ptoken->data_ptr +
                                        (table_id * PERF_PTRS_WIDTH_16)]);
                        data_size = PERF_PTRS_WIDTH_16;
                } else {
                        perf_table_id_offset = *((u32 *)&g->bios.data[
                                        ptoken->data_ptr +
                                        (table_id * PERF_PTRS_WIDTH)]);
                        data_size = PERF_PTRS_WIDTH;
                }
        } else
                return (void *)perf_table_ptr;

        if (table_id < (ptoken->data_size/data_size)) {

                gk20a_dbg_info("Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x",
                                (ptoken->data_ptr +
                                 (table_id * data_size)),
                                perf_table_id_offset);

                if (perf_table_id_offset != 0) {
                        /* check if perf_table_id_offset is > 64k */
                        if (perf_table_id_offset & ~0xFFFF)
                                perf_table_ptr =
                                        &g->bios.data[g->bios.expansion_rom_offset +
                                                perf_table_id_offset];
                        else
                                perf_table_ptr =
                                        &g->bios.data[perf_table_id_offset];
                } else
                        gk20a_warn(g->dev, "PERF TABLE ID %d is NULL",
                                        table_id);
        } else
                gk20a_warn(g->dev, "INVALID PERF TABLE ID - %d ", table_id);

        return (void *)perf_table_ptr;
}

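/*
 * Walk the BIT token list and dispatch each token to its parser; perf,
 * clock and virt pointer tokens are only recorded for later lookup via
 * nvgpu_bios_get_perf_table_ptrs().
 */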
static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
{
        struct bit bit;
        struct bit_token bit_token;
        int i;

        gk20a_dbg_fn("");
        memcpy(&bit, &g->bios.data[offset], sizeof(bit));

        gk20a_dbg_info("BIT header: %04x %08x", bit.id, bit.signature);
        gk20a_dbg_info("tokens: %d entries * %d bytes",
                        bit.token_entries, bit.token_size);

        offset += bit.header_size;
        for (i = 0; i < bit.token_entries; i++) {
                memcpy(&bit_token, &g->bios.data[offset], sizeof(bit_token));

                gk20a_dbg_info("BIT token id %d ptr %d size %d ver %d",
                                bit_token.token_id, bit_token.data_ptr,
                                bit_token.data_size, bit_token.data_version);

                switch (bit_token.token_id) {
                case TOKEN_ID_BIOSDATA:
                        nvgpu_bios_parse_biosdata(g, bit_token.data_ptr);
                        break;
                case TOKEN_ID_NVINIT_PTRS:
                        nvgpu_bios_parse_nvinit_ptrs(g, bit_token.data_ptr);
                        break;
                case TOKEN_ID_FALCON_DATA:
                        if (bit_token.data_version == 2)
                                nvgpu_bios_parse_falcon_data_v2(g,
                                                bit_token.data_ptr);
                        break;
                case TOKEN_ID_PERF_PTRS:
                        g->bios.perf_token =
                                (struct bit_token *)&g->bios.data[offset];
                        break;
                case TOKEN_ID_CLOCK_PTRS:
                        g->bios.clock_token =
                                (struct bit_token *)&g->bios.data[offset];
                        break;
                case TOKEN_ID_VIRT_PTRS:
                        g->bios.virt_token =
                                (struct bit_token *)&g->bios.data[offset];
                        break;
                case TOKEN_ID_MEMORY_PTRS:
                        nvgpu_bios_parse_memory_ptrs(g, bit_token.data_ptr,
                                        bit_token.data_version);
                default:
                        break;
                }

                offset += bit.token_size;
        }
        gk20a_dbg_fn("done");
}

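/* Byte-granular VBIOS accessors (also used by the script interpreter below). */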
static u32 __nvgpu_bios_readbyte(struct gk20a *g, u32 offset)
{
        return (u32) g->bios.data[offset];
}

u8 nvgpu_bios_read_u8(struct gk20a *g, u32 offset)
{
        return (u8) __nvgpu_bios_readbyte(g, offset);
}

s8 nvgpu_bios_read_s8(struct gk20a *g, u32 offset)
{
        u32 val;
        val = __nvgpu_bios_readbyte(g, offset);
        val = val & 0x80 ? (val | ~0xff) : val;

        return (s8) val;
}

u16 nvgpu_bios_read_u16(struct gk20a *g, u32 offset)
{
        u16 val;

        val = __nvgpu_bios_readbyte(g, offset) |
              (__nvgpu_bios_readbyte(g, offset+1) << 8);

        return val;
}

u32 nvgpu_bios_read_u32(struct gk20a *g, u32 offset)
{
        u32 val;

        val = __nvgpu_bios_readbyte(g, offset) |
              (__nvgpu_bios_readbyte(g, offset+1) << 8) |
              (__nvgpu_bios_readbyte(g, offset+2) << 16) |
              (__nvgpu_bios_readbyte(g, offset+3) << 24);

        return val;
}

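/*
 * INIT_XMEMSEL_ZM_NV_REG_ARRAY handler: select the data set for the current
 * memory strap (via the strap translation table, when present) and write it
 * out to a register array with the given stride.
 */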
static void nvgpu_bios_init_xmemsel_zm_nv_reg_array(struct gk20a *g, bool *condition,
                u32 reg, u32 stride, u32 count, u32 data_table_offset)
{
        u8 i;
        u32 data, strap, index;

        if (*condition) {

                strap = gk20a_readl(g, gc6_sci_strap_r()) & 0xf;

                index = g->bios.mem_strap_xlat_tbl_ptr ?
                        nvgpu_bios_read_u8(g, g->bios.mem_strap_xlat_tbl_ptr +
                                        strap) : strap;

                for (i = 0; i < count; i++) {
                        data = nvgpu_bios_read_u32(g, data_table_offset + ((i *
                                        g->bios.mem_strap_data_count + index) *
                                        sizeof(u32)));
                        gk20a_writel(g, reg, data);
                        reg += stride;
                }
        }
}

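/*
 * INIT_CONDITION handler: read the condition table entry and clear the
 * running condition flag if (reg & mask) != compare.
 */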
static void gp106_init_condition(struct gk20a *g, bool *condition,
                u32 condition_id)
{
        struct condition_entry entry;

        entry.cond_addr = nvgpu_bios_read_u32(g, g->bios.condition_table_ptr +
                        sizeof(entry)*condition_id);
        entry.cond_mask = nvgpu_bios_read_u32(g, g->bios.condition_table_ptr +
                        sizeof(entry)*condition_id + 4);
        entry.cond_compare = nvgpu_bios_read_u32(g, g->bios.condition_table_ptr +
                        sizeof(entry)*condition_id + 8);

        if ((gk20a_readl(g, entry.cond_addr) & entry.cond_mask)
                != entry.cond_compare) {
                *condition = false;
        }
}

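/*
 * Minimal interpreter for the devinit bootscripts: only the INIT_DONE,
 * INIT_RESUME, INIT_CONDITION and INIT_XMEMSEL_ZM_NV_REG_ARRAY opcodes are
 * handled; any other opcode stops execution with -EINVAL.
 */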
int nvgpu_bios_execute_script(struct gk20a *g, u32 offset)
{
        u8 opcode;
        u32 ip;
        u32 operand[8];
        bool condition, end;
        int status = 0;

        ip = offset;
        condition = true;
        end = false;

        while (!end) {

                opcode = nvgpu_bios_read_u8(g, ip++);

                switch (opcode) {

                case INIT_XMEMSEL_ZM_NV_REG_ARRAY:
                        operand[0] = nvgpu_bios_read_u32(g, ip);
                        operand[1] = nvgpu_bios_read_u8(g, ip+4);
                        operand[2] = nvgpu_bios_read_u8(g, ip+5);
                        ip += 6;

                        nvgpu_bios_init_xmemsel_zm_nv_reg_array(g, &condition,
                                        operand[0], operand[1], operand[2], ip);
                        ip += operand[2] * sizeof(u32) *
                                g->bios.mem_strap_data_count;
                        break;

                case INIT_CONDITION:
                        operand[0] = nvgpu_bios_read_u8(g, ip);
                        ip++;

                        gp106_init_condition(g, &condition, operand[0]);
                        break;

                case INIT_RESUME:
                        condition = true;
                        break;

                case INIT_DONE:
                        end = true;
                        break;

                default:
                        gk20a_err(dev_from_gk20a(g), "opcode: 0x%02x", opcode);
                        end = true;
                        status = -EINVAL;
                        break;
                }
        }

        return status;
}