summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/vbios/bios.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/vbios/bios.c')
-rw-r--r--drivers/gpu/nvgpu/common/vbios/bios.c845
1 files changed, 845 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/vbios/bios.c b/drivers/gpu/nvgpu/common/vbios/bios.c
new file mode 100644
index 00000000..fa700a66
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/vbios/bios.c
@@ -0,0 +1,845 @@
1/*
2 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/bios.h>
24#include <nvgpu/hw/gp106/hw_gc6_gp106.h>
25
26#include "gk20a/gk20a.h"
27
/* BIT header magic: id word and "BIT\0" signature scanned for in the image */
#define BIT_HEADER_ID 0xb8ff
#define BIT_HEADER_SIGNATURE 0x00544942
/* PCI expansion ROM signatures: standard 0xaa55 and NVIDIA's "NV" variant */
#define PCI_EXP_ROM_SIG 0xaa55
#define PCI_EXP_ROM_SIG_NV 0x4e56

/* Devinit script opcodes interpreted by nvgpu_bios_execute_script() */
#define INIT_DONE 0x71
#define INIT_RESUME 0x72
#define INIT_CONDITION 0x75
#define INIT_XMEMSEL_ZM_NV_REG_ARRAY 0x8f

/*
 * One entry of the VBIOS condition table: a register/mask/compare triple
 * evaluated by gp106_init_condition().
 */
struct condition_entry {
	u32 cond_addr;		/* register offset read via gk20a_readl() */
	u32 cond_mask;		/* mask applied to the register value */
	u32 cond_compare;	/* value the masked read must equal */
} __packed;
43
44static u16 nvgpu_bios_rdu16(struct gk20a *g, int offset)
45{
46 u16 val = (g->bios.data[offset+1] << 8) + g->bios.data[offset];
47 return val;
48}
49
50static u32 nvgpu_bios_rdu32(struct gk20a *g, int offset)
51{
52 u32 val = (g->bios.data[offset+3] << 24) +
53 (g->bios.data[offset+2] << 16) +
54 (g->bios.data[offset+1] << 8) +
55 g->bios.data[offset];
56 return val;
57}
58
/*
 * BIOS Information Table (BIT) header, located by scanning for
 * BIT_HEADER_ID / BIT_HEADER_SIGNATURE in nvgpu_bios_parse_rom().
 */
struct bit {
	u16 id;
	u32 signature;
	u16 bcd_version;
	u8 header_size;		/* bytes from header start to the token list */
	u8 token_size;		/* stride between consecutive token entries */
	u8 token_entries;	/* number of tokens following the header */
	u8 header_checksum;
} __packed;
68
/* BIT token ids dispatched in nvgpu_bios_parse_bit() */
#define TOKEN_ID_BIOSDATA 0x42
#define TOKEN_ID_NVINIT_PTRS 0x49
#define TOKEN_ID_FALCON_DATA 0x70
#define TOKEN_ID_PERF_PTRS 0x50
#define TOKEN_ID_CLOCK_PTRS 0x43
#define TOKEN_ID_VIRT_PTRS 0x56
#define TOKEN_ID_MEMORY_PTRS 0x4D

/* Known layouts of the MEMORY_PTRS token payload */
#define MEMORY_PTRS_V1 1
#define MEMORY_PTRS_V2 2

/* Version 1 MEMORY_PTRS payload */
struct memory_ptrs_v1 {
	u8 rsvd0[2];
	u8 mem_strap_data_count;	/* entries per strap-indexed row */
	u16 mem_strap_xlat_tbl_ptr;	/* strap translation table offset */
	u8 rsvd1[8];
} __packed;

/* Version 2 MEMORY_PTRS payload */
struct memory_ptrs_v2 {
	u8 mem_strap_data_count;	/* entries per strap-indexed row */
	u16 mem_strap_xlat_tbl_ptr;	/* strap translation table offset */
	u8 rsvd[14];
} __packed;
92
/* Payload of the BIOSDATA BIT token; source of the VBIOS version info. */
struct biosdata {
	u32 version;		/* stored in g->bios.vbios_version */
	u8 oem_version;		/* stored in g->bios.vbios_oem_version */
	u8 checksum;
	u16 int15callbackspost;
	u16 int16callbackssystem;
	u16 boardid;
	u16 framecount;
	u8 biosmoddate[8];
} __packed;
103
/*
 * Payload of the NVINIT_PTRS BIT token: offsets (into the bios image) of
 * the various init-time tables.  Only the devinit tables, bootscripts and
 * condition table offsets are consumed here.
 */
struct nvinit_ptrs {
	u16 initscript_table_ptr;
	u16 macro_index_table_ptr;
	u16 macro_table_ptr;
	u16 condition_table_ptr;	/* used by gp106_init_condition() */
	u16 io_condition_table_ptr;
	u16 io_flag_condition_table_ptr;
	u16 init_function_table_ptr;
	u16 vbios_private_table_ptr;
	u16 data_arrays_table_ptr;
	u16 pcie_settings_script_ptr;
	u16 devinit_tables_ptr;
	u16 devinit_tables_size;
	u16 bootscripts_ptr;
	u16 bootscripts_size;
	u16 nvlink_config_data_ptr;
} __packed;
121
/* Payload of the FALCON_DATA BIT token, version 2. */
struct falcon_data_v2 {
	u32 falcon_ucode_table_ptr;	/* offset of the falcon ucode table */
} __packed;

/* Header of the falcon ucode table pointed to by falcon_data_v2. */
struct falcon_ucode_table_hdr_v1 {
	u8 version;
	u8 header_size;		/* bytes to skip to reach the entries */
	u8 entry_size;		/* stride between entries */
	u8 entry_count;
	u8 desc_version;
	u8 desc_size;
} __packed;

/* One falcon ucode table entry; desc_ptr locates the ucode descriptor. */
struct falcon_ucode_table_entry_v1 {
	u8 application_id;	/* e.g. APPLICATION_ID_DEVINIT / _PRE_OS */
	u8 target_id;		/* e.g. TARGET_ID_PMU */
	u32 desc_ptr;		/* bios-image offset of the descriptor */
} __packed;
140
/* Falcon ucode table entry selectors handled by this parser */
#define TARGET_ID_PMU 0x01
#define APPLICATION_ID_DEVINIT 0x04
#define APPLICATION_ID_PRE_OS 0x01

/* Bit 0 of the first descriptor word: set on v2+ descriptors */
#define FALCON_UCODE_FLAGS_VERSION_AVAILABLE 0x1
#define FALCON_UCODE_IS_VERSION_AVAILABLE(hdr) \
	((hdr.v2.v_desc & FALCON_UCODE_FLAGS_VERSION_AVAILABLE) == \
	FALCON_UCODE_FLAGS_VERSION_AVAILABLE)

/*
 * version is embedded in bits 8:15 of the header on version 2+
 * and the header length in bits 16:31
 */

#define FALCON_UCODE_GET_VERSION(hdr) \
	((hdr.v2.v_desc >> 8) & 0xff)

#define FALCON_UCODE_GET_DESC_SIZE(hdr) \
	((hdr.v2.v_desc >> 16) & 0xffff)
160
/*
 * Version 1 falcon ucode descriptor.  The first word is overloaded: it is
 * either the flags/version word (v_desc) or the stored size, depending on
 * whether FALCON_UCODE_FLAGS_VERSION_AVAILABLE is set.
 */
struct falcon_ucode_desc_v1 {
	union {
		u32 v_desc;		/* flags/version word (v2+ detection) */
		u32 stored_size;	/* image size when no version word */
	} hdr_size;
	u32 uncompressed_size;
	u32 virtual_entry;
	u32 interface_offset;
	u32 imem_phys_base;
	u32 imem_load_size;
	u32 imem_virt_base;
	u32 imem_sec_base;
	u32 imem_sec_size;
	u32 dmem_offset;
	u32 dmem_phys_base;
	u32 dmem_load_size;
} __packed;

/*
 * Version 2 falcon ucode descriptor; v1 descriptors are normalized into
 * this layout by nvgpu_bios_parse_falcon_ucode_desc().
 */
struct falcon_ucode_desc_v2 {
	u32 v_desc;		/* flags (bit 0), version (8:15), size (16:31) */
	u32 stored_size;
	u32 uncompressed_size;
	u32 virtual_entry;
	u32 interface_offset;
	u32 imem_phys_base;
	u32 imem_load_size;
	u32 imem_virt_base;
	u32 imem_sec_base;
	u32 imem_sec_size;
	u32 dmem_offset;
	u32 dmem_phys_base;
	u32 dmem_load_size;
	u32 alt_imem_load_size;
	u32 alt_dmem_load_size;
} __packed;

/* Raw descriptor bytes viewed as either version. */
union falcon_ucode_desc {
	struct falcon_ucode_desc_v1 v1;
	struct falcon_ucode_desc_v2 v2;
};
201
/* Header of the application interface table inside a falcon ucode DMEM. */
struct application_interface_table_hdr_v1 {
	u8 version;
	u8 header_size;		/* bytes to skip to reach the entries */
	u8 entry_size;		/* stride between entries */
	u8 entry_count;
} __packed;

/* One application interface entry: id plus its DMEM payload offset. */
struct application_interface_entry_v1 {
	u32 id;			/* e.g. APPINFO_ID_DEVINIT */
	u32 dmem_offset;	/* offset of the payload within DMEM */
} __packed;

#define APPINFO_ID_DEVINIT 0x01
215
/*
 * Devinit application interface payload (found via APPINFO_ID_DEVINIT).
 * Only version 1 is consumed; the physical table/script bases are kept
 * in the gk20a bios state.
 */
struct devinit_engine_interface {
	u16 version;
	u16 size;
	u16 application_version;
	u16 application_features;
	u32 tables_phys_base;	/* stored in g->bios.devinit_tables_phys_base */
	u32 tables_virt_base;
	u32 script_phys_base;	/* stored in g->bios.devinit_script_phys_base */
	u32 script_virt_base;
	u32 script_virt_entry;
	u16 script_size;
	u8 memory_strap_count;
	u8 reserved;
	u32 memory_information_table_virt_base;
	u32 empty_script_virt_base;
	u32 cond_table_virt_base;
	u32 io_cond_table_virt_base;
	u32 data_arrays_table_virt_base;
	u32 gpio_assignment_table_virt_base;
} __packed;
236
/* PCI expansion ROM image header (starts each ROM image in the chain). */
struct pci_exp_rom {
	u16 sig;		/* PCI_EXP_ROM_SIG or PCI_EXP_ROM_SIG_NV */
	u8 reserved[0x16];
	u16 pci_data_struct_ptr;	/* offset of the PCI data structure */
	u32 size_of_block;
} __packed;

/* PCI data structure following each expansion ROM header. */
struct pci_data_struct {
	u32 sig;
	u16 vendor_id;
	u16 device_id;
	u16 device_list_ptr;
	u16 pci_data_struct_len;
	u8 pci_data_struct_rev;
	u8 class_code[3];
	u16 image_len;		/* image length in 512-byte units */
	u16 vendor_rom_rev;
	u8 code_type;		/* 0x3 selects the NV extended image path */
	u8 last_image;		/* non-zero terminates the image chain */
	u16 max_runtime_image_len;
} __packed;

/* NVIDIA extension of the PCI data structure (code_type == 0x3 images). */
struct pci_ext_data_struct {
	u32 sig;
	u16 nv_pci_data_ext_rev;
	u16 nv_pci_data_ext_len;
	u16 sub_image_len;	/* sub-image length in 512-byte units */
	u8 priv_last_image;	/* non-zero terminates the image chain */
	u8 flags;
} __packed;
267
268static void nvgpu_bios_parse_bit(struct gk20a *g, int offset);
269
270int nvgpu_bios_parse_rom(struct gk20a *g)
271{
272 int offset = 0;
273 int last = 0;
274 bool found = false;
275 unsigned int i;
276
277 while (!last) {
278 struct pci_exp_rom *pci_rom;
279 struct pci_data_struct *pci_data;
280 struct pci_ext_data_struct *pci_ext_data;
281
282 pci_rom = (struct pci_exp_rom *)&g->bios.data[offset];
283 gk20a_dbg_fn("pci rom sig %04x ptr %04x block %x",
284 pci_rom->sig, pci_rom->pci_data_struct_ptr,
285 pci_rom->size_of_block);
286
287 if (pci_rom->sig != PCI_EXP_ROM_SIG &&
288 pci_rom->sig != PCI_EXP_ROM_SIG_NV) {
289 nvgpu_err(g, "invalid VBIOS signature");
290 return -EINVAL;
291 }
292
293 pci_data =
294 (struct pci_data_struct *)
295 &g->bios.data[offset + pci_rom->pci_data_struct_ptr];
296 gk20a_dbg_fn("pci data sig %08x len %d image len %x type %x last %d max %08x",
297 pci_data->sig, pci_data->pci_data_struct_len,
298 pci_data->image_len, pci_data->code_type,
299 pci_data->last_image,
300 pci_data->max_runtime_image_len);
301
302 if (pci_data->code_type == 0x3) {
303 pci_ext_data = (struct pci_ext_data_struct *)
304 &g->bios.data[(offset +
305 pci_rom->pci_data_struct_ptr +
306 pci_data->pci_data_struct_len +
307 0xf)
308 & ~0xf];
309 gk20a_dbg_fn("pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x",
310 pci_ext_data->sig,
311 pci_ext_data->nv_pci_data_ext_rev,
312 pci_ext_data->nv_pci_data_ext_len,
313 pci_ext_data->sub_image_len,
314 pci_ext_data->priv_last_image,
315 pci_ext_data->flags);
316
317 gk20a_dbg_fn("expansion rom offset %x",
318 pci_data->image_len * 512);
319 g->bios.expansion_rom_offset =
320 pci_data->image_len * 512;
321 offset += pci_ext_data->sub_image_len * 512;
322 last = pci_ext_data->priv_last_image;
323 } else {
324 offset += pci_data->image_len * 512;
325 last = pci_data->last_image;
326 }
327 }
328
329 gk20a_dbg_info("read bios");
330 for (i = 0; i < g->bios.size - 6; i++) {
331 if (nvgpu_bios_rdu16(g, i) == BIT_HEADER_ID &&
332 nvgpu_bios_rdu32(g, i+2) == BIT_HEADER_SIGNATURE) {
333 nvgpu_bios_parse_bit(g, i);
334 found = true;
335 }
336 }
337
338 if (!found)
339 return -EINVAL;
340 else
341 return 0;
342}
343
344static void nvgpu_bios_parse_biosdata(struct gk20a *g, int offset)
345{
346 struct biosdata biosdata;
347
348 memcpy(&biosdata, &g->bios.data[offset], sizeof(biosdata));
349 gk20a_dbg_fn("bios version %x, oem version %x",
350 biosdata.version,
351 biosdata.oem_version);
352
353 g->bios.vbios_version = biosdata.version;
354 g->bios.vbios_oem_version = biosdata.oem_version;
355}
356
357static void nvgpu_bios_parse_nvinit_ptrs(struct gk20a *g, int offset)
358{
359 struct nvinit_ptrs nvinit_ptrs;
360
361 memcpy(&nvinit_ptrs, &g->bios.data[offset], sizeof(nvinit_ptrs));
362 gk20a_dbg_fn("devinit ptr %x size %d", nvinit_ptrs.devinit_tables_ptr,
363 nvinit_ptrs.devinit_tables_size);
364 gk20a_dbg_fn("bootscripts ptr %x size %d", nvinit_ptrs.bootscripts_ptr,
365 nvinit_ptrs.bootscripts_size);
366
367 g->bios.devinit_tables = &g->bios.data[nvinit_ptrs.devinit_tables_ptr];
368 g->bios.devinit_tables_size = nvinit_ptrs.devinit_tables_size;
369 g->bios.bootscripts = &g->bios.data[nvinit_ptrs.bootscripts_ptr];
370 g->bios.bootscripts_size = nvinit_ptrs.bootscripts_size;
371 g->bios.condition_table_ptr = nvinit_ptrs.condition_table_ptr;
372}
373
374static void nvgpu_bios_parse_memory_ptrs(struct gk20a *g, int offset, u8 version)
375{
376 struct memory_ptrs_v1 v1;
377 struct memory_ptrs_v2 v2;
378
379 switch (version) {
380 case MEMORY_PTRS_V1:
381 memcpy(&v1, &g->bios.data[offset], sizeof(v1));
382 g->bios.mem_strap_data_count = v1.mem_strap_data_count;
383 g->bios.mem_strap_xlat_tbl_ptr = v1.mem_strap_xlat_tbl_ptr;
384 return;
385 case MEMORY_PTRS_V2:
386 memcpy(&v2, &g->bios.data[offset], sizeof(v2));
387 g->bios.mem_strap_data_count = v2.mem_strap_data_count;
388 g->bios.mem_strap_xlat_tbl_ptr = v2.mem_strap_xlat_tbl_ptr;
389 return;
390 default:
391 nvgpu_err(g, "unknown vbios memory table version %x", version);
392 return;
393 }
394}
395
396static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, int dmem_offset)
397{
398 struct devinit_engine_interface interface;
399
400 memcpy(&interface, &g->bios.devinit.dmem[dmem_offset], sizeof(interface));
401 gk20a_dbg_fn("devinit version %x tables phys %x script phys %x size %d",
402 interface.version,
403 interface.tables_phys_base,
404 interface.script_phys_base,
405 interface.script_size);
406
407 if (interface.version != 1)
408 return;
409 g->bios.devinit_tables_phys_base = interface.tables_phys_base;
410 g->bios.devinit_script_phys_base = interface.script_phys_base;
411}
412
413static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
414{
415 struct application_interface_table_hdr_v1 hdr;
416 int i;
417
418 memcpy(&hdr, &g->bios.data[offset], sizeof(hdr));
419
420 gk20a_dbg_fn("appInfoHdr ver %d size %d entrySize %d entryCount %d",
421 hdr.version, hdr.header_size,
422 hdr.entry_size, hdr.entry_count);
423
424 if (hdr.version != 1)
425 return 0;
426
427 offset += sizeof(hdr);
428 for (i = 0; i < hdr.entry_count; i++) {
429 struct application_interface_entry_v1 entry;
430
431 memcpy(&entry, &g->bios.data[offset], sizeof(entry));
432
433 gk20a_dbg_fn("appInfo id %d dmem_offset %d",
434 entry.id, entry.dmem_offset);
435
436 if (entry.id == APPINFO_ID_DEVINIT)
437 nvgpu_bios_parse_devinit_appinfo(g, entry.dmem_offset);
438
439 offset += hdr.entry_size;
440 }
441
442 return 0;
443}
444
/*
 * Parse a falcon ucode descriptor (v1 or v2) at @offset and fill @ucode
 * with pointers into the bios image for the bootloader, secure ucode and
 * DMEM sections, plus their physical bases and sizes.
 *
 * Returns -EINVAL for an unknown descriptor version or when stored and
 * uncompressed sizes disagree (compressed images are not handled);
 * otherwise forwards the result of parsing the trailing appinfo table.
 */
static int nvgpu_bios_parse_falcon_ucode_desc(struct gk20a *g,
		struct nvgpu_bios_ucode *ucode, int offset)
{
	union falcon_ucode_desc udesc;
	struct falcon_ucode_desc_v2 desc;
	u8 version;
	u16 desc_size;

	memcpy(&udesc, &g->bios.data[offset], sizeof(udesc));

	/* v2+ descriptors embed version and size in the first word */
	if (FALCON_UCODE_IS_VERSION_AVAILABLE(udesc)) {
		version = FALCON_UCODE_GET_VERSION(udesc);
		desc_size = FALCON_UCODE_GET_DESC_SIZE(udesc);
	} else {
		version = 1;
		desc_size = sizeof(udesc.v1);
	}

	switch (version) {
	case 1:
		/* normalize the v1 layout into the v2 struct */
		desc.stored_size = udesc.v1.hdr_size.stored_size;
		desc.uncompressed_size = udesc.v1.uncompressed_size;
		desc.virtual_entry = udesc.v1.virtual_entry;
		desc.interface_offset = udesc.v1.interface_offset;
		desc.imem_phys_base = udesc.v1.imem_phys_base;
		desc.imem_load_size = udesc.v1.imem_load_size;
		desc.imem_virt_base = udesc.v1.imem_virt_base;
		desc.imem_sec_base = udesc.v1.imem_sec_base;
		desc.imem_sec_size = udesc.v1.imem_sec_size;
		desc.dmem_offset = udesc.v1.dmem_offset;
		desc.dmem_phys_base = udesc.v1.dmem_phys_base;
		desc.dmem_load_size = udesc.v1.dmem_load_size;
		break;
	case 2:
		memcpy(&desc, &udesc, sizeof(udesc.v2));
		break;
	default:
		gk20a_dbg_info("invalid version");
		return -EINVAL;
	}

	gk20a_dbg_info("falcon ucode desc version %x len %x", version, desc_size);

	gk20a_dbg_info("falcon ucode desc stored size %x uncompressed size %x",
			desc.stored_size, desc.uncompressed_size);
	gk20a_dbg_info("falcon ucode desc virtualEntry %x, interfaceOffset %x",
			desc.virtual_entry, desc.interface_offset);
	gk20a_dbg_info("falcon ucode IMEM phys base %x, load size %x virt base %x sec base %x sec size %x",
			desc.imem_phys_base, desc.imem_load_size,
			desc.imem_virt_base, desc.imem_sec_base,
			desc.imem_sec_size);
	gk20a_dbg_info("falcon ucode DMEM offset %x phys base %x, load size %x",
			desc.dmem_offset, desc.dmem_phys_base,
			desc.dmem_load_size);

	/* a compressed image (stored != uncompressed) is not supported */
	if (desc.stored_size != desc.uncompressed_size) {
		gk20a_dbg_info("does not match");
		return -EINVAL;
	}

	/* image layout after the descriptor: bootloader, secure ucode, DMEM */
	ucode->code_entry_point = desc.virtual_entry;
	ucode->bootloader = &g->bios.data[offset] + desc_size;
	ucode->bootloader_phys_base = desc.imem_phys_base;
	ucode->bootloader_size = desc.imem_load_size - desc.imem_sec_size;
	ucode->ucode = ucode->bootloader + ucode->bootloader_size;
	ucode->phys_base = ucode->bootloader_phys_base + ucode->bootloader_size;
	ucode->size = desc.imem_sec_size;
	ucode->dmem = ucode->bootloader + desc.dmem_offset;
	ucode->dmem_phys_base = desc.dmem_phys_base;
	ucode->dmem_size = desc.dmem_load_size;

	return nvgpu_bios_parse_appinfo_table(g,
			offset + desc_size +
			desc.dmem_offset + desc.interface_offset);
}
520
521static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
522{
523 struct falcon_ucode_table_hdr_v1 hdr;
524 int i;
525
526 memcpy(&hdr, &g->bios.data[offset], sizeof(hdr));
527 gk20a_dbg_fn("falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d",
528 hdr.version, hdr.header_size,
529 hdr.entry_size, hdr.entry_count,
530 hdr.desc_version, hdr.desc_size);
531
532 if (hdr.version != 1)
533 return -EINVAL;
534
535 offset += hdr.header_size;
536
537 for (i = 0; i < hdr.entry_count; i++) {
538 struct falcon_ucode_table_entry_v1 entry;
539
540 memcpy(&entry, &g->bios.data[offset], sizeof(entry));
541
542 gk20a_dbg_fn("falcon ucode table entry appid %x targetId %x descPtr %x",
543 entry.application_id, entry.target_id,
544 entry.desc_ptr);
545
546 if (entry.target_id == TARGET_ID_PMU &&
547 entry.application_id == APPLICATION_ID_DEVINIT) {
548 int err;
549
550 err = nvgpu_bios_parse_falcon_ucode_desc(g,
551 &g->bios.devinit, entry.desc_ptr);
552 if (err)
553 err = nvgpu_bios_parse_falcon_ucode_desc(g,
554 &g->bios.devinit,
555 entry.desc_ptr +
556 g->bios.expansion_rom_offset);
557
558 if (err)
559 nvgpu_err(g,
560 "could not parse devinit ucode desc");
561 } else if (entry.target_id == TARGET_ID_PMU &&
562 entry.application_id == APPLICATION_ID_PRE_OS) {
563 int err;
564
565 err = nvgpu_bios_parse_falcon_ucode_desc(g,
566 &g->bios.preos, entry.desc_ptr);
567 if (err)
568 err = nvgpu_bios_parse_falcon_ucode_desc(g,
569 &g->bios.preos,
570 entry.desc_ptr +
571 g->bios.expansion_rom_offset);
572
573 if (err)
574 nvgpu_err(g,
575 "could not parse preos ucode desc");
576 }
577
578 offset += hdr.entry_size;
579 }
580
581 return 0;
582}
583
584static void nvgpu_bios_parse_falcon_data_v2(struct gk20a *g, int offset)
585{
586 struct falcon_data_v2 falcon_data;
587 int err;
588
589 memcpy(&falcon_data, &g->bios.data[offset], sizeof(falcon_data));
590 gk20a_dbg_fn("falcon ucode table ptr %x",
591 falcon_data.falcon_ucode_table_ptr);
592 err = nvgpu_bios_parse_falcon_ucode_table(g,
593 falcon_data.falcon_ucode_table_ptr);
594 if (err)
595 err = nvgpu_bios_parse_falcon_ucode_table(g,
596 falcon_data.falcon_ucode_table_ptr +
597 g->bios.expansion_rom_offset);
598
599 if (err)
600 nvgpu_err(g, "could not parse falcon ucode table");
601}
602
603void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
604 struct bit_token *ptoken, u8 table_id)
605{
606 u32 perf_table_id_offset = 0;
607 u8 *perf_table_ptr = NULL;
608 u8 data_size = 4;
609
610 if (ptoken != NULL) {
611
612 if (ptoken->token_id == TOKEN_ID_VIRT_PTRS) {
613 perf_table_id_offset = *((u16 *)&g->bios.data[
614 ptoken->data_ptr +
615 (table_id * PERF_PTRS_WIDTH_16)]);
616 data_size = PERF_PTRS_WIDTH_16;
617 } else {
618 perf_table_id_offset = *((u32 *)&g->bios.data[
619 ptoken->data_ptr +
620 (table_id * PERF_PTRS_WIDTH)]);
621 data_size = PERF_PTRS_WIDTH;
622 }
623 } else
624 return (void *)perf_table_ptr;
625
626 if (table_id < (ptoken->data_size/data_size)) {
627
628 gk20a_dbg_info("Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x",
629 (ptoken->data_ptr +
630 (table_id * data_size)),
631 perf_table_id_offset);
632
633 if (perf_table_id_offset != 0) {
634 /* check is perf_table_id_offset is > 64k */
635 if (perf_table_id_offset & ~0xFFFF)
636 perf_table_ptr =
637 &g->bios.data[g->bios.expansion_rom_offset +
638 perf_table_id_offset];
639 else
640 perf_table_ptr =
641 &g->bios.data[perf_table_id_offset];
642 } else
643 nvgpu_warn(g, "PERF TABLE ID %d is NULL",
644 table_id);
645 } else
646 nvgpu_warn(g, "INVALID PERF TABLE ID - %d ", table_id);
647
648 return (void *)perf_table_ptr;
649}
650
651static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
652{
653 struct bit bit;
654 struct bit_token bit_token;
655 int i;
656
657 gk20a_dbg_fn("");
658 memcpy(&bit, &g->bios.data[offset], sizeof(bit));
659
660 gk20a_dbg_info("BIT header: %04x %08x", bit.id, bit.signature);
661 gk20a_dbg_info("tokens: %d entries * %d bytes",
662 bit.token_entries, bit.token_size);
663
664 offset += bit.header_size;
665 for (i = 0; i < bit.token_entries; i++) {
666 memcpy(&bit_token, &g->bios.data[offset], sizeof(bit_token));
667
668 gk20a_dbg_info("BIT token id %d ptr %d size %d ver %d",
669 bit_token.token_id, bit_token.data_ptr,
670 bit_token.data_size, bit_token.data_version);
671
672 switch (bit_token.token_id) {
673 case TOKEN_ID_BIOSDATA:
674 nvgpu_bios_parse_biosdata(g, bit_token.data_ptr);
675 break;
676 case TOKEN_ID_NVINIT_PTRS:
677 nvgpu_bios_parse_nvinit_ptrs(g, bit_token.data_ptr);
678 break;
679 case TOKEN_ID_FALCON_DATA:
680 if (bit_token.data_version == 2)
681 nvgpu_bios_parse_falcon_data_v2(g,
682 bit_token.data_ptr);
683 break;
684 case TOKEN_ID_PERF_PTRS:
685 g->bios.perf_token =
686 (struct bit_token *)&g->bios.data[offset];
687 break;
688 case TOKEN_ID_CLOCK_PTRS:
689 g->bios.clock_token =
690 (struct bit_token *)&g->bios.data[offset];
691 break;
692 case TOKEN_ID_VIRT_PTRS:
693 g->bios.virt_token =
694 (struct bit_token *)&g->bios.data[offset];
695 break;
696 case TOKEN_ID_MEMORY_PTRS:
697 nvgpu_bios_parse_memory_ptrs(g, bit_token.data_ptr,
698 bit_token.data_version);
699 default:
700 break;
701 }
702
703 offset += bit.token_size;
704 }
705 gk20a_dbg_fn("done");
706}
707
708static u32 __nvgpu_bios_readbyte(struct gk20a *g, u32 offset)
709{
710 return (u32) g->bios.data[offset];
711}
712
713u8 nvgpu_bios_read_u8(struct gk20a *g, u32 offset)
714{
715 return (u8) __nvgpu_bios_readbyte(g, offset);
716}
717
718s8 nvgpu_bios_read_s8(struct gk20a *g, u32 offset)
719{
720 u32 val;
721 val = __nvgpu_bios_readbyte(g, offset);
722 val = val & 0x80 ? (val | ~0xff) : val;
723
724 return (s8) val;
725}
726
727u16 nvgpu_bios_read_u16(struct gk20a *g, u32 offset)
728{
729 u16 val;
730
731 val = __nvgpu_bios_readbyte(g, offset) |
732 (__nvgpu_bios_readbyte(g, offset+1) << 8);
733
734 return val;
735}
736
737u32 nvgpu_bios_read_u32(struct gk20a *g, u32 offset)
738{
739 u32 val;
740
741 val = __nvgpu_bios_readbyte(g, offset) |
742 (__nvgpu_bios_readbyte(g, offset+1) << 8) |
743 (__nvgpu_bios_readbyte(g, offset+2) << 16) |
744 (__nvgpu_bios_readbyte(g, offset+3) << 24);
745
746 return val;
747}
748
749static void nvgpu_bios_init_xmemsel_zm_nv_reg_array(struct gk20a *g, bool *condition,
750 u32 reg, u32 stride, u32 count, u32 data_table_offset)
751{
752 u8 i;
753 u32 data, strap, index;
754
755 if (*condition) {
756
757 strap = gk20a_readl(g, gc6_sci_strap_r()) & 0xf;
758
759 index = g->bios.mem_strap_xlat_tbl_ptr ?
760 nvgpu_bios_read_u8(g, g->bios.mem_strap_xlat_tbl_ptr +
761 strap) : strap;
762
763 for (i = 0; i < count; i++) {
764 data = nvgpu_bios_read_u32(g, data_table_offset + ((i *
765 g->bios.mem_strap_data_count + index) *
766 sizeof(u32)));
767 gk20a_writel(g, reg, data);
768 reg += stride;
769 }
770 }
771}
772
773static void gp106_init_condition(struct gk20a *g, bool *condition,
774 u32 condition_id)
775{
776 struct condition_entry entry;
777
778 entry.cond_addr = nvgpu_bios_read_u32(g, g->bios.condition_table_ptr +
779 sizeof(entry)*condition_id);
780 entry.cond_mask = nvgpu_bios_read_u32(g, g->bios.condition_table_ptr +
781 sizeof(entry)*condition_id + 4);
782 entry.cond_compare = nvgpu_bios_read_u32(g, g->bios.condition_table_ptr +
783 sizeof(entry)*condition_id + 8);
784
785 if ((gk20a_readl(g, entry.cond_addr) & entry.cond_mask)
786 != entry.cond_compare) {
787 *condition = false;
788 }
789}
790
/*
 * Minimal interpreter for the VBIOS devinit script at @offset.  Only the
 * four opcodes needed here are implemented; any other opcode aborts the
 * script.  Returns 0 when INIT_DONE is reached, -EINVAL on an unknown
 * opcode.
 */
int nvgpu_bios_execute_script(struct gk20a *g, u32 offset)
{
	u8 opcode;
	u32 ip;			/* instruction pointer into the bios image */
	u32 operand[8];
	bool condition, end;
	int status = 0;

	ip = offset;
	condition = true;	/* cleared by INIT_CONDITION, set by INIT_RESUME */
	end = false;

	while (!end) {

		opcode = nvgpu_bios_read_u8(g, ip++);

		switch (opcode) {

		case INIT_XMEMSEL_ZM_NV_REG_ARRAY:
			/* operands: u32 reg, u8 stride, u8 count */
			operand[0] = nvgpu_bios_read_u32(g, ip);
			operand[1] = nvgpu_bios_read_u8(g, ip+4);
			operand[2] = nvgpu_bios_read_u8(g, ip+5);
			ip += 6;

			nvgpu_bios_init_xmemsel_zm_nv_reg_array(g, &condition,
					operand[0], operand[1], operand[2], ip);
			/* skip the inline data table: count rows of
			 * mem_strap_data_count 32-bit words each */
			ip += operand[2] * sizeof(u32) *
				g->bios.mem_strap_data_count;
			break;

		case INIT_CONDITION:
			/* operand: u8 index into the condition table */
			operand[0] = nvgpu_bios_read_u8(g, ip);
			ip++;

			gp106_init_condition(g, &condition, operand[0]);
			break;

		case INIT_RESUME:
			condition = true;
			break;

		case INIT_DONE:
			end = true;
			break;

		default:
			nvgpu_err(g, "opcode: 0x%02x", opcode);
			end = true;
			status = -EINVAL;
			break;
		}
	}

	return status;
}