summary refs log tree commit diff stats
path: root/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c  374
1 files changed, 368 insertions, 6 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 87f3ef54..a4657ff3 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -38,8 +38,10 @@
38#include <nvgpu/gk20a.h> 38#include <nvgpu/gk20a.h>
39 39
40#include "mm_gm20b.h" 40#include "mm_gm20b.h"
41#include "pmu_gm20b.h"
41#include "acr_gm20b.h" 42#include "acr_gm20b.h"
42 43
44#include <nvgpu/hw/gm20b/hw_falcon_gm20b.h>
43#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h> 45#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
44 46
45typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata); 47typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata);
@@ -358,7 +360,7 @@ int prepare_ucode_blob(struct gk20a *g)
358 g->ops.fb.vpr_info_fetch(g); 360 g->ops.fb.vpr_info_fetch(g);
359 gr_gk20a_init_ctxsw_ucode(g); 361 gr_gk20a_init_ctxsw_ucode(g);
360 362
361 g->ops.pmu.get_wpr(g, &wpr_inf); 363 g->acr.get_wpr_info(g, &wpr_inf);
362 nvgpu_pmu_dbg(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base); 364 nvgpu_pmu_dbg(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
363 nvgpu_pmu_dbg(g, "wpr carveout size :%llx\n", wpr_inf.size); 365 nvgpu_pmu_dbg(g, "wpr carveout size :%llx\n", wpr_inf.size);
364 366
@@ -377,7 +379,7 @@ int prepare_ucode_blob(struct gk20a *g)
377 } 379 }
378 380
379 /*Alloc memory to hold ucode blob contents*/ 381 /*Alloc memory to hold ucode blob contents*/
380 err = g->ops.pmu.alloc_blob_space(g, plsfm->wpr_size 382 err = g->acr.alloc_blob_space(g, plsfm->wpr_size
381 , &g->acr.ucode_blob); 383 , &g->acr.ucode_blob);
382 if (err) { 384 if (err) {
383 goto free_sgt; 385 goto free_sgt;
@@ -450,7 +452,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
450 452
451 /*0th index is always PMU which is already handled in earlier 453 /*0th index is always PMU which is already handled in earlier
452 if condition*/ 454 if condition*/
453 for (i = 1; i < (MAX_SUPPORTED_LSFM); i++) { 455 for (i = 1; i < g->acr.max_supported_lsfm; i++) {
454 memset(&ucode_img, 0, sizeof(ucode_img)); 456 memset(&ucode_img, 0, sizeof(ucode_img));
455 if (pmu_acr_supp_ucode_list[i](g, &ucode_img) == 0) { 457 if (pmu_acr_supp_ucode_list[i](g, &ucode_img) == 0) {
456 if (ucode_img.lsf_desc != NULL) { 458 if (ucode_img.lsf_desc != NULL) {
@@ -520,7 +522,7 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
520 * physical addresses of each respective segment. 522 * physical addresses of each respective segment.
521 */ 523 */
522 addr_base = p_lsfm->lsb_header.ucode_off; 524 addr_base = p_lsfm->lsb_header.ucode_off;
523 g->ops.pmu.get_wpr(g, &wpr_inf); 525 g->acr.get_wpr_info(g, &wpr_inf);
524 addr_base += wpr_inf.wpr_base; 526 addr_base += wpr_inf.wpr_base;
525 nvgpu_pmu_dbg(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base); 527 nvgpu_pmu_dbg(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
526 /*From linux*/ 528 /*From linux*/
@@ -596,7 +598,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
596 * physical addresses of each respective segment. 598 * physical addresses of each respective segment.
597 */ 599 */
598 addr_base = p_lsfm->lsb_header.ucode_off; 600 addr_base = p_lsfm->lsb_header.ucode_off;
599 g->ops.pmu.get_wpr(g, &wpr_inf); 601 g->acr.get_wpr_info(g, &wpr_inf);
600 addr_base += wpr_inf.wpr_base; 602 addr_base += wpr_inf.wpr_base;
601 603
602 nvgpu_pmu_dbg(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base, 604 nvgpu_pmu_dbg(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
@@ -874,7 +876,7 @@ static int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr *plsfm,
874 876
875 /* Fill in static WPR header info*/ 877 /* Fill in static WPR header info*/
876 pnode->wpr_header.falcon_id = falcon_id; 878 pnode->wpr_header.falcon_id = falcon_id;
877 pnode->wpr_header.bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT; 879 pnode->wpr_header.bootstrap_owner = g->acr.bootstrap_owner;
878 pnode->wpr_header.status = LSF_IMAGE_STATUS_COPY; 880 pnode->wpr_header.status = LSF_IMAGE_STATUS_COPY;
879 881
880 pnode->wpr_header.lazy_bootstrap = 882 pnode->wpr_header.lazy_bootstrap =
@@ -1112,3 +1114,363 @@ void gm20b_update_lspmu_cmdline_args(struct gk20a *g)
1112 (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)), 1114 (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
1113 g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0); 1115 g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
1114} 1116}
1117
1118static int nvgpu_gm20b_acr_wait_for_completion(struct gk20a *g,
1119 struct nvgpu_falcon *flcn, unsigned int timeout)
1120{
1121 u32 base_addr = flcn->flcn_base;
1122 int completion = 0;
1123 u32 data = 0;
1124
1125 nvgpu_log_fn(g, " ");
1126
1127 completion = nvgpu_flcn_wait_for_halt(flcn, timeout);
1128 if (completion != 0U) {
1129 nvgpu_err(g, "flcn-%d: ACR boot timed out", flcn->flcn_id);
1130 goto exit;
1131 }
1132
1133 nvgpu_pmu_dbg(g, "flcn-%d: ACR capabilities %x\n", flcn->flcn_id,
1134 nvgpu_flcn_mailbox_read(flcn, FALCON_MAILBOX_1));
1135
1136 data = nvgpu_flcn_mailbox_read(flcn, FALCON_MAILBOX_0);
1137 if (data != 0U) {
1138 nvgpu_err(g, "flcn-%d: ACR boot failed, err %x", flcn->flcn_id,
1139 data);
1140 completion = -EAGAIN;
1141 goto exit;
1142 }
1143
1144 nvgpu_pmu_dbg(g, "flcn-%d: sctl reg %x", flcn->flcn_id,
1145 gk20a_readl(g, base_addr + falcon_falcon_sctl_r()));
1146 nvgpu_pmu_dbg(g, "flcn-%d: cpuctl reg %x", flcn->flcn_id,
1147 gk20a_readl(g, base_addr + falcon_falcon_cpuctl_r()));
1148
1149exit:
1150 return completion;
1151}
1152
1153static int gm20b_acr_hs_bl_exec(struct gk20a *g, struct nvgpu_acr *acr,
1154 struct hs_acr *acr_desc, bool b_wait_for_halt)
1155{
1156 struct nvgpu_firmware *hs_bl_fw = acr_desc->acr_hs_bl.hs_bl_fw;
1157 struct hsflcn_bl_desc *hs_bl_desc;
1158 struct nvgpu_falcon_bl_info bl_info;
1159 struct hs_flcn_bl *hs_bl = &acr_desc->acr_hs_bl;
1160 struct mm_gk20a *mm = &g->mm;
1161 struct vm_gk20a *vm = mm->pmu.vm;
1162 u32 *hs_bl_code = NULL;
1163 int err = 0;
1164 u32 bl_sz;
1165
1166 nvgpu_pmu_dbg(g, "Executing ACR HS Bootloader %s on Falcon-ID - %d",
1167 hs_bl->bl_fw_name, acr_desc->acr_flcn->flcn_id);
1168
1169 if (hs_bl_fw == NULL) {
1170 hs_bl_fw = nvgpu_request_firmware(g, hs_bl->bl_fw_name, 0U);
1171 if (hs_bl_fw == NULL) {
1172 nvgpu_err(g, "ACR HS BL ucode load fail");
1173 return -ENOENT;
1174 }
1175
1176 hs_bl->hs_bl_fw = hs_bl_fw;
1177 hs_bl->hs_bl_bin_hdr = (struct bin_hdr *)hs_bl_fw->data;
1178 hs_bl->hs_bl_desc = (struct hsflcn_bl_desc *)(hs_bl_fw->data +
1179 hs_bl->hs_bl_bin_hdr->header_offset);
1180
1181 hs_bl_desc = hs_bl->hs_bl_desc;
1182 hs_bl_code = (u32 *)(hs_bl_fw->data +
1183 hs_bl->hs_bl_bin_hdr->data_offset);
1184
1185 bl_sz = ALIGN(hs_bl_desc->bl_img_hdr.bl_code_size, 256U);
1186
1187 hs_bl->hs_bl_ucode.size = bl_sz;
1188
1189 err = nvgpu_dma_alloc_sys(g, bl_sz, &hs_bl->hs_bl_ucode);
1190 if (err) {
1191 nvgpu_err(g, "ACR HS BL failed to allocate memory");
1192 goto err_done;
1193 }
1194
1195 hs_bl->hs_bl_ucode.gpu_va = nvgpu_gmmu_map(vm,
1196 &hs_bl->hs_bl_ucode,
1197 bl_sz,
1198 0U, /* flags */
1199 gk20a_mem_flag_read_only, false,
1200 hs_bl->hs_bl_ucode.aperture);
1201 if (hs_bl->hs_bl_ucode.gpu_va == 0U) {
1202 nvgpu_err(g, "ACR HS BL failed to map ucode memory!!");
1203 goto err_free_ucode;
1204 }
1205
1206 nvgpu_mem_wr_n(g, &hs_bl->hs_bl_ucode, 0U, hs_bl_code, bl_sz);
1207
1208 nvgpu_pmu_dbg(g, "Copied BL ucode to bl_cpuva");
1209 }
1210
1211 /* Fill HS BL info */
1212 bl_info.bl_src = hs_bl->hs_bl_ucode.cpu_va;
1213 bl_info.bl_desc = acr_desc->ptr_bl_dmem_desc;
1214 bl_info.bl_desc_size = acr_desc->bl_dmem_desc_size;
1215 bl_info.bl_size = hs_bl->hs_bl_ucode.size;
1216 bl_info.bl_start_tag = hs_bl->hs_bl_desc->bl_start_tag;
1217
1218 /*
1219 * 1. Dose falcon reset
1220 * 2. setup falcon apertures
1221 * 3. bootstrap falcon
1222 */
1223 acr_desc->acr_flcn_setup_hw_and_bl_bootstrap(g, acr_desc, &bl_info);
1224
1225 if (b_wait_for_halt) {
1226 /* wait for ACR halt*/
1227 err = nvgpu_gm20b_acr_wait_for_completion(g, acr_desc->acr_flcn,
1228 ACR_COMPLETION_TIMEOUT_MS);
1229 if (err != 0U) {
1230 goto err_unmap_bl;
1231 }
1232 }
1233
1234 return 0U;
1235err_unmap_bl:
1236 nvgpu_gmmu_unmap(vm, &hs_bl->hs_bl_ucode, hs_bl->hs_bl_ucode.gpu_va);
1237err_free_ucode:
1238 nvgpu_dma_free(g, &hs_bl->hs_bl_ucode);
1239err_done:
1240 nvgpu_release_firmware(g, hs_bl_fw);
1241
1242 return err;
1243}
1244
1245int gm20b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
1246 struct nvgpu_acr *acr, struct hs_acr *acr_desc, bool is_recovery)
1247{
1248 struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
1249 struct acr_fw_header *acr_fw_hdr = NULL;
1250 struct bin_hdr *acr_fw_bin_hdr = NULL;
1251 struct flcn_acr_desc *acr_dmem_desc;
1252 u32 *acr_ucode_header = NULL;
1253 u32 *acr_ucode_data = NULL;
1254
1255 nvgpu_log_fn(g, " ");
1256
1257 if (is_recovery) {
1258 acr_desc->acr_dmem_desc->nonwpr_ucode_blob_size = 0U;
1259 } else {
1260 acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
1261 acr_fw_hdr = (struct acr_fw_header *)
1262 (acr_fw->data + acr_fw_bin_hdr->header_offset);
1263
1264 acr_ucode_data = (u32 *)(acr_fw->data +
1265 acr_fw_bin_hdr->data_offset);
1266
1267 acr_ucode_header = (u32 *)(acr_fw->data +
1268 acr_fw_hdr->hdr_offset);
1269
1270 /* During recovery need to update blob size as 0x0*/
1271 acr_desc->acr_dmem_desc = (struct flcn_acr_desc *)((u8 *)(
1272 acr_desc->acr_ucode.cpu_va) + acr_ucode_header[2U]);
1273
1274 /* Patch WPR info to ucode */
1275 acr_dmem_desc = (struct flcn_acr_desc *)
1276 &(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);
1277
1278 acr_dmem_desc->nonwpr_ucode_blob_start =
1279 nvgpu_mem_get_addr(g, &g->acr.ucode_blob);
1280 acr_dmem_desc->nonwpr_ucode_blob_size =
1281 g->acr.ucode_blob.size;
1282 acr_dmem_desc->regions.no_regions = 1U;
1283 acr_dmem_desc->wpr_offset = 0U;
1284 }
1285
1286 return 0;
1287}
1288
1289int gm20b_acr_fill_bl_dmem_desc(struct gk20a *g,
1290 struct nvgpu_acr *acr, struct hs_acr *acr_desc,
1291 u32 *acr_ucode_header)
1292{
1293 struct flcn_bl_dmem_desc *bl_dmem_desc = &acr_desc->bl_dmem_desc;
1294
1295 nvgpu_log_fn(g, " ");
1296
1297 memset(bl_dmem_desc, 0U, sizeof(struct flcn_bl_dmem_desc));
1298
1299 bl_dmem_desc->signature[0] = 0U;
1300 bl_dmem_desc->signature[1] = 0U;
1301 bl_dmem_desc->signature[2] = 0U;
1302 bl_dmem_desc->signature[3] = 0U;
1303 bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
1304 bl_dmem_desc->code_dma_base =
1305 (unsigned int)(((u64)acr_desc->acr_ucode.gpu_va >> 8U));
1306 bl_dmem_desc->code_dma_base1 = 0x0U;
1307 bl_dmem_desc->non_sec_code_off = acr_ucode_header[0U];
1308 bl_dmem_desc->non_sec_code_size = acr_ucode_header[1U];
1309 bl_dmem_desc->sec_code_off = acr_ucode_header[5U];
1310 bl_dmem_desc->sec_code_size = acr_ucode_header[6U];
1311 bl_dmem_desc->code_entry_point = 0U; /* Start at 0th offset */
1312 bl_dmem_desc->data_dma_base =
1313 bl_dmem_desc->code_dma_base +
1314 ((acr_ucode_header[2U]) >> 8U);
1315 bl_dmem_desc->data_dma_base1 = 0x0U;
1316 bl_dmem_desc->data_size = acr_ucode_header[3U];
1317
1318 return 0;
1319}
1320
1321/*
1322 * Loads ACR bin to SYSMEM/FB and bootstraps ACR with bootloader code
1323 * start and end are addresses of ucode blob in non-WPR region
1324 */
1325int gm20b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr,
1326 struct hs_acr *acr_desc)
1327{
1328 struct mm_gk20a *mm = &g->mm;
1329 struct vm_gk20a *vm = mm->pmu.vm;
1330 struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
1331 struct bin_hdr *acr_fw_bin_hdr = NULL;
1332 struct acr_fw_header *acr_fw_hdr = NULL;
1333 struct nvgpu_mem *acr_ucode_mem = &acr_desc->acr_ucode;
1334 u32 img_size_in_bytes = 0;
1335 u32 *acr_ucode_data;
1336 u32 *acr_ucode_header;
1337 u32 status = 0U;
1338
1339 nvgpu_pmu_dbg(g, "ACR TYPE %x ", acr_desc->acr_type);
1340
1341 if (acr_fw != NULL) {
1342 acr->patch_wpr_info_to_ucode(g, acr, acr_desc, true);
1343 } else {
1344 acr_fw = nvgpu_request_firmware(g, acr_desc->acr_fw_name,
1345 NVGPU_REQUEST_FIRMWARE_NO_SOC);
1346 if (acr_fw == NULL) {
1347 nvgpu_err(g, "%s ucode get fail for %s",
1348 acr_desc->acr_fw_name, g->name);
1349 return -ENOENT;
1350 }
1351
1352 acr_desc->acr_fw = acr_fw;
1353
1354 acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
1355
1356 acr_fw_hdr = (struct acr_fw_header *)
1357 (acr_fw->data + acr_fw_bin_hdr->header_offset);
1358
1359 acr_ucode_header = (u32 *)(acr_fw->data +
1360 acr_fw_hdr->hdr_offset);
1361
1362 acr_ucode_data = (u32 *)(acr_fw->data +
1363 acr_fw_bin_hdr->data_offset);
1364
1365 img_size_in_bytes = ALIGN((acr_fw_bin_hdr->data_size), 256U);
1366
1367 /* Lets patch the signatures first.. */
1368 if (acr_ucode_patch_sig(g, acr_ucode_data,
1369 (u32 *)(acr_fw->data + acr_fw_hdr->sig_prod_offset),
1370 (u32 *)(acr_fw->data + acr_fw_hdr->sig_dbg_offset),
1371 (u32 *)(acr_fw->data + acr_fw_hdr->patch_loc),
1372 (u32 *)(acr_fw->data + acr_fw_hdr->patch_sig)) < 0) {
1373 nvgpu_err(g, "patch signatures fail");
1374 status = -1;
1375 goto err_release_acr_fw;
1376 }
1377
1378 status = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
1379 acr_ucode_mem);
1380 if (status != 0U) {
1381 status = -ENOMEM;
1382 goto err_release_acr_fw;
1383 }
1384
1385 acr->patch_wpr_info_to_ucode(g, acr, acr_desc, false);
1386
1387 nvgpu_mem_wr_n(g, acr_ucode_mem, 0U, acr_ucode_data,
1388 img_size_in_bytes);
1389
1390 /*
1391 * In order to execute this binary, we will be using
1392 * a bootloader which will load this image into
1393 * FALCON IMEM/DMEM.
1394 * Fill up the bootloader descriptor to use..
1395 * TODO: Use standard descriptor which the generic bootloader is
1396 * checked in.
1397 */
1398 acr->acr_fill_bl_dmem_desc(g, acr, acr_desc, acr_ucode_header);
1399 }
1400
1401 status = gm20b_acr_hs_bl_exec(g, acr, acr_desc, true);
1402 if (status != 0U) {
1403 goto err_free_ucode_map;
1404 }
1405
1406 return 0;
1407err_free_ucode_map:
1408 nvgpu_dma_unmap_free(vm, acr_ucode_mem);
1409err_release_acr_fw:
1410 nvgpu_release_firmware(g, acr_fw);
1411 acr_fw = NULL;
1412 return status;
1413}
1414
1415void gm20b_remove_acr_support(struct nvgpu_acr *acr)
1416{
1417 struct gk20a *g = acr->g;
1418 struct mm_gk20a *mm = &g->mm;
1419 struct vm_gk20a *vm = mm->pmu.vm;
1420
1421 if (acr->acr.acr_fw != NULL) {
1422 nvgpu_release_firmware(g, acr->acr.acr_fw);
1423 }
1424
1425 if (acr->acr.acr_hs_bl.hs_bl_fw != NULL) {
1426 nvgpu_release_firmware(g, acr->acr.acr_hs_bl.hs_bl_fw);
1427 }
1428
1429 nvgpu_dma_unmap_free(vm, &acr->acr.acr_ucode);
1430 nvgpu_dma_unmap_free(vm, &acr->acr.acr_hs_bl.hs_bl_ucode);
1431}
1432
1433static void gm20b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
1434{
1435 struct hs_flcn_bl *hs_bl = &hs_acr->acr_hs_bl;
1436
1437 nvgpu_log_fn(g, " ");
1438
1439 /* ACR HS bootloader ucode name */
1440 hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;
1441
1442 /* ACR HS ucode type & f/w name*/
1443 hs_acr->acr_type = ACR_DEFAULT;
1444 hs_acr->acr_fw_name = HSBIN_ACR_UCODE_IMAGE;
1445
1446 /* bootlader interface used by ACR HS bootloader*/
1447 hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc;
1448 hs_acr->bl_dmem_desc_size = sizeof(struct flcn_bl_dmem_desc);
1449
1450 /* set on which falcon ACR need to execute*/
1451 hs_acr->acr_flcn = &g->pmu_flcn;
1452 hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
1453 gm20b_pmu_setup_hw_and_bl_bootstrap;
1454}
1455
1456void nvgpu_gm20b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
1457{
1458 nvgpu_log_fn(g, " ");
1459
1460 acr->g = g;
1461
1462 acr->bootstrap_owner = LSF_FALCON_ID_PMU;
1463 acr->max_supported_lsfm = MAX_SUPPORTED_LSFM;
1464
1465 gm20b_acr_default_sw_init(g, &acr->acr);
1466
1467 acr->get_wpr_info = gm20b_wpr_info;
1468 acr->alloc_blob_space = gm20b_alloc_blob_space;
1469 acr->bootstrap_hs_acr = gm20b_bootstrap_hs_acr;
1470 acr->patch_wpr_info_to_ucode =
1471 gm20b_acr_patch_wpr_info_to_ucode;
1472 acr->acr_fill_bl_dmem_desc =
1473 gm20b_acr_fill_bl_dmem_desc;
1474
1475 acr->remove_support = gm20b_remove_acr_support;
1476}