author    Mahantesh Kumbar <mkumbar@nvidia.com>    2018-09-06 11:14:27 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-09-24 11:11:49 -0400
commit    5d30a5cda37ca349b4d9cb7e1985c7a0849001b6 (patch)
tree      89a37078480d7cec42d9a8c7bc869aae8bb28279
parent    7465926ccdcdad87c22c788fe04fc11961df53ba (diff)
gpu: nvgpu: ACR code refactor
- Created struct nvgpu_acr to hold the ACR module's members in a single struct; these were previously spread across multiple structs (nvgpu_pmu, pmu_ops & gk20a).
- Created struct hs_flcn_bl to hold ACR HS bootloader-specific members.
- Created struct hs_acr to hold ACR ucode-specific members: bootloader data (struct hs_flcn_bl), the ACR type & the falcon on which the ACR ucode needs to run.
- Created ACR ops under struct nvgpu_acr to perform ACR-specific operations. Previously the ACR ops were part of the PMU, which forced a dependence on the PMU even when ACR was not executing on the PMU.
- Added an acr_remove_support op, which is called as part of gk20a_remove_support(); earlier, ACR cleanup was part of the PMU remove_support method.
- Created defines for the ACR types.
- The acr_sw_init() op sets ACR properties statically for the chip currently in execution & points the ops at the functions needed for that chip.
- acr_sw_init executes early, because nvgpu_init_mm_support calls an ACR function to allocate blob space.
- Created ops to fill the bootloader descriptor & to patch WPR info into the ACR ucode, based on the interfaces used to bootstrap the ACR ucode.
- Created gm20b_bootstrap_hs_acr(), which is now a common HAL for all chips to bootstrap ACR; earlier there were three different functions, for gm20b/gp10b, gv11b & all dGPUs, based on the interface needed.
- Removed duplicate falcon-engine code wherever common falcon code can be used.
- Removed ACR code dependent on the PMU & changed it to use nvgpu_acr.

JIRA NVGPU-1148

Change-Id: I39951d2fc9a0bb7ee6057e0fa06da78045d47590
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1813231
GVS: Gerrit_Virtual_Submit
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
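In outline, each chip now publishes a single acr.acr_sw_init HAL op, and every other ACR hook hangs off struct nvgpu_acr. A condensed sketch of the new wiring, pieced together from the diff below (not a compilable unit: struct layouts live in nvgpu_acr.h, and error handling is elided):

    /* 1. The chip HAL publishes one ACR entry point: */
    static const struct gpu_ops gm20b_ops = {
            .acr = {
                    .acr_sw_init = nvgpu_gm20b_acr_sw_init,
            },
    };

    /* 2. acr_sw_init() fills struct nvgpu_acr with chip-static
     * properties and function pointers: */
    void nvgpu_gm20b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
    {
            acr->bootstrap_owner = LSF_FALCON_ID_PMU;
            acr->alloc_blob_space = gm20b_alloc_blob_space;
            acr->bootstrap_hs_acr = gm20b_bootstrap_hs_acr; /* common HAL */
            /* ... remaining ops; see acr_gm20b.c below ... */
    }

    /* 3. Callers then go through g->acr instead of g->ops.pmu, so
     * ACR no longer depends on the PMU: */
    err = g->acr.alloc_blob_space(g, 0, &g->acr.ucode_blob);
    err = g->acr.bootstrap_hs_acr(g, &g->acr, &g->acr.acr);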
-rw-r--r-- drivers/gpu/nvgpu/common/mm/mm.c | 5
-rw-r--r-- drivers/gpu/nvgpu/gk20a/gk20a.c | 14
-rw-r--r-- drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 374
-rw-r--r-- drivers/gpu/nvgpu/gm20b/acr_gm20b.h | 17
-rw-r--r-- drivers/gpu/nvgpu/gm20b/hal_gm20b.c | 6
-rw-r--r-- drivers/gpu/nvgpu/gm20b/pmu_gm20b.c | 60
-rw-r--r-- drivers/gpu/nvgpu/gm20b/pmu_gm20b.h | 4
-rw-r--r-- drivers/gpu/nvgpu/gp106/acr_gp106.c | 133
-rw-r--r-- drivers/gpu/nvgpu/gp106/acr_gp106.h | 8
-rw-r--r-- drivers/gpu/nvgpu/gp106/hal_gp106.c | 5
-rw-r--r-- drivers/gpu/nvgpu/gp106/sec2_gp106.c | 171
-rw-r--r-- drivers/gpu/nvgpu/gp106/sec2_gp106.h | 12
-rw-r--r-- drivers/gpu/nvgpu/gp10b/hal_gp10b.c | 5
-rw-r--r-- drivers/gpu/nvgpu/gv100/hal_gv100.c | 5
-rw-r--r-- drivers/gpu/nvgpu/gv11b/acr_gv11b.c | 87
-rw-r--r-- drivers/gpu/nvgpu/gv11b/acr_gv11b.h | 5
-rw-r--r-- drivers/gpu/nvgpu/gv11b/hal_gv11b.c | 5
-rw-r--r-- drivers/gpu/nvgpu/include/nvgpu/acr/nvgpu_acr.h | 108
-rw-r--r-- drivers/gpu/nvgpu/include/nvgpu/gk20a.h | 9
-rw-r--r-- drivers/gpu/nvgpu/os/linux/module.c | 4
-rw-r--r-- drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c | 5
-rw-r--r-- drivers/gpu/nvgpu/vgpu/vgpu.c | 4
22 files changed, 850 insertions, 196 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 03325cce..88e3baa8 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -442,9 +442,10 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
 	 * this requires fixed allocations in vidmem which must be
 	 * allocated before all other buffers
 	 */
-	if (g->ops.pmu.alloc_blob_space != NULL &&
+
+	if (g->acr.alloc_blob_space != NULL &&
 	    !nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) {
-		err = g->ops.pmu.alloc_blob_space(g, 0, &g->acr.ucode_blob);
+		err = g->acr.alloc_blob_space(g, 0, &g->acr.ucode_blob);
 		if (err) {
 			return err;
 		}
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index 223c7727..2dfe9e58 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -165,6 +165,11 @@ int gk20a_finalize_poweron(struct gk20a *g)
 		goto done;
 	}
 
+	if (g->ops.acr.acr_sw_init != NULL &&
+		nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
+		g->ops.acr.acr_sw_init(g, &g->acr);
+	}
+
 	if (g->ops.bios.init) {
 		err = g->ops.bios.init(g);
 	}
@@ -273,6 +278,15 @@ int gk20a_finalize_poweron(struct gk20a *g)
 		}
 	}
 
+	if (g->acr.bootstrap_hs_acr != NULL &&
+		nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
+		err = g->acr.bootstrap_hs_acr(g, &g->acr, &g->acr.acr);
+		if (err != 0) {
+			nvgpu_err(g, "ACR bootstrap failed");
+			goto done;
+		}
+	}
+
 	if (g->ops.pmu.is_pmu_supported(g)) {
 		err = nvgpu_init_pmu_support(g);
 		if (err) {
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 87f3ef54..a4657ff3 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -38,8 +38,10 @@
 #include <nvgpu/gk20a.h>
 
 #include "mm_gm20b.h"
+#include "pmu_gm20b.h"
 #include "acr_gm20b.h"
 
+#include <nvgpu/hw/gm20b/hw_falcon_gm20b.h>
 #include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
 
 typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata);
@@ -358,7 +360,7 @@ int prepare_ucode_blob(struct gk20a *g)
 	g->ops.fb.vpr_info_fetch(g);
 	gr_gk20a_init_ctxsw_ucode(g);
 
-	g->ops.pmu.get_wpr(g, &wpr_inf);
+	g->acr.get_wpr_info(g, &wpr_inf);
 	nvgpu_pmu_dbg(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
 	nvgpu_pmu_dbg(g, "wpr carveout size :%llx\n", wpr_inf.size);
 
@@ -377,7 +379,7 @@ int prepare_ucode_blob(struct gk20a *g)
 	}
 
 	/*Alloc memory to hold ucode blob contents*/
-	err = g->ops.pmu.alloc_blob_space(g, plsfm->wpr_size
+	err = g->acr.alloc_blob_space(g, plsfm->wpr_size
 			, &g->acr.ucode_blob);
 	if (err) {
 		goto free_sgt;
@@ -450,7 +452,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 
 	/*0th index is always PMU which is already handled in earlier
 	if condition*/
-	for (i = 1; i < (MAX_SUPPORTED_LSFM); i++) {
+	for (i = 1; i < g->acr.max_supported_lsfm; i++) {
 		memset(&ucode_img, 0, sizeof(ucode_img));
 		if (pmu_acr_supp_ucode_list[i](g, &ucode_img) == 0) {
 			if (ucode_img.lsf_desc != NULL) {
@@ -520,7 +522,7 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	 * physical addresses of each respective segment.
 	 */
 	addr_base = p_lsfm->lsb_header.ucode_off;
-	g->ops.pmu.get_wpr(g, &wpr_inf);
+	g->acr.get_wpr_info(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
 	nvgpu_pmu_dbg(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
 	/*From linux*/
@@ -596,7 +598,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	 * physical addresses of each respective segment.
 	 */
 	addr_base = p_lsfm->lsb_header.ucode_off;
-	g->ops.pmu.get_wpr(g, &wpr_inf);
+	g->acr.get_wpr_info(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
 
 	nvgpu_pmu_dbg(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
@@ -874,7 +876,7 @@ static int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 
 	/* Fill in static WPR header info*/
 	pnode->wpr_header.falcon_id = falcon_id;
-	pnode->wpr_header.bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
+	pnode->wpr_header.bootstrap_owner = g->acr.bootstrap_owner;
 	pnode->wpr_header.status = LSF_IMAGE_STATUS_COPY;
 
 	pnode->wpr_header.lazy_bootstrap =
@@ -1112,3 +1114,363 @@ void gm20b_update_lspmu_cmdline_args(struct gk20a *g)
 		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
 		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
 }
+
+static int nvgpu_gm20b_acr_wait_for_completion(struct gk20a *g,
+	struct nvgpu_falcon *flcn, unsigned int timeout)
+{
+	u32 base_addr = flcn->flcn_base;
+	int completion = 0;
+	u32 data = 0;
+
+	nvgpu_log_fn(g, " ");
+
+	completion = nvgpu_flcn_wait_for_halt(flcn, timeout);
+	if (completion != 0U) {
+		nvgpu_err(g, "flcn-%d: ACR boot timed out", flcn->flcn_id);
+		goto exit;
+	}
+
+	nvgpu_pmu_dbg(g, "flcn-%d: ACR capabilities %x\n", flcn->flcn_id,
+		nvgpu_flcn_mailbox_read(flcn, FALCON_MAILBOX_1));
+
+	data = nvgpu_flcn_mailbox_read(flcn, FALCON_MAILBOX_0);
+	if (data != 0U) {
+		nvgpu_err(g, "flcn-%d: ACR boot failed, err %x", flcn->flcn_id,
+			data);
+		completion = -EAGAIN;
+		goto exit;
+	}
+
+	nvgpu_pmu_dbg(g, "flcn-%d: sctl reg %x", flcn->flcn_id,
+		gk20a_readl(g, base_addr + falcon_falcon_sctl_r()));
+	nvgpu_pmu_dbg(g, "flcn-%d: cpuctl reg %x", flcn->flcn_id,
+		gk20a_readl(g, base_addr + falcon_falcon_cpuctl_r()));
+
+exit:
+	return completion;
+}
+
+static int gm20b_acr_hs_bl_exec(struct gk20a *g, struct nvgpu_acr *acr,
+	struct hs_acr *acr_desc, bool b_wait_for_halt)
+{
+	struct nvgpu_firmware *hs_bl_fw = acr_desc->acr_hs_bl.hs_bl_fw;
+	struct hsflcn_bl_desc *hs_bl_desc;
+	struct nvgpu_falcon_bl_info bl_info;
+	struct hs_flcn_bl *hs_bl = &acr_desc->acr_hs_bl;
+	struct mm_gk20a *mm = &g->mm;
+	struct vm_gk20a *vm = mm->pmu.vm;
+	u32 *hs_bl_code = NULL;
+	int err = 0;
+	u32 bl_sz;
+
+	nvgpu_pmu_dbg(g, "Executing ACR HS Bootloader %s on Falcon-ID - %d",
+		hs_bl->bl_fw_name, acr_desc->acr_flcn->flcn_id);
+
+	if (hs_bl_fw == NULL) {
+		hs_bl_fw = nvgpu_request_firmware(g, hs_bl->bl_fw_name, 0U);
+		if (hs_bl_fw == NULL) {
+			nvgpu_err(g, "ACR HS BL ucode load fail");
+			return -ENOENT;
+		}
+
+		hs_bl->hs_bl_fw = hs_bl_fw;
+		hs_bl->hs_bl_bin_hdr = (struct bin_hdr *)hs_bl_fw->data;
+		hs_bl->hs_bl_desc = (struct hsflcn_bl_desc *)(hs_bl_fw->data +
+			hs_bl->hs_bl_bin_hdr->header_offset);
+
+		hs_bl_desc = hs_bl->hs_bl_desc;
+		hs_bl_code = (u32 *)(hs_bl_fw->data +
+			hs_bl->hs_bl_bin_hdr->data_offset);
+
+		bl_sz = ALIGN(hs_bl_desc->bl_img_hdr.bl_code_size, 256U);
+
+		hs_bl->hs_bl_ucode.size = bl_sz;
+
+		err = nvgpu_dma_alloc_sys(g, bl_sz, &hs_bl->hs_bl_ucode);
+		if (err) {
+			nvgpu_err(g, "ACR HS BL failed to allocate memory");
+			goto err_done;
+		}
+
+		hs_bl->hs_bl_ucode.gpu_va = nvgpu_gmmu_map(vm,
+			&hs_bl->hs_bl_ucode,
+			bl_sz,
+			0U, /* flags */
+			gk20a_mem_flag_read_only, false,
+			hs_bl->hs_bl_ucode.aperture);
+		if (hs_bl->hs_bl_ucode.gpu_va == 0U) {
+			nvgpu_err(g, "ACR HS BL failed to map ucode memory!!");
+			goto err_free_ucode;
+		}
+
+		nvgpu_mem_wr_n(g, &hs_bl->hs_bl_ucode, 0U, hs_bl_code, bl_sz);
+
+		nvgpu_pmu_dbg(g, "Copied BL ucode to bl_cpuva");
+	}
+
+	/* Fill HS BL info */
+	bl_info.bl_src = hs_bl->hs_bl_ucode.cpu_va;
+	bl_info.bl_desc = acr_desc->ptr_bl_dmem_desc;
+	bl_info.bl_desc_size = acr_desc->bl_dmem_desc_size;
+	bl_info.bl_size = hs_bl->hs_bl_ucode.size;
+	bl_info.bl_start_tag = hs_bl->hs_bl_desc->bl_start_tag;
+
+	/*
+	 * 1. Dose falcon reset
+	 * 2. setup falcon apertures
+	 * 3. bootstrap falcon
+	 */
+	acr_desc->acr_flcn_setup_hw_and_bl_bootstrap(g, acr_desc, &bl_info);
+
+	if (b_wait_for_halt) {
+		/* wait for ACR halt*/
+		err = nvgpu_gm20b_acr_wait_for_completion(g, acr_desc->acr_flcn,
+			ACR_COMPLETION_TIMEOUT_MS);
+		if (err != 0U) {
+			goto err_unmap_bl;
+		}
+	}
+
+	return 0U;
+err_unmap_bl:
+	nvgpu_gmmu_unmap(vm, &hs_bl->hs_bl_ucode, hs_bl->hs_bl_ucode.gpu_va);
+err_free_ucode:
+	nvgpu_dma_free(g, &hs_bl->hs_bl_ucode);
+err_done:
+	nvgpu_release_firmware(g, hs_bl_fw);
+
+	return err;
+}
+
+int gm20b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
+	struct nvgpu_acr *acr, struct hs_acr *acr_desc, bool is_recovery)
+{
+	struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
+	struct acr_fw_header *acr_fw_hdr = NULL;
+	struct bin_hdr *acr_fw_bin_hdr = NULL;
+	struct flcn_acr_desc *acr_dmem_desc;
+	u32 *acr_ucode_header = NULL;
+	u32 *acr_ucode_data = NULL;
+
+	nvgpu_log_fn(g, " ");
+
+	if (is_recovery) {
+		acr_desc->acr_dmem_desc->nonwpr_ucode_blob_size = 0U;
+	} else {
+		acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
+		acr_fw_hdr = (struct acr_fw_header *)
+			(acr_fw->data + acr_fw_bin_hdr->header_offset);
+
+		acr_ucode_data = (u32 *)(acr_fw->data +
+			acr_fw_bin_hdr->data_offset);
+
+		acr_ucode_header = (u32 *)(acr_fw->data +
+			acr_fw_hdr->hdr_offset);
+
+		/* During recovery need to update blob size as 0x0*/
+		acr_desc->acr_dmem_desc = (struct flcn_acr_desc *)((u8 *)(
+			acr_desc->acr_ucode.cpu_va) + acr_ucode_header[2U]);
+
+		/* Patch WPR info to ucode */
+		acr_dmem_desc = (struct flcn_acr_desc *)
+			&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);
+
+		acr_dmem_desc->nonwpr_ucode_blob_start =
+			nvgpu_mem_get_addr(g, &g->acr.ucode_blob);
+		acr_dmem_desc->nonwpr_ucode_blob_size =
+			g->acr.ucode_blob.size;
+		acr_dmem_desc->regions.no_regions = 1U;
+		acr_dmem_desc->wpr_offset = 0U;
+	}
+
+	return 0;
+}
+
+int gm20b_acr_fill_bl_dmem_desc(struct gk20a *g,
+	struct nvgpu_acr *acr, struct hs_acr *acr_desc,
+	u32 *acr_ucode_header)
+{
+	struct flcn_bl_dmem_desc *bl_dmem_desc = &acr_desc->bl_dmem_desc;
+
+	nvgpu_log_fn(g, " ");
+
+	memset(bl_dmem_desc, 0U, sizeof(struct flcn_bl_dmem_desc));
+
+	bl_dmem_desc->signature[0] = 0U;
+	bl_dmem_desc->signature[1] = 0U;
+	bl_dmem_desc->signature[2] = 0U;
+	bl_dmem_desc->signature[3] = 0U;
+	bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
+	bl_dmem_desc->code_dma_base =
+		(unsigned int)(((u64)acr_desc->acr_ucode.gpu_va >> 8U));
+	bl_dmem_desc->code_dma_base1 = 0x0U;
+	bl_dmem_desc->non_sec_code_off = acr_ucode_header[0U];
+	bl_dmem_desc->non_sec_code_size = acr_ucode_header[1U];
+	bl_dmem_desc->sec_code_off = acr_ucode_header[5U];
+	bl_dmem_desc->sec_code_size = acr_ucode_header[6U];
+	bl_dmem_desc->code_entry_point = 0U; /* Start at 0th offset */
+	bl_dmem_desc->data_dma_base =
+		bl_dmem_desc->code_dma_base +
+		((acr_ucode_header[2U]) >> 8U);
+	bl_dmem_desc->data_dma_base1 = 0x0U;
+	bl_dmem_desc->data_size = acr_ucode_header[3U];
+
+	return 0;
+}
+
+/*
+ * Loads ACR bin to SYSMEM/FB and bootstraps ACR with bootloader code
+ * start and end are addresses of ucode blob in non-WPR region
+ */
+int gm20b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr,
+	struct hs_acr *acr_desc)
+{
+	struct mm_gk20a *mm = &g->mm;
+	struct vm_gk20a *vm = mm->pmu.vm;
+	struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
+	struct bin_hdr *acr_fw_bin_hdr = NULL;
+	struct acr_fw_header *acr_fw_hdr = NULL;
+	struct nvgpu_mem *acr_ucode_mem = &acr_desc->acr_ucode;
+	u32 img_size_in_bytes = 0;
+	u32 *acr_ucode_data;
+	u32 *acr_ucode_header;
+	u32 status = 0U;
+
+	nvgpu_pmu_dbg(g, "ACR TYPE %x ", acr_desc->acr_type);
+
+	if (acr_fw != NULL) {
+		acr->patch_wpr_info_to_ucode(g, acr, acr_desc, true);
+	} else {
+		acr_fw = nvgpu_request_firmware(g, acr_desc->acr_fw_name,
+			NVGPU_REQUEST_FIRMWARE_NO_SOC);
+		if (acr_fw == NULL) {
+			nvgpu_err(g, "%s ucode get fail for %s",
+				acr_desc->acr_fw_name, g->name);
+			return -ENOENT;
+		}
+
+		acr_desc->acr_fw = acr_fw;
+
+		acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
+
+		acr_fw_hdr = (struct acr_fw_header *)
+			(acr_fw->data + acr_fw_bin_hdr->header_offset);
+
+		acr_ucode_header = (u32 *)(acr_fw->data +
+			acr_fw_hdr->hdr_offset);
+
+		acr_ucode_data = (u32 *)(acr_fw->data +
+			acr_fw_bin_hdr->data_offset);
+
+		img_size_in_bytes = ALIGN((acr_fw_bin_hdr->data_size), 256U);
+
+		/* Lets patch the signatures first.. */
+		if (acr_ucode_patch_sig(g, acr_ucode_data,
+			(u32 *)(acr_fw->data + acr_fw_hdr->sig_prod_offset),
+			(u32 *)(acr_fw->data + acr_fw_hdr->sig_dbg_offset),
+			(u32 *)(acr_fw->data + acr_fw_hdr->patch_loc),
+			(u32 *)(acr_fw->data + acr_fw_hdr->patch_sig)) < 0) {
+			nvgpu_err(g, "patch signatures fail");
+			status = -1;
+			goto err_release_acr_fw;
+		}
+
+		status = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
+			acr_ucode_mem);
+		if (status != 0U) {
+			status = -ENOMEM;
+			goto err_release_acr_fw;
+		}
+
+		acr->patch_wpr_info_to_ucode(g, acr, acr_desc, false);
+
+		nvgpu_mem_wr_n(g, acr_ucode_mem, 0U, acr_ucode_data,
+			img_size_in_bytes);
+
+		/*
+		 * In order to execute this binary, we will be using
+		 * a bootloader which will load this image into
+		 * FALCON IMEM/DMEM.
+		 * Fill up the bootloader descriptor to use..
+		 * TODO: Use standard descriptor which the generic bootloader is
+		 * checked in.
+		 */
+		acr->acr_fill_bl_dmem_desc(g, acr, acr_desc, acr_ucode_header);
+	}
+
+	status = gm20b_acr_hs_bl_exec(g, acr, acr_desc, true);
+	if (status != 0U) {
+		goto err_free_ucode_map;
+	}
+
+	return 0;
+err_free_ucode_map:
+	nvgpu_dma_unmap_free(vm, acr_ucode_mem);
+err_release_acr_fw:
+	nvgpu_release_firmware(g, acr_fw);
+	acr_fw = NULL;
+	return status;
+}
+
+void gm20b_remove_acr_support(struct nvgpu_acr *acr)
+{
+	struct gk20a *g = acr->g;
+	struct mm_gk20a *mm = &g->mm;
+	struct vm_gk20a *vm = mm->pmu.vm;
+
+	if (acr->acr.acr_fw != NULL) {
+		nvgpu_release_firmware(g, acr->acr.acr_fw);
+	}
+
+	if (acr->acr.acr_hs_bl.hs_bl_fw != NULL) {
+		nvgpu_release_firmware(g, acr->acr.acr_hs_bl.hs_bl_fw);
+	}
+
+	nvgpu_dma_unmap_free(vm, &acr->acr.acr_ucode);
+	nvgpu_dma_unmap_free(vm, &acr->acr.acr_hs_bl.hs_bl_ucode);
+}
+
+static void gm20b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
+{
+	struct hs_flcn_bl *hs_bl = &hs_acr->acr_hs_bl;
+
+	nvgpu_log_fn(g, " ");
+
+	/* ACR HS bootloader ucode name */
+	hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;
+
+	/* ACR HS ucode type & f/w name*/
+	hs_acr->acr_type = ACR_DEFAULT;
+	hs_acr->acr_fw_name = HSBIN_ACR_UCODE_IMAGE;
+
+	/* bootlader interface used by ACR HS bootloader*/
+	hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc;
+	hs_acr->bl_dmem_desc_size = sizeof(struct flcn_bl_dmem_desc);
+
+	/* set on which falcon ACR need to execute*/
+	hs_acr->acr_flcn = &g->pmu_flcn;
+	hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
+		gm20b_pmu_setup_hw_and_bl_bootstrap;
+}
+
+void nvgpu_gm20b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
+{
+	nvgpu_log_fn(g, " ");
+
+	acr->g = g;
+
+	acr->bootstrap_owner = LSF_FALCON_ID_PMU;
+	acr->max_supported_lsfm = MAX_SUPPORTED_LSFM;
+
+	gm20b_acr_default_sw_init(g, &acr->acr);
+
+	acr->get_wpr_info = gm20b_wpr_info;
+	acr->alloc_blob_space = gm20b_alloc_blob_space;
+	acr->bootstrap_hs_acr = gm20b_bootstrap_hs_acr;
+	acr->patch_wpr_info_to_ucode =
+		gm20b_acr_patch_wpr_info_to_ucode;
+	acr->acr_fill_bl_dmem_desc =
+		gm20b_acr_fill_bl_dmem_desc;
+
+	acr->remove_support = gm20b_remove_acr_support;
+}
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.h b/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
index 329d53b8..cae6ab6a 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
@@ -37,21 +37,32 @@ bool gm20b_is_pmu_supported(struct gk20a *g);
 int prepare_ucode_blob(struct gk20a *g);
 bool gm20b_is_lazy_bootstrap(u32 falcon_id);
 bool gm20b_is_priv_load(u32 falcon_id);
-void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
-int gm20b_alloc_blob_space(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
 int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	void *lsfm, u32 *p_bl_gen_desc_size);
 int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
 void gm20b_update_lspmu_cmdline_args(struct gk20a *g);
 void gm20b_setup_apertures(struct gk20a *g);
-
 int gm20b_pmu_setup_sw(struct gk20a *g);
 int gm20b_init_nspmu_setup_hw1(struct gk20a *g);
+
 int acr_ucode_patch_sig(struct gk20a *g,
 	unsigned int *p_img,
 	unsigned int *p_prod_sig,
 	unsigned int *p_dbg_sig,
 	unsigned int *p_patch_loc,
 	unsigned int *p_patch_ind);
+int gm20b_alloc_blob_space(struct gk20a *g,
+	size_t size, struct nvgpu_mem *mem);
+void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
+int gm20b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
+	struct nvgpu_acr *acr, struct hs_acr *acr_desc, bool is_recovery);
+int gm20b_acr_fill_bl_dmem_desc(struct gk20a *g,
+	struct nvgpu_acr *acr, struct hs_acr *acr_desc,
+	u32 *acr_ucode_header);
+int gm20b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr,
+	struct hs_acr *acr_desc);
+void gm20b_remove_acr_support(struct nvgpu_acr *acr);
+void nvgpu_gm20b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);
+
 #endif /*NVGPU_GM20B_ACR_GM20B_H*/
diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
index 39c902d7..52f86dab 100644
--- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
@@ -656,6 +656,9 @@ static const struct gpu_ops gm20b_ops = {
 		.read_vin_cal_slope_intercept_fuse = NULL,
 		.read_vin_cal_gain_offset_fuse = NULL,
 	},
+	.acr = {
+		.acr_sw_init = nvgpu_gm20b_acr_sw_init,
+	},
 	.chip_init_gpu_characteristics = gk20a_init_gpu_characteristics,
 	.get_litter_value = gm20b_get_litter_value,
 };
@@ -703,6 +706,8 @@ int gm20b_init_hal(struct gk20a *g)
 
 	gops->fuse = gm20b_ops.fuse;
 
+	gops->acr = gm20b_ops.acr;
+
 	/* Lone functions */
 	gops->chip_init_gpu_characteristics =
 		gm20b_ops.chip_init_gpu_characteristics;
@@ -750,7 +755,6 @@ int gm20b_init_hal(struct gk20a *g)
 
 	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
 	g->pmu_lsf_pmu_wpr_init_done = 0;
-	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
 
 	g->name = "gm20b";
 
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index c1ec4d8e..6e764ac5 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -277,3 +277,63 @@ bool gm20b_pmu_is_debug_mode_en(struct gk20a *g)
 	u32 ctl_stat = gk20a_readl(g, pwr_pmu_scpctl_stat_r());
 	return pwr_pmu_scpctl_stat_debug_mode_v(ctl_stat) != 0U;
 }
+
+
+static int gm20b_bl_bootstrap(struct gk20a *g,
+	struct nvgpu_falcon_bl_info *bl_info)
+{
+	struct mm_gk20a *mm = &g->mm;
+
+	nvgpu_log_fn(g, " ");
+
+	gk20a_writel(g, pwr_falcon_itfen_r(),
+		gk20a_readl(g, pwr_falcon_itfen_r()) |
+		pwr_falcon_itfen_ctxen_enable_f());
+	gk20a_writel(g, pwr_pmu_new_instblk_r(),
+		pwr_pmu_new_instblk_ptr_f(
+		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12U) |
+		pwr_pmu_new_instblk_valid_f(1U) |
+		(nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?
+		pwr_pmu_new_instblk_target_sys_coh_f() :
+		pwr_pmu_new_instblk_target_sys_ncoh_f())) ;
+
+	nvgpu_flcn_bl_bootstrap(&g->pmu_flcn, bl_info);
+
+	return 0;
+}
+
+int gm20b_pmu_setup_hw_and_bl_bootstrap(struct gk20a *g,
+	struct hs_acr *acr_desc,
+	struct nvgpu_falcon_bl_info *bl_info)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	int err;
+
+	nvgpu_log_fn(g, " ");
+
+	nvgpu_mutex_acquire(&pmu->isr_mutex);
+	/*
+	 * disable irqs for hs falcon booting
+	 * as we will poll for halt
+	 */
+	g->ops.pmu.pmu_enable_irq(pmu, false);
+	pmu->isr_enabled = false;
+	err = nvgpu_flcn_reset(acr_desc->acr_flcn);
+	if (err != 0) {
+		nvgpu_mutex_release(&pmu->isr_mutex);
+		goto exit;
+	}
+	nvgpu_mutex_release(&pmu->isr_mutex);
+
+	if (g->ops.pmu.setup_apertures) {
+		g->ops.pmu.setup_apertures(g);
+	}
+
+	/*Clearing mailbox register used to reflect capabilities*/
+	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
+
+	err = gm20b_bl_bootstrap(g, bl_info);
+
+exit:
+	return err;
+}
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
index f1b6cd93..37634783 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
@@ -34,5 +34,7 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags);
 int gm20b_pmu_init_acr(struct gk20a *g);
 void gm20b_write_dmatrfbase(struct gk20a *g, u32 addr);
 bool gm20b_pmu_is_debug_mode_en(struct gk20a *g);
-
+int gm20b_pmu_setup_hw_and_bl_bootstrap(struct gk20a *g,
+	struct hs_acr *acr_desc,
+	struct nvgpu_falcon_bl_info *bl_info);
 #endif /*NVGPU_GM20B_PMU_GM20B_H*/
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index 7bb099e5..b1150e29 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -93,7 +93,7 @@ int gp106_alloc_blob_space(struct gk20a *g,
 		return 0;
 	}
 
-	g->ops.pmu.get_wpr(g, &wpr_inf);
+	g->acr.get_wpr_info(g, &wpr_inf);
 
 	/*
 	 * Even though this mem_desc wouldn't be used, the wpr region needs to
@@ -456,7 +456,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
 	memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr_v1));
 	gr_gk20a_init_ctxsw_ucode(g);
 
-	g->ops.pmu.get_wpr(g, &wpr_inf);
+	g->acr.get_wpr_info(g, &wpr_inf);
 	gp106_dbg_pmu(g, "wpr carveout base:%llx\n", (wpr_inf.wpr_base));
 	gp106_dbg_pmu(g, "wpr carveout size :%x\n", (u32)wpr_inf.size);
 
@@ -479,7 +479,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
 	}
 
 	/*Alloc memory to hold ucode blob contents*/
-	err = g->ops.pmu.alloc_blob_space(g, plsfm->wpr_size
+	err = g->acr.alloc_blob_space(g, plsfm->wpr_size
 			,&g->acr.ucode_blob);
 	if (err) {
 		goto exit_err;
@@ -557,7 +557,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
 
 	/*0th index is always PMU which is already handled in earlier
 	if condition*/
-	for (i = 1; i < (MAX_SUPPORTED_LSFM); i++) {
+	for (i = 1; i < g->acr.max_supported_lsfm; i++) {
 		memset(&ucode_img, 0, sizeof(ucode_img));
 		if (pmu_acr_supp_ucode_list[i](g, &ucode_img) == 0) {
 			if (ucode_img.lsf_desc != NULL) {
@@ -626,7 +626,7 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
 	 * physical addresses of each respective segment.
 	 */
 	addr_base = p_lsfm->lsb_header.ucode_off;
-	g->ops.pmu.get_wpr(g, &wpr_inf);
+	g->acr.get_wpr_info(g, &wpr_inf);
 	addr_base += (wpr_inf.wpr_base);
 
 	gp106_dbg_pmu(g, "pmu loader cfg addrbase 0x%llx\n", addr_base);
@@ -701,7 +701,7 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	 * physical addresses of each respective segment.
 	 */
 	addr_base = p_lsfm->lsb_header.ucode_off;
-	g->ops.pmu.get_wpr(g, &wpr_inf);
+	g->acr.get_wpr_info(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
 
 	gp106_dbg_pmu(g, "falcon ID %x", p_lsfm->wpr_header.falcon_id);
@@ -1017,7 +1017,7 @@ int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm,
 
 	/* Fill in static WPR header info*/
 	pnode->wpr_header.falcon_id = falcon_id;
-	pnode->wpr_header.bootstrap_owner = g->bootstrap_owner;
+	pnode->wpr_header.bootstrap_owner = g->acr.bootstrap_owner;
 	pnode->wpr_header.status = LSF_IMAGE_STATUS_COPY;
 
 	pnode->wpr_header.lazy_bootstrap =
@@ -1030,6 +1030,7 @@ int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm,
 	pnode->wpr_header.bin_version = pnode->lsb_header.signature.version;
 	pnode->next = plsfm->ucode_img_list;
 	plsfm->ucode_img_list = pnode;
+
 	return 0;
 }
 
@@ -1191,3 +1192,121 @@ int lsf_gen_wpr_requirements(struct gk20a *g,
 	plsfm->wpr_size = wpr_offset;
 	return 0;
 }
+
+int gp106_acr_patch_wpr_info_to_ucode(struct gk20a *g, struct nvgpu_acr *acr,
+	struct hs_acr *acr_desc, bool is_recovery)
+{
+	struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
+	struct acr_fw_header *acr_fw_hdr = NULL;
+	struct bin_hdr *acr_fw_bin_hdr = NULL;
+	struct flcn_acr_desc_v1 *acr_dmem_desc;
+	struct wpr_carveout_info wpr_inf;
+	u32 *acr_ucode_header = NULL;
+	u32 *acr_ucode_data = NULL;
+
+	nvgpu_log_fn(g, " ");
+
+	acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
+	acr_fw_hdr = (struct acr_fw_header *)
+		(acr_fw->data + acr_fw_bin_hdr->header_offset);
+
+	acr_ucode_data = (u32 *)(acr_fw->data + acr_fw_bin_hdr->data_offset);
+	acr_ucode_header = (u32 *)(acr_fw->data + acr_fw_hdr->hdr_offset);
+
+	acr->get_wpr_info(g, &wpr_inf);
+
+	acr_dmem_desc = (struct flcn_acr_desc_v1 *)
+		&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);
+
+	acr_dmem_desc->nonwpr_ucode_blob_start = wpr_inf.nonwpr_base;
+	acr_dmem_desc->nonwpr_ucode_blob_size = wpr_inf.size;
+	acr_dmem_desc->regions.no_regions = 1U;
+	acr_dmem_desc->wpr_offset = 0U;
+
+	acr_dmem_desc->wpr_region_id = 1U;
+	acr_dmem_desc->regions.region_props[0U].region_id = 1U;
+	acr_dmem_desc->regions.region_props[0U].start_addr =
+		(wpr_inf.wpr_base) >> 8U;
+	acr_dmem_desc->regions.region_props[0U].end_addr =
+		((wpr_inf.wpr_base) + wpr_inf.size) >> 8U;
+	acr_dmem_desc->regions.region_props[0U].shadowmMem_startaddress =
+		wpr_inf.nonwpr_base >> 8U;
+
+	return 0;
+}
+
+int gp106_acr_fill_bl_dmem_desc(struct gk20a *g,
+	struct nvgpu_acr *acr, struct hs_acr *acr_desc,
+	u32 *acr_ucode_header)
+{
+	struct nvgpu_mem *acr_ucode_mem = &acr_desc->acr_ucode;
+	struct flcn_bl_dmem_desc_v1 *bl_dmem_desc =
+		&acr_desc->bl_dmem_desc_v1;
+
+	nvgpu_log_fn(g, " ");
+
+	memset(bl_dmem_desc, 0U, sizeof(struct flcn_bl_dmem_desc_v1));
+
+	bl_dmem_desc->signature[0] = 0U;
+	bl_dmem_desc->signature[1] = 0U;
+	bl_dmem_desc->signature[2] = 0U;
+	bl_dmem_desc->signature[3] = 0U;
+	bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
+
+	flcn64_set_dma(&bl_dmem_desc->code_dma_base,
+		acr_ucode_mem->gpu_va);
+
+	bl_dmem_desc->non_sec_code_off = acr_ucode_header[0U];
+	bl_dmem_desc->non_sec_code_size = acr_ucode_header[1U];
+	bl_dmem_desc->sec_code_off = acr_ucode_header[5U];
+	bl_dmem_desc->sec_code_size = acr_ucode_header[6U];
+	bl_dmem_desc->code_entry_point = 0U;
+
+	flcn64_set_dma(&bl_dmem_desc->data_dma_base,
+		acr_ucode_mem->gpu_va + acr_ucode_header[2U]);
+
+	bl_dmem_desc->data_size = acr_ucode_header[3U];
+
+	return 0;
+}
+
+static void nvgpu_gp106_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
+{
+	struct hs_flcn_bl *hs_bl = &hs_acr->acr_hs_bl;
+
+	nvgpu_log_fn(g, " ");
+
+	hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;
+
+	hs_acr->acr_type = ACR_DEFAULT;
+	hs_acr->acr_fw_name = HSBIN_ACR_UCODE_IMAGE;
+
+	hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc_v1;
+	hs_acr->bl_dmem_desc_size = sizeof(struct flcn_bl_dmem_desc_v1);
+
+	hs_acr->acr_flcn = &g->sec2_flcn;
+	hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
+		gp106_sec2_setup_hw_and_bl_bootstrap;
+}
+
+void nvgpu_gp106_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
+{
+	nvgpu_log_fn(g, " ");
+
+	acr->g = g;
+
+	acr->bootstrap_owner = LSF_FALCON_ID_SEC2;
+	acr->max_supported_lsfm = MAX_SUPPORTED_LSFM;
+
+	nvgpu_gp106_acr_default_sw_init(g, &acr->acr);
+
+	acr->get_wpr_info = gp106_wpr_info;
+	acr->alloc_blob_space = gp106_alloc_blob_space;
+	acr->bootstrap_hs_acr = gm20b_bootstrap_hs_acr;
+	acr->patch_wpr_info_to_ucode =
+		gp106_acr_patch_wpr_info_to_ucode;
+	acr->acr_fill_bl_dmem_desc =
+		gp106_acr_fill_bl_dmem_desc;
+
+	acr->remove_support = gm20b_remove_acr_support;
+}
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.h b/drivers/gpu/nvgpu/gp106/acr_gp106.h
index 3fab1509..ad004bf0 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.h
@@ -63,4 +63,12 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
 int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	struct lsfm_managed_ucode_img_v2 *pnode);
+int gp106_acr_fill_bl_dmem_desc(struct gk20a *g,
+	struct nvgpu_acr *acr, struct hs_acr *acr_desc,
+	u32 *acr_ucode_header);
+int gp106_acr_patch_wpr_info_to_ucode(struct gk20a *g, struct nvgpu_acr *acr,
+	struct hs_acr *acr_desc, bool is_recovery);
+void nvgpu_gp106_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);
+
 #endif /* NVGPU_ACR_GP106_H */
+
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index e94bc1ea..048c0a45 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -800,6 +800,9 @@ static const struct gpu_ops gp106_ops = {
 		.read_vin_cal_gain_offset_fuse =
 			gp106_fuse_read_vin_cal_gain_offset_fuse,
 	},
+	.acr = {
+		.acr_sw_init = nvgpu_gp106_acr_sw_init,
+	},
 	.get_litter_value = gp106_get_litter_value,
 	.chip_init_gpu_characteristics = gp106_init_gpu_characteristics,
 };
@@ -855,6 +858,7 @@ int gp106_init_hal(struct gk20a *g)
 	gops->falcon = gp106_ops.falcon;
 	gops->priv_ring = gp106_ops.priv_ring;
 	gops->fuse = gp106_ops.fuse;
+	gops->acr = gp106_ops.acr;
 
 	/* Lone functions */
 	gops->get_litter_value = gp106_ops.get_litter_value;
@@ -875,7 +879,6 @@ int gp106_init_hal(struct gk20a *g)
 	}
 
 	g->pmu_lsf_pmu_wpr_init_done = 0;
-	g->bootstrap_owner = LSF_FALCON_ID_SEC2;
 	gops->clk.split_rail_support = true;
 	gops->clk.support_clk_freq_controller = true;
 	gops->clk.support_pmgr_domain = true;
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index dec35a91..40823b69 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -32,167 +32,61 @@
 #include <nvgpu/hw/gp106/hw_pwr_gp106.h>
 #include <nvgpu/hw/gp106/hw_psec_gp106.h>
 
-int gp106_sec2_clear_halt_interrupt_status(struct gk20a *g,
-	unsigned int timeout)
+int gp106_sec2_reset(struct gk20a *g)
 {
-	int status = 0;
-
-	if (nvgpu_flcn_clear_halt_intr_status(&g->sec2_flcn, timeout)) {
-		status = -EBUSY;
-	}
+	nvgpu_log_fn(g, " ");
 
-	return status;
-}
+	gk20a_writel(g, psec_falcon_engine_r(),
+		pwr_falcon_engine_reset_true_f());
+	nvgpu_udelay(10);
+	gk20a_writel(g, psec_falcon_engine_r(),
+		pwr_falcon_engine_reset_false_f());
 
-int gp106_sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
-{
-	u32 data = 0;
-	int completion = 0;
-
-	completion = nvgpu_flcn_wait_for_halt(&g->sec2_flcn, timeout);
-	if (completion) {
-		nvgpu_err(g, "ACR boot timed out");
-		goto exit;
-	}
-
-	g->acr.capabilities = nvgpu_flcn_mailbox_read(&g->sec2_flcn,
-		FALCON_MAILBOX_1);
-	nvgpu_pmu_dbg(g, "ACR capabilities %x\n", g->acr.capabilities);
-	data = nvgpu_flcn_mailbox_read(&g->sec2_flcn, FALCON_MAILBOX_0);
-	if (data) {
-		nvgpu_err(g, "ACR boot failed, err %x", data);
-		completion = -EAGAIN;
-		goto exit;
-	}
-
-	init_pmu_setup_hw1(g);
-
-exit:
-	if (completion) {
-		nvgpu_kill_task_pg_init(g);
-		nvgpu_pmu_state_change(g, PMU_STATE_OFF, false);
-		nvgpu_flcn_dump_stats(&g->sec2_flcn);
-	}
-
-	return completion;
+	nvgpu_log_fn(g, "done");
+	return 0;
 }
 
-int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
-	void *desc, u32 bl_sz)
+static int sec2_flcn_bl_bootstrap(struct gk20a *g,
+	struct nvgpu_falcon_bl_info *bl_info)
 {
-	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct mm_gk20a *mm = &g->mm;
-	struct nvgpu_falcon_bl_info bl_info;
-	u32 data = 0;
+	u32 data = 0U;
+	int err = 0U;
 
 	nvgpu_log_fn(g, " ");
 
 	/* SEC2 Config */
 	gk20a_writel(g, psec_falcon_itfen_r(),
 		gk20a_readl(g, psec_falcon_itfen_r()) |
 		psec_falcon_itfen_ctxen_enable_f());
 
 	gk20a_writel(g, psec_falcon_nxtctx_r(),
 		pwr_pmu_new_instblk_ptr_f(
-		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
-		pwr_pmu_new_instblk_valid_f(1) |
+		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12U) |
+		pwr_pmu_new_instblk_valid_f(1U) |
 		nvgpu_aperture_mask(g, &mm->pmu.inst_block,
 		pwr_pmu_new_instblk_target_sys_ncoh_f(),
 		pwr_pmu_new_instblk_target_sys_coh_f(),
 		pwr_pmu_new_instblk_target_fb_f()));
 
 	data = gk20a_readl(g, psec_falcon_debug1_r());
 	data |= psec_falcon_debug1_ctxsw_mode_m();
 	gk20a_writel(g, psec_falcon_debug1_r(), data);
 
 	data = gk20a_readl(g, psec_falcon_engctl_r());
-	data |= (1 << 3);
+	data |= (1U << 3U);
 	gk20a_writel(g, psec_falcon_engctl_r(), data);
 
-	bl_info.bl_src = g->acr.hsbl_ucode.cpu_va;
-	bl_info.bl_desc = desc;
-	bl_info.bl_desc_size = sizeof(struct flcn_bl_dmem_desc_v1);
-	bl_info.bl_size = bl_sz;
-	bl_info.bl_start_tag = g->acr.pmu_hsbl_desc->bl_start_tag;
-	nvgpu_flcn_bl_bootstrap(&g->sec2_flcn, &bl_info);
+	err = nvgpu_flcn_bl_bootstrap(&g->sec2_flcn, bl_info);
 
-	return 0;
+	return err;
 }
 
-void init_pmu_setup_hw1(struct gk20a *g)
+int gp106_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
+	struct hs_acr *acr_desc,
+	struct nvgpu_falcon_bl_info *bl_info)
 {
-	struct mm_gk20a *mm = &g->mm;
-	struct nvgpu_pmu *pmu = &g->pmu;
-
-	/* PMU TRANSCFG */
-	/* setup apertures - virtual */
-	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
-		pwr_fbif_transcfg_mem_type_physical_f() |
-		pwr_fbif_transcfg_target_local_fb_f());
-	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT),
-		pwr_fbif_transcfg_mem_type_virtual_f());
-	/* setup apertures - physical */
-	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_VID),
-		pwr_fbif_transcfg_mem_type_physical_f() |
-		pwr_fbif_transcfg_target_local_fb_f());
-	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_COH),
-		pwr_fbif_transcfg_mem_type_physical_f() |
-		pwr_fbif_transcfg_target_coherent_sysmem_f());
-	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_NCOH),
-		pwr_fbif_transcfg_mem_type_physical_f() |
-		pwr_fbif_transcfg_target_noncoherent_sysmem_f());
-
-	/* PMU Config */
-	gk20a_writel(g, pwr_falcon_itfen_r(),
-		gk20a_readl(g, pwr_falcon_itfen_r()) |
-		pwr_falcon_itfen_ctxen_enable_f());
-	gk20a_writel(g, pwr_pmu_new_instblk_r(),
-		pwr_pmu_new_instblk_ptr_f(
-		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
-		pwr_pmu_new_instblk_valid_f(1) |
-		nvgpu_aperture_mask(g, &mm->pmu.inst_block,
-		pwr_pmu_new_instblk_target_sys_ncoh_f(),
-		pwr_pmu_new_instblk_target_sys_coh_f(),
-		pwr_pmu_new_instblk_target_fb_f()));
-
-	/*Copying pmu cmdline args*/
-	g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu, 0);
-	g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode(pmu, 1);
-	g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
-		pmu, GK20A_PMU_TRACE_BUFSIZE);
-	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
-	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
-		pmu, GK20A_PMU_DMAIDX_VIRT);
-	if (g->ops.pmu_ver.config_pmu_cmdline_args_super_surface) {
-		g->ops.pmu_ver.config_pmu_cmdline_args_super_surface(pmu);
-	}
-
-	nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
-		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
-		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
-
-}
-
-int gp106_sec2_reset(struct gk20a *g)
-{
-	nvgpu_log_fn(g, " ");
-
-	gk20a_writel(g, psec_falcon_engine_r(),
-		pwr_falcon_engine_reset_true_f());
-	nvgpu_udelay(10);
-	gk20a_writel(g, psec_falcon_engine_r(),
-		pwr_falcon_engine_reset_false_f());
-
-	nvgpu_log_fn(g, "done");
-	return 0;
-}
-
-int init_sec2_setup_hw1(struct gk20a *g,
-	void *desc, u32 bl_sz)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	int err;
-	u32 data = 0;
+	u32 data = 0U;
 
 	nvgpu_log_fn(g, " ");
 
@@ -219,10 +113,5 @@ int init_sec2_setup_hw1(struct gk20a *g,
 		psec_fbif_transcfg_mem_type_physical_f() |
 		psec_fbif_transcfg_target_noncoherent_sysmem_f());
 
-	err = bl_bootstrap_sec2(pmu, desc, bl_sz);
-	if (err) {
-		return err;
-	}
-
-	return 0;
+	return sec2_flcn_bl_bootstrap(g, bl_info);
 }
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.h b/drivers/gpu/nvgpu/gp106/sec2_gp106.h
index b17028e7..f1cad65a 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.h
@@ -23,14 +23,10 @@
 #ifndef NVGPU_SEC2_GP106_H
 #define NVGPU_SEC2_GP106_H
 
-int gp106_sec2_clear_halt_interrupt_status(struct gk20a *g,
-	unsigned int timeout);
-int gp106_sec2_wait_for_halt(struct gk20a *g, unsigned int timeout);
-int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
-	void *desc, u32 bl_sz);
-void init_pmu_setup_hw1(struct gk20a *g);
-int init_sec2_setup_hw1(struct gk20a *g,
-	void *desc, u32 bl_sz);
 int gp106_sec2_reset(struct gk20a *g);
 
+int gp106_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
+	struct hs_acr *acr_desc,
+	struct nvgpu_falcon_bl_info *bl_info);
+
 #endif /* NVGPU_SEC2_GP106_H */
diff --git a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
index d8a430fa..759d271e 100644
--- a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
@@ -723,6 +723,9 @@ static const struct gpu_ops gp10b_ops = {
 		.read_vin_cal_slope_intercept_fuse = NULL,
 		.read_vin_cal_gain_offset_fuse = NULL,
 	},
+	.acr = {
+		.acr_sw_init = nvgpu_gm20b_acr_sw_init,
+	},
 	.chip_init_gpu_characteristics = gp10b_init_gpu_characteristics,
 	.get_litter_value = gp10b_get_litter_value,
 };
@@ -761,6 +764,7 @@ int gp10b_init_hal(struct gk20a *g)
 	gops->priv_ring = gp10b_ops.priv_ring;
 
 	gops->fuse = gp10b_ops.fuse;
+	gops->acr = gp10b_ops.acr;
 
 	/* Lone Functions */
 	gops->chip_init_gpu_characteristics =
@@ -812,7 +816,6 @@ int gp10b_init_hal(struct gk20a *g)
 
 	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
 	g->pmu_lsf_pmu_wpr_init_done = 0;
-	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
 
 	g->name = "gp10b";
 
diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c
index 85fc1915..45c3adb3 100644
--- a/drivers/gpu/nvgpu/gv100/hal_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c
@@ -930,6 +930,9 @@ static const struct gpu_ops gv100_ops = {
 		.set_nvhsclk_ctrl_swap_clk_nvl =
 			gv100_top_set_nvhsclk_ctrl_swap_clk_nvl,
 	},
+	.acr = {
+		.acr_sw_init = nvgpu_gp106_acr_sw_init,
+	},
 	.chip_init_gpu_characteristics = gv100_init_gpu_characteristics,
 	.get_litter_value = gv100_get_litter_value,
 };
@@ -969,6 +972,7 @@ int gv100_init_hal(struct gk20a *g)
 	gops->fuse = gv100_ops.fuse;
 	gops->nvlink = gv100_ops.nvlink;
 	gops->top = gv100_ops.top;
+	gops->acr = gv100_ops.acr;
 
 	/* clocks */
 	gops->clk.init_clk_support = gv100_ops.clk.init_clk_support;
@@ -995,7 +999,6 @@ int gv100_init_hal(struct gk20a *g)
 	__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true);
 
 	g->pmu_lsf_pmu_wpr_init_done = 0;
-	g->bootstrap_owner = LSF_FALCON_ID_SEC2;
 	gops->clk.split_rail_support = false;
 	gops->clk.support_clk_freq_controller = false;
 	gops->clk.support_pmgr_domain = false;
diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
index f5ca144a..de2502ce 100644
--- a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
@@ -38,6 +38,7 @@
 #include "acr_gv11b.h"
 #include "pmu_gv11b.h"
 #include "gm20b/mm_gm20b.h"
+#include "gm20b/pmu_gm20b.h"
 #include "gm20b/acr_gm20b.h"
 #include "gp106/acr_gp106.h"
 
@@ -68,6 +69,8 @@ void gv11b_setup_apertures(struct gk20a *g)
 	struct mm_gk20a *mm = &g->mm;
 	struct nvgpu_mem *inst_block = &mm->pmu.inst_block;
 
+	nvgpu_log_fn(g, " ");
+
 	/* setup apertures - virtual */
 	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
 		pwr_fbif_transcfg_mem_type_physical_f() |
@@ -91,3 +94,87 @@ void gv11b_setup_apertures(struct gk20a *g)
 		pwr_fbif_transcfg_mem_type_physical_f() |
 		pwr_fbif_transcfg_target_noncoherent_sysmem_f());
 }
+
+int gv11b_acr_patch_wpr_info_to_ucode(struct gk20a *g, struct nvgpu_acr *acr,
+	struct hs_acr *acr_desc, bool is_recovery)
+{
+	struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
+	struct acr_fw_header *acr_fw_hdr = NULL;
+	struct bin_hdr *acr_fw_bin_hdr = NULL;
+	struct flcn_acr_desc_v1 *acr_dmem_desc;
+	u32 *acr_ucode_header = NULL;
+	u32 *acr_ucode_data = NULL;
+
+	nvgpu_log_fn(g, " ");
+
+	if (is_recovery) {
+		acr_desc->acr_dmem_desc_v1->nonwpr_ucode_blob_size = 0U;
+	} else {
+		acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
+		acr_fw_hdr = (struct acr_fw_header *)
+			(acr_fw->data + acr_fw_bin_hdr->header_offset);
+
+		acr_ucode_data = (u32 *)(acr_fw->data +
+			acr_fw_bin_hdr->data_offset);
+		acr_ucode_header = (u32 *)(acr_fw->data +
+			acr_fw_hdr->hdr_offset);
+
+		/* During recovery need to update blob size as 0x0*/
+		acr_desc->acr_dmem_desc_v1 = (struct flcn_acr_desc_v1 *)
+			((u8 *)(acr_desc->acr_ucode.cpu_va) +
+			acr_ucode_header[2U]);
+
+		/* Patch WPR info to ucode */
+		acr_dmem_desc = (struct flcn_acr_desc_v1 *)
+			&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);
+
+		acr_dmem_desc->nonwpr_ucode_blob_start =
+			nvgpu_mem_get_addr(g, &g->acr.ucode_blob);
+		acr_dmem_desc->nonwpr_ucode_blob_size =
+			g->acr.ucode_blob.size;
+		acr_dmem_desc->regions.no_regions = 1U;
+		acr_dmem_desc->wpr_offset = 0U;
+	}
+
+	return 0;
+}
+
+static void gv11b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
+{
+	struct hs_flcn_bl *hs_bl = &hs_acr->acr_hs_bl;
+
+	nvgpu_log_fn(g, " ");
+
+	hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;
+
+	hs_acr->acr_type = ACR_DEFAULT;
+	hs_acr->acr_fw_name = HSBIN_ACR_UCODE_IMAGE;
+
+	hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc_v1;
+	hs_acr->bl_dmem_desc_size = sizeof(struct flcn_bl_dmem_desc_v1);
+
+	hs_acr->acr_flcn = &g->pmu_flcn;
+	hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
+		gm20b_pmu_setup_hw_and_bl_bootstrap;
+}
+
+void nvgpu_gv11b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
+{
+	nvgpu_log_fn(g, " ");
+
+	acr->g = g;
+
+	acr->bootstrap_owner = LSF_FALCON_ID_PMU;
+	acr->max_supported_lsfm = MAX_SUPPORTED_LSFM;
+
+	gv11b_acr_default_sw_init(g, &acr->acr);
+
+	acr->get_wpr_info = gm20b_wpr_info;
+	acr->alloc_blob_space = gv11b_alloc_blob_space;
+	acr->bootstrap_hs_acr = gm20b_bootstrap_hs_acr;
+	acr->patch_wpr_info_to_ucode = gv11b_acr_patch_wpr_info_to_ucode;
+	acr->acr_fill_bl_dmem_desc =
+		gp106_acr_fill_bl_dmem_desc;
+
+	acr->remove_support = gm20b_remove_acr_support;
+}
diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.h b/drivers/gpu/nvgpu/gv11b/acr_gv11b.h
index 8529e821..99fe3ea3 100644
--- a/drivers/gpu/nvgpu/gv11b/acr_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.h
@@ -30,4 +30,9 @@ int gv11b_init_pmu_setup_hw1(struct gk20a *g,
 void gv11b_setup_apertures(struct gk20a *g);
 int gv11b_alloc_blob_space(struct gk20a *g, size_t size,
 	struct nvgpu_mem *mem);
+
+void nvgpu_gv11b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);
+int gv11b_acr_patch_wpr_info_to_ucode(struct gk20a *g, struct nvgpu_acr *acr,
+	struct hs_acr *acr_desc, bool is_recovery);
 #endif /* NVGPU_ACR_GV11B_H */
+
diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
index 0d9f65bf..18b00ea4 100644
--- a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
@@ -823,6 +823,9 @@ static const struct gpu_ops gv11b_ops = {
 		.read_vin_cal_slope_intercept_fuse = NULL,
 		.read_vin_cal_gain_offset_fuse = NULL,
 	},
+	.acr = {
+		.acr_sw_init = nvgpu_gv11b_acr_sw_init,
+	},
 	.chip_init_gpu_characteristics = gv11b_init_gpu_characteristics,
 	.get_litter_value = gv11b_get_litter_value,
 };
@@ -858,6 +861,7 @@ int gv11b_init_hal(struct gk20a *g)
 	gops->priv_ring = gv11b_ops.priv_ring;
 	gops->fuse = gv11b_ops.fuse;
 	gops->clk_arb = gv11b_ops.clk_arb;
+	gops->acr = gv11b_ops.acr;
 
 	/* Lone functions */
 	gops->chip_init_gpu_characteristics =
@@ -903,7 +907,6 @@ int gv11b_init_hal(struct gk20a *g)
 
 	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
 	__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);
-	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
 
 	__nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, false);
 
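With .acr_sw_init published in the HAL table, common code can set up g->acr without touching PMU state. A hedged sketch of the early call site (per the commit message, nvgpu_init_mm_support() already relies on ACR blob allocation, so this must run before MM init; the wrapper name is hypothetical):

    /* hypothetical wrapper: wire chip ACR ops before MM asks for blob space */
    static void example_acr_early_init(struct gk20a *g)
    {
    	if (g->ops.acr.acr_sw_init != NULL) {
    		g->ops.acr.acr_sw_init(g, &g->acr);
    	}
    }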
diff --git a/drivers/gpu/nvgpu/include/nvgpu/acr/nvgpu_acr.h b/drivers/gpu/nvgpu/include/nvgpu/acr/nvgpu_acr.h
index a9ed6e68..5fb26e1a 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/acr/nvgpu_acr.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/acr/nvgpu_acr.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,8 @@
 #ifndef __NVGPU_ACR_H__
 #define __NVGPU_ACR_H__
 
+#include <nvgpu/falcon.h>
+
 #include "gk20a/mm_gk20a.h"
 
 #include "acr_lsfm.h"
@@ -31,6 +33,13 @@
 #include "acr_objflcn.h"
 
 struct nvgpu_firmware;
+struct gk20a;
+struct hs_acr_ops;
+struct hs_acr;
+struct nvgpu_acr;
+
+#define HSBIN_ACR_BL_UCODE_IMAGE "pmu_bl.bin"
+#define HSBIN_ACR_UCODE_IMAGE "acr_ucode.bin"
 
 #define MAX_SUPPORTED_LSFM 3 /*PMU, FECS, GPCCS*/
 
@@ -77,29 +86,94 @@ struct wpr_carveout_info {
 	u64 size;
 };
 
-struct acr_desc {
-	struct nvgpu_mem ucode_blob;
-	struct nvgpu_mem wpr_dummy;
-	struct bin_hdr *bl_bin_hdr;
-	struct hsflcn_bl_desc *pmu_hsbl_desc;
-	struct bin_hdr *hsbin_hdr;
-	struct acr_fw_header *fw_hdr;
-	u32 pmu_args;
+/* ACR interfaces */
+
+struct hs_flcn_bl {
+	char *bl_fw_name;
+	struct nvgpu_firmware *hs_bl_fw;
+	struct hsflcn_bl_desc *hs_bl_desc;
+	struct bin_hdr *hs_bl_bin_hdr;
+	struct nvgpu_mem hs_bl_ucode;
+};
+
+struct hs_acr {
+	u32 acr_type;
+
+	/* HS bootloader to validate & load ACR ucode */
+	struct hs_flcn_bl acr_hs_bl;
+
+	/* ACR ucode */
+	char *acr_fw_name;
 	struct nvgpu_firmware *acr_fw;
-	union{
-		struct flcn_acr_desc *acr_dmem_desc;
-		struct flcn_acr_desc_v1 *acr_dmem_desc_v1;
-	};
 	struct nvgpu_mem acr_ucode;
-	struct nvgpu_firmware *hsbl_fw;
-	struct nvgpu_mem hsbl_ucode;
+
 	union {
 		struct flcn_bl_dmem_desc bl_dmem_desc;
 		struct flcn_bl_dmem_desc_v1 bl_dmem_desc_v1;
 	};
+
+	void *ptr_bl_dmem_desc;
+	u32 bl_dmem_desc_size;
+
+	union{
+		struct flcn_acr_desc *acr_dmem_desc;
+		struct flcn_acr_desc_v1 *acr_dmem_desc_v1;
+	};
+
+	/* Falcon used to execute ACR ucode */
+	struct nvgpu_falcon *acr_flcn;
+
+	int (*acr_flcn_setup_hw_and_bl_bootstrap)(struct gk20a *g,
+		struct hs_acr *acr_desc,
+		struct nvgpu_falcon_bl_info *bl_info);
+};
+
+#define ACR_DEFAULT 0U
+#define ACR_AHESASC 1U
+#define ACR_ASB 2U
+
+struct nvgpu_acr {
+	struct gk20a *g;
+
+	u32 bootstrap_owner;
+	u32 max_supported_lsfm;
+	u32 capabilities;
+
+	/*
+	 * non-wpr space to hold LSF ucodes,
+	 * ACR does copy ucode from non-wpr to wpr
+	 */
+	struct nvgpu_mem ucode_blob;
+	/*
+	 * Even though this mem_desc wouldn't be used,
+	 * the wpr region needs to be reserved in the
+	 * allocator in dGPU case.
+	 */
+	struct nvgpu_mem wpr_dummy;
+
+	/* ACR members for different types of ucode */
+	/* For older dgpu/tegra ACR ucode */
+	struct hs_acr acr;
+	/* ACR load split feature support */
+	struct hs_acr acr_ahesasc;
+	struct hs_acr acr_asb;
+
+	u32 pmu_args;
 	struct nvgpu_firmware *pmu_fw;
 	struct nvgpu_firmware *pmu_desc;
-	u32 capabilities;
-};
 
+	int (*prepare_ucode_blob)(struct gk20a *g, struct nvgpu_acr *acr);
+	void (*get_wpr_info)(struct gk20a *g, struct wpr_carveout_info *inf);
+	int (*alloc_blob_space)(struct gk20a *g, size_t size,
+		struct nvgpu_mem *mem);
+	int (*patch_wpr_info_to_ucode)(struct gk20a *g, struct nvgpu_acr *acr,
+		struct hs_acr *acr_desc, bool is_recovery);
+	int (*acr_fill_bl_dmem_desc)(struct gk20a *g,
+		struct nvgpu_acr *acr, struct hs_acr *acr_desc,
+		u32 *acr_ucode_header);
+	int (*bootstrap_hs_acr)(struct gk20a *g, struct nvgpu_acr *acr,
+		struct hs_acr *acr_desc);
+
+	void (*remove_support)(struct nvgpu_acr *acr);
+};
 #endif /*__NVGPU_ACR_H__*/
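Taken together, struct nvgpu_acr lets a generic caller drive ACR purely through its ops table, with no nvgpu_pmu dependency. A minimal sketch of the intended flow, assuming prepare_ucode_blob was assigned by the chip's acr_sw_init (the example_* name is hypothetical):

    static int example_acr_load(struct gk20a *g)
    {
    	struct nvgpu_acr *acr = &g->acr;
    	int err;

    	/* fill the non-WPR blob with LSF ucodes */
    	err = acr->prepare_ucode_blob(g, acr);
    	if (err != 0) {
    		return err;
    	}

    	/* validate, load and run the default HS ACR on its falcon */
    	return acr->bootstrap_hs_acr(g, acr, &acr->acr);
    }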
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
index 6f57fddc..892aa9af 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
@@ -1054,9 +1054,6 @@ struct gpu_ops {
 		bool (*is_engine_in_reset)(struct gk20a *g);
 		bool (*is_lazy_bootstrap)(u32 falcon_id);
 		bool (*is_priv_load)(u32 falcon_id);
-		void (*get_wpr)(struct gk20a *g, struct wpr_carveout_info *inf);
-		int (*alloc_blob_space)(struct gk20a *g,
-			size_t size, struct nvgpu_mem *mem);
 		int (*pmu_populate_loader_cfg)(struct gk20a *g,
 			void *lsfm, u32 *p_bl_gen_desc_size);
 		int (*flcn_populate_bl_dmem_desc)(struct gk20a *g,
@@ -1318,6 +1315,9 @@ struct gpu_ops {
 		u32 (*get_nvhsclk_ctrl_swap_clk_nvl)(struct gk20a *g);
 		void (*set_nvhsclk_ctrl_swap_clk_nvl)(struct gk20a *g, u32 val);
 	} top;
+	struct {
+		void (*acr_sw_init)(struct gk20a *g, struct nvgpu_acr *acr);
+	} acr;
 	void (*semaphore_wakeup)(struct gk20a *g, bool post_events);
 };
 
@@ -1429,7 +1429,7 @@ struct gk20a {
 	struct sim_nvgpu *sim;
 	struct mm_gk20a mm;
 	struct nvgpu_pmu pmu;
-	struct acr_desc acr;
+	struct nvgpu_acr acr;
 	struct nvgpu_ecc ecc;
 	struct clk_pmupstate clk_pmu;
 	struct perf_pmupstate perf_pmu;
@@ -1477,7 +1477,6 @@ struct gk20a {
 	u32 disable_syncpoints;
 
 	bool support_pmu;
-	u32 bootstrap_owner;
 
 	bool is_virtual;
 
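The bootstrap_owner field dropped here from struct gk20a reappears as ACR state (see struct nvgpu_acr above), assigned per chip by acr_sw_init, e.g. LSF_FALCON_ID_PMU on gv11b. A hedged illustration for callers that previously read it off the device struct:

    /* hedged illustration: the owner is now ACR state, not device state */
    static u32 example_get_bootstrap_owner(struct gk20a *g)
    {
    	return g->acr.bootstrap_owner;	/* formerly g->bootstrap_owner */
    }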
diff --git a/drivers/gpu/nvgpu/os/linux/module.c b/drivers/gpu/nvgpu/os/linux/module.c
index dbc97f95..1fd7f544 100644
--- a/drivers/gpu/nvgpu/os/linux/module.c
+++ b/drivers/gpu/nvgpu/os/linux/module.c
@@ -683,6 +683,10 @@ void gk20a_remove_support(struct gk20a *g)
 	if (g->pmu.remove_support)
 		g->pmu.remove_support(&g->pmu);
 
+	if (g->acr.remove_support != NULL) {
+		g->acr.remove_support(&g->acr);
+	}
+
 	if (g->gr.remove_support)
 		g->gr.remove_support(&g->gr);
 
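The NULL-checked teardown mirrors the PMU hook just above it: acr_sw_init publishes remove_support, so chips that never ran ACR setup leave it NULL and are skipped. A sketch of a matching hook a chip might install (the body is an assumption, not this patch's gm20b_remove_acr_support):

    static void example_remove_acr_support(struct nvgpu_acr *acr)
    {
    	/* assumed cleanup: drop HS bootloader and ACR firmware references */
    	nvgpu_release_firmware(acr->g, acr->acr.acr_hs_bl.hs_bl_fw);
    	nvgpu_release_firmware(acr->g, acr->acr.acr_fw);
    }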
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c
index 56a6b01a..9b3b4ed5 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c
@@ -548,6 +548,9 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.read_vin_cal_slope_intercept_fuse = NULL,
 		.read_vin_cal_gain_offset_fuse = NULL,
 	},
+	.acr = {
+		.acr_sw_init = nvgpu_gm20b_acr_sw_init,
+	},
 	.chip_init_gpu_characteristics = vgpu_init_gpu_characteristics,
 	.get_litter_value = gp10b_get_litter_value,
 };
@@ -585,6 +588,7 @@ int vgpu_gp10b_init_hal(struct gk20a *g)
 	gops->priv_ring = vgpu_gp10b_ops.priv_ring;
 
 	gops->fuse = vgpu_gp10b_ops.fuse;
+	gops->acr = vgpu_gp10b_ops.acr;
 
 	/* Lone Functions */
 	gops->chip_init_gpu_characteristics =
@@ -631,7 +635,6 @@ int vgpu_gp10b_init_hal(struct gk20a *g)
 
 	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
 	g->pmu_lsf_pmu_wpr_init_done = 0;
-	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
 
 	g->name = "gp10b";
 
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index bcd352e7..9ad0468e 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -217,6 +217,10 @@ void vgpu_remove_support_common(struct gk20a *g)
 	if (g->pmu.remove_support)
 		g->pmu.remove_support(&g->pmu);
 
+	if (g->acr.remove_support != NULL) {
+		g->acr.remove_support(&g->acr);
+	}
+
 	if (g->gr.remove_support)
 		g->gr.remove_support(&g->gr);
 