From 74639b444251d7adc222400625eb59a3d53d0c0a Mon Sep 17 00:00:00 2001
From: Debarshi Dutta
Date: Wed, 22 Aug 2018 09:57:01 +0530
Subject: gpu: nvgpu: invoke calls to methods in pmu_gk20a.h via HAL

The nvgpu repository has multiple call sites for methods in pmu_gk20a.h
that perform register accesses. Instead of being invoked directly,
these methods are now called via HALs. Common methods such as
pmu_wait_message_cond, which do not perform any register accesses, are
moved to pmu_ipc.c, and their declarations are moved to pmu.h.

Also, gm20b_dbg_pmu is renamed to nvgpu_pmu_dbg across the code base.

This removes all indirect dependencies on pmu_gk20a.h via gk20a.h. As a
result, pmu_gk20a.h is no longer included from gk20a.h.

JIRA-597

Change-Id: Id54b2684ca39362fda7626238c3116cd49e92080
Signed-off-by: Debarshi Dutta
Reviewed-on: https://git-master.nvidia.com/r/1804283
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 129 +++++++++++++++++-------------------
 1 file changed, 62 insertions(+), 67 deletions(-)

(limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')

diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 916e7a66..9725ebe7 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -37,16 +37,11 @@
 #include

 #include "gk20a/gk20a.h"
-#include "gk20a/pmu_gk20a.h"

 #include "mm_gm20b.h"
 #include "acr_gm20b.h"

 #include

-/*Defines*/
-#define gm20b_dbg_pmu(g, fmt, arg...) \
-	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
-
 typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata);

 /*Externs*/
@@ -80,7 +75,7 @@ static void start_gm20b_pmu(struct gk20a *g)
 {
 	/*disable irqs for hs falcon booting as we will poll for halt*/
 	nvgpu_mutex_acquire(&g->pmu.isr_mutex);
-	pmu_enable_irq(&g->pmu, true);
+	g->ops.pmu.pmu_enable_irq(&g->pmu, true);
 	g->pmu.isr_enabled = true;
 	nvgpu_mutex_release(&g->pmu.isr_mutex);
 	gk20a_writel(g, pwr_falcon_cpuctl_alias_r(),
@@ -103,16 +98,16 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct lsf_ucode_desc *lsf_desc;
 	int err;
-	gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B\n");
+	nvgpu_pmu_dbg(g, "requesting PMU ucode in GM20B\n");
 	pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0);
 	if (!pmu_fw) {
 		nvgpu_err(g, "failed to load pmu ucode!!");
 		return -ENOENT;
 	}
 	g->acr.pmu_fw = pmu_fw;
-	gm20b_dbg_pmu(g, "Loaded PMU ucode in for blob preparation");
+	nvgpu_pmu_dbg(g, "Loaded PMU ucode in for blob preparation");

-	gm20b_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
+	nvgpu_pmu_dbg(g, "requesting PMU ucode desc in GM20B\n");
 	pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0);
 	if (!pmu_desc) {
 		nvgpu_err(g, "failed to load pmu ucode desc!!");
@@ -131,7 +126,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)

 	err = nvgpu_init_pmu_fw_support(pmu);
 	if (err) {
-		gm20b_dbg_pmu(g, "failed to set function pointers\n");
+		nvgpu_pmu_dbg(g, "failed to set function pointers\n");
 		goto release_sig;
 	}
@@ -150,7 +145,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n");
+	nvgpu_pmu_dbg(g, "requesting PMU ucode in GM20B exit\n");
 	nvgpu_release_firmware(g, pmu_sig);
 	return 0;
 release_sig:
@@ -223,7 +218,7 @@ static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu(g, "fecs fw loaded\n");
+	nvgpu_pmu_dbg(g, "fecs fw loaded\n");
 	nvgpu_release_firmware(g, fecs_sig);
 	return 0;
 free_lsf_desc:
@@ -295,7 +290,7 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	p_img->fw_ver = NULL;
 	p_img->header = NULL;
 	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
-	gm20b_dbg_pmu(g, "gpccs fw loaded\n");
+	nvgpu_pmu_dbg(g, "gpccs fw loaded\n");
 	nvgpu_release_firmware(g, gpccs_sig);
 	return 0;
 free_lsf_desc:
@@ -364,24 +359,24 @@ int prepare_ucode_blob(struct gk20a *g)
 		   non WPR blob of ucodes*/
 		err = nvgpu_init_pmu_fw_support(pmu);
 		if (err) {
-			gm20b_dbg_pmu(g, "failed to set function pointers\n");
+			nvgpu_pmu_dbg(g, "failed to set function pointers\n");
 			return err;
 		}
 		return 0;
 	}
 	plsfm = &lsfm_l;
 	memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr));
-	gm20b_dbg_pmu(g, "fetching GMMU regs\n");
+	nvgpu_pmu_dbg(g, "fetching GMMU regs\n");
 	g->ops.fb.vpr_info_fetch(g);
 	gr_gk20a_init_ctxsw_ucode(g);

 	g->ops.pmu.get_wpr(g, &wpr_inf);
-	gm20b_dbg_pmu(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
-	gm20b_dbg_pmu(g, "wpr carveout size :%llx\n", wpr_inf.size);
+	nvgpu_pmu_dbg(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
+	nvgpu_pmu_dbg(g, "wpr carveout size :%llx\n", wpr_inf.size);

 	/* Discover all managed falcons*/
 	err = lsfm_discover_ucode_images(g, plsfm);
-	gm20b_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
+	nvgpu_pmu_dbg(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
 	if (err) {
 		goto free_sgt;
 	}
@@ -400,13 +395,13 @@ int prepare_ucode_blob(struct gk20a *g)
 			goto free_sgt;
 		}

-		gm20b_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
+		nvgpu_pmu_dbg(g, "managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);

 		lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
 	} else {
-		gm20b_dbg_pmu(g, "LSFM is managing no falcons.\n");
+		nvgpu_pmu_dbg(g, "LSFM is managing no falcons.\n");
 	}
-	gm20b_dbg_pmu(g, "prepare ucode blob return 0\n");
+	nvgpu_pmu_dbg(g, "prepare ucode blob return 0\n");
 	free_acr_resources(g, plsfm);

 free_sgt:
 	return err;
@@ -452,13 +447,13 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 			plsfm->managed_flcn_cnt++;
 		} else {
-			gm20b_dbg_pmu(g, "id not managed %d\n",
+			nvgpu_pmu_dbg(g, "id not managed %d\n",
 				ucode_img.lsf_desc->falcon_id);
 		}

 		/*Free any ucode image resources if not managing this falcon*/
 		if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) {
-			gm20b_dbg_pmu(g, "pmu is not LSFM managed\n");
+			nvgpu_pmu_dbg(g, "pmu is not LSFM managed\n");
 			lsfm_free_ucode_img_res(g, &ucode_img);
 		}
@@ -490,7 +485,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 					plsfm->managed_flcn_cnt++;
 				}
 			} else {
-				gm20b_dbg_pmu(g, "not managed %d\n",
+				nvgpu_pmu_dbg(g, "not managed %d\n",
 					ucode_img.lsf_desc->falcon_id);
 				lsfm_free_nonpmu_ucode_img_res(g,
 					&ucode_img);
 			}
@@ -498,7 +493,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
 			}
 		} else {
 			/* Consumed all available falcon objects */
-			gm20b_dbg_pmu(g, "Done checking for ucodes %d\n", i);
+			nvgpu_pmu_dbg(g, "Done checking for ucodes %d\n", i);
 			break;
 		}
 	}
@@ -539,26 +534,26 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	addr_base = p_lsfm->lsb_header.ucode_off;
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;
-	gm20b_dbg_pmu(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
+	nvgpu_pmu_dbg(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
 	/*From linux*/
 	addr_code = u64_lo32((addr_base +
 				desc->app_start_offset +
 				desc->app_resident_code_offset) >> 8);
-	gm20b_dbg_pmu(g, "app start %d app res code off %d\n",
+	nvgpu_pmu_dbg(g, "app start %d app res code off %d\n",
 		desc->app_start_offset, desc->app_resident_code_offset);
 	addr_data = u64_lo32((addr_base +
 				desc->app_start_offset +
 				desc->app_resident_data_offset) >> 8);
-	gm20b_dbg_pmu(g, "app res data offset%d\n",
+	nvgpu_pmu_dbg(g, "app res data offset%d\n",
 		desc->app_resident_data_offset);
-	gm20b_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset);
+	nvgpu_pmu_dbg(g, "bl start off %d\n", desc->bootloader_start_offset);

 	addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
 			gk20a_readl(g, pwr_falcon_hwcfg_r())))
 			<< GK20A_PMU_DMEM_BLKSIZE2);

 	addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
-	gm20b_dbg_pmu(g, "addr_args %x\n", addr_args);
+	nvgpu_pmu_dbg(g, "addr_args %x\n", addr_args);

 	/* Populate the loader_config state*/
 	ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE;
@@ -616,7 +611,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 	g->ops.pmu.get_wpr(g, &wpr_inf);
 	addr_base += wpr_inf.wpr_base;

-	gm20b_dbg_pmu(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
+	nvgpu_pmu_dbg(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
 		p_lsfm->wpr_header.falcon_id);
 	addr_code = u64_lo32((addr_base +
 			desc->app_start_offset +
@@ -625,7 +620,7 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 			desc->app_start_offset +
 			desc->app_resident_data_offset) >> 8);

-	gm20b_dbg_pmu(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
+	nvgpu_pmu_dbg(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
 		(u32)addr_code, (u32)addr_data, desc->bootloader_start_offset,
 		p_lsfm->wpr_header.falcon_id);
@@ -648,7 +643,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	struct nvgpu_pmu *pmu = &g->pmu;

 	if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
-		gm20b_dbg_pmu(g, "non pmu. write flcn bl gen desc\n");
+		nvgpu_pmu_dbg(g, "non pmu. write flcn bl gen desc\n");
 		g->ops.pmu.flcn_populate_bl_dmem_desc(g,
 				pnode, &pnode->bl_gen_desc_size,
 				pnode->wpr_header.falcon_id);
@@ -656,7 +651,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	}

 	if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
-		gm20b_dbg_pmu(g, "pmu write flcn bl gen desc\n");
+		nvgpu_pmu_dbg(g, "pmu write flcn bl gen desc\n");
 		if (pnode->wpr_header.falcon_id == pmu->falcon_id) {
 			return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
 				&pnode->bl_gen_desc_size);
@@ -690,46 +685,46 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 		nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
 				&pnode->wpr_header, sizeof(pnode->wpr_header));

-		gm20b_dbg_pmu(g, "wpr header");
-		gm20b_dbg_pmu(g, "falconid :%d",
+		nvgpu_pmu_dbg(g, "wpr header");
+		nvgpu_pmu_dbg(g, "falconid :%d",
 			pnode->wpr_header.falcon_id);
-		gm20b_dbg_pmu(g, "lsb_offset :%x",
+		nvgpu_pmu_dbg(g, "lsb_offset :%x",
 			pnode->wpr_header.lsb_offset);
-		gm20b_dbg_pmu(g, "bootstrap_owner :%d",
+		nvgpu_pmu_dbg(g, "bootstrap_owner :%d",
 			pnode->wpr_header.bootstrap_owner);
-		gm20b_dbg_pmu(g, "lazy_bootstrap :%d",
+		nvgpu_pmu_dbg(g, "lazy_bootstrap :%d",
 			pnode->wpr_header.lazy_bootstrap);
-		gm20b_dbg_pmu(g, "status :%d",
+		nvgpu_pmu_dbg(g, "status :%d",
 			pnode->wpr_header.status);

 		/*Flush LSB header to memory*/
 		nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
 				&pnode->lsb_header, sizeof(pnode->lsb_header));

-		gm20b_dbg_pmu(g, "lsb header");
-		gm20b_dbg_pmu(g, "ucode_off :%x",
+		nvgpu_pmu_dbg(g, "lsb header");
+		nvgpu_pmu_dbg(g, "ucode_off :%x",
 			pnode->lsb_header.ucode_off);
-		gm20b_dbg_pmu(g, "ucode_size :%x",
+		nvgpu_pmu_dbg(g, "ucode_size :%x",
 			pnode->lsb_header.ucode_size);
-		gm20b_dbg_pmu(g, "data_size :%x",
+		nvgpu_pmu_dbg(g, "data_size :%x",
 			pnode->lsb_header.data_size);
-		gm20b_dbg_pmu(g, "bl_code_size :%x",
+		nvgpu_pmu_dbg(g, "bl_code_size :%x",
 			pnode->lsb_header.bl_code_size);
-		gm20b_dbg_pmu(g, "bl_imem_off :%x",
+		nvgpu_pmu_dbg(g, "bl_imem_off :%x",
 			pnode->lsb_header.bl_imem_off);
-		gm20b_dbg_pmu(g, "bl_data_off :%x",
+		nvgpu_pmu_dbg(g, "bl_data_off :%x",
 			pnode->lsb_header.bl_data_off);
-		gm20b_dbg_pmu(g, "bl_data_size :%x",
+		nvgpu_pmu_dbg(g, "bl_data_size :%x",
 			pnode->lsb_header.bl_data_size);
-		gm20b_dbg_pmu(g, "app_code_off :%x",
+		nvgpu_pmu_dbg(g, "app_code_off :%x",
 			pnode->lsb_header.app_code_off);
-		gm20b_dbg_pmu(g, "app_code_size :%x",
+		nvgpu_pmu_dbg(g, "app_code_size :%x",
 			pnode->lsb_header.app_code_size);
-		gm20b_dbg_pmu(g, "app_data_off :%x",
+		nvgpu_pmu_dbg(g, "app_data_off :%x",
 			pnode->lsb_header.app_data_off);
-		gm20b_dbg_pmu(g, "app_data_size :%x",
+		nvgpu_pmu_dbg(g, "app_data_size :%x",
 			pnode->lsb_header.app_data_size);
-		gm20b_dbg_pmu(g, "flags :%x",
+		nvgpu_pmu_dbg(g, "flags :%x",
 			pnode->lsb_header.flags);

 		/*If this falcon has a boot loader and related args,
@@ -1049,7 +1044,7 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 	start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
 	size = acr->ucode_blob.size;

-	gm20b_dbg_pmu(g, " ");
+	nvgpu_pmu_dbg(g, " ");

 	if (!acr_fw) {
 		/*First time init case*/
@@ -1163,14 +1158,14 @@ int acr_ucode_patch_sig(struct gk20a *g,
 		unsigned int *p_patch_ind)
 {
 	unsigned int i, *p_sig;
-	gm20b_dbg_pmu(g, " ");
+	nvgpu_pmu_dbg(g, " ");

 	if (!pmu_is_debug_mode_en(g)) {
 		p_sig = p_prod_sig;
-		gm20b_dbg_pmu(g, "PRODUCTION MODE\n");
+		nvgpu_pmu_dbg(g, "PRODUCTION MODE\n");
 	} else {
 		p_sig = p_dbg_sig;
-		gm20b_dbg_pmu(g, "DEBUG MODE\n");
+		nvgpu_pmu_dbg(g, "DEBUG MODE\n");
 	}

 	/* Patching logic:*/
@@ -1303,7 +1298,7 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 	/*disable irqs for hs falcon booting as we will poll for halt*/
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	pmu_enable_irq(pmu, false);
+	g->ops.pmu.pmu_enable_irq(pmu, false);
 	pmu->isr_enabled = false;
 	nvgpu_mutex_release(&pmu->isr_mutex);
 	/*Clearing mailbox register used to reflect capabilities*/
@@ -1335,7 +1330,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 	struct nvgpu_firmware *hsbl_fw = acr->hsbl_fw;
 	struct hsflcn_bl_desc *pmu_bl_gm10x_desc;
 	u32 *pmu_bl_gm10x = NULL;
-	gm20b_dbg_pmu(g, " ");
+	nvgpu_pmu_dbg(g, " ");

 	if (!hsbl_fw) {
 		hsbl_fw = nvgpu_request_firmware(g,
@@ -1354,7 +1349,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		bl_sz = ALIGN(pmu_bl_gm10x_desc->bl_img_hdr.bl_code_size,
 				256);
 		acr->hsbl_ucode.size = bl_sz;
-		gm20b_dbg_pmu(g, "Executing Generic Bootloader\n");
+		nvgpu_pmu_dbg(g, "Executing Generic Bootloader\n");

 		/*TODO in code verify that enable PMU is done,
			scrubbing etc is done*/
@@ -1377,7 +1372,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		}

 		nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz);
-		gm20b_dbg_pmu(g, "Copied bl ucode to bl_cpuva\n");
+		nvgpu_pmu_dbg(g, "Copied bl ucode to bl_cpuva\n");
 	}
 	/*
 	 * Disable interrupts to avoid kernel hitting breakpoint due
@@ -1389,9 +1384,9 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 		goto err_unmap_bl;
 	}

-	gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
+	nvgpu_pmu_dbg(g, "phys sec reg %x\n", gk20a_readl(g,
 		pwr_falcon_mmu_phys_sec_r()));
-	gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
+	nvgpu_pmu_dbg(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));

 	g->ops.pmu.init_falcon_setup_hw(g, desc, acr->hsbl_ucode.size);
@@ -1409,10 +1404,10 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 			goto err_unmap_bl;
 		}
 	}
-	gm20b_dbg_pmu(g, "after waiting for halt, err %x\n", err);
-	gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
+	nvgpu_pmu_dbg(g, "after waiting for halt, err %x\n", err);
+	nvgpu_pmu_dbg(g, "phys sec reg %x\n", gk20a_readl(g,
 		pwr_falcon_mmu_phys_sec_r()));
-	gm20b_dbg_pmu(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
+	nvgpu_pmu_dbg(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
 	start_gm20b_pmu(g);
 	return 0;
 err_unmap_bl:
@@ -1443,7 +1438,7 @@ int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
 	}

 	g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r());
-	gm20b_dbg_pmu(g, "ACR capabilities %x\n", g->acr.capabilities);
+	nvgpu_pmu_dbg(g, "ACR capabilities %x\n", g->acr.capabilities);
 	data = gk20a_readl(g, pwr_falcon_mailbox0_r());
 	if (data) {
 		nvgpu_err(g, "ACR boot failed, err %x", data);
--
cgit v1.2.2
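
For readers unfamiliar with the HAL indirection this patch applies, here is a minimal standalone C sketch of the pattern: a per-GPU ops table holds chip-specific function pointers, and common code calls through the table instead of naming a chip function directly. All types and names below are hypothetical simplifications for illustration, not nvgpu's actual definitions; in particular, the stand-in implementation only records state, whereas the real pmu_enable_irq() programs interrupt registers.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for nvgpu's real types. */
struct nvgpu_pmu {
	bool isr_enabled;
};

struct gpu_ops {
	struct {
		/* HAL entry point; each chip installs its own implementation. */
		void (*pmu_enable_irq)(struct nvgpu_pmu *pmu, bool enable);
	} pmu;
};

struct gk20a {
	struct gpu_ops ops;
	struct nvgpu_pmu pmu;
};

/* Chip-specific implementation (stand-in for the real pmu_enable_irq()). */
static void gk20a_pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
{
	pmu->isr_enabled = enable;
	printf("PMU irq %s\n", enable ? "enabled" : "disabled");
}

/* HAL init for a chip family installs its function pointers. */
static void gk20a_init_hal(struct gk20a *g)
{
	g->ops.pmu.pmu_enable_irq = gk20a_pmu_enable_irq;
}

int main(void)
{
	struct gk20a g = { .pmu = { .isr_enabled = false } };

	gk20a_init_hal(&g);

	/* Call sites go through the ops table, as in start_gm20b_pmu()
	 * in the patch above, rather than calling the chip function
	 * directly. */
	g.ops.pmu.pmu_enable_irq(&g.pmu, true);
	return 0;
}

The payoff is the one the commit message describes: once every call site uses g->ops.pmu.*, common code depends only on the ops table, so a chip-specific header like pmu_gk20a.h no longer has to leak into gk20a.h.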