From 5f010177de985c901c33c914efe70a8498a5974f Mon Sep 17 00:00:00 2001
From: Sunny He
Date: Tue, 1 Aug 2017 17:10:42 -0700
Subject: gpu: nvgpu: Reorg pmu HAL initialization

Reorganize HAL initialization to remove inheritance and construct
the gpu_ops struct at compile time. This patch only covers the pmu
sub-module of the gpu_ops struct.

Perform HAL function assignments in hal_gxxxx.c through the
population of a chip-specific copy of gpu_ops.

Jira NVGPU-74

Change-Id: I8839ac99e87153637005e23b3013237f57275c54
Signed-off-by: Sunny He
Reviewed-on: https://git-master.nvidia.com/r/1530982
Reviewed-by: svccoveritychecker
Reviewed-by: svc-mobile-coverity
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom
---
 drivers/gpu/nvgpu/gp106/acr_gp106.c | 25 +++------------
 drivers/gpu/nvgpu/gp106/acr_gp106.h |  9 +++++-
 drivers/gpu/nvgpu/gp106/hal_gp106.c | 49 +++++++++++++++++++++++++++-
 drivers/gpu/nvgpu/gp106/pmu_gp106.c | 64 +++++--------------------------------
 drivers/gpu/nvgpu/gp106/pmu_gp106.h | 10 +++++-
 5 files changed, 77 insertions(+), 80 deletions(-)
(limited to 'drivers/gpu/nvgpu/gp106')
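The commit message describes replacing runtime, inheritance-style HAL assignment with a chip-specific gpu_ops table that is built at compile time and copied during HAL init. Below is a minimal, self-contained C sketch of the two styles for orientation; the types and names are simplified placeholders, not the actual nvgpu definitions (the real gp106 table is in hal_gp106.c, further down in this patch).

/* Placeholder types -- illustrative only, not the real nvgpu structs. */
struct gk20a;

struct pmu_ops {
	int (*prepare_ucode)(struct gk20a *g);
	int (*pmu_setup_hw_and_bootstrap)(struct gk20a *g);
};

struct gpu_ops {
	struct pmu_ops pmu;
};

struct gk20a {
	struct gpu_ops ops;
};

static int chip_prepare_ucode_blob(struct gk20a *g) { (void)g; return 0; }
static int chip_bootstrap_hs_flcn(struct gk20a *g) { (void)g; return 0; }

/* Old style (what this patch removes): a per-chip init function assigns
 * individual members of g->ops at runtime, layering chip over chip. */
void chip_init_pmu_ops_runtime(struct gk20a *g)
{
	g->ops.pmu.prepare_ucode = chip_prepare_ucode_blob;
	g->ops.pmu.pmu_setup_hw_and_bootstrap = chip_bootstrap_hs_flcn;
}

/* New style (what this patch adopts): one const table per chip, filled
 * with designated initializers at compile time; HAL init then copies a
 * whole sub-struct in a single assignment. */
static const struct gpu_ops chip_ops = {
	.pmu = {
		.prepare_ucode = chip_prepare_ucode_blob,
		.pmu_setup_hw_and_bootstrap = chip_bootstrap_hs_flcn,
	},
};

int chip_init_hal(struct gk20a *g)
{
	g->ops.pmu = chip_ops.pmu;
	return 0;
}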
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index 929036a3..bd47f467 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -56,9 +56,6 @@ typedef int (*get_ucode_details)(struct gk20a *g,
 /*Externs*/
 
 /*Forwards*/
-static int gp106_bootstrap_hs_flcn(struct gk20a *g);
-
-static int gp106_prepare_ucode_blob(struct gk20a *g);
 
 /*Globals*/
 static get_ucode_details pmu_acr_supp_ucode_list[] = {
@@ -67,7 +64,7 @@ static get_ucode_details pmu_acr_supp_ucode_list[] = {
 	gpccs_ucode_details,
 };
 
-static void gp106_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
+void gp106_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
 {
 	inf->nonwpr_base = g->mm.vidmem.bootstrap_base;
 	inf->wpr_base = inf->nonwpr_base + GP106_DGPU_WPR_OFFSET;
@@ -80,7 +77,7 @@ static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
 	dma_addr->hi |= u64_hi32(value);
 }
 
-static int gp106_alloc_blob_space(struct gk20a *g,
+int gp106_alloc_blob_space(struct gk20a *g,
 	size_t size, struct nvgpu_mem *mem)
 {
 	struct wpr_carveout_info wpr_inf;
@@ -105,20 +102,6 @@ static int gp106_alloc_blob_space(struct gk20a *g,
 		NVGPU_DMA_NO_KERNEL_MAPPING, wpr_inf.size, mem,
 		wpr_inf.nonwpr_base);
 }
-
-void gp106_init_secure_pmu(struct gpu_ops *gops)
-{
-	gops->pmu.prepare_ucode = gp106_prepare_ucode_blob;
-	gops->pmu.pmu_setup_hw_and_bootstrap = gp106_bootstrap_hs_flcn;
-	gops->pmu.get_wpr = gp106_wpr_info;
-	gops->pmu.alloc_blob_space = gp106_alloc_blob_space;
-	gops->pmu.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg;
-	gops->pmu.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc;
-	gops->pmu.falcon_wait_for_halt = sec2_wait_for_halt;
-	gops->pmu.falcon_clear_halt_interrupt_status =
-		sec2_clear_halt_interrupt_status;
-	gops->pmu.init_falcon_setup_hw = init_sec2_setup_hw1;
-}
 
 /* TODO - check if any free blob res needed*/
 int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
@@ -373,7 +356,7 @@ rel_sig:
 	return err;
 }
 
-static int gp106_prepare_ucode_blob(struct gk20a *g)
+int gp106_prepare_ucode_blob(struct gk20a *g)
 {
 	int err;
 
@@ -1040,7 +1023,7 @@ int lsf_gen_wpr_requirements(struct gk20a *g,
 
 /*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
  * start and end are addresses of ucode blob in non-WPR region*/
-static int gp106_bootstrap_hs_flcn(struct gk20a *g)
+int gp106_bootstrap_hs_flcn(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm = mm->pmu.vm;
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.h b/drivers/gpu/nvgpu/gp106/acr_gp106.h
index fe8fbdb1..85448a81 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.h
@@ -19,7 +19,14 @@
 #define GP104_FECS_UCODE_SIG "gp104/fecs_sig.bin"
 #define GP104_GPCCS_UCODE_SIG "gp104/gpccs_sig.bin"
 
-void gp106_init_secure_pmu(struct gpu_ops *gops);
+
+int gp106_bootstrap_hs_flcn(struct gk20a *g);
+int gp106_prepare_ucode_blob(struct gk20a *g);
+int gp106_alloc_blob_space(struct gk20a *g,
+	size_t size, struct nvgpu_mem *mem);
+
+void gp106_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
+
 void lsfm_free_ucode_img_res(struct gk20a *g,
 	struct flcn_ucode_img_v1 *p_img);
 void lsfm_free_nonpmu_ucode_img_res(struct gk20a *g,
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index 6a50be34..21d5fee3 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -26,6 +26,7 @@
 #include "gk20a/regops_gk20a.h"
 #include "gk20a/mc_gk20a.h"
 #include "gk20a/fb_gk20a.h"
+#include "gk20a/pmu_gk20a.h"
 
 #include "gp10b/ltc_gp10b.h"
 #include "gp10b/gr_gp10b.h"
@@ -38,6 +39,7 @@
 #include "gp10b/priv_ring_gp10b.h"
 #include "gp10b/fifo_gp10b.h"
 #include "gp10b/fb_gp10b.h"
+#include "gp10b/pmu_gp10b.h"
 
 #include "gp106/fifo_gp106.h"
 #include "gp106/regops_gp106.h"
@@ -48,7 +50,10 @@
 #include "gm20b/mm_gm20b.h"
 #include "gm20b/pmu_gm20b.h"
 #include "gm20b/fb_gm20b.h"
+#include "gm20b/acr_gm20b.h"
 
+#include "gp106/acr_gp106.h"
+#include "gp106/sec2_gp106.h"
 #include "gp106/clk_gp106.h"
 #include "gp106/clk_arb_gp106.h"
 #include "gp106/mclk_gp106.h"
@@ -77,6 +82,7 @@
 #include 
 #include 
 #include 
+#include 
 
 static int gp106_get_litter_value(struct gk20a *g, int value)
@@ -398,6 +404,45 @@ static const struct gpu_ops gp106_ops = {
 		.get_internal_sensor_limits = gp106_get_internal_sensor_limits,
 		.configure_therm_alert = gp106_configure_therm_alert,
 	},
+	.pmu = {
+		.init_wpr_region = gm20b_pmu_init_acr,
+		.load_lsfalcon_ucode = gp106_load_falcon_ucode,
+		.is_lazy_bootstrap = gp106_is_lazy_bootstrap,
+		.is_priv_load = gp106_is_priv_load,
+		.prepare_ucode = gp106_prepare_ucode_blob,
+		.pmu_setup_hw_and_bootstrap = gp106_bootstrap_hs_flcn,
+		.get_wpr = gp106_wpr_info,
+		.alloc_blob_space = gp106_alloc_blob_space,
+		.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg,
+		.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc,
+		.falcon_wait_for_halt = sec2_wait_for_halt,
+		.falcon_clear_halt_interrupt_status =
+			sec2_clear_halt_interrupt_status,
+		.init_falcon_setup_hw = init_sec2_setup_hw1,
+		.pmu_queue_tail = gk20a_pmu_queue_tail,
+		.pmu_get_queue_head = pwr_pmu_queue_head_r,
+		.pmu_mutex_release = gk20a_pmu_mutex_release,
+		.is_pmu_supported = gp106_is_pmu_supported,
+		.pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list,
+		.pmu_elpg_statistics = gp106_pmu_elpg_statistics,
+		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
+		.pmu_is_lpwr_feature_supported =
+			gp106_pmu_is_lpwr_feature_supported,
+		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
+		.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list,
+		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+		.pmu_queue_head = gk20a_pmu_queue_head,
+		.pmu_pg_param_post_init = nvgpu_lpwr_post_init,
+		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+		.pmu_pg_init_param = gp106_pg_param_init,
+		.reset_engine = gp106_pmu_engine_reset,
+		.pmu_lpwr_disable_pg = nvgpu_lpwr_disable_pg,
+		.write_dmatrfbase = gp10b_write_dmatrfbase,
+		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+		.is_engine_in_reset = gp106_pmu_is_engine_in_reset,
+		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
+		.pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg,
+	},
 	.clk = {
 		.init_clk_support = gp106_init_clk_support,
 		.get_crystal_clk_hz = gp106_crystal_clk_hz,
@@ -532,6 +577,7 @@ int gp106_init_hal(struct gk20a *g)
 	gops->mm = gp106_ops.mm;
 	gops->pramin = gp106_ops.pramin;
 	gops->therm = gp106_ops.therm;
+	gops->pmu = gp106_ops.pmu;
 	/*
 	 * clk must be assigned member by member
 	 * since some clk ops are assigned during probe prior to HAL init
@@ -568,10 +614,11 @@ int gp106_init_hal(struct gk20a *g)
 	__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
 	__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
 	__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true);
+	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+	g->pmu_lsf_pmu_wpr_init_done = 0;
 	g->bootstrap_owner = LSF_FALCON_ID_SEC2;
 
 	gp106_init_gr(g);
-	gp106_init_pmu_ops(g);
 
 	gp10b_init_uncompressed_kind_map();
 	gp10b_init_kind_attr();
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index 3b75b488..998993c9 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -32,7 +32,7 @@
 #include 
 #include 
 
-static bool gp106_is_pmu_supported(struct gk20a *g)
+bool gp106_is_pmu_supported(struct gk20a *g)
 {
 	return true;
 }
@@ -69,7 +69,7 @@ int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset)
 	return 0;
 }
 
-static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
+u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
 {
 	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
 		return PMU_PG_FEATURE_GR_RPPG_ENABLED;
@@ -80,7 +80,7 @@ static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
 	return 0;
 }
 
-static u32 gp106_pmu_pg_engines_list(struct gk20a *g)
+u32 gp106_pmu_pg_engines_list(struct gk20a *g)
 {
 	return BIT(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) |
 		BIT(PMU_PG_ELPG_ENGINE_ID_MS);
@@ -100,7 +100,7 @@ static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
 		msg->msg.pg.msg_type);
 }
 
-static int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
+int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_cmd cmd;
@@ -168,7 +168,7 @@ void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 	pg_stat_data->avg_exit_latency_us = stats.exit_latency_avg_us;
 }
 
-static bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
+bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
 {
 	bool is_feature_supported = false;
 
@@ -188,7 +188,7 @@ static bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
 	return is_feature_supported;
 }
 
-static bool gp106_is_lazy_bootstrap(u32 falcon_id)
+bool gp106_is_lazy_bootstrap(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -206,7 +206,7 @@ static bool gp106_is_lazy_bootstrap(u32 falcon_id)
 	return enable_status;
 }
 
-static bool gp106_is_priv_load(u32 falcon_id)
+bool gp106_is_priv_load(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -258,7 +258,7 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
 	gk20a_dbg_fn("done");
 }
 
-static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
+int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 {
 	u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
 
@@ -289,51 +289,3 @@ static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 		return -ETIMEDOUT;
 	return 0;
 }
-
-void gp106_init_pmu_ops(struct gk20a *g)
-{
-	struct gpu_ops *gops = &g->ops;
-	gk20a_dbg_fn("");
-	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-		gp106_init_secure_pmu(gops);
-		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
-		gops->pmu.load_lsfalcon_ucode = gp106_load_falcon_ucode;
-		gops->pmu.is_lazy_bootstrap = gp106_is_lazy_bootstrap;
-		gops->pmu.is_priv_load = gp106_is_priv_load;
-	} else {
-		gk20a_init_pmu_ops(gops);
-		gops->pmu.pmu_setup_hw_and_bootstrap =
-			gm20b_init_nspmu_setup_hw1;
-		gops->pmu.load_lsfalcon_ucode = NULL;
-		gops->pmu.init_wpr_region = NULL;
-	}
-	gops->pmu.pmu_setup_elpg = NULL;
-	gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
-	gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
-	gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
-	gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
-	gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
-	gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
-	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
-	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
-	gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
-	gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
-	g->pmu_lsf_pmu_wpr_init_done = 0;
-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-	gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
-	gops->pmu.pmu_elpg_statistics = gp106_pmu_elpg_statistics;
-	gops->pmu.pmu_pg_init_param = gp106_pg_param_init;
-	gops->pmu.pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list;
-	gops->pmu.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list;
-	gops->pmu.pmu_is_lpwr_feature_supported =
-		gp106_pmu_is_lpwr_feature_supported;
-	gops->pmu.pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg;
-	gops->pmu.pmu_lpwr_disable_pg = nvgpu_lpwr_disable_pg;
-	gops->pmu.pmu_pg_param_post_init = nvgpu_lpwr_post_init;
-	gops->pmu.dump_secure_fuses = NULL;
-	gops->pmu.is_pmu_supported = gp106_is_pmu_supported;
-	gops->pmu.reset_engine = gp106_pmu_engine_reset;
-	gops->pmu.is_engine_in_reset = gp106_pmu_is_engine_in_reset;
-
-	gk20a_dbg_fn("done");
-}
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.h b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
index 68a00bb5..1b59b2c4 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
@@ -19,7 +19,15 @@
 
 struct gk20a;
 
-void gp106_init_pmu_ops(struct gk20a *g);
+bool gp106_is_pmu_supported(struct gk20a *g);
+u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id);
+u32 gp106_pmu_pg_engines_list(struct gk20a *g);
+int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id);
+bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id);
+bool gp106_is_lazy_bootstrap(u32 falcon_id);
+bool gp106_is_priv_load(u32 falcon_id);
+int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
+
 void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 	struct pmu_pg_stats_data *pg_stat_data);
 bool gp106_pmu_is_engine_in_reset(struct gk20a *g);
-- 
cgit v1.2.2
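The caller side is unchanged by this reorganization: chip-independent code keeps dispatching through g->ops, regardless of whether the table was populated at runtime or copied from a const, compile-time table. A small illustrative sketch follows, again using placeholder types rather than nvgpu's actual structures and call sites.

/* Placeholder types -- illustrative only, not the real nvgpu structs. */
struct gk20a;

struct pmu_ops {
	int (*prepare_ucode)(struct gk20a *g);
};

struct gpu_ops {
	struct pmu_ops pmu;
};

struct gk20a {
	struct gpu_ops ops;
};

/* Chip-independent code sees only the table, never the chip functions,
 * so moving the assignments into hal_gp106.c does not change callers. */
int common_pmu_prepare_ucode(struct gk20a *g)
{
	if (g->ops.pmu.prepare_ucode == NULL)
		return -1;
	return g->ops.pmu.prepare_ucode(g);
}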