summary refs log tree commit diff stats
path: root/drivers/gpu/nvgpu/gp106/pmu_gp106.c
diff options
context:
space:
mode:
authorSunny He <suhe@nvidia.com>2017-08-01 20:10:42 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-08-21 16:06:07 -0400
commit5f010177de985c901c33c914efe70a8498a5974f (patch)
tree1b1a2ac1ab71608a0754a7eb64222f5d198e793c /drivers/gpu/nvgpu/gp106/pmu_gp106.c
parentb50b379c192714d0d08c3f2d33e90c95cf795253 (diff)
gpu: nvgpu: Reorg pmu HAL initialization
Reorganize HAL initialization to remove inheritance and construct the gpu_ops struct at compile time. This patch only covers the pmu sub-module of the gpu_ops struct. Perform HAL function assignments in hal_gxxxx.c through the population of a chip-specific copy of gpu_ops.

Jira NVGPU-74

Change-Id: I8839ac99e87153637005e23b3013237f57275c54
Signed-off-by: Sunny He <suhe@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1530982
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp106/pmu_gp106.c')
-rw-r--r--drivers/gpu/nvgpu/gp106/pmu_gp106.c64
1 file changed, 8 insertions, 56 deletions
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index 3b75b488..998993c9 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -32,7 +32,7 @@
32#include <nvgpu/hw/gp106/hw_mc_gp106.h> 32#include <nvgpu/hw/gp106/hw_mc_gp106.h>
33#include <nvgpu/hw/gp106/hw_pwr_gp106.h> 33#include <nvgpu/hw/gp106/hw_pwr_gp106.h>
34 34
/*
 * Report whether a PMU unit is available on this chip.
 * GP106 always has a PMU, so this unconditionally returns true.
 */
bool gp106_is_pmu_supported(struct gk20a *g)
{
	(void)g; /* chip identity alone decides; the device state is unused */
	return true;
}
@@ -69,7 +69,7 @@ int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset)
69 return 0; 69 return 0;
70} 70}
71 71
72static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id) 72u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
73{ 73{
74 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) 74 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
75 return PMU_PG_FEATURE_GR_RPPG_ENABLED; 75 return PMU_PG_FEATURE_GR_RPPG_ENABLED;
@@ -80,7 +80,7 @@ static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
80 return 0; 80 return 0;
81} 81}
82 82
83static u32 gp106_pmu_pg_engines_list(struct gk20a *g) 83u32 gp106_pmu_pg_engines_list(struct gk20a *g)
84{ 84{
85 return BIT(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) | 85 return BIT(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) |
86 BIT(PMU_PG_ELPG_ENGINE_ID_MS); 86 BIT(PMU_PG_ELPG_ENGINE_ID_MS);
@@ -100,7 +100,7 @@ static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
100 msg->msg.pg.msg_type); 100 msg->msg.pg.msg_type);
101} 101}
102 102
103static int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id) 103int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
104{ 104{
105 struct nvgpu_pmu *pmu = &g->pmu; 105 struct nvgpu_pmu *pmu = &g->pmu;
106 struct pmu_cmd cmd; 106 struct pmu_cmd cmd;
@@ -168,7 +168,7 @@ void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
168 pg_stat_data->avg_exit_latency_us = stats.exit_latency_avg_us; 168 pg_stat_data->avg_exit_latency_us = stats.exit_latency_avg_us;
169} 169}
170 170
171static bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id) 171bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
172{ 172{
173 bool is_feature_supported = false; 173 bool is_feature_supported = false;
174 174
@@ -188,7 +188,7 @@ static bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
188 return is_feature_supported; 188 return is_feature_supported;
189} 189}
190 190
191static bool gp106_is_lazy_bootstrap(u32 falcon_id) 191bool gp106_is_lazy_bootstrap(u32 falcon_id)
192{ 192{
193 bool enable_status = false; 193 bool enable_status = false;
194 194
@@ -206,7 +206,7 @@ static bool gp106_is_lazy_bootstrap(u32 falcon_id)
206 return enable_status; 206 return enable_status;
207} 207}
208 208
209static bool gp106_is_priv_load(u32 falcon_id) 209bool gp106_is_priv_load(u32 falcon_id)
210{ 210{
211 bool enable_status = false; 211 bool enable_status = false;
212 212
@@ -258,7 +258,7 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
258 gk20a_dbg_fn("done"); 258 gk20a_dbg_fn("done");
259} 259}
260 260
261static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask) 261int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
262{ 262{
263 u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES; 263 u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
264 264
@@ -289,51 +289,3 @@ static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
289 return -ETIMEDOUT; 289 return -ETIMEDOUT;
290 return 0; 290 return 0;
291} 291}
292
293void gp106_init_pmu_ops(struct gk20a *g)
294{
295 struct gpu_ops *gops = &g->ops;
296 gk20a_dbg_fn("");
297 if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
298 gp106_init_secure_pmu(gops);
299 gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
300 gops->pmu.load_lsfalcon_ucode = gp106_load_falcon_ucode;
301 gops->pmu.is_lazy_bootstrap = gp106_is_lazy_bootstrap;
302 gops->pmu.is_priv_load = gp106_is_priv_load;
303 } else {
304 gk20a_init_pmu_ops(gops);
305 gops->pmu.pmu_setup_hw_and_bootstrap =
306 gm20b_init_nspmu_setup_hw1;
307 gops->pmu.load_lsfalcon_ucode = NULL;
308 gops->pmu.init_wpr_region = NULL;
309 }
310 gops->pmu.pmu_setup_elpg = NULL;
311 gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
312 gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
313 gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
314 gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
315 gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
316 gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
317 gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
318 gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
319 gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
320 gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
321 g->pmu_lsf_pmu_wpr_init_done = 0;
322 __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
323 gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
324 gops->pmu.pmu_elpg_statistics = gp106_pmu_elpg_statistics;
325 gops->pmu.pmu_pg_init_param = gp106_pg_param_init;
326 gops->pmu.pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list;
327 gops->pmu.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list;
328 gops->pmu.pmu_is_lpwr_feature_supported =
329 gp106_pmu_is_lpwr_feature_supported;
330 gops->pmu.pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg;
331 gops->pmu.pmu_lpwr_disable_pg = nvgpu_lpwr_disable_pg;
332 gops->pmu.pmu_pg_param_post_init = nvgpu_lpwr_post_init;
333 gops->pmu.dump_secure_fuses = NULL;
334 gops->pmu.is_pmu_supported = gp106_is_pmu_supported;
335 gops->pmu.reset_engine = gp106_pmu_engine_reset;
336 gops->pmu.is_engine_in_reset = gp106_pmu_is_engine_in_reset;
337
338 gk20a_dbg_fn("done");
339}