author		Sunny He <suhe@nvidia.com>	2017-08-01 20:10:42 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-08-21 16:06:07 -0400
commit		5f010177de985c901c33c914efe70a8498a5974f (patch)
tree		1b1a2ac1ab71608a0754a7eb64222f5d198e793c /drivers/gpu/nvgpu/gp10b
parent		b50b379c192714d0d08c3f2d33e90c95cf795253 (diff)
gpu: nvgpu: Reorg pmu HAL initialization
Reorganize HAL initialization to remove inheritance and construct
the gpu_ops struct at compile time. This patch only covers the pmu
sub-module of the gpu_ops struct. Perform HAL function assignments
in hal_gxxxx.c through the population of a chip-specific copy of
gpu_ops.

Jira NVGPU-74

Change-Id: I8839ac99e87153637005e23b3013237f57275c54
Signed-off-by: Sunny He <suhe@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1530982
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b')
-rw-r--r--	drivers/gpu/nvgpu/gp10b/hal_gp10b.c	63
-rw-r--r--	drivers/gpu/nvgpu/gp10b/pmu_gp10b.c	57
-rw-r--r--	drivers/gpu/nvgpu/gp10b/pmu_gp10b.h	10
3 files changed, 78 insertions, 52 deletions
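
The commit message describes replacing runtime "init ops" inheritance with a gpu_ops
table built at compile time. The sketch below illustrates that pattern in isolation;
the type and function names (struct gpu, pmu_ops, chip_ops, chip_init_hal, the two
hook functions) are hypothetical stand-ins rather than actual nvgpu symbols. Only the
shape mirrors the change: a static const per-chip table, copied once in init_hal(),
with runtime-dependent hooks patched afterwards.

/*
 * Minimal, self-contained sketch of the compile-time gpu_ops pattern.
 * All identifiers are illustrative, not nvgpu definitions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct gpu;                                      /* opaque device handle */

struct pmu_ops {
	int  (*setup_elpg)(struct gpu *g);
	bool (*is_pmu_supported)(struct gpu *g);
};

struct gpu_ops {
	struct pmu_ops pmu;
};

static int  chip_pmu_setup_elpg(struct gpu *g)   { (void)g; return 0; }
static bool chip_is_pmu_supported(struct gpu *g) { (void)g; return true; }

/* The whole per-chip table is a const initializer, built at compile time. */
static const struct gpu_ops chip_ops = {
	.pmu = {
		.setup_elpg       = chip_pmu_setup_elpg,
		.is_pmu_supported = chip_is_pmu_supported,
	},
};

/*
 * init_hal() copies the table once; only hooks that depend on a runtime
 * condition (priv security in the real driver) are patched afterwards.
 */
static void chip_init_hal(struct gpu_ops *gops, bool priv_security)
{
	*gops = chip_ops;
	if (!priv_security)
		gops->pmu.is_pmu_supported = NULL;   /* take the non-secure path */
}

int main(void)
{
	struct gpu_ops ops;

	chip_init_hal(&ops, true);
	printf("pmu supported: %d\n", ops.pmu.is_pmu_supported(NULL));
	return 0;
}

Compared with the removed gp10b_init_pmu_ops() path in the diff below, the
compile-time table keeps the per-chip HAL visible in one place and limits runtime
assignment to the security-dependent hooks.
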
diff --git a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
index a37295bb..40ef35d5 100644
--- a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
@@ -26,6 +26,7 @@
 #include "gk20a/regops_gk20a.h"
 #include "gk20a/mc_gk20a.h"
 #include "gk20a/fb_gk20a.h"
+#include "gk20a/pmu_gk20a.h"
 
 #include "gp10b/gr_gp10b.h"
 #include "gp10b/fecs_trace_gp10b.h"
@@ -46,6 +47,7 @@
 #include "gm20b/ltc_gm20b.h"
 #include "gm20b/gr_gm20b.h"
 #include "gm20b/fifo_gm20b.h"
+#include "gm20b/acr_gm20b.h"
 #include "gm20b/pmu_gm20b.h"
 #include "gm20b/clk_gm20b.h"
 #include "gm20b/fb_gm20b.h"
@@ -65,6 +67,7 @@
 #include <nvgpu/hw/gp10b/hw_ram_gp10b.h>
 #include <nvgpu/hw/gp10b/hw_top_gp10b.h>
 #include <nvgpu/hw/gp10b/hw_pram_gp10b.h>
+#include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
 
 static int gp10b_get_litter_value(struct gk20a *g, int value)
 {
@@ -353,6 +356,27 @@ static const struct gpu_ops gp10b_ops = {
 		.init_therm_setup_hw = gp10b_init_therm_setup_hw,
 		.elcg_init_idle_filters = gp10b_elcg_init_idle_filters,
 	},
+	.pmu = {
+		.pmu_setup_elpg = gp10b_pmu_setup_elpg,
+		.pmu_get_queue_head = pwr_pmu_queue_head_r,
+		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
+		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+		.pmu_queue_head = gk20a_pmu_queue_head,
+		.pmu_queue_tail = gk20a_pmu_queue_tail,
+		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
+		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
+		.pmu_mutex_release = gk20a_pmu_mutex_release,
+		.write_dmatrfbase = gp10b_write_dmatrfbase,
+		.pmu_elpg_statistics = gp10b_pmu_elpg_statistics,
+		.pmu_pg_init_param = gp10b_pg_gr_init,
+		.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
+		.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
+		.dump_secure_fuses = pmu_dump_security_fuses_gp10b,
+		.reset_engine = gk20a_pmu_engine_reset,
+		.is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
+	},
 	.regops = {
 		.get_global_whitelist_ranges =
 			gp10b_get_global_whitelist_ranges,
@@ -455,6 +479,7 @@ int gp10b_init_hal(struct gk20a *g)
 	gops->mm = gp10b_ops.mm;
 	gops->pramin = gp10b_ops.pramin;
 	gops->therm = gp10b_ops.therm;
+	gops->pmu = gp10b_ops.pmu;
 	gops->regops = gp10b_ops.regops;
 	gops->mc = gp10b_ops.mc;
 	gops->debug = gp10b_ops.debug;
@@ -513,9 +538,45 @@ int gp10b_init_hal(struct gk20a *g)
 	}
 #endif
 
+	/* priv security dependent ops */
+	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
+		/* Add in ops from gm20b acr */
+		gops->pmu.is_pmu_supported = gm20b_is_pmu_supported,
+		gops->pmu.prepare_ucode = prepare_ucode_blob,
+		gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn,
+		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap,
+		gops->pmu.is_priv_load = gm20b_is_priv_load,
+		gops->pmu.get_wpr = gm20b_wpr_info,
+		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space,
+		gops->pmu.pmu_populate_loader_cfg =
+			gm20b_pmu_populate_loader_cfg,
+		gops->pmu.flcn_populate_bl_dmem_desc =
+			gm20b_flcn_populate_bl_dmem_desc,
+		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
+		gops->pmu.falcon_clear_halt_interrupt_status =
+			clear_halt_interrupt_status,
+		gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1,
+
+		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
+		gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
+		gops->pmu.is_lazy_bootstrap = gp10b_is_lazy_bootstrap;
+		gops->pmu.is_priv_load = gp10b_is_priv_load;
+	} else {
+		/* Inherit from gk20a */
+		gops->pmu.is_pmu_supported = gk20a_is_pmu_supported,
+		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
+		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,
+		gops->pmu.pmu_nsbootstrap = pmu_bootstrap,
+
+		gops->pmu.load_lsfalcon_ucode = NULL;
+		gops->pmu.init_wpr_region = NULL;
+		gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
+	}
+
+	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+	g->pmu_lsf_pmu_wpr_init_done = 0;
 	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
 	gp10b_init_gr(g);
-	gp10b_init_pmu_ops(g);
 
 	gp10b_init_uncompressed_kind_map();
 	gp10b_init_kind_attr();
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
index f45490db..81568122 100644
--- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
@@ -252,7 +252,7 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
 	return 0;
 }
 
-static void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 		struct pmu_pg_stats_data *pg_stat_data)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
@@ -269,7 +269,7 @@ static void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 	pg_stat_data->avg_exit_latency_us = stats.exitlatency_avgus;
 }
 
-static int gp10b_pmu_setup_elpg(struct gk20a *g)
+int gp10b_pmu_setup_elpg(struct gk20a *g)
 {
 	int ret = 0;
 	u32 reg_writes;
@@ -299,7 +299,7 @@ void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr)
 		0x0);
 }
 
-static int gp10b_init_pmu_setup_hw1(struct gk20a *g)
+int gp10b_init_pmu_setup_hw1(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
 	int err;
@@ -337,7 +337,7 @@ static int gp10b_init_pmu_setup_hw1(struct gk20a *g)
 
 }
 
-static bool gp10b_is_lazy_bootstrap(u32 falcon_id)
+bool gp10b_is_lazy_bootstrap(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -355,7 +355,7 @@ static bool gp10b_is_lazy_bootstrap(u32 falcon_id)
 	return enable_status;
 }
 
-static bool gp10b_is_priv_load(u32 falcon_id)
+bool gp10b_is_priv_load(u32 falcon_id)
 {
 	bool enable_status = false;
 
@@ -374,7 +374,7 @@ static bool gp10b_is_priv_load(u32 falcon_id)
 }
 
 /*Dump Security related fuses*/
-static void pmu_dump_security_fuses_gp10b(struct gk20a *g)
+void pmu_dump_security_fuses_gp10b(struct gk20a *g)
 {
 	u32 val;
 
@@ -386,50 +386,7 @@ static void pmu_dump_security_fuses_gp10b(struct gk20a *g)
 	nvgpu_err(g, "FUSE_GCPLEX_CONFIG_FUSE_0: 0x%x", val);
 }
 
-static bool gp10b_is_pmu_supported(struct gk20a *g)
+bool gp10b_is_pmu_supported(struct gk20a *g)
 {
 	return true;
 }
-
-void gp10b_init_pmu_ops(struct gk20a *g)
-{
-	struct gpu_ops *gops = &g->ops;
-	gops->pmu.is_pmu_supported = gp10b_is_pmu_supported;
-	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-		gm20b_init_secure_pmu(gops);
-		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
-		gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
-		gops->pmu.is_lazy_bootstrap = gp10b_is_lazy_bootstrap;
-		gops->pmu.is_priv_load = gp10b_is_priv_load;
-	} else {
-		gk20a_init_pmu_ops(gops);
-		gops->pmu.load_lsfalcon_ucode = NULL;
-		gops->pmu.init_wpr_region = NULL;
-		gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
-	}
-	gops->pmu.pmu_setup_elpg = gp10b_pmu_setup_elpg;
-	gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
-	gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
-	gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
-	gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
-	gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
-	gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
-	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
-	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
-	gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
-	gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
-	g->pmu_lsf_pmu_wpr_init_done = false;
-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-	gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
-	gops->pmu.pmu_elpg_statistics = gp10b_pmu_elpg_statistics;
-	gops->pmu.pmu_pg_init_param = gp10b_pg_gr_init;
-	gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list;
-	gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list;
-	gops->pmu.pmu_is_lpwr_feature_supported = NULL;
-	gops->pmu.pmu_lpwr_enable_pg = NULL;
-	gops->pmu.pmu_lpwr_disable_pg = NULL;
-	gops->pmu.pmu_pg_param_post_init = NULL;
-	gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gp10b;
-	gops->pmu.reset_engine = gk20a_pmu_engine_reset;
-	gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
-}
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.h b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.h
index 5ba7bb9b..071740f4 100644
--- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.h
+++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.h
@@ -18,7 +18,15 @@
 
 struct gk20a;
 
-void gp10b_init_pmu_ops(struct gk20a *g);
+
+bool gp10b_is_lazy_bootstrap(u32 falcon_id);
+bool gp10b_is_priv_load(u32 falcon_id);
+bool gp10b_is_pmu_supported(struct gk20a *g);
+int gp10b_init_pmu_setup_hw1(struct gk20a *g);
+void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+		struct pmu_pg_stats_data *pg_stat_data);
+int gp10b_pmu_setup_elpg(struct gk20a *g);
+void pmu_dump_security_fuses_gp10b(struct gk20a *g);
 int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
 int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id);
 void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr);