path: root/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/hal_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/hal_gm20b.c  67
1 file changed, 66 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
index 073c377e..38e8934b 100644
--- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
@@ -26,6 +26,7 @@
 #include "gk20a/flcn_gk20a.h"
 #include "gk20a/priv_ring_gk20a.h"
 #include "gk20a/regops_gk20a.h"
+#include "gk20a/pmu_gk20a.h"
 
 #include "ltc_gm20b.h"
 #include "gr_gm20b.h"
@@ -42,6 +43,7 @@
 #include "therm_gm20b.h"
 #include "bus_gm20b.h"
 #include "hal_gm20b.h"
+#include "acr_gm20b.h"
 
 #include <nvgpu/debug.h>
 #include <nvgpu/bug.h>
@@ -53,6 +55,8 @@
 #include <nvgpu/hw/gm20b/hw_fifo_gm20b.h>
 #include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
 #include <nvgpu/hw/gm20b/hw_top_gm20b.h>
+#include <nvgpu/hw/gm20b/hw_gr_gm20b.h>
+#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
 
 #define PRIV_SECURITY_DISABLE 0x01
 
@@ -313,6 +317,31 @@ static const struct gpu_ops gm20b_ops = {
                 .init_therm_setup_hw = gm20b_init_therm_setup_hw,
                 .elcg_init_idle_filters = gk20a_elcg_init_idle_filters,
         },
+        .pmu = {
+                .pmu_setup_elpg = gm20b_pmu_setup_elpg,
+                .pmu_get_queue_head = pwr_pmu_queue_head_r,
+                .pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+                .pmu_get_queue_tail = pwr_pmu_queue_tail_r,
+                .pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+                .pmu_queue_head = gk20a_pmu_queue_head,
+                .pmu_queue_tail = gk20a_pmu_queue_tail,
+                .pmu_msgq_tail = gk20a_pmu_msgq_tail,
+                .pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+                .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
+                .pmu_mutex_release = gk20a_pmu_mutex_release,
+                .write_dmatrfbase = gm20b_write_dmatrfbase,
+                .pmu_elpg_statistics = gk20a_pmu_elpg_statistics,
+                .pmu_pg_init_param = NULL,
+                .pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
+                .pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
+                .pmu_is_lpwr_feature_supported = NULL,
+                .pmu_lpwr_enable_pg = NULL,
+                .pmu_lpwr_disable_pg = NULL,
+                .pmu_pg_param_post_init = NULL,
+                .dump_secure_fuses = pmu_dump_security_fuses_gm20b,
+                .reset_engine = gk20a_pmu_engine_reset,
+                .is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
+        },
         .clk = {
                 .init_clk_support = gm20b_init_clk_support,
                 .suspend_clk_support = gm20b_suspend_clk_support,
@@ -422,6 +451,7 @@ int gm20b_init_hal(struct gk20a *g)
         gops->gr_ctx = gm20b_ops.gr_ctx;
         gops->mm = gm20b_ops.mm;
         gops->therm = gm20b_ops.therm;
+        gops->pmu = gm20b_ops.pmu;
         /*
          * clk must be assigned member by member
          * since some clk ops are assigned during probe prior to HAL init
@@ -483,9 +513,44 @@ int gm20b_init_hal(struct gk20a *g)
                 }
         }
 #endif
+
+        /* priv security dependent ops */
+        if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
+                /* Add in ops from gm20b acr */
+                gops->pmu.is_pmu_supported = gm20b_is_pmu_supported;
+                gops->pmu.prepare_ucode = prepare_ucode_blob;
+                gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn;
+                gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap;
+                gops->pmu.is_priv_load = gm20b_is_priv_load;
+                gops->pmu.get_wpr = gm20b_wpr_info;
+                gops->pmu.alloc_blob_space = gm20b_alloc_blob_space;
+                gops->pmu.pmu_populate_loader_cfg =
+                        gm20b_pmu_populate_loader_cfg;
+                gops->pmu.flcn_populate_bl_dmem_desc =
+                        gm20b_flcn_populate_bl_dmem_desc;
+                gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt;
+                gops->pmu.falcon_clear_halt_interrupt_status =
+                        clear_halt_interrupt_status;
+                gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1;
+
+                gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
+                gops->pmu.load_lsfalcon_ucode = gm20b_load_falcon_ucode;
+        } else {
+                /* Inherit from gk20a */
+                gops->pmu.is_pmu_supported = gk20a_is_pmu_supported;
+                gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob;
+                gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1;
+                gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
+
+                gops->pmu.load_lsfalcon_ucode = NULL;
+                gops->pmu.init_wpr_region = NULL;
+        }
+
+        __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+        g->pmu_lsf_pmu_wpr_init_done = 0;
         g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
+
         gm20b_init_gr(g);
-        gm20b_init_pmu_ops(g);
 
         gm20b_init_uncompressed_kind_map();
         gm20b_init_kind_attr();
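
Note on the pattern used above: the change moves the gm20b PMU callbacks into the static per-chip gpu_ops table and then overrides a subset of them at HAL-init time depending on whether priv security (ACR) is enabled, with NULL entries standing for "not implemented on this chip". The standalone C sketch below only illustrates that NULL-guarded ops-table idiom; the names pmu_hal, chip_pmu_ops, gk20a_like_mutex_acquire and pmu_pg_init are hypothetical placeholders, not the actual nvgpu definitions.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical, simplified ops table mirroring the idea of gpu_ops.pmu:
 * per-chip code fills in function pointers, common code dispatches through
 * them and treats a NULL entry as "not supported on this chip". */
struct pmu_hal {
        int (*pmu_pg_init_param)(int pg_engine_id);
        int (*pmu_mutex_acquire)(int mutex_id);
};

static int gk20a_like_mutex_acquire(int mutex_id)
{
        printf("acquire PMU mutex %d\n", mutex_id);
        return 0;
}

/* Per-chip table: as with gm20b in the diff, pg_init_param is left NULL. */
static const struct pmu_hal chip_pmu_ops = {
        .pmu_pg_init_param = NULL,
        .pmu_mutex_acquire = gk20a_like_mutex_acquire,
};

/* Common code: NULL-guarded call, the usual idiom for optional HAL ops. */
static int pmu_pg_init(const struct pmu_hal *ops, int pg_engine_id)
{
        if (ops->pmu_pg_init_param != NULL)
                return ops->pmu_pg_init_param(pg_engine_id);
        return 0; /* optional op not implemented: nothing to do */
}

int main(void)
{
        const struct pmu_hal *ops = &chip_pmu_ops;

        pmu_pg_init(ops, 0);
        if (ops->pmu_mutex_acquire != NULL)
                ops->pmu_mutex_acquire(3);
        return 0;
}

Running the sketch prints only the mutex message: the missing pmu_pg_init_param op is skipped quietly, which is the usual way optional HAL callbacks such as the NULL entries set in this diff are consumed.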