diff options
author | Debarshi Dutta <ddutta@nvidia.com> | 2018-08-22 00:27:01 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-08-29 20:46:51 -0400 |
commit | 74639b444251d7adc222400625eb59a3d53d0c0a (patch) | |
tree | 19373fbe8ee522863c990fdfa0db24e6474f5167 /drivers/gpu/nvgpu/common/pmu | |
parent | e3710e5431d8f14f1b8c2812f5c1aeeb7bdaac1c (diff) |
gpu: nvgpu: invoke calls to methods in pmu_gk20a.h via HAL
In nvgpu repository, we have multiple accesses to methods in
pmu_gk20a.h which have register accesses. Instead of directly invoking
these methods, these are now called via HALs. Some common methods such
as pmu_wait_message_cond which do not have any register accesses
are moved to pmu_ipc.c and the method declarations are moved
to pmu.h. Also, changed gm20b_pmu_dbg to
nvgpu_dbg_pmu all across the code base. This would remove all
indirect dependencies via gk20a.h into pmu_gk20a.h. As a result
pmu_gk20a.h is now removed from gk20a.h
JIRA-597
Change-Id: Id54b2684ca39362fda7626238c3116cd49e92080
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1804283
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu')
-rw-r--r-- | drivers/gpu/nvgpu/common/pmu/pmu.c | 6 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/pmu/pmu_debug.c | 4 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/pmu/pmu_ipc.c | 4 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c | 12 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/pmu/pmu_pg.c | 2 |
5 files changed, 14 insertions, 14 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c index 86e56d9e..0395e463 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu.c | |||
@@ -81,7 +81,7 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable) | |||
81 | 81 | ||
82 | if (!enable) { | 82 | if (!enable) { |
83 | if (!g->ops.pmu.is_engine_in_reset(g)) { | 83 | if (!g->ops.pmu.is_engine_in_reset(g)) { |
84 | pmu_enable_irq(pmu, false); | 84 | g->ops.pmu.pmu_enable_irq(pmu, false); |
85 | pmu_enable_hw(pmu, false); | 85 | pmu_enable_hw(pmu, false); |
86 | } | 86 | } |
87 | } else { | 87 | } else { |
@@ -95,7 +95,7 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable) | |||
95 | goto exit; | 95 | goto exit; |
96 | } | 96 | } |
97 | 97 | ||
98 | pmu_enable_irq(pmu, true); | 98 | g->ops.pmu.pmu_enable_irq(pmu, true); |
99 | } | 99 | } |
100 | 100 | ||
101 | exit: | 101 | exit: |
@@ -412,7 +412,7 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g) | |||
412 | if (nvgpu_is_enabled(g, NVGPU_PMU_ZBC_SAVE)) { | 412 | if (nvgpu_is_enabled(g, NVGPU_PMU_ZBC_SAVE)) { |
413 | /* Save zbc table after PMU is initialized. */ | 413 | /* Save zbc table after PMU is initialized. */ |
414 | pmu->zbc_ready = true; | 414 | pmu->zbc_ready = true; |
415 | gk20a_pmu_save_zbc(g, 0xf); | 415 | g->ops.gr.pmu_save_zbc(g, 0xf); |
416 | } | 416 | } |
417 | 417 | ||
418 | if (g->elpg_enabled) { | 418 | if (g->elpg_enabled) { |
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_debug.c b/drivers/gpu/nvgpu/common/pmu/pmu_debug.c index 6ad82ca8..68a39432 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_debug.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_debug.c | |||
@@ -39,7 +39,7 @@ void nvgpu_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu) | |||
39 | pmu->stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_GRAPHICS], | 39 | pmu->stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_GRAPHICS], |
40 | sizeof(struct pmu_pg_stats_v2)); | 40 | sizeof(struct pmu_pg_stats_v2)); |
41 | 41 | ||
42 | gk20a_pmu_dump_elpg_stats(pmu); | 42 | g->ops.pmu.pmu_dump_elpg_stats(pmu); |
43 | } | 43 | } |
44 | 44 | ||
45 | void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu) | 45 | void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu) |
@@ -47,7 +47,7 @@ void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu) | |||
47 | struct gk20a *g = pmu->g; | 47 | struct gk20a *g = pmu->g; |
48 | 48 | ||
49 | nvgpu_flcn_dump_stats(pmu->flcn); | 49 | nvgpu_flcn_dump_stats(pmu->flcn); |
50 | gk20a_pmu_dump_falcon_stats(pmu); | 50 | g->ops.pmu.pmu_dump_falcon_stats(pmu); |
51 | 51 | ||
52 | nvgpu_err(g, "pmu state: %d", pmu->pmu_state); | 52 | nvgpu_err(g, "pmu state: %d", pmu->pmu_state); |
53 | nvgpu_err(g, "elpg state: %d", pmu->elpg_stat); | 53 | nvgpu_err(g, "elpg state: %d", pmu->elpg_stat); |
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c index 843a4551..9fe999ae 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c | |||
@@ -744,8 +744,8 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, | |||
744 | return 0; | 744 | return 0; |
745 | } | 745 | } |
746 | 746 | ||
747 | if (gk20a_pmu_is_interrupted(pmu)) { | 747 | if (g->ops.pmu.pmu_is_interrupted(pmu)) { |
748 | gk20a_pmu_isr(g); | 748 | g->ops.pmu.pmu_isr(g); |
749 | } | 749 | } |
750 | 750 | ||
751 | nvgpu_usleep_range(delay, delay * 2U); | 751 | nvgpu_usleep_range(delay, delay * 2U); |
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c index 5d736591..a99e86ce 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c | |||
@@ -73,7 +73,7 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu) | |||
73 | 73 | ||
74 | pmu->perfmon_ready = 0; | 74 | pmu->perfmon_ready = 0; |
75 | 75 | ||
76 | gk20a_pmu_init_perfmon_counter(g); | 76 | g->ops.pmu.pmu_init_perfmon_counter(g); |
77 | 77 | ||
78 | if (!pmu->sample_buffer) { | 78 | if (!pmu->sample_buffer) { |
79 | pmu->sample_buffer = nvgpu_alloc(&pmu->dmem, | 79 | pmu->sample_buffer = nvgpu_alloc(&pmu->dmem, |
@@ -246,8 +246,8 @@ void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles, | |||
246 | return; | 246 | return; |
247 | } | 247 | } |
248 | 248 | ||
249 | *busy_cycles = gk20a_pmu_read_idle_counter(g, 1); | 249 | *busy_cycles = g->ops.pmu.pmu_read_idle_counter(g, 1); |
250 | *total_cycles = gk20a_pmu_read_idle_counter(g, 2); | 250 | *total_cycles = g->ops.pmu.pmu_read_idle_counter(g, 2); |
251 | 251 | ||
252 | gk20a_idle(g); | 252 | gk20a_idle(g); |
253 | } | 253 | } |
@@ -258,8 +258,8 @@ void nvgpu_pmu_reset_load_counters(struct gk20a *g) | |||
258 | return; | 258 | return; |
259 | } | 259 | } |
260 | 260 | ||
261 | gk20a_pmu_reset_idle_counter(g, 2); | 261 | g->ops.pmu.pmu_reset_idle_counter(g, 2); |
262 | gk20a_pmu_reset_idle_counter(g, 1); | 262 | g->ops.pmu.pmu_reset_idle_counter(g, 1); |
263 | 263 | ||
264 | gk20a_idle(g); | 264 | gk20a_idle(g); |
265 | } | 265 | } |
@@ -316,7 +316,7 @@ int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu) | |||
316 | memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_init)); | 316 | memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perfmon_init)); |
317 | pmu->perfmon_ready = 0; | 317 | pmu->perfmon_ready = 0; |
318 | 318 | ||
319 | gk20a_pmu_init_perfmon_counter(g); | 319 | g->ops.pmu.pmu_init_perfmon_counter(g); |
320 | 320 | ||
321 | /* microseconds interval between pmu polls perf counters */ | 321 | /* microseconds interval between pmu polls perf counters */ |
322 | rpc.sample_periodus = 16700; | 322 | rpc.sample_periodus = 16700; |
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c index 76ed0621..0758279d 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c | |||
@@ -394,7 +394,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) | |||
394 | 394 | ||
395 | nvgpu_log_fn(g, " "); | 395 | nvgpu_log_fn(g, " "); |
396 | 396 | ||
397 | gk20a_pmu_pg_idle_counter_config(g, pg_engine_id); | 397 | g->ops.pmu.pmu_pg_idle_counter_config(g, pg_engine_id); |
398 | 398 | ||
399 | if (g->ops.pmu.pmu_pg_init_param) { | 399 | if (g->ops.pmu.pmu_pg_init_param) { |
400 | g->ops.pmu.pmu_pg_init_param(g, pg_engine_id); | 400 | g->ops.pmu.pmu_pg_init_param(g, pg_engine_id); |