summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/pmu/pmu.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu/pmu.c')
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu.c114
1 files changed, 102 insertions, 12 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index eb1c83fb..cc87c89b 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -18,8 +18,109 @@
18 18
19#include "gk20a/gk20a.h" 19#include "gk20a/gk20a.h"
20 20
21#define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000
22#define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10
23
21static int nvgpu_pg_init_task(void *arg); 24static int nvgpu_pg_init_task(void *arg);
22 25
/*
 * Bring the PMU falcon/engine out of (or put it into) reset.
 *
 * enable == true:  release the engine from reset, load the SLCG/BLCG
 *                  clock-gating prod settings if the chip provides them,
 *                  then poll until the falcon reports IMEM/DMEM scrubbing
 *                  complete.  On scrub timeout the engine is put back into
 *                  reset and -ETIMEDOUT is returned.
 * enable == false: simply hold the engine in reset.
 *
 * Returns 0 on success, -ETIMEDOUT if memory scrubbing never completes.
 */
static int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
{
	struct gk20a *g = pmu->g;
	struct nvgpu_timeout timeout;
	int err = 0;

	nvgpu_log_fn(g, " %s ", g->name);

	if (enable) {
		/* bring PMU falcon/engine out of reset */
		g->ops.pmu.reset_engine(g, true);

		/* gating prod hooks are optional per-chip; skip when absent */
		if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
			g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
				g->slcg_enabled);

		if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
			g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
				g->blcg_enabled);

		/* check for PMU IMEM/DMEM scrubbing complete status */
		/* retry count = MAX/DEFAULT polls, DEFAULT us apart
		 * (total budget ~PMU_MEM_SCRUBBING_TIMEOUT_MAX us)
		 */
		nvgpu_timeout_init(g, &timeout,
			PMU_MEM_SCRUBBING_TIMEOUT_MAX /
				PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT,
			NVGPU_TIMER_RETRY_TIMER);
		do {
			/* success path exits here with err == 0 */
			if (nvgpu_flcn_get_mem_scrubbing_status(pmu->flcn))
				goto exit;

			nvgpu_udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
		} while (!nvgpu_timeout_expired(&timeout));

		/* keep PMU falcon/engine in reset
		 * if IMEM/DMEM scrubbing fails
		 */
		g->ops.pmu.reset_engine(g, false);
		nvgpu_err(g, "Falcon mem scrubbing timeout");
		err = -ETIMEDOUT;
	} else
		/* keep PMU falcon/engine in reset */
		g->ops.pmu.reset_engine(g, false);

exit:
	nvgpu_log_fn(g, "%s Done, status - %d ", g->name, err);
	return err;
}
72
73static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
74{
75 struct gk20a *g = pmu->g;
76 int err = 0;
77
78 nvgpu_log_fn(g, " ");
79
80 if (!enable) {
81 if (!g->ops.pmu.is_engine_in_reset(g)) {
82 pmu_enable_irq(pmu, false);
83 pmu_enable_hw(pmu, false);
84 }
85 } else {
86 err = pmu_enable_hw(pmu, true);
87 if (err)
88 goto exit;
89
90 err = nvgpu_flcn_wait_idle(pmu->flcn);
91 if (err)
92 goto exit;
93
94 pmu_enable_irq(pmu, true);
95 }
96
97exit:
98 nvgpu_log_fn(g, "Done, status - %d ", err);
99 return err;
100}
101
102int nvgpu_pmu_reset(struct gk20a *g)
103{
104 struct nvgpu_pmu *pmu = &g->pmu;
105 int err = 0;
106
107 nvgpu_log_fn(g, " %s ", g->name);
108
109 err = nvgpu_flcn_wait_idle(pmu->flcn);
110 if (err)
111 goto exit;
112
113 err = pmu_enable(pmu, false);
114 if (err)
115 goto exit;
116
117 err = pmu_enable(pmu, true);
118
119exit:
120 nvgpu_log_fn(g, " %s Done, status - %d ", g->name, err);
121 return err;
122}
123
23static int nvgpu_init_task_pg_init(struct gk20a *g) 124static int nvgpu_init_task_pg_init(struct gk20a *g)
24{ 125{
25 struct nvgpu_pmu *pmu = &g->pmu; 126 struct nvgpu_pmu *pmu = &g->pmu;
@@ -139,17 +240,6 @@ skip_init:
139 return err; 240 return err;
140} 241}
141 242
142static int nvgpu_init_pmu_reset_enable_hw(struct gk20a *g)
143{
144 struct nvgpu_pmu *pmu = &g->pmu;
145
146 nvgpu_log_fn(g, " ");
147
148 pmu_enable_hw(pmu, true);
149
150 return 0;
151}
152
153int nvgpu_init_pmu_support(struct gk20a *g) 243int nvgpu_init_pmu_support(struct gk20a *g)
154{ 244{
155 struct nvgpu_pmu *pmu = &g->pmu; 245 struct nvgpu_pmu *pmu = &g->pmu;
@@ -160,7 +250,7 @@ int nvgpu_init_pmu_support(struct gk20a *g)
160 if (pmu->initialized) 250 if (pmu->initialized)
161 return 0; 251 return 0;
162 252
163 err = nvgpu_init_pmu_reset_enable_hw(g); 253 err = pmu_enable_hw(pmu, true);
164 if (err) 254 if (err)
165 return err; 255 return err;
166 256