path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
author	Mahantesh Kumbar <mkumbar@nvidia.com>	2017-06-23 07:40:13 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-29 16:29:52 -0400
commit	268721975c6c72418e2282126e7f594f62e6e118 (patch)
tree	62c64561775f42513a9dda74244f613dc5a59543 /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent	97aea977e25c17ecb44448eb19dc15e740036958 (diff)
gpu: nvgpu: PMU reset reorg
- Made nvgpu_pmu_reset() the PMU reset path for all chips; removed
  gk20a_pmu_reset() and gp106_pmu_reset() along with dependent code.
- Added ops to perform the PMU engine reset and to query the engine
  reset status.
- Removed the pmu.reset ops and replaced it with
  nvgpu_flcn_reset(pmu->flcn).
- Moved SEC2 reset from pmu_gp106 to sec2_gp106 and cleaned up the
  SEC2-related parts of the PMU code.

JIRA NVGPU-99

Change-Id: I7575e4ca2b34922d73d171f6a41bfcdc2f40dc96
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master/r/1507881
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
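For orientation, a rough sketch of how the common reset path can chain the new ops together. This is hypothetical: it mirrors the gk20a_pmu_reset() sequence removed below, but the actual nvgpu_pmu_reset() may differ in detail.

/*
 * Hypothetical sketch only: mirrors the removed gk20a_pmu_reset()
 * sequence using the new chip ops; not the actual nvgpu_pmu_reset().
 */
static int pmu_reset_sketch(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	int err;

	/* Let in-flight falcon work drain before touching the engine. */
	err = nvgpu_flcn_wait_idle(pmu->flcn);
	if (err)
		return err;

	/*
	 * Cycle the engine via the new chip-specific op: false puts the
	 * engine into reset (power off), true brings it back out.
	 */
	err = g->ops.pmu.reset_engine(g, false);
	if (err)
		return err;

	err = g->ops.pmu.reset_engine(g, true);
	if (err)
		return err;

	/* Falcon-level reset replaces the old pmu.reset ops. */
	nvgpu_flcn_reset(pmu->flcn);

	return 0;
}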
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	108
1 file changed, 18 insertions(+), 90 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 4a676b82..3fc73e42 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -38,8 +38,6 @@
 #define gk20a_dbg_pmu(fmt, arg...) \
 	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
 
-#define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000
-#define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10
 
 bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
 {
@@ -159,80 +157,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
 	gk20a_dbg_fn("done");
 }
 
-int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct nvgpu_timeout timeout;
-	int err = 0;
-
-	gk20a_dbg_fn("");
-
-	if (enable) {
-		g->ops.mc.enable(g, mc_enable_pwr_enabled_f());
-
-		if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
-			g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
-				g->slcg_enabled);
-		if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
-			g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
-				g->blcg_enabled);
-
-		nvgpu_timeout_init(g, &timeout,
-			PMU_MEM_SCRUBBING_TIMEOUT_MAX /
-				PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT,
-			NVGPU_TIMER_RETRY_TIMER);
-		do {
-			if (nvgpu_flcn_get_mem_scrubbing_status(pmu->flcn)) {
-				gk20a_dbg_fn("done");
-				goto exit;
-			}
-			nvgpu_udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
-		} while (!nvgpu_timeout_expired(&timeout));
-
-		g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
-		nvgpu_err(g, "Falcon mem scrubbing timeout");
-
-		err = -ETIMEDOUT;
-	} else
-		g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
-
-exit:
-	return err;
-}
 
-static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	u32 pmc_enable;
-	int err;
-
-	gk20a_dbg_fn("");
-
-	if (!enable) {
-		pmc_enable = gk20a_readl(g, mc_enable_r());
-		if (mc_enable_pwr_v(pmc_enable) !=
-			mc_enable_pwr_disabled_v()) {
-
-			pmu_enable_irq(pmu, false);
-			pmu_enable_hw(pmu, false);
-		}
-	} else {
-		err = pmu_enable_hw(pmu, true);
-		if (err)
-			return err;
-
-		/* TBD: post reset */
-
-		err = nvgpu_flcn_wait_idle(pmu->flcn);
-		if (err)
-			return err;
-
-		pmu_enable_irq(pmu, true);
-	}
-
-	gk20a_dbg_fn("done");
-	return 0;
-}
 
 int pmu_bootstrap(struct nvgpu_pmu *pmu)
 {
@@ -576,25 +501,27 @@ static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
 	gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr);
 }
 
-int gk20a_pmu_reset(struct gk20a *g)
+bool gk20a_pmu_is_engine_in_reset(struct gk20a *g)
 {
-	struct nvgpu_pmu *pmu = &g->pmu;
-	int err;
+	u32 pmc_enable;
+	bool status = false;
 
-	err = nvgpu_flcn_wait_idle(pmu->flcn);
-	if (err)
-		goto exit;
+	pmc_enable = gk20a_readl(g, mc_enable_r());
+	if (mc_enable_pwr_v(pmc_enable) ==
+		mc_enable_pwr_disabled_v())
+		status = true;
 
-	err = pmu_enable(pmu, false);
-	if (err)
-		goto exit;
+	return status;
+}
 
-	err = pmu_enable(pmu, true);
-	if (err)
-		goto exit;
+int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset)
+{
+	if (do_reset)
+		g->ops.mc.enable(g, mc_enable_pwr_enabled_f());
+	else
+		g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
 
-exit:
-	return err;
+	return 0;
 }
 
 static bool gk20a_is_pmu_supported(struct gk20a *g)
@@ -650,7 +577,8 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.alloc_blob_space = NULL;
 	gops->pmu.pmu_populate_loader_cfg = NULL;
 	gops->pmu.flcn_populate_bl_dmem_desc = NULL;
-	gops->pmu.reset = NULL;
+	gops->pmu.reset_engine = gk20a_pmu_engine_reset;
+	gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
 }
 
 static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
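As a usage illustration only (the helper below is invented, not part of this change), the two new ops compose naturally: a caller can query the reset status and power the engine up only when needed.

/*
 * Invented helper, for illustration: bring the PMU engine out of
 * reset only when the status op reports it is still held in reset.
 */
static int pmu_ensure_out_of_reset(struct gk20a *g)
{
	if (g->ops.pmu.is_engine_in_reset(g))
		return g->ops.pmu.reset_engine(g, true);

	return 0;
}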