path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
author     Terje Bergstrom <tbergstrom@nvidia.com>  2017-03-17 12:56:50 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-03-27 13:48:31 -0400
commit     b45a67934faeba042dbf6ebe47c520db3ef4090d (patch)
tree       771f8c223a47281da915fee3348167724c332f56 /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent     0c45c5fcb60810f06b0ae05270f0fa7e32d31869 (diff)
gpu: nvgpu: Use nvgpu_timeout for all loops
There were still a few remaining loops where we did not use nvgpu_timeout and required Tegra-specific functions for detecting whether the timeout should be skipped. Replace all of them with nvgpu_timeout and remove the inclusion of chip-id.h where possible.

The FE power mode timeout loop also used a wrong delay value: it always waited for the whole max timeout instead of looping with smaller increments.

If SEC2 ACR boot fails to halt, we should not try to check the ACR result from the mailbox. Add an early return for that case.

JIRA NVGPU-16

Change-Id: I9f0984250d7d01785755338e39822e6631dcaa5a
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1323227
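For context, the conversion follows one pattern throughout: an open-coded retry counter paired with a Tegra silicon check becomes a single nvgpu_timeout. A minimal before/after sketch, distilled from the hunks below; condition_met() is a hypothetical stand-in for the actual register poll, and the millisecond conversion (/ 1000) mirrors what this diff does:

    /* Before: fixed retry budget, plus a Tegra-specific escape so that
     * pre-silicon platforms (FPGA/simulation) keep spinning and never
     * hit the timeout. */
    int retries = PMU_MEM_SCRUBBING_TIMEOUT_MAX /
            PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT;
    do {
            if (condition_met())    /* hypothetical poll */
                    return 0;
            udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
    } while (--retries || !tegra_platform_is_silicon());

    /* After: nvgpu_timeout owns the deadline, so chip-id.h no longer
     * needs to be included by the caller. */
    struct nvgpu_timeout timeout;

    nvgpu_timeout_init(g, &timeout,
            PMU_MEM_SCRUBBING_TIMEOUT_MAX / 1000,
            NVGPU_TIMER_CPU_TIMER);
    do {
            if (condition_met())    /* hypothetical poll */
                    return 0;
            udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
    } while (!nvgpu_timeout_expired(&timeout));

Note that the old loop condition spins forever on non-silicon platforms; the nvgpu_timeout variant presumably centralizes that platform decision inside the timer implementation instead of at every call site.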
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c  8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 993cef7b..85fa8ea1 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2374,12 +2374,11 @@ void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable)
 int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
+	struct nvgpu_timeout timeout;
 
 	gk20a_dbg_fn("");
 
 	if (enable) {
-		int retries = PMU_MEM_SCRUBBING_TIMEOUT_MAX /
-			PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT;
 		g->ops.mc.enable(g, mc_enable_pwr_enabled_f());
 
 		if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
@@ -2389,6 +2388,9 @@ int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
 			g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
 					g->blcg_enabled);
 
+		nvgpu_timeout_init(g, &timeout,
+				PMU_MEM_SCRUBBING_TIMEOUT_MAX / 1000,
+				NVGPU_TIMER_CPU_TIMER);
 		do {
 			u32 w = gk20a_readl(g, pwr_falcon_dmactl_r()) &
 				(pwr_falcon_dmactl_dmem_scrubbing_m() |
@@ -2399,7 +2401,7 @@ int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
 				return 0;
 			}
 			udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
-		} while (--retries || !tegra_platform_is_silicon());
+		} while (!nvgpu_timeout_expired(&timeout));
 
 		g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
 		gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
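The FE power mode fix mentioned in the commit message lands outside this file, so it does not appear in the diffstat above, but the bug class is easy to illustrate. In the sketch below, FE_PWR_MODE_TIMEOUT_MAX, FE_PWR_MODE_TIMEOUT_DEFAULT and fe_pwr_mode_done() are assumed names for illustration only:

    /* Buggy shape: each pass sleeps the whole maximum timeout, so the
     * very first miss already burns the entire budget before the
     * condition is re-checked. */
    do {
            if (fe_pwr_mode_done(g))    /* hypothetical poll */
                    return 0;
            udelay(FE_PWR_MODE_TIMEOUT_MAX);    /* wrong: max, not step */
    } while (!nvgpu_timeout_expired(&timeout));

    /* Fixed shape: sleep in small increments and let nvgpu_timeout
     * decide when the overall budget is exhausted. */
    do {
            if (fe_pwr_mode_done(g))    /* hypothetical poll */
                    return 0;
            udelay(FE_PWR_MODE_TIMEOUT_DEFAULT); /* small step */
    } while (!nvgpu_timeout_expired(&timeout));

The SEC2 ACR change follows the same defensive spirit: if the falcon never reaches halt, return the timeout error immediately rather than reading a result from a mailbox that was never written.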