author    Terje Bergstrom <tbergstrom@nvidia.com>    2017-03-17 12:56:50 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2017-03-27 13:48:31 -0400
commit    b45a67934faeba042dbf6ebe47c520db3ef4090d (patch)
tree      771f8c223a47281da915fee3348167724c332f56 /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent    0c45c5fcb60810f06b0ae05270f0fa7e32d31869 (diff)
gpu: nvgpu: Use nvgpu_timeout for all loops
There were still a few remaining loops where we did not use nvgpu_timeout
and instead required Tegra-specific functions to detect whether the timeout
should be skipped. Replace all of them with nvgpu_timeout and remove the
inclusion of chip-id.h where possible.

The FE power mode timeout loop also used the wrong delay value: it always
waited for the whole maximum timeout per iteration instead of looping with
smaller increments.

If SEC2 ACR boot fails to halt, we should not try to check the ACR result
from the mailbox. Add an early return for that case.

JIRA NVGPU-16

Change-Id: I9f0984250d7d01785755338e39822e6631dcaa5a
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1323227
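For readers skimming the diff: the change swaps an open-coded retry counter,
gated by tegra_platform_is_silicon(), for the driver's nvgpu_timeout helper.
A minimal before/after sketch of the FE power mode loop, condensed from the
hunks below; fe_pwr_mode_done() is a hypothetical helper standing in for the
gr_fe_pwr_mode_r() read-and-check:

    /* Before: retry counter, skipped on pre-silicon. Note the bug the
     * commit message calls out: each iteration slept the whole max. */
    int retries = FE_PWR_MODE_TIMEOUT_MAX / FE_PWR_MODE_TIMEOUT_DEFAULT;
    do {
            if (fe_pwr_mode_done(g))        /* hypothetical: reads gr_fe_pwr_mode_r() */
                    break;
            udelay(FE_PWR_MODE_TIMEOUT_MAX);        /* bug: full max per iteration */
    } while (--retries || !tegra_platform_is_silicon());
    if (!retries)
            gk20a_err(g->dev, "timeout forcing FE on");

    /* After: nvgpu_timeout owns the deadline; the expiry check also
     * logs the message. */
    struct nvgpu_timeout timeout;

    nvgpu_timeout_init(g, &timeout, FE_PWR_MODE_TIMEOUT_MAX / 1000,
                       NVGPU_TIMER_CPU_TIMER);
    do {
            if (fe_pwr_mode_done(g))
                    break;
            udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);    /* short increments now */
    } while (!nvgpu_timeout_expired_msg(&timeout, "timeout forcing FE on"));

The division by 1000 converts FE_PWR_MODE_TIMEOUT_MAX (microseconds, judging
by its use with udelay()) to the millisecond argument nvgpu_timeout_init()
takes here. Per the commit message, nvgpu_timeout handles the skip-on-
pre-silicon logic internally, which is why the explicit
tegra_platform_is_silicon() escape can be dropped from every loop.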
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
 drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 63ae1da1..b23cabe8 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -20,7 +20,6 @@
 #include <linux/delay.h>	/* for udelay */
 #include <linux/mm.h>		/* for totalram_pages */
 #include <linux/scatterlist.h>
-#include <soc/tegra/chip-id.h>
 #include <linux/debugfs.h>
 #include <uapi/linux/nvgpu.h>
 #include <linux/vmalloc.h>
@@ -1587,7 +1586,6 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 	struct aiv_list_gk20a *sw_ctx_load = &g->gr.ctx_vars.sw_ctx_load;
 	struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init;
 	u32 last_method_data = 0;
-	int retries = FE_PWR_MODE_TIMEOUT_MAX / FE_PWR_MODE_TIMEOUT_DEFAULT;
 	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 	struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
 	struct mem_desc *ctxheader = &ctx->mem;
@@ -1603,18 +1601,21 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 		goto clean_up;
 	}
 	if (!platform->is_fmodel) {
+		struct nvgpu_timeout timeout;
+
+		nvgpu_timeout_init(g, &timeout, FE_PWR_MODE_TIMEOUT_MAX / 1000,
+				   NVGPU_TIMER_CPU_TIMER);
 		gk20a_writel(g, gr_fe_pwr_mode_r(),
 			gr_fe_pwr_mode_req_send_f() | gr_fe_pwr_mode_mode_force_on_f());
 		do {
 			u32 req = gr_fe_pwr_mode_req_v(gk20a_readl(g, gr_fe_pwr_mode_r()));
 			if (req == gr_fe_pwr_mode_req_done_v())
 				break;
-			udelay(FE_PWR_MODE_TIMEOUT_MAX);
-		} while (--retries || !tegra_platform_is_silicon());
+			udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+				"timeout forcing FE on"));
 	}
 
-	if (!retries)
-		gk20a_err(g->dev, "timeout forcing FE on");
 
 	gk20a_writel(g, gr_fecs_ctxsw_reset_ctl_r(),
 		gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f() |
@@ -1643,19 +1644,20 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 	udelay(10);
 
 	if (!platform->is_fmodel) {
+		struct nvgpu_timeout timeout;
+
+		nvgpu_timeout_init(g, &timeout, FE_PWR_MODE_TIMEOUT_MAX / 1000,
+				   NVGPU_TIMER_CPU_TIMER);
 		gk20a_writel(g, gr_fe_pwr_mode_r(),
 			gr_fe_pwr_mode_req_send_f() | gr_fe_pwr_mode_mode_auto_f());
 
-		retries = FE_PWR_MODE_TIMEOUT_MAX / FE_PWR_MODE_TIMEOUT_DEFAULT;
 		do {
 			u32 req = gr_fe_pwr_mode_req_v(gk20a_readl(g, gr_fe_pwr_mode_r()));
 			if (req == gr_fe_pwr_mode_req_done_v())
 				break;
 			udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
-		} while (--retries || !tegra_platform_is_silicon());
-
-		if (!retries)
-			gk20a_err(g->dev, "timeout setting FE power to auto");
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+				"timeout setting FE power to auto"));
 	}
 
 	/* clear scc ram */
@@ -4996,13 +4998,14 @@ static int gk20a_init_gr_prepare(struct gk20a *g)
 
 static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
 {
-	int retries = CTXSW_MEM_SCRUBBING_TIMEOUT_MAX /
-		      CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT;
+	struct nvgpu_timeout timeout;
 	bool fecs_scrubbing;
 	bool gpccs_scrubbing;
 
 	gk20a_dbg_fn("");
 
+	nvgpu_timeout_init(g, &timeout, CTXSW_MEM_SCRUBBING_TIMEOUT_MAX / 1000,
+			   NVGPU_TIMER_CPU_TIMER);
 	do {
 		fecs_scrubbing = gk20a_readl(g, gr_fecs_dmactl_r()) &
 			(gr_fecs_dmactl_imem_scrubbing_m() |
@@ -5018,7 +5021,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
 		}
 
 		udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT);
-	} while (--retries || !tegra_platform_is_silicon());
+	} while (!nvgpu_timeout_expired(&timeout));
 
 	gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
 	return -ETIMEDOUT;
@@ -8663,8 +8666,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
 
 		usleep_range(delay, delay * 2);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
-	} while (!nvgpu_timeout_expired(&timeout)
-		 || !tegra_platform_is_silicon());
+	} while (!nvgpu_timeout_expired(&timeout));
 
 	dbgr_control0 = gk20a_readl(g,
 		gr_gpc0_tpc0_sm_dbgr_control0_r() + offset);