 drivers/gpu/nvgpu/gk20a/gr_gk20a.c   | 34
 drivers/gpu/nvgpu/gk20a/ltc_gk20a.c  | 13
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c   | 16
 drivers/gpu/nvgpu/gk20a/mm_gk20a.h   |  1
 drivers/gpu/nvgpu/gk20a/pmu_gk20a.c  |  8
 drivers/gpu/nvgpu/gm206/bios_gm206.c | 51
 drivers/gpu/nvgpu/gp106/sec2_gp106.c | 40
 drivers/gpu/nvgpu/gp10b/gr_gp10b.c   |  8
 8 files changed, 97 insertions(+), 74 deletions(-)
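
Every hunk in this change replaces the same idiom: an open-coded retry counter (or jiffies deadline) polled with udelay()/usleep_range(), kept alive on pre-silicon platforms via !tegra_platform_is_silicon(), and checked by hand after the loop. The nvgpu_timeout helpers from <nvgpu/timers.h> take over all three jobs. Below is a minimal before/after sketch of the pattern, assuming only the API as the hunks below exercise it; poll_done(), POLL_TIMEOUT_MAX and POLL_INTERVAL are hypothetical stand-ins:

	/* Before: hand-rolled countdown; never expires off-silicon. */
	int retries = POLL_TIMEOUT_MAX / POLL_INTERVAL;	/* hypothetical names */

	do {
		if (poll_done(g))
			break;
		udelay(POLL_INTERVAL);
	} while (--retries || !tegra_platform_is_silicon());
	if (!retries)
		gk20a_err(g->dev, "timeout");

	/* After: one CPU-clock timeout. Judging by the old retries
	 * arithmetic, the *_TIMEOUT_MAX constants are microsecond budgets,
	 * hence the / 1000 to get milliseconds. The timer code owns the
	 * pre-silicon policy, so callers drop the special case. */
	struct nvgpu_timeout timeout;

	nvgpu_timeout_init(g, &timeout, POLL_TIMEOUT_MAX / 1000,
			   NVGPU_TIMER_CPU_TIMER);
	do {
		if (poll_done(g))
			break;
		udelay(POLL_INTERVAL);
	} while (!nvgpu_timeout_expired(&timeout));
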
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 63ae1da1..b23cabe8 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -20,7 +20,6 @@
 #include <linux/delay.h>	/* for udelay */
 #include <linux/mm.h>		/* for totalram_pages */
 #include <linux/scatterlist.h>
-#include <soc/tegra/chip-id.h>
 #include <linux/debugfs.h>
 #include <uapi/linux/nvgpu.h>
 #include <linux/vmalloc.h>
@@ -1587,7 +1586,6 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 	struct aiv_list_gk20a *sw_ctx_load = &g->gr.ctx_vars.sw_ctx_load;
 	struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init;
 	u32 last_method_data = 0;
-	int retries = FE_PWR_MODE_TIMEOUT_MAX / FE_PWR_MODE_TIMEOUT_DEFAULT;
 	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 	struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
 	struct mem_desc *ctxheader = &ctx->mem;
@@ -1603,18 +1601,21 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 		goto clean_up;
 	}
 	if (!platform->is_fmodel) {
+		struct nvgpu_timeout timeout;
+
+		nvgpu_timeout_init(g, &timeout, FE_PWR_MODE_TIMEOUT_MAX / 1000,
+				NVGPU_TIMER_CPU_TIMER);
 		gk20a_writel(g, gr_fe_pwr_mode_r(),
 			gr_fe_pwr_mode_req_send_f() | gr_fe_pwr_mode_mode_force_on_f());
 		do {
 			u32 req = gr_fe_pwr_mode_req_v(gk20a_readl(g, gr_fe_pwr_mode_r()));
 			if (req == gr_fe_pwr_mode_req_done_v())
 				break;
-			udelay(FE_PWR_MODE_TIMEOUT_MAX);
-		} while (--retries || !tegra_platform_is_silicon());
+			udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+				"timeout forcing FE on"));
 	}
 
-	if (!retries)
-		gk20a_err(g->dev, "timeout forcing FE on");
 
 	gk20a_writel(g, gr_fecs_ctxsw_reset_ctl_r(),
 		gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f() |
@@ -1643,19 +1644,20 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 		udelay(10);
 
 	if (!platform->is_fmodel) {
+		struct nvgpu_timeout timeout;
+
+		nvgpu_timeout_init(g, &timeout, FE_PWR_MODE_TIMEOUT_MAX / 1000,
+				NVGPU_TIMER_CPU_TIMER);
 		gk20a_writel(g, gr_fe_pwr_mode_r(),
 			gr_fe_pwr_mode_req_send_f() | gr_fe_pwr_mode_mode_auto_f());
 
-		retries = FE_PWR_MODE_TIMEOUT_MAX / FE_PWR_MODE_TIMEOUT_DEFAULT;
 		do {
 			u32 req = gr_fe_pwr_mode_req_v(gk20a_readl(g, gr_fe_pwr_mode_r()));
 			if (req == gr_fe_pwr_mode_req_done_v())
 				break;
 			udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
-		} while (--retries || !tegra_platform_is_silicon());
-
-		if (!retries)
-			gk20a_err(g->dev, "timeout setting FE power to auto");
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+				"timeout setting FE power to auto"));
 	}
 
 	/* clear scc ram */
@@ -4996,13 +4998,14 @@ static int gk20a_init_gr_prepare(struct gk20a *g)
 
 static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
 {
-	int retries = CTXSW_MEM_SCRUBBING_TIMEOUT_MAX /
-		      CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT;
+	struct nvgpu_timeout timeout;
 	bool fecs_scrubbing;
 	bool gpccs_scrubbing;
 
 	gk20a_dbg_fn("");
 
+	nvgpu_timeout_init(g, &timeout, CTXSW_MEM_SCRUBBING_TIMEOUT_MAX / 1000,
+			NVGPU_TIMER_CPU_TIMER);
 	do {
 		fecs_scrubbing = gk20a_readl(g, gr_fecs_dmactl_r()) &
 			(gr_fecs_dmactl_imem_scrubbing_m() |
@@ -5018,7 +5021,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
 		}
 
 		udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT);
-	} while (--retries || !tegra_platform_is_silicon());
+	} while (!nvgpu_timeout_expired(&timeout));
 
 	gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
 	return -ETIMEDOUT;
@@ -8663,8 +8666,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
 
 		usleep_range(delay, delay * 2);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
-	} while (!nvgpu_timeout_expired(&timeout)
-		 || !tegra_platform_is_silicon());
+	} while (!nvgpu_timeout_expired(&timeout));
 
 	dbgr_control0 = gk20a_readl(g,
 		gr_gpc0_tpc0_sm_dbgr_control0_r() + offset);
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
index 34a96971..9942e58f 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
@@ -17,7 +17,9 @@
  */
 
 #include <linux/kernel.h>
+
 #include <trace/events/gk20a.h>
+#include <nvgpu/timers.h>
 
 #include "gk20a.h"
 
@@ -106,7 +108,6 @@ static int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 	int err = 0;
 	struct gr_gk20a *gr = &g->gr;
 	u32 fbp, slice, ctrl1, val, hw_op = 0;
-	int retry = 200;
 	u32 slices_per_fbp =
 		ltc_ltcs_ltss_cbc_param_slices_per_fbp_v(
 			gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
@@ -140,6 +141,9 @@ static int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 		gk20a_readl(g, ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op);
 
 	for (fbp = 0; fbp < gr->num_fbps; fbp++) {
+		struct nvgpu_timeout timeout;
+
+		nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
 		for (slice = 0; slice < slices_per_fbp; slice++) {
 
 
@@ -147,18 +151,15 @@ static int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 				fbp * ltc_stride +
 				slice * lts_stride;
 
-			retry = 200;
 			do {
 				val = gk20a_readl(g, ctrl1);
 				if (!(val & hw_op))
 					break;
-				retry--;
 				udelay(5);
 
-			} while (retry >= 0 ||
-				 !tegra_platform_is_silicon());
+			} while (!nvgpu_timeout_expired(&timeout));
 
-			if (retry < 0 && tegra_platform_is_silicon()) {
+			if (nvgpu_timeout_peek_expired(&timeout)) {
 				gk20a_err(dev_from_gk20a(g),
 					"comp tag clear timeout\n");
 				err = -EBUSY;
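
Two variants appear here that the gr_gk20a.c hunks don't use: NVGPU_TIMER_RETRY_TIMER, initialized with an attempt count rather than a duration (200, matching the old retry = 200), and nvgpu_timeout_peek_expired(), which tests expiry without consuming an attempt, so the post-loop check can't eat a retry. Note the budget is now set up once per FBP and shared across that FBP's slices, where the old code reset retry = 200 per slice. A sketch of the shape, with slice_idle() a hypothetical stand-in for the ctrl1 register test:

	struct nvgpu_timeout timeout;

	/* Retry timer: expires after 200 nvgpu_timeout_expired() calls. */
	nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
	do {
		if (slice_idle(g))	/* hypothetical condition */
			break;
		udelay(5);
	} while (!nvgpu_timeout_expired(&timeout));	/* consumes an attempt */

	if (nvgpu_timeout_peek_expired(&timeout))	/* read-only check */
		return -EBUSY;
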
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 9e6dc74c..c31f8482 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -22,7 +22,6 @@
 #include <linux/nvhost.h>
 #include <linux/scatterlist.h>
 #include <linux/nvmap.h>
-#include <soc/tegra/chip-id.h>
 #include <linux/vmalloc.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
@@ -1543,7 +1542,6 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 			       struct vm_gk20a_mapping_batch *batch)
 {
 	struct device *d = dev_from_vm(vm);
-	int retries = 10000; /* 50 ms */
 	struct mapped_buffer_node *mapped_buffer;
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
@@ -1556,17 +1554,19 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 	}
 
 	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+		struct nvgpu_timeout timeout;
+
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
 
-		while (retries >= 0 || !tegra_platform_is_silicon()) {
+		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
+				   NVGPU_TIMER_RETRY_TIMER);
+		do {
 			if (atomic_read(&mapped_buffer->ref.refcount) == 1)
 				break;
-			retries--;
 			udelay(5);
-		}
-		if (retries < 0 && tegra_platform_is_silicon())
-			gk20a_err(d, "sync-unmap failed on 0x%llx",
-				  offset);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+				"sync-unmap failed on 0x%llx"));
+
 		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	}
 
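
nvgpu_timeout_expired_msg() folds the old post-loop gk20a_err() into the loop condition: by all appearances it behaves like nvgpu_timeout_expired() but logs the given message when the timeout fires. One caveat worth flagging in this hunk: the old error passed offset for the 0x%llx conversion, while the new call keeps the format string but drops the argument. If the helper is printf-style variadic, as the retained conversion suggests, the call would presumably want to stay:

	} while (!nvgpu_timeout_expired_msg(&timeout,
			"sync-unmap failed on 0x%llx", offset));
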
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 52e5d4db..81a0aac9 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -20,7 +20,6 @@
 
 #include <linux/scatterlist.h>
 #include <linux/iommu.h>
-#include <soc/tegra/chip-id.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/version.h>
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 993cef7b..85fa8ea1 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2374,12 +2374,11 @@ void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable)
 int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
+	struct nvgpu_timeout timeout;
 
 	gk20a_dbg_fn("");
 
 	if (enable) {
-		int retries = PMU_MEM_SCRUBBING_TIMEOUT_MAX /
-			      PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT;
 		g->ops.mc.enable(g, mc_enable_pwr_enabled_f());
 
 		if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
@@ -2389,6 +2388,9 @@ int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
 			g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
 					g->blcg_enabled);
 
+		nvgpu_timeout_init(g, &timeout,
+				   PMU_MEM_SCRUBBING_TIMEOUT_MAX / 1000,
+				   NVGPU_TIMER_CPU_TIMER);
 		do {
 			u32 w = gk20a_readl(g, pwr_falcon_dmactl_r()) &
 				(pwr_falcon_dmactl_dmem_scrubbing_m() |
@@ -2399,7 +2401,7 @@ int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
 				return 0;
 			}
 			udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
-		} while (--retries || !tegra_platform_is_silicon());
+		} while (!nvgpu_timeout_expired(&timeout));
 
 		g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
 		gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
diff --git a/drivers/gpu/nvgpu/gm206/bios_gm206.c b/drivers/gpu/nvgpu/gm206/bios_gm206.c
index 3993691a..b7260218 100644
--- a/drivers/gpu/nvgpu/gm206/bios_gm206.c
+++ b/drivers/gpu/nvgpu/gm206/bios_gm206.c
@@ -19,6 +19,7 @@
 #include <nvgpu/bios.h>
 #include <nvgpu/kmem.h>
 #include <nvgpu/nvgpu_common.h>
+#include <nvgpu/timers.h>
 
 #include "gk20a/gk20a.h"
 #include "gm20b/fifo_gm20b.h"
@@ -99,13 +100,15 @@ static void upload_data(struct gk20a *g, u32 dst, u8 *src, u32 size, u8 port)
 
 static int gm206_bios_devinit(struct gk20a *g)
 {
-	int retries = PMU_BOOT_TIMEOUT_MAX / PMU_BOOT_TIMEOUT_DEFAULT;
 	int err = 0;
 	int devinit_completed;
+	struct nvgpu_timeout timeout;
 
 	gk20a_dbg_fn("");
 	g->ops.pmu.reset(g);
 
+	nvgpu_timeout_init(g, &timeout, PMU_BOOT_TIMEOUT_MAX / 1000,
+			   NVGPU_TIMER_CPU_TIMER);
 	do {
 		u32 w = gk20a_readl(g, pwr_falcon_dmactl_r()) &
 			(pwr_falcon_dmactl_dmem_scrubbing_m() |
@@ -116,9 +119,13 @@ static int gm206_bios_devinit(struct gk20a *g)
 			break;
 		}
 		udelay(PMU_BOOT_TIMEOUT_DEFAULT);
-	} while (--retries || !tegra_platform_is_silicon());
+	} while (!nvgpu_timeout_expired(&timeout));
+
+	if (nvgpu_timeout_peek_expired(&timeout)) {
+		err = -ETIMEDOUT;
+		goto out;
+	}
 
-	/* todo check retries */
 	upload_code(g, g->bios.devinit.bootloader_phys_base,
 		    g->bios.devinit.bootloader,
 		    g->bios.devinit.bootloader_size,
@@ -147,35 +154,39 @@ static int gm206_bios_devinit(struct gk20a *g)
 	gk20a_writel(g, pwr_falcon_cpuctl_r(),
 		     pwr_falcon_cpuctl_startcpu_f(1));
 
-	retries = PMU_BOOT_TIMEOUT_MAX / PMU_BOOT_TIMEOUT_DEFAULT;
+	nvgpu_timeout_init(g, &timeout, PMU_BOOT_TIMEOUT_MAX / 1000,
+			   NVGPU_TIMER_CPU_TIMER);
 	do {
 		devinit_completed = pwr_falcon_cpuctl_halt_intr_v(
 				gk20a_readl(g, pwr_falcon_cpuctl_r())) &&
 				top_scratch1_devinit_completed_v(
 				gk20a_readl(g, top_scratch1_r()));
 		udelay(PMU_BOOT_TIMEOUT_DEFAULT);
-	} while (!devinit_completed && retries--);
+	} while (!devinit_completed && !nvgpu_timeout_expired(&timeout));
+
+	if (nvgpu_timeout_peek_expired(&timeout))
+		err = -ETIMEDOUT;
 
 	gk20a_writel(g, pwr_falcon_irqsclr_r(),
 		     pwr_falcon_irqstat_halt_true_f());
 	gk20a_readl(g, pwr_falcon_irqsclr_r());
 
-	if (!retries)
-		err = -EINVAL;
-
+out:
 	gk20a_dbg_fn("done");
 	return err;
 }
 
 static int gm206_bios_preos(struct gk20a *g)
 {
-	int retries = PMU_BOOT_TIMEOUT_MAX / PMU_BOOT_TIMEOUT_DEFAULT;
 	int err = 0;
 	int val;
+	struct nvgpu_timeout timeout;
 
 	gk20a_dbg_fn("");
 	g->ops.pmu.reset(g);
 
+	nvgpu_timeout_init(g, &timeout, PMU_BOOT_TIMEOUT_MAX / 1000,
+			   NVGPU_TIMER_CPU_TIMER);
 	do {
 		u32 w = gk20a_readl(g, pwr_falcon_dmactl_r()) &
 			(pwr_falcon_dmactl_dmem_scrubbing_m() |
@@ -186,9 +197,13 @@ static int gm206_bios_preos(struct gk20a *g)
 			break;
 		}
 		udelay(PMU_BOOT_TIMEOUT_DEFAULT);
-	} while (--retries || !tegra_platform_is_silicon());
+	} while (!nvgpu_timeout_expired(&timeout));
+
+	if (nvgpu_timeout_peek_expired(&timeout)) {
+		err = -ETIMEDOUT;
+		goto out;
+	}
 
-	/* todo check retries */
 	upload_code(g, g->bios.preos.bootloader_phys_base,
 		    g->bios.preos.bootloader,
 		    g->bios.preos.bootloader_size,
@@ -209,20 +224,24 @@ static int gm206_bios_preos(struct gk20a *g)
 	gk20a_writel(g, pwr_falcon_cpuctl_r(),
 		     pwr_falcon_cpuctl_startcpu_f(1));
 
-	retries = PMU_BOOT_TIMEOUT_MAX / PMU_BOOT_TIMEOUT_DEFAULT;
+	nvgpu_timeout_init(g, &timeout, PMU_BOOT_TIMEOUT_MAX / 1000,
+			   NVGPU_TIMER_CPU_TIMER);
 	do {
 		val = pwr_falcon_cpuctl_halt_intr_v(
 				gk20a_readl(g, pwr_falcon_cpuctl_r()));
 		udelay(PMU_BOOT_TIMEOUT_DEFAULT);
-	} while (!val && retries--);
+	} while (!val && !nvgpu_timeout_expired(&timeout));
+
+	if (nvgpu_timeout_peek_expired(&timeout)) {
+		err = -ETIMEDOUT;
+		goto out;
+	}
 
 	gk20a_writel(g, pwr_falcon_irqsclr_r(),
 		     pwr_falcon_irqstat_halt_true_f());
 	gk20a_readl(g, pwr_falcon_irqsclr_r());
 
-	if (!retries)
-		err = -EINVAL;
-
+out:
 	gk20a_dbg_fn("done");
 	return err;
 }
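
Beyond the loop conversion, gm206_bios_devinit() and gm206_bios_preos() gain a real error path: the /* todo check retries */ comments are finally resolved, timeouts now fail with -ETIMEDOUT instead of the old -EINVAL (or nothing at all for the scrub loops), and the new out: label keeps the gk20a_dbg_fn("done") trace on every exit. The skeleton both functions now share, sketched in isolation with scrub_done() as a hypothetical condition:

	struct nvgpu_timeout timeout;
	int err = 0;

	nvgpu_timeout_init(g, &timeout, PMU_BOOT_TIMEOUT_MAX / 1000,
			   NVGPU_TIMER_CPU_TIMER);
	do {
		if (scrub_done(g))		/* hypothetical */
			break;
		udelay(PMU_BOOT_TIMEOUT_DEFAULT);
	} while (!nvgpu_timeout_expired(&timeout));

	if (nvgpu_timeout_peek_expired(&timeout)) {
		err = -ETIMEDOUT;		/* was -EINVAL, or unchecked */
		goto out;
	}

	/* ... upload and boot the falcon ... */

out:
	gk20a_dbg_fn("done");
	return err;
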
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index 0032bce7..dd67f882 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -35,10 +35,10 @@
 int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout)
 {
 	u32 data = 0;
-	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+	struct nvgpu_timeout to;
 
-	while (time_before(jiffies, end_jiffies) ||
-	       !tegra_platform_is_silicon()) {
+	nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
+	do {
 		gk20a_writel(g, psec_falcon_irqsclr_r(),
 			gk20a_readl(g, psec_falcon_irqsclr_r()) | (0x10));
 		data = gk20a_readl(g, psec_falcon_irqstat_r());
@@ -46,10 +46,10 @@ int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout)
 			psec_falcon_irqstat_halt_true_f())
 			/*halt irq is clear*/
 			break;
-		timeout--;
 		udelay(1);
-	}
-	if (timeout == 0)
+	} while (!nvgpu_timeout_expired(&to));
+
+	if (nvgpu_timeout_peek_expired(&to))
 		return -EBUSY;
 	return 0;
 }
@@ -58,10 +58,10 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
 {
 	u32 data = 0;
 	int completion = -EBUSY;
-	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+	struct nvgpu_timeout to;
 
-	while (time_before(jiffies, end_jiffies) ||
-	       !tegra_platform_is_silicon()) {
+	nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
+	do {
 		data = gk20a_readl(g, psec_falcon_cpuctl_r());
 		if (data & psec_falcon_cpuctl_halt_intr_m()) {
 			/*CPU is halted break*/
@@ -69,21 +69,21 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
 			break;
 		}
 		udelay(1);
-	}
-	if (completion){
+	} while (!nvgpu_timeout_expired(&to));
+
+	if (completion) {
 		gk20a_err(dev_from_gk20a(g), "ACR boot timed out");
+		return completion;
 	}
-	else {
 
 	g->acr.capabilities = gk20a_readl(g, psec_falcon_mailbox1_r());
 	gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities);
 	data = gk20a_readl(g, psec_falcon_mailbox0_r());
 	if (data) {
 
 		gk20a_err(dev_from_gk20a(g),
 			"ACR boot failed, err %x", data);
 		completion = -EAGAIN;
-	}
 	}
 
 	init_pmu_setup_hw1(g);
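
Note that sec2_clear_halt_interrupt_status() and sec2_wait_for_halt() pass their caller-supplied timeout, previously fed to msecs_to_jiffies(), straight to nvgpu_timeout_init() with NVGPU_TIMER_CPU_TIMER, which confirms that CPU timers are millisecond-based and matches the / 1000 applied to the microsecond *_TIMEOUT_MAX constants elsewhere in the series. The conversion also retires a unit-confused check: the old clear-halt loop decremented its millisecond argument once per 1 us poll and then tested timeout == 0, whereas nvgpu_timeout_peek_expired() gives a coherent answer. sec2_wait_for_halt() additionally trades its if/else for an early return completion; on the timeout path, leaving the success path unindented.
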
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 7f43a6ce..cb6ef9c7 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -2041,21 +2041,21 @@ static int gr_gp10b_suspend_contexts(struct gk20a *g,
 		struct channel_ctx_gk20a *ch_ctx =
 				&cilp_preempt_pending_ch->ch_ctx;
 		struct gr_ctx_desc *gr_ctx = ch_ctx->gr_ctx;
-		unsigned long end_jiffies = jiffies +
-			msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
+		struct nvgpu_timeout timeout;
 
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP preempt pending, waiting %lu msecs for preemption",
 			gk20a_get_gr_idle_timeout(g));
 
+		nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+				   NVGPU_TIMER_CPU_TIMER);
 		do {
 			if (!gr_ctx->t18x.cilp_preempt_pending)
 				break;
 
 			usleep_range(delay, delay * 2);
 			delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
-		} while (time_before(jiffies, end_jiffies)
-			 || !tegra_platform_is_silicon());
+		} while (!nvgpu_timeout_expired(&timeout));
 
 		/* If cilp is still pending at this point, timeout */
 		if (gr_ctx->t18x.cilp_preempt_pending)