summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2016-12-19 18:23:01 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2017-01-18 19:46:38 -0500
commit8e53d790902b8a40098a5851584ae7ba58b357b6 (patch)
tree48fd2c6b26ac3137dd2dfe5255cc04f24bcc8834 /drivers/gpu/nvgpu
parent6e2237ef622113b8fa1149aa48988a99fa30594f (diff)
gpu: nvgpu: Use timer API in gm20b code
Use the timer API instead of Linux specific APIs for handling timeouts. Also, lower the L2 timeout from 1 second (absurdly long) to 5ms.

Bug 1799159

Change-Id: I27dbc35b12e9bc22ff2207bb87543f76203e20f1
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1273825
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--drivers/gpu/nvgpu/gm20b/acr_gm20b.c72
-rw-r--r--drivers/gpu/nvgpu/gm20b/fifo_gm20b.c11
-rw-r--r--drivers/gpu/nvgpu/gm20b/ltc_gm20b.c73
-rw-r--r--drivers/gpu/nvgpu/gm20b/mm_gm20b.c31
-rw-r--r--drivers/gpu/nvgpu/gm20b/pmu_gm20b.c13
5 files changed, 111 insertions, 89 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index f4311ee9..e47bc773 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -18,10 +18,13 @@
18#include <linux/debugfs.h> 18#include <linux/debugfs.h>
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include "nvgpu_common.h"
22 21
23#include <linux/platform/tegra/mc.h> 22#include <linux/platform/tegra/mc.h>
24 23
24#include <nvgpu/timers.h>
25
26#include "nvgpu_common.h"
27
25#include "gk20a/gk20a.h" 28#include "gk20a/gk20a.h"
26#include "gk20a/pmu_gk20a.h" 29#include "gk20a/pmu_gk20a.h"
27#include "gk20a/semaphore_gk20a.h" 30#include "gk20a/semaphore_gk20a.h"
@@ -1476,64 +1479,69 @@ err_done:
1476/*! 1479/*!
1477* Wait for PMU to halt 1480* Wait for PMU to halt
1478* @param[in] g GPU object pointer 1481* @param[in] g GPU object pointer
1479* @param[in] timeout Timeout in msec for PMU to halt 1482* @param[in] timeout_ms Timeout in msec for PMU to halt
1480* @return '0' if PMU halts 1483* @return '0' if PMU halts
1481*/ 1484*/
1482static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout) 1485static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
1483{ 1486{
1484 u32 data = 0; 1487 u32 data = 0;
1485 int completion = -EBUSY; 1488 int ret = -EBUSY;
1486 unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout); 1489 struct nvgpu_timeout timeout;
1490
1491 nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
1487 1492
1488 while (time_before(jiffies, end_jiffies) || 1493 do {
1489 !tegra_platform_is_silicon()) {
1490 data = gk20a_readl(g, pwr_falcon_cpuctl_r()); 1494 data = gk20a_readl(g, pwr_falcon_cpuctl_r());
1491 if (data & pwr_falcon_cpuctl_halt_intr_m()) { 1495 if (data & pwr_falcon_cpuctl_halt_intr_m()) {
1492 /*CPU is halted break*/ 1496 /* CPU is halted break */
1493 completion = 0; 1497 ret = 0;
1494 break; 1498 break;
1495 } 1499 }
1496 udelay(1); 1500 udelay(1);
1497 } 1501 } while (!nvgpu_timeout_expired(&timeout));
1498 if (completion) 1502
1503 if (ret) {
1499 gk20a_err(dev_from_gk20a(g), "ACR boot timed out"); 1504 gk20a_err(dev_from_gk20a(g), "ACR boot timed out");
1500 else { 1505 return ret;
1501 g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r()); 1506 }
1502 gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities); 1507
1503 data = gk20a_readl(g, pwr_falcon_mailbox0_r()); 1508 g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r());
1504 if (data) { 1509 gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities);
1505 gk20a_err(dev_from_gk20a(g), 1510 data = gk20a_readl(g, pwr_falcon_mailbox0_r());
1506 "ACR boot failed, err %x", data); 1511 if (data) {
1507 completion = -EAGAIN; 1512 gk20a_err(dev_from_gk20a(g),
1508 } 1513 "ACR boot failed, err %x", data);
1514 ret = -EAGAIN;
1509 } 1515 }
1510 return completion; 1516
1517 return ret;
1511} 1518}
1512 1519
1513/*! 1520/*!
1514* Wait for PMU halt interrupt status to be cleared 1521* Wait for PMU halt interrupt status to be cleared
1515* @param[in] g GPU object pointer 1522* @param[in] g GPU object pointer
1516* @param[in] timeout_us Timeout in msec for halt to clear 1523* @param[in] timeout_ms Timeout in msec for halt to clear
1517* @return '0' if PMU halt irq status is clear 1524* @return '0' if PMU halt irq status is clear
1518*/ 1525*/
1519static int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout) 1526static int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout_ms)
1520{ 1527{
1521 u32 data = 0; 1528 u32 data = 0;
1522 unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout); 1529 struct nvgpu_timeout timeout;
1530
1531 nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
1523 1532
1524 while (time_before(jiffies, end_jiffies) || 1533 do {
1525 !tegra_platform_is_silicon()) {
1526 gk20a_writel(g, pwr_falcon_irqsclr_r(), 1534 gk20a_writel(g, pwr_falcon_irqsclr_r(),
1527 gk20a_readl(g, pwr_falcon_irqsclr_r()) | (0x10)); 1535 gk20a_readl(g, pwr_falcon_irqsclr_r()) | (0x10));
1528 data = gk20a_readl(g, (pwr_falcon_irqstat_r())); 1536 data = gk20a_readl(g, (pwr_falcon_irqstat_r()));
1537
1529 if ((data & pwr_falcon_irqstat_halt_true_f()) != 1538 if ((data & pwr_falcon_irqstat_halt_true_f()) !=
1530 pwr_falcon_irqstat_halt_true_f()) 1539 pwr_falcon_irqstat_halt_true_f())
1531 /*halt irq is clear*/ 1540 /*halt irq is clear*/
1532 break; 1541 return 0;
1533 timeout--; 1542
1534 udelay(1); 1543 udelay(1);
1535 } 1544 } while (!nvgpu_timeout_expired(&timeout));
1536 if (timeout == 0) 1545
1537 return -EBUSY; 1546 return -ETIMEDOUT;
1538 return 0;
1539} 1547}
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index 6be6be04..bd94a54b 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -20,6 +20,8 @@
20 20
21#include "fifo_gm20b.h" 21#include "fifo_gm20b.h"
22 22
23#include <nvgpu/timers.h>
24
23#include <nvgpu/hw/gm20b/hw_ccsr_gm20b.h> 25#include <nvgpu/hw/gm20b/hw_ccsr_gm20b.h>
24#include <nvgpu/hw/gm20b/hw_ram_gm20b.h> 26#include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
25#include <nvgpu/hw/gm20b/hw_fifo_gm20b.h> 27#include <nvgpu/hw/gm20b/hw_fifo_gm20b.h>
@@ -69,11 +71,10 @@ static inline u32 gm20b_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
69static void gm20b_fifo_trigger_mmu_fault(struct gk20a *g, 71static void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
70 unsigned long engine_ids) 72 unsigned long engine_ids)
71{ 73{
72 unsigned long end_jiffies = jiffies +
73 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
74 unsigned long delay = GR_IDLE_CHECK_DEFAULT; 74 unsigned long delay = GR_IDLE_CHECK_DEFAULT;
75 unsigned long engine_id; 75 unsigned long engine_id;
76 int ret = -EBUSY; 76 int ret = -EBUSY;
77 struct nvgpu_timeout timeout;
77 78
78 /* trigger faults for all bad engines */ 79 /* trigger faults for all bad engines */
79 for_each_set_bit(engine_id, &engine_ids, 32) { 80 for_each_set_bit(engine_id, &engine_ids, 32) {
@@ -89,6 +90,9 @@ static void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
89 } 90 }
90 } 91 }
91 92
93 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
94 NVGPU_TIMER_CPU_TIMER);
95
92 /* Wait for MMU fault to trigger */ 96 /* Wait for MMU fault to trigger */
93 do { 97 do {
94 if (gk20a_readl(g, fifo_intr_0_r()) & 98 if (gk20a_readl(g, fifo_intr_0_r()) &
@@ -99,8 +103,7 @@ static void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
99 103
100 usleep_range(delay, delay * 2); 104 usleep_range(delay, delay * 2);
101 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); 105 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
102 } while (time_before(jiffies, end_jiffies) || 106 } while (!nvgpu_timeout_expired(&timeout));
103 !tegra_platform_is_silicon());
104 107
105 if (ret) 108 if (ret)
106 gk20a_err(dev_from_gk20a(g), "mmu fault timeout"); 109 gk20a_err(dev_from_gk20a(g), "mmu fault timeout");
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
index 5b97b388..3324d3df 100644
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GM20B L2 2 * GM20B L2
3 * 3 *
4 * Copyright (c) 2014-2016 NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2014-2017 NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -14,11 +14,12 @@
14 */ 14 */
15 15
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/jiffies.h>
18#include <trace/events/gk20a.h> 17#include <trace/events/gk20a.h>
19 18
20#include "gk20a/gk20a.h" 19#include "gk20a/gk20a.h"
21 20
21#include <nvgpu/timers.h>
22
22#include <nvgpu/hw/gm20b/hw_mc_gm20b.h> 23#include <nvgpu/hw/gm20b/hw_mc_gm20b.h>
23#include <nvgpu/hw/gm20b/hw_ltc_gm20b.h> 24#include <nvgpu/hw/gm20b/hw_ltc_gm20b.h>
24#include <nvgpu/hw/gm20b/hw_top_gm20b.h> 25#include <nvgpu/hw/gm20b/hw_top_gm20b.h>
@@ -103,10 +104,10 @@ static int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
103int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, 104int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
104 u32 min, u32 max) 105 u32 min, u32 max)
105{ 106{
106 int err = 0;
107 struct gr_gk20a *gr = &g->gr; 107 struct gr_gk20a *gr = &g->gr;
108 struct nvgpu_timeout timeout;
109 int err = 0;
108 u32 ltc, slice, ctrl1, val, hw_op = 0; 110 u32 ltc, slice, ctrl1, val, hw_op = 0;
109 s32 retry = 200;
110 u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v( 111 u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(
111 gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r())); 112 gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
112 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); 113 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
@@ -143,18 +144,16 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
143 ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() + 144 ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
144 ltc * ltc_stride + slice * lts_stride; 145 ltc * ltc_stride + slice * lts_stride;
145 146
146 retry = 200; 147 nvgpu_timeout_init(g, &timeout, 200,
148 NVGPU_TIMER_RETRY_TIMER);
147 do { 149 do {
148 val = gk20a_readl(g, ctrl1); 150 val = gk20a_readl(g, ctrl1);
149 if (!(val & hw_op)) 151 if (!(val & hw_op))
150 break; 152 break;
151 retry--;
152 udelay(5); 153 udelay(5);
154 } while (!nvgpu_timeout_expired(&timeout));
153 155
154 } while (retry >= 0 || 156 if (nvgpu_timeout_peek_expired(&timeout)) {
155 !tegra_platform_is_silicon());
156
157 if (retry < 0 && tegra_platform_is_silicon()) {
158 gk20a_err(dev_from_gk20a(g), 157 gk20a_err(dev_from_gk20a(g),
159 "comp tag clear timeout\n"); 158 "comp tag clear timeout\n");
160 err = -EBUSY; 159 err = -EBUSY;
@@ -288,23 +287,10 @@ u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
288 */ 287 */
289void gm20b_flush_ltc(struct gk20a *g) 288void gm20b_flush_ltc(struct gk20a *g)
290{ 289{
291 unsigned long timeout; 290 struct nvgpu_timeout timeout;
292 unsigned int ltc; 291 unsigned int ltc;
293 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); 292 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
294 293
295#define __timeout_init() \
296 do { \
297 timeout = jiffies + HZ; \
298 } while (0)
299#define __timeout_check() \
300 do { \
301 if (tegra_platform_is_silicon() && \
302 time_after(jiffies, timeout)) { \
303 gk20a_err(dev_from_gk20a(g), "L2 flush timeout!"); \
304 break; \
305 } \
306 } while (0)
307
308 /* Clean... */ 294 /* Clean... */
309 gk20a_writel(g, ltc_ltcs_ltss_tstg_cmgmt1_r(), 295 gk20a_writel(g, ltc_ltcs_ltss_tstg_cmgmt1_r(),
310 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f() | 296 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f() |
@@ -318,14 +304,33 @@ void gm20b_flush_ltc(struct gk20a *g)
318 for (ltc = 0; ltc < g->ltc_count; ltc++) { 304 for (ltc = 0; ltc < g->ltc_count; ltc++) {
319 u32 op_pending; 305 u32 op_pending;
320 306
321 __timeout_init(); 307 /*
308 * Use 5ms - this should be sufficient time to flush the cache.
309 * On tegra, rough EMC BW available for old tegra chips (newer
310 * chips are strictly faster) can be estimated as follows:
311 *
312 * Lowest reasonable EMC clock speed will be around 102MHz on
313 * t124 for display enabled boards and generally fixed to max
314 * for non-display boards (since they are generally plugged in).
315 *
316 * Thus, the available BW is 64b * 2 * 102MHz = 1.3GB/s. Of that
317 * BW the GPU will likely get about half (display and overhead/
318 * utilization inefficiency eating the rest) so 650MB/s at
319 * worst. Assuming at most 1MB of GPU L2 cache (less for most
320 * chips) worst case is we take 1MB/650MB/s = 1.5ms.
321 *
322 * So 5ms timeout here should be more than sufficient.
323 */
324 nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
325
322 do { 326 do {
323 int cmgmt1 = ltc_ltc0_ltss_tstg_cmgmt1_r() + 327 int cmgmt1 = ltc_ltc0_ltss_tstg_cmgmt1_r() +
324 ltc * ltc_stride; 328 ltc * ltc_stride;
325 op_pending = gk20a_readl(g, cmgmt1); 329 op_pending = gk20a_readl(g, cmgmt1);
326 __timeout_check(); 330 } while ((op_pending &
327 } while (op_pending & 331 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f()) &&
328 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f()); 332 !nvgpu_timeout_expired_msg(&timeout,
333 "L2 flush timeout!"));
329 } 334 }
330 335
331 /* And invalidate. */ 336 /* And invalidate. */
@@ -339,14 +344,18 @@ void gm20b_flush_ltc(struct gk20a *g)
339 /* Wait on each LTC individually. */ 344 /* Wait on each LTC individually. */
340 for (ltc = 0; ltc < g->ltc_count; ltc++) { 345 for (ltc = 0; ltc < g->ltc_count; ltc++) {
341 u32 op_pending; 346 u32 op_pending;
342 __timeout_init(); 347
348 /* Again, 5ms. */
349 nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
350
343 do { 351 do {
344 int cmgmt0 = ltc_ltc0_ltss_tstg_cmgmt0_r() + 352 int cmgmt0 = ltc_ltc0_ltss_tstg_cmgmt0_r() +
345 ltc * ltc_stride; 353 ltc * ltc_stride;
346 op_pending = gk20a_readl(g, cmgmt0); 354 op_pending = gk20a_readl(g, cmgmt0);
347 __timeout_check(); 355 } while ((op_pending &
348 } while (op_pending & 356 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f()) &&
349 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f()); 357 !nvgpu_timeout_expired_msg(&timeout,
358 "L2 flush timeout!"));
350 } 359 }
351} 360}
352 361
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
index ca8fbaee..8f5d1e10 100644
--- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GM20B MMU 2 * GM20B MMU
3 * 3 *
4 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,8 @@
20 20
21#include "mm_gm20b.h" 21#include "mm_gm20b.h"
22 22
23#include <nvgpu/timers.h>
24
23#include <nvgpu/hw/gm20b/hw_gmmu_gm20b.h> 25#include <nvgpu/hw/gm20b/hw_gmmu_gm20b.h>
24#include <nvgpu/hw/gm20b/hw_fb_gm20b.h> 26#include <nvgpu/hw/gm20b/hw_fb_gm20b.h>
25#include <nvgpu/hw/gm20b/hw_gr_gm20b.h> 27#include <nvgpu/hw/gm20b/hw_gr_gm20b.h>
@@ -27,28 +29,23 @@
27#include <nvgpu/hw/gm20b/hw_bus_gm20b.h> 29#include <nvgpu/hw/gm20b/hw_bus_gm20b.h>
28 30
29static int gm20b_mm_mmu_vpr_info_fetch_wait(struct gk20a *g, 31static int gm20b_mm_mmu_vpr_info_fetch_wait(struct gk20a *g,
30 const unsigned int msec) 32 unsigned int msec)
31{ 33{
32 unsigned long timeout; 34 struct nvgpu_timeout timeout;
33 35
34 if (tegra_platform_is_silicon()) 36 nvgpu_timeout_init(g, &timeout, msec, NVGPU_TIMER_CPU_TIMER);
35 timeout = jiffies + msecs_to_jiffies(msec);
36 else
37 timeout = msecs_to_jiffies(msec);
38 37
39 while (1) { 38 do {
40 u32 val; 39 u32 val;
40
41 val = gk20a_readl(g, fb_mmu_vpr_info_r()); 41 val = gk20a_readl(g, fb_mmu_vpr_info_r());
42 if (fb_mmu_vpr_info_fetch_v(val) == 42 if (fb_mmu_vpr_info_fetch_v(val) ==
43 fb_mmu_vpr_info_fetch_false_v()) 43 fb_mmu_vpr_info_fetch_false_v())
44 break; 44 return 0;
45 if (tegra_platform_is_silicon()) { 45
46 if (WARN_ON(time_after(jiffies, timeout))) 46 } while (!nvgpu_timeout_expired(&timeout));
47 return -ETIME; 47
48 } else if (--timeout == 0) 48 return -ETIMEDOUT;
49 return -ETIME;
50 }
51 return 0;
52} 49}
53 50
54int gm20b_mm_mmu_vpr_info_fetch(struct gk20a *g) 51int gm20b_mm_mmu_vpr_info_fetch(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index 2e568e83..4b87b877 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -22,6 +22,8 @@
22#include "acr_gm20b.h" 22#include "acr_gm20b.h"
23#include "pmu_gm20b.h" 23#include "pmu_gm20b.h"
24 24
25#include <nvgpu/timers.h>
26
25#include <nvgpu/hw/gm20b/hw_gr_gm20b.h> 27#include <nvgpu/hw/gm20b/hw_gr_gm20b.h>
26#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h> 28#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
27#include <nvgpu/hw/gm20b/hw_fuse_gm20b.h> 29#include <nvgpu/hw/gm20b/hw_fuse_gm20b.h>
@@ -173,21 +175,24 @@ void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
173 gk20a_dbg_fn("done"); 175 gk20a_dbg_fn("done");
174} 176}
175 177
176static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout, u32 val) 178static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms,
179 u32 val)
177{ 180{
178 unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
179 unsigned long delay = GR_FECS_POLL_INTERVAL; 181 unsigned long delay = GR_FECS_POLL_INTERVAL;
180 u32 reg; 182 u32 reg;
183 struct nvgpu_timeout timeout;
181 184
182 gk20a_dbg_fn(""); 185 gk20a_dbg_fn("");
183 reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0)); 186 reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
187
188 nvgpu_timeout_init(g, &timeout, (int)timeout_ms, NVGPU_TIMER_CPU_TIMER);
189
184 do { 190 do {
185 reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0)); 191 reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
186 if (reg == val) 192 if (reg == val)
187 return 0; 193 return 0;
188 udelay(delay); 194 udelay(delay);
189 } while (time_before(jiffies, end_jiffies) || 195 } while (!nvgpu_timeout_expired(&timeout));
190 !tegra_platform_is_silicon());
191 196
192 return -ETIMEDOUT; 197 return -ETIMEDOUT;
193} 198}