author		Alex Waterman <alexw@nvidia.com>	2016-12-16 15:29:34 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-01-18 19:46:33 -0500
commit		6e2237ef622113b8fa1149aa48988a99fa30594f (patch)
tree		1356c45dda5751f7094f37aa93019f1199b635fb /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent		8f5a42c4bf9c323b86452065d39ed7632b126561 (diff)
gpu: nvgpu: Use timer API in gk20a code
Use the timers API in the gk20a code instead of Linux specific API calls.

This also changes the behavior of several functions to wait for the
full timeout for each operation that can timeout. Previously the
timeout was shared across each operation.

Bug 1799159

Change-Id: I2bbed54630667b2b879b56a63a853266afc1e5d8
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1273826
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	101
1 file changed, 56 insertions(+), 45 deletions(-)
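The pattern this commit introduces, in brief: each wait arms its own
nvgpu_timeout and polls with exponential back-off until the work
completes or the timeout expires. Below is a minimal editorial sketch of
that pattern, not part of the commit: nvgpu_timeout_init(),
nvgpu_timeout_expired(), NVGPU_TIMER_CPU_TIMER and the back-off constants
are taken from the diff that follows, while hw_unit_is_idle() is a
hypothetical stand-in for the real register checks.

/*
 * Illustrative sketch only. Arms a fresh timeout per operation instead
 * of sharing one jiffies-based deadline across several waits, which is
 * the behavioral change described in the commit message.
 */
#include <nvgpu/timers.h>

static int poll_until_idle(struct gk20a *g, unsigned long duration_ms)
{
        struct nvgpu_timeout timeout;
        u32 delay = GR_IDLE_CHECK_DEFAULT;

        nvgpu_timeout_init(g, &timeout, (int)duration_ms,
                           NVGPU_TIMER_CPU_TIMER);

        do {
                if (hw_unit_is_idle(g))  /* hypothetical readiness check */
                        return 0;

                /* Exponential back-off, capped at GR_IDLE_CHECK_MAX. */
                usleep_range(delay, delay * 2);
                delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
        } while (!nvgpu_timeout_expired(&timeout));

        return -EAGAIN;
}

Because the timeout is armed inside the helper, every call now gets the
full duration_ms budget; previously callers computed end_jiffies once
and passed the same deadline to several consecutive waits.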
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index c5e927c1..f2096383 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -31,6 +31,8 @@
 #include <linux/bsearch.h>
 #include <trace/events/gk20a.h>
 
+#include <nvgpu/timers.h>
+
 #include "gk20a.h"
 #include "kind_gk20a.h"
 #include "gr_ctx_gk20a.h"
@@ -321,7 +323,7 @@ static void gr_gk20a_load_falcon_imem(struct gk20a *g)
         }
 }
 
-int gr_gk20a_wait_idle(struct gk20a *g, unsigned long end_jiffies,
+int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms,
                        u32 expect_delay)
 {
         u32 delay = expect_delay;
@@ -331,11 +333,15 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long end_jiffies,
         u32 gr_engine_id;
         u32 engine_status;
         bool ctx_status_invalid;
+        struct nvgpu_timeout timeout;
 
         gk20a_dbg_fn("");
 
         gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
 
+        nvgpu_timeout_init(g, &timeout, (int)duration_ms,
+                           NVGPU_TIMER_CPU_TIMER);
+
         do {
                 /* fmodel: host gets fifo_engine_status(gr) from gr
                    only when gr_status is read */
@@ -366,8 +372,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long end_jiffies,
                 usleep_range(delay, delay * 2);
                 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 
-        } while (time_before(jiffies, end_jiffies)
-                        || !tegra_platform_is_silicon());
+        } while (!nvgpu_timeout_expired(&timeout));
 
         gk20a_err(dev_from_gk20a(g),
                   "timeout, ctxsw busy : %d, gr busy : %d",
@@ -376,18 +381,22 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long end_jiffies,
         return -EAGAIN;
 }
 
-int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long end_jiffies,
+int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms,
                           u32 expect_delay)
 {
         u32 val;
         u32 delay = expect_delay;
         struct gk20a_platform *platform = dev_get_drvdata(g->dev);
+        struct nvgpu_timeout timeout;
 
         if (platform->is_fmodel)
                 return 0;
 
         gk20a_dbg_fn("");
 
+        nvgpu_timeout_init(g, &timeout, (int)duration_ms,
+                           NVGPU_TIMER_CPU_TIMER);
+
         do {
                 val = gk20a_readl(g, gr_status_r());
 
@@ -398,8 +407,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long end_jiffies,
 
                 usleep_range(delay, delay * 2);
                 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
-        } while (time_before(jiffies, end_jiffies)
-                        || !tegra_platform_is_silicon());
+        } while (!nvgpu_timeout_expired(&timeout));
 
         gk20a_err(dev_from_gk20a(g),
                   "timeout, fe busy : %x", val);
@@ -412,8 +420,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
                             u32 mailbox_ok, u32 opc_fail,
                             u32 mailbox_fail, bool sleepduringwait)
 {
-        unsigned long end_jiffies = jiffies +
-                msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
+        struct nvgpu_timeout timeout;
         u32 delay = GR_FECS_POLL_INTERVAL;
         u32 check = WAIT_UCODE_LOOP;
         u32 reg;
@@ -423,9 +430,11 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
         if (sleepduringwait)
                 delay = GR_IDLE_CHECK_DEFAULT;
 
+        nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+                           NVGPU_TIMER_CPU_TIMER);
+
         while (check == WAIT_UCODE_LOOP) {
-                if (!time_before(jiffies, end_jiffies) &&
-                    tegra_platform_is_silicon())
+                if (nvgpu_timeout_expired(&timeout))
                         check = WAIT_UCODE_TIMEOUT;
 
                 reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(mailbox_id));
@@ -1484,8 +1493,6 @@ static u32 gk20a_init_sw_bundle(struct gk20a *g)
         u32 last_bundle_data = 0;
         u32 err = 0;
         unsigned int i;
-        unsigned long end_jiffies = jiffies +
-                msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
 
         /* disable fe_go_idle */
         gk20a_writel(g, gr_fe_go_idle_timeout_r(),
@@ -1507,11 +1514,12 @@ static u32 gk20a_init_sw_bundle(struct gk20a *g)
 
                 if (gr_pipe_bundle_address_value_v(sw_bundle_init->l[i].addr) ==
                     GR_GO_IDLE_BUNDLE)
-                        err |= gr_gk20a_wait_idle(g, end_jiffies,
-                                                  GR_IDLE_CHECK_DEFAULT);
+                        err |= gr_gk20a_wait_idle(g,
+                                                  gk20a_get_gr_idle_timeout(g),
+                                                  GR_IDLE_CHECK_DEFAULT);
 
-                err = gr_gk20a_wait_fe_idle(g, end_jiffies,
+                err = gr_gk20a_wait_fe_idle(g, gk20a_get_gr_idle_timeout(g),
                                             GR_IDLE_CHECK_DEFAULT);
                 if (err)
                         break;
         }
@@ -1521,7 +1529,8 @@ static u32 gk20a_init_sw_bundle(struct gk20a *g)
         gk20a_writel(g, gr_pipe_bundle_config_r(),
                      gr_pipe_bundle_config_override_pipe_mode_disabled_f());
 
-        err = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+                                 GR_IDLE_CHECK_DEFAULT);
         if (err)
                 return err;
 
@@ -1548,8 +1557,6 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
         u32 err = 0;
         struct aiv_list_gk20a *sw_ctx_load = &g->gr.ctx_vars.sw_ctx_load;
         struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init;
-        unsigned long end_jiffies = jiffies +
-                msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
         u32 last_method_data = 0;
         int retries = FE_PWR_MODE_TIMEOUT_MAX / FE_PWR_MODE_TIMEOUT_DEFAULT;
         struct gk20a_platform *platform = dev_get_drvdata(g->dev);
@@ -1571,8 +1578,9 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
                         if (err)
                                 goto clean_up;
 
-                        err = gr_gk20a_wait_idle(g, end_jiffies,
-                                                 GR_IDLE_CHECK_DEFAULT);
+                        err = gr_gk20a_wait_idle(g,
+                                                 gk20a_get_gr_idle_timeout(g),
+                                                 GR_IDLE_CHECK_DEFAULT);
                 }
                 gk20a_mem_end(g, ctxheader);
                 goto clean_up;
@@ -1641,7 +1649,8 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
         if (err)
                 goto clean_up;
 
-        err = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+                                 GR_IDLE_CHECK_DEFAULT);
 
         /* load ctx init */
         for (i = 0; i < sw_ctx_load->count; i++)
@@ -1654,7 +1663,8 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
         if (g->ops.clock_gating.blcg_gr_load_gating_prod)
                 g->ops.clock_gating.blcg_gr_load_gating_prod(g, g->blcg_enabled);
 
-        err = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+                                 GR_IDLE_CHECK_DEFAULT);
         if (err)
                 goto clean_up;
 
@@ -1672,7 +1682,8 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
         /* floorsweep anything left */
         g->ops.gr.init_fs_state(g);
 
-        err = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+                                 GR_IDLE_CHECK_DEFAULT);
         if (err)
                 goto restore_fe_go_idle;
 
@@ -1685,7 +1696,8 @@ restore_fe_go_idle:
         gk20a_writel(g, gr_fe_go_idle_timeout_r(),
                      gr_fe_go_idle_timeout_count_prod_f());
 
-        if (err || gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT))
+        if (err || gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+                                      GR_IDLE_CHECK_DEFAULT))
                 goto clean_up;
 
         /* load method init */
@@ -1708,7 +1720,8 @@ restore_fe_go_idle:
                              sw_method_init->l[i].addr);
         }
 
-        err = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+                                 GR_IDLE_CHECK_DEFAULT);
         if (err)
                 goto clean_up;
 
@@ -3980,8 +3993,6 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 {
         struct fifo_gk20a *f = &g->fifo;
         struct fifo_engine_info_gk20a *gr_info = NULL;
-        unsigned long end_jiffies = jiffies +
-                msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
         u32 ret;
         u32 engine_id;
 
@@ -3995,7 +4006,8 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
                 return;
         }
 
-        ret = g->ops.gr.wait_empty(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g),
+                                   GR_IDLE_CHECK_DEFAULT);
         if (ret) {
                 gk20a_err(dev_from_gk20a(g),
                           "failed to idle graphics");
@@ -4300,7 +4312,6 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
 {
         struct fifo_gk20a *f = &g->fifo;
         struct fifo_engine_info_gk20a *gr_info = NULL;
-        unsigned long end_jiffies;
         int ret;
         u32 engine_id;
 
@@ -4314,8 +4325,8 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
                 return ret;
         }
 
-        end_jiffies = jiffies + msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
-        ret = g->ops.gr.wait_empty(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g),
+                                   GR_IDLE_CHECK_DEFAULT);
         if (ret) {
                 gk20a_err(dev_from_gk20a(g),
                           "failed to idle graphics");
@@ -4698,8 +4709,6 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
         struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init;
         u32 data;
         u64 addr;
-        unsigned long end_jiffies = jiffies +
-                msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
         u32 last_method_data = 0;
         u32 i, err;
 
@@ -4791,7 +4800,8 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
                 gk20a_writel(g, sw_ctx_load->l[i].addr,
                              sw_ctx_load->l[i].value);
 
-        err = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+                                 GR_IDLE_CHECK_DEFAULT);
         if (err)
                 goto out;
 
@@ -4813,7 +4823,8 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
         if (err)
                 goto out;
 
-        err = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+                                 GR_IDLE_CHECK_DEFAULT);
         if (err)
                 goto restore_fe_go_idle;
 
@@ -4822,7 +4833,8 @@ restore_fe_go_idle:
         gk20a_writel(g, gr_fe_go_idle_timeout_r(),
                      gr_fe_go_idle_timeout_count_prod_f());
 
-        if (err || gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT))
+        if (err || gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+                                      GR_IDLE_CHECK_DEFAULT))
                 goto out;
 
         /* load method init */
@@ -4845,7 +4857,8 @@ restore_fe_go_idle:
                              sw_method_init->l[i].addr);
         }
 
-        err = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+                                 GR_IDLE_CHECK_DEFAULT);
         if (err)
                 goto out;
 
@@ -5008,8 +5021,6 @@ out:
 static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
 {
         struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load;
-        unsigned long end_jiffies = jiffies +
-                msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
         u32 i, err = 0;
 
         gk20a_dbg_fn("");
@@ -5027,7 +5038,8 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
         if (err)
                 goto out;
 
-        err = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+                                 GR_IDLE_CHECK_DEFAULT);
         if (err)
                 goto out;
 
@@ -6610,13 +6622,12 @@ int gr_gk20a_fecs_set_reglist_virtual_addr(struct gk20a *g, u64 pmu_va)
 
 int gk20a_gr_suspend(struct gk20a *g)
 {
-        unsigned long end_jiffies = jiffies +
-                msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
         u32 ret = 0;
 
         gk20a_dbg_fn("");
 
-        ret = g->ops.gr.wait_empty(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
+        ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g),
+                                   GR_IDLE_CHECK_DEFAULT);
         if (ret)
                 return ret;
 