author		Terje Bergstrom <tbergstrom@nvidia.com>	2017-04-10 13:47:02 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-13 16:44:41 -0400
commit		22426a5452ba943ac48867722fb0927baf66d4ac (patch)
tree		4595c635cc920e4ba2d540a6e070b89e3037c28e /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent		a0fa2b0258dafcd7a2450ab5366a49663b1d2e89 (diff)
gpu: nvgpu: gk20a: Use new delay APIs
Use platform-agnostic delay functions instead of Linux kernel APIs. This
allows removing the dependency on the Linux header linux/delay.h. At the
same time, remove the #include lines for other unused Linux headers.

JIRA NVGPU-16

Change-Id: I46b9ccb80e0b67efb86ec85676e5a55ff835c0ec
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1460113
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
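The platform-agnostic calls introduced here (nvgpu_udelay(), nvgpu_usleep_range()) come from the nvgpu timers API rather than linux/delay.h, which is what lets the Linux header dependency drop out of this file. As a rough illustration, a Linux backend for these wrappers could be as thin as the sketch below; the forwarding-only implementation is an assumption for illustration, not a quote of the actual nvgpu source:

	/* Hypothetical Linux backend for the OS-agnostic delay API.
	 * Assumes the wrappers forward straight to the kernel primitives. */
	#include <linux/delay.h>

	void nvgpu_udelay(unsigned int usecs)
	{
		udelay(usecs);		/* busy-wait; usable in atomic context */
	}

	void nvgpu_usleep_range(unsigned long min_us, unsigned long max_us)
	{
		usleep_range(min_us, max_us);	/* may sleep; process context only */
	}

With call sites written against these wrappers, a non-Linux build only needs to supply its own definitions instead of patching every caller.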
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	33
1 file changed, 13 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index d211242c..a4419885 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -17,14 +17,7 @@
  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
-#include <linux/delay.h>	/* for udelay */
-#include <linux/mm.h>		/* for totalram_pages */
-#include <linux/scatterlist.h>
-#include <linux/debugfs.h>
-#include <uapi/linux/nvgpu.h>
-#include <linux/dma-mapping.h>
 #include <linux/firmware.h>
-#include <linux/nvhost.h>
 #include <trace/events/gk20a.h>
 
 #include <nvgpu/dma.h>
@@ -370,7 +363,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms,
 			return 0;
 		}
 
-		usleep_range(delay, delay * 2);
+		nvgpu_usleep_range(delay, delay * 2);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 
 	} while (!nvgpu_timeout_expired(&timeout));
@@ -406,7 +399,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms,
 			return 0;
 		}
 
-		usleep_range(delay, delay * 2);
+		nvgpu_usleep_range(delay, delay * 2);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 	} while (!nvgpu_timeout_expired(&timeout));
 
@@ -507,10 +500,10 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
 		}
 
 		if (sleepduringwait) {
-			usleep_range(delay, delay * 2);
+			nvgpu_usleep_range(delay, delay * 2);
 			delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 		} else
-			udelay(delay);
+			nvgpu_udelay(delay);
 	}
 
 	if (check == WAIT_UCODE_TIMEOUT) {
@@ -1613,7 +1606,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 			u32 req = gr_fe_pwr_mode_req_v(gk20a_readl(g, gr_fe_pwr_mode_r()));
 			if (req == gr_fe_pwr_mode_req_done_v())
 				break;
-			udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
+			nvgpu_udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
 		} while (!nvgpu_timeout_expired_msg(&timeout,
 				"timeout forcing FE on"));
 	}
@@ -1630,7 +1623,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 			gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f() |
 			gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f());
 	gk20a_readl(g, gr_fecs_ctxsw_reset_ctl_r());
-	udelay(10);
+	nvgpu_udelay(10);
 
 	gk20a_writel(g, gr_fecs_ctxsw_reset_ctl_r(),
 		gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f() |
@@ -1643,7 +1636,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 			gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f() |
 			gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f());
 	gk20a_readl(g, gr_fecs_ctxsw_reset_ctl_r());
-	udelay(10);
+	nvgpu_udelay(10);
 
 	if (!platform->is_fmodel) {
 		struct nvgpu_timeout timeout;
@@ -1657,7 +1650,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 			u32 req = gr_fe_pwr_mode_req_v(gk20a_readl(g, gr_fe_pwr_mode_r()));
 			if (req == gr_fe_pwr_mode_req_done_v())
 				break;
-			udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
+			nvgpu_udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
 		} while (!nvgpu_timeout_expired_msg(&timeout,
 				"timeout setting FE power to auto"));
 	}
@@ -2369,7 +2362,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
 
 	while ((gk20a_readl(g, gr_fecs_ctxsw_status_1_r()) &
 			gr_fecs_ctxsw_status_1_arb_busy_m()) && retries) {
-		udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT);
+		nvgpu_udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT);
 		retries--;
 	}
 	if (!retries) {
@@ -2400,7 +2393,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
 	retries = FECS_ARB_CMD_TIMEOUT_MAX / FECS_ARB_CMD_TIMEOUT_DEFAULT;
 	val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
 	while (gr_fecs_arb_ctx_cmd_cmd_v(val) && retries) {
-		udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT);
+		nvgpu_udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT);
 		retries--;
 		val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
 	}
@@ -2417,7 +2410,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
 	retries = FECS_ARB_CMD_TIMEOUT_MAX / FECS_ARB_CMD_TIMEOUT_DEFAULT;
 	val = (gk20a_readl(g, gr_fecs_arb_ctx_cmd_r()));
 	while (gr_fecs_arb_ctx_cmd_cmd_v(val) && retries) {
-		udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT);
+		nvgpu_udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT);
 		retries--;
 		val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
 	}
@@ -5021,7 +5014,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
 			return 0;
 		}
 
-		udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT);
+		nvgpu_udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT);
 	} while (!nvgpu_timeout_expired(&timeout));
 
 	nvgpu_err(g, "Falcon mem scrubbing timeout");
@@ -8663,7 +8656,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
 			return -EFAULT;
 		}
 
-		usleep_range(delay, delay * 2);
+		nvgpu_usleep_range(delay, delay * 2);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 	} while (!nvgpu_timeout_expired(&timeout));
 
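Worth noting: every hunk in this patch converts an instance of the same wait idiom: poll a status register, back off with a doubling sleep capped at GR_IDLE_CHECK_MAX, and bail out once the nvgpu_timeout expires. A condensed sketch of that loop as it reads after the change; condition_met() is a hypothetical stand-in for each site's register check, and the timeout setup mirrors gr_gk20a_wait_idle():

	/* Poll with exponential backoff, as used throughout this patch.
	 * condition_met() is a placeholder for the per-site register read. */
	u32 delay = GR_IDLE_CHECK_DEFAULT;
	struct nvgpu_timeout timeout;

	nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER);
	do {
		if (condition_met(g))
			return 0;

		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired(&timeout));

	return -EAGAIN;	/* caller treats expiry as failure */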