diff options
author | Alex Waterman <alexw@nvidia.com> | 2016-12-16 15:29:34 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-01-18 19:46:33 -0500 |
commit | 6e2237ef622113b8fa1149aa48988a99fa30594f (patch) | |
tree | 1356c45dda5751f7094f37aa93019f1199b635fb /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |
parent | 8f5a42c4bf9c323b86452065d39ed7632b126561 (diff) |
gpu: nvgpu: Use timer API in gk20a code
Use the timers API in the gk20a code instead of Linux-specific
API calls.
This also changes the behavior of several functions to wait for
the full timeout for each operation that can time out. Previously
the timeout was shared across each operation.
Bug 1799159
Change-Id: I2bbed54630667b2b879b56a63a853266afc1e5d8
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1273826
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 47 |
1 file changed, 23 insertions, 24 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 2daeb1d0..469148c2 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |||
@@ -16,6 +16,7 @@ | |||
16 | * this program; if not, write to the Free Software Foundation, Inc., | 16 | * this program; if not, write to the Free Software Foundation, Inc., |
17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | 17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. |
18 | */ | 18 | */ |
19 | |||
19 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
20 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
21 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
@@ -23,6 +24,8 @@ | |||
23 | #include <linux/dma-mapping.h> | 24 | #include <linux/dma-mapping.h> |
24 | #include <linux/nvhost.h> | 25 | #include <linux/nvhost.h> |
25 | 26 | ||
27 | #include <nvgpu/timers.h> | ||
28 | |||
26 | #include "gk20a.h" | 29 | #include "gk20a.h" |
27 | #include "debug_gk20a.h" | 30 | #include "debug_gk20a.h" |
28 | #include "ctxsw_trace_gk20a.h" | 31 | #include "ctxsw_trace_gk20a.h" |
@@ -1570,11 +1573,9 @@ static void gk20a_fifo_get_faulty_id_type(struct gk20a *g, int engine_id, | |||
1570 | static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g, | 1573 | static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g, |
1571 | unsigned long engine_ids) | 1574 | unsigned long engine_ids) |
1572 | { | 1575 | { |
1573 | unsigned long end_jiffies = jiffies + | 1576 | struct nvgpu_timeout timeout; |
1574 | msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)); | ||
1575 | unsigned long delay = GR_IDLE_CHECK_DEFAULT; | 1577 | unsigned long delay = GR_IDLE_CHECK_DEFAULT; |
1576 | unsigned long engine_id; | 1578 | unsigned long engine_id; |
1577 | int ret; | ||
1578 | 1579 | ||
1579 | /* trigger faults for all bad engines */ | 1580 | /* trigger faults for all bad engines */ |
1580 | for_each_set_bit(engine_id, &engine_ids, 32) { | 1581 | for_each_set_bit(engine_id, &engine_ids, 32) { |
@@ -1593,21 +1594,16 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g, | |||
1593 | } | 1594 | } |
1594 | 1595 | ||
1595 | /* Wait for MMU fault to trigger */ | 1596 | /* Wait for MMU fault to trigger */ |
1596 | ret = -EBUSY; | 1597 | nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), |
1598 | NVGPU_TIMER_CPU_TIMER); | ||
1597 | do { | 1599 | do { |
1598 | if (gk20a_readl(g, fifo_intr_0_r()) & | 1600 | if (gk20a_readl(g, fifo_intr_0_r()) & |
1599 | fifo_intr_0_mmu_fault_pending_f()) { | 1601 | fifo_intr_0_mmu_fault_pending_f()) |
1600 | ret = 0; | ||
1601 | break; | 1602 | break; |
1602 | } | ||
1603 | 1603 | ||
1604 | usleep_range(delay, delay * 2); | 1604 | usleep_range(delay, delay * 2); |
1605 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); | 1605 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); |
1606 | } while (time_before(jiffies, end_jiffies) || | 1606 | } while (!nvgpu_timeout_expired_msg(&timeout, "mmu fault timeout")); |
1607 | !tegra_platform_is_silicon()); | ||
1608 | |||
1609 | if (ret) | ||
1610 | gk20a_err(dev_from_gk20a(g), "mmu fault timeout"); | ||
1611 | 1607 | ||
1612 | /* release mmu fault trigger */ | 1608 | /* release mmu fault trigger */ |
1613 | for_each_set_bit(engine_id, &engine_ids, 32) | 1609 | for_each_set_bit(engine_id, &engine_ids, 32) |
@@ -2366,9 +2362,8 @@ void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg) | |||
2366 | 2362 | ||
2367 | static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg) | 2363 | static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg) |
2368 | { | 2364 | { |
2365 | struct nvgpu_timeout timeout; | ||
2369 | u32 delay = GR_IDLE_CHECK_DEFAULT; | 2366 | u32 delay = GR_IDLE_CHECK_DEFAULT; |
2370 | unsigned long end_jiffies = jiffies | ||
2371 | + msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)); | ||
2372 | u32 ret = 0; | 2367 | u32 ret = 0; |
2373 | 2368 | ||
2374 | gk20a_dbg_fn("%d", id); | 2369 | gk20a_dbg_fn("%d", id); |
@@ -2379,6 +2374,8 @@ static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg) | |||
2379 | gk20a_dbg_fn("%d", id); | 2374 | gk20a_dbg_fn("%d", id); |
2380 | /* wait for preempt */ | 2375 | /* wait for preempt */ |
2381 | ret = -EBUSY; | 2376 | ret = -EBUSY; |
2377 | nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), | ||
2378 | NVGPU_TIMER_CPU_TIMER); | ||
2382 | do { | 2379 | do { |
2383 | if (!(gk20a_readl(g, fifo_preempt_r()) & | 2380 | if (!(gk20a_readl(g, fifo_preempt_r()) & |
2384 | fifo_preempt_pending_true_f())) { | 2381 | fifo_preempt_pending_true_f())) { |
@@ -2388,8 +2385,7 @@ static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg) | |||
2388 | 2385 | ||
2389 | usleep_range(delay, delay * 2); | 2386 | usleep_range(delay, delay * 2); |
2390 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); | 2387 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); |
2391 | } while (time_before(jiffies, end_jiffies) || | 2388 | } while (!nvgpu_timeout_expired(&timeout)); |
2392 | !tegra_platform_is_silicon()); | ||
2393 | 2389 | ||
2394 | gk20a_dbg_fn("%d", id); | 2390 | gk20a_dbg_fn("%d", id); |
2395 | if (ret) { | 2391 | if (ret) { |
@@ -2668,11 +2664,13 @@ static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id) | |||
2668 | static int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id) | 2664 | static int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id) |
2669 | { | 2665 | { |
2670 | struct fifo_runlist_info_gk20a *runlist; | 2666 | struct fifo_runlist_info_gk20a *runlist; |
2671 | unsigned long end_jiffies = jiffies + | 2667 | struct nvgpu_timeout timeout; |
2672 | msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)); | ||
2673 | unsigned long delay = GR_IDLE_CHECK_DEFAULT; | 2668 | unsigned long delay = GR_IDLE_CHECK_DEFAULT; |
2674 | int ret = -ETIMEDOUT; | 2669 | int ret = -ETIMEDOUT; |
2675 | 2670 | ||
2671 | nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), | ||
2672 | NVGPU_TIMER_CPU_TIMER); | ||
2673 | |||
2676 | runlist = &g->fifo.runlist_info[runlist_id]; | 2674 | runlist = &g->fifo.runlist_info[runlist_id]; |
2677 | do { | 2675 | do { |
2678 | if ((gk20a_readl(g, fifo_eng_runlist_r(runlist_id)) & | 2676 | if ((gk20a_readl(g, fifo_eng_runlist_r(runlist_id)) & |
@@ -2683,8 +2681,7 @@ static int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id) | |||
2683 | 2681 | ||
2684 | usleep_range(delay, delay * 2); | 2682 | usleep_range(delay, delay * 2); |
2685 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); | 2683 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); |
2686 | } while (time_before(jiffies, end_jiffies) || | 2684 | } while (!nvgpu_timeout_expired(&timeout)); |
2687 | !tegra_platform_is_silicon()); | ||
2688 | 2685 | ||
2689 | return ret; | 2686 | return ret; |
2690 | } | 2687 | } |
@@ -3106,14 +3103,16 @@ bool gk20a_fifo_is_engine_busy(struct gk20a *g) | |||
3106 | 3103 | ||
3107 | int gk20a_fifo_wait_engine_idle(struct gk20a *g) | 3104 | int gk20a_fifo_wait_engine_idle(struct gk20a *g) |
3108 | { | 3105 | { |
3109 | unsigned long end_jiffies = jiffies + | 3106 | struct nvgpu_timeout timeout; |
3110 | msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)); | ||
3111 | unsigned long delay = GR_IDLE_CHECK_DEFAULT; | 3107 | unsigned long delay = GR_IDLE_CHECK_DEFAULT; |
3112 | int ret = -ETIMEDOUT; | 3108 | int ret = -ETIMEDOUT; |
3113 | u32 i; | 3109 | u32 i; |
3114 | 3110 | ||
3115 | gk20a_dbg_fn(""); | 3111 | gk20a_dbg_fn(""); |
3116 | 3112 | ||
3113 | nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), | ||
3114 | NVGPU_TIMER_CPU_TIMER); | ||
3115 | |||
3117 | for (i = 0; i < fifo_engine_status__size_1_v(); i++) { | 3116 | for (i = 0; i < fifo_engine_status__size_1_v(); i++) { |
3118 | do { | 3117 | do { |
3119 | u32 status = gk20a_readl(g, fifo_engine_status_r(i)); | 3118 | u32 status = gk20a_readl(g, fifo_engine_status_r(i)); |
@@ -3125,8 +3124,8 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g) | |||
3125 | usleep_range(delay, delay * 2); | 3124 | usleep_range(delay, delay * 2); |
3126 | delay = min_t(unsigned long, | 3125 | delay = min_t(unsigned long, |
3127 | delay << 1, GR_IDLE_CHECK_MAX); | 3126 | delay << 1, GR_IDLE_CHECK_MAX); |
3128 | } while (time_before(jiffies, end_jiffies) || | 3127 | } while (!nvgpu_timeout_expired(&timeout)); |
3129 | !tegra_platform_is_silicon()); | 3128 | |
3130 | if (ret) { | 3129 | if (ret) { |
3131 | gk20a_dbg_info("cannot idle engine %u", i); | 3130 | gk20a_dbg_info("cannot idle engine %u", i); |
3132 | break; | 3131 | break; |