summary refs log tree commit diff stats
path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2016-11-11 19:09:06 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2016-12-05 19:16:17 -0500
commit4dc977e25f4b23e188f8459157df4dd9a7fb0ced (patch)
treeb143837f1ed18cdf3f9f8b2cfddd58318c1bcdfb /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parentd8fd0e64678997b535c3208ce8cc081b1cac7fa9 (diff)
gpu: nvgpu: Use timeout API in PMU code
Instead of using custom code for timeout monitoring, use the generic timeout API for nvgpu.

Bug 1799159

Change-Id: If77e67b2d8678b824d6948620003d3892d5f41d2
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1255865
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.c38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 193938ba..1e9f291f 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -24,6 +24,8 @@
24#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
25#include <linux/uaccess.h> 25#include <linux/uaccess.h>
26 26
27#include <nvgpu/timers.h>
28
27#include "gk20a.h" 29#include "gk20a.h"
28#include "gr_gk20a.h" 30#include "gr_gk20a.h"
29#include "semaphore_gk20a.h" 31#include "semaphore_gk20a.h"
@@ -2113,10 +2115,11 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu,
2113int pmu_idle(struct pmu_gk20a *pmu) 2115int pmu_idle(struct pmu_gk20a *pmu)
2114{ 2116{
2115 struct gk20a *g = gk20a_from_pmu(pmu); 2117 struct gk20a *g = gk20a_from_pmu(pmu);
2116 unsigned long end_jiffies = jiffies + 2118 struct nvgpu_timeout timeout;
2117 msecs_to_jiffies(2000);
2118 u32 idle_stat; 2119 u32 idle_stat;
2119 2120
2121 nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_CPU_TIMER);
2122
2120 /* wait for pmu idle */ 2123 /* wait for pmu idle */
2121 do { 2124 do {
2122 idle_stat = gk20a_readl(g, pwr_falcon_idlestate_r()); 2125 idle_stat = gk20a_readl(g, pwr_falcon_idlestate_r());
@@ -2126,12 +2129,11 @@ int pmu_idle(struct pmu_gk20a *pmu)
2126 break; 2129 break;
2127 } 2130 }
2128 2131
2129 if (time_after_eq(jiffies, end_jiffies)) { 2132 if (nvgpu_timeout_check_msg(&timeout,
2130 gk20a_err(dev_from_gk20a(g), 2133 "waiting for pmu idle: 0x%08x",
2131 "timeout waiting pmu idle : 0x%08x", 2134 idle_stat))
2132 idle_stat);
2133 return -EBUSY; 2135 return -EBUSY;
2134 } 2136
2135 usleep_range(100, 200); 2137 usleep_range(100, 200);
2136 } while (1); 2138 } while (1);
2137 2139
@@ -3842,9 +3844,6 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
3842 return 0; 3844 return 0;
3843} 3845}
3844 3846
3845int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout,
3846 u32 *var, u32 val);
3847
3848static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, 3847static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
3849 void *param, u32 handle, u32 status) 3848 void *param, u32 handle, u32 status)
3850{ 3849{
@@ -4028,17 +4027,20 @@ static int pmu_process_message(struct pmu_gk20a *pmu)
4028 return 0; 4027 return 0;
4029} 4028}
4030 4029
4031int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout, 4030int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout_ms,
4032 u32 *var, u32 val) 4031 u32 *var, u32 val)
4033{ 4032{
4034 struct gk20a *g = gk20a_from_pmu(pmu); 4033 struct gk20a *g = gk20a_from_pmu(pmu);
4035 unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout); 4034 struct nvgpu_timeout timeout;
4036 unsigned long delay = GR_IDLE_CHECK_DEFAULT; 4035 unsigned long delay = GR_IDLE_CHECK_DEFAULT;
4037 u32 servicedpmuint; 4036 u32 servicedpmuint;
4038 4037
4039 servicedpmuint = pwr_falcon_irqstat_halt_true_f() | 4038 servicedpmuint = pwr_falcon_irqstat_halt_true_f() |
4040 pwr_falcon_irqstat_exterr_true_f() | 4039 pwr_falcon_irqstat_exterr_true_f() |
4041 pwr_falcon_irqstat_swgen0_true_f(); 4040 pwr_falcon_irqstat_swgen0_true_f();
4041
4042 nvgpu_timeout_init(g, &timeout, (int)timeout_ms, NVGPU_TIMER_CPU_TIMER);
4043
4042 do { 4044 do {
4043 if (*var == val) 4045 if (*var == val)
4044 return 0; 4046 return 0;
@@ -4048,8 +4050,7 @@ int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout,
4048 4050
4049 usleep_range(delay, delay * 2); 4051 usleep_range(delay, delay * 2);
4050 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); 4052 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
4051 } while (time_before(jiffies, end_jiffies) || 4053 } while (!nvgpu_timeout_check(&timeout));
4052 !tegra_platform_is_silicon());
4053 4054
4054 return -ETIMEDOUT; 4055 return -ETIMEDOUT;
4055} 4056}
@@ -4386,22 +4387,21 @@ invalid_cmd:
4386} 4387}
4387 4388
4388static int pmu_write_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd, 4389static int pmu_write_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
4389 u32 queue_id, unsigned long timeout) 4390 u32 queue_id, unsigned long timeout_ms)
4390{ 4391{
4391 struct gk20a *g = gk20a_from_pmu(pmu); 4392 struct gk20a *g = gk20a_from_pmu(pmu);
4392 struct pmu_queue *queue; 4393 struct pmu_queue *queue;
4393 unsigned long end_jiffies = jiffies + 4394 struct nvgpu_timeout timeout;
4394 msecs_to_jiffies(timeout);
4395 int err; 4395 int err;
4396 4396
4397 gk20a_dbg_fn(""); 4397 gk20a_dbg_fn("");
4398 4398
4399 queue = &pmu->queue[queue_id]; 4399 queue = &pmu->queue[queue_id];
4400 4400 nvgpu_timeout_init(g, &timeout, (int)timeout_ms, NVGPU_TIMER_CPU_TIMER);
4401 4401
4402 do { 4402 do {
4403 err = pmu_queue_open_write(pmu, queue, cmd->hdr.size); 4403 err = pmu_queue_open_write(pmu, queue, cmd->hdr.size);
4404 if (err == -EAGAIN && time_before(jiffies, end_jiffies)) 4404 if (err == -EAGAIN && !nvgpu_timeout_check(&timeout))
4405 usleep_range(1000, 2000); 4405 usleep_range(1000, 2000);
4406 else 4406 else
4407 break; 4407 break;