summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
diff options
context:
space:
mode:
authorAingara Paramakuru <aparamakuru@nvidia.com>2016-08-15 14:17:43 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2016-09-15 15:23:29 -0400
commit33665060728693c28ed5222a0d9004e261c63e82 (patch)
treec3122ee8fd6cd95af074b27e91a33be8ec9c64f5 /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parentb700d3a0408fff6920cae9b8a3f434530586eb70 (diff)
gpu: nvgpu: move gpfifo submit wait to userspace
Instead of blocking for gpfifo space in the nvgpu driver, return -EAGAIN and allow userspace to decide the blocking policy. Bug 1795076 Change-Id: Ie091caa92aad3f68bc01a3456ad948e76883bc50 Signed-off-by: Aingara Paramakuru <aparamakuru@nvidia.com> Reviewed-on: http://git-master/r/1202591 (cherry picked from commit 8056f422c6a34a4239fc4993c40c2e517c932714) Reviewed-on: http://git-master/r/1203800 Reviewed-by: Automatic_Commit_Validation_User GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.c32
1 file changed, 9 insertions, 23 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 4c03f955..b4c132ce 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -1194,7 +1194,6 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
1194 1194
1195 init_waitqueue_head(&ch->notifier_wq); 1195 init_waitqueue_head(&ch->notifier_wq);
1196 init_waitqueue_head(&ch->semaphore_wq); 1196 init_waitqueue_head(&ch->semaphore_wq);
1197 init_waitqueue_head(&ch->submit_wq);
1198 1197
1199 ch->update_fn = NULL; 1198 ch->update_fn = NULL;
1200 ch->update_fn_data = NULL; 1199 ch->update_fn_data = NULL;
@@ -1974,9 +1973,6 @@ void gk20a_channel_update(struct channel_gk20a *c, int nr_completed)
1974 return; 1973 return;
1975 } 1974 }
1976 1975
1977 update_gp_get(c->g, c);
1978 wake_up(&c->submit_wq);
1979
1980 trace_gk20a_channel_update(c->hw_chid); 1976 trace_gk20a_channel_update(c->hw_chid);
1981 gk20a_channel_schedule_job_clean_up(c); 1977 gk20a_channel_schedule_job_clean_up(c);
1982 1978
@@ -2181,22 +2177,16 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
2181 gk20a_dbg_info("pre-submit put %d, get %d, size %d", 2177 gk20a_dbg_info("pre-submit put %d, get %d, size %d",
2182 c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num); 2178 c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
2183 2179
2184 /* Make sure we have enough space for gpfifo entries. If not, 2180 /*
2185 * wait for signals from completed submits */ 2181 * Make sure we have enough space for gpfifo entries. Check cached
2182 * values first and then read from HW. If no space, return EAGAIN
2183 * and let userpace decide to re-try request or not.
2184 */
2186 if (gp_free_count(c) < num_entries + extra_entries) { 2185 if (gp_free_count(c) < num_entries + extra_entries) {
2187 /* we can get here via locked ioctl and other paths too */ 2186 if (get_gp_free_count(c) < num_entries + extra_entries) {
2188 int locked_path = mutex_is_locked(&c->ioctl_lock); 2187 err = -EAGAIN;
2189 if (locked_path) 2188 goto clean_up;
2190 mutex_unlock(&c->ioctl_lock); 2189 }
2191
2192 trace_gk20a_gpfifo_submit_wait_for_space(dev_name(c->g->dev));
2193 err = wait_event_interruptible(c->submit_wq,
2194 get_gp_free_count(c) >= num_entries + extra_entries ||
2195 c->has_timedout);
2196 trace_gk20a_gpfifo_submit_wait_for_space_done(dev_name(c->g->dev));
2197
2198 if (locked_path)
2199 mutex_lock(&c->ioctl_lock);
2200 } 2190 }
2201 2191
2202 if (c->has_timedout) { 2192 if (c->has_timedout) {
@@ -2204,10 +2194,6 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
2204 goto clean_up; 2194 goto clean_up;
2205 } 2195 }
2206 2196
2207 if (err) {
2208 err = -ENOSPC;
2209 goto clean_up;
2210 }
2211 2197
2212 mutex_lock(&c->sync_lock); 2198 mutex_lock(&c->sync_lock);
2213 if (!c->sync) { 2199 if (!c->sync) {