summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorSachit Kadle <skadle@nvidia.com>2016-08-23 16:41:49 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2016-09-20 13:43:37 -0400
commit101689dd8b536afa3ee7e265dc4ea846fa053767 (patch)
treeecba2d8c83f87509ff95ccae636bcf72c5bbd605 /drivers
parent510cf2d2f39a4776db4cd162c7e7525dbe17c3eb (diff)
gpu: nvgpu: remove last_submit tracking
We previously used to wait on the last_submit fence before disabling a channel. Since this part of the code is no longer exercised, we can remove this tracking.

Bug 1795076

Change-Id: I54ba2ebaf48772aa775654c0fb4ab614a7167969
Signed-off-by: Sachit Kadle <skadle@nvidia.com>
Reviewed-on: http://git-master/r/1206585
Reviewed-by: Automatic_Commit_Validation_User
(cherry picked from commit e4e236f2b487b8cfa31f7afd29fad3c97de5f844)
Reviewed-on: http://git-master/r/1209166
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.c59
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.h8
2 files changed, 2 insertions, 65 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 795c7910..09116a88 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -985,12 +985,6 @@ unbind:
985 ch->vpr = false; 985 ch->vpr = false;
986 ch->vm = NULL; 986 ch->vm = NULL;
987 987
988 mutex_lock(&ch->last_submit.fence_lock);
989 gk20a_fence_put(ch->last_submit.pre_fence);
990 gk20a_fence_put(ch->last_submit.post_fence);
991 ch->last_submit.pre_fence = NULL;
992 ch->last_submit.post_fence = NULL;
993 mutex_unlock(&ch->last_submit.fence_lock);
994 WARN_ON(ch->sync); 988 WARN_ON(ch->sync);
995 989
996 /* unlink all debug sessions */ 990 /* unlink all debug sessions */
@@ -1451,14 +1445,6 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
1451 } 1445 }
1452 ch_vm = c->vm; 1446 ch_vm = c->vm;
1453 1447
1454 c->cmds_pending = false;
1455 mutex_lock(&c->last_submit.fence_lock);
1456 gk20a_fence_put(c->last_submit.pre_fence);
1457 gk20a_fence_put(c->last_submit.post_fence);
1458 c->last_submit.pre_fence = NULL;
1459 c->last_submit.post_fence = NULL;
1460 mutex_unlock(&c->last_submit.fence_lock);
1461
1462 c->ramfc.offset = 0; 1448 c->ramfc.offset = 0;
1463 c->ramfc.size = ram_in_ramfc_s() / 8; 1449 c->ramfc.size = ram_in_ramfc_s() / 8;
1464 1450
@@ -1866,8 +1852,8 @@ static int gk20a_channel_add_job(struct channel_gk20a *c,
1866 if (c) { 1852 if (c) {
1867 job->num_mapped_buffers = num_mapped_buffers; 1853 job->num_mapped_buffers = num_mapped_buffers;
1868 job->mapped_buffers = mapped_buffers; 1854 job->mapped_buffers = mapped_buffers;
1869 job->pre_fence = gk20a_fence_get(pre_fence); 1855 job->pre_fence = pre_fence;
1870 job->post_fence = gk20a_fence_get(post_fence); 1856 job->post_fence = post_fence;
1871 job->wait_cmd = wait_cmd; 1857 job->wait_cmd = wait_cmd;
1872 job->incr_cmd = incr_cmd; 1858 job->incr_cmd = incr_cmd;
1873 1859
@@ -2352,14 +2338,8 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
2352 if (incr_cmd) 2338 if (incr_cmd)
2353 gk20a_submit_append_priv_cmdbuf(c, incr_cmd); 2339 gk20a_submit_append_priv_cmdbuf(c, incr_cmd);
2354 2340
2355 mutex_lock(&c->last_submit.fence_lock);
2356 gk20a_fence_put(c->last_submit.pre_fence);
2357 gk20a_fence_put(c->last_submit.post_fence);
2358 c->last_submit.pre_fence = pre_fence;
2359 c->last_submit.post_fence = post_fence;
2360 if (fence_out) 2341 if (fence_out)
2361 *fence_out = gk20a_fence_get(post_fence); 2342 *fence_out = gk20a_fence_get(post_fence);
2362 mutex_unlock(&c->last_submit.fence_lock);
2363 2343
2364 if (need_job_tracking) 2344 if (need_job_tracking)
2365 /* TODO! Check for errors... */ 2345 /* TODO! Check for errors... */
@@ -2367,7 +2347,6 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
2367 wait_cmd, incr_cmd, 2347 wait_cmd, incr_cmd,
2368 skip_buffer_refcounting); 2348 skip_buffer_refcounting);
2369 2349
2370 c->cmds_pending = true;
2371 gk20a_bar1_writel(g, 2350 gk20a_bar1_writel(g,
2372 c->userd_gpu_va + 4 * ram_userd_gp_put_w(), 2351 c->userd_gpu_va + 4 * ram_userd_gp_put_w(),
2373 c->gpfifo.put); 2352 c->gpfifo.put);
@@ -2408,7 +2387,6 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
2408 init_waitqueue_head(&c->ref_count_dec_wq); 2387 init_waitqueue_head(&c->ref_count_dec_wq);
2409 mutex_init(&c->ioctl_lock); 2388 mutex_init(&c->ioctl_lock);
2410 spin_lock_init(&c->jobs_lock); 2389 spin_lock_init(&c->jobs_lock);
2411 mutex_init(&c->last_submit.fence_lock);
2412 raw_spin_lock_init(&c->timeout.lock); 2390 raw_spin_lock_init(&c->timeout.lock);
2413 mutex_init(&c->sync_lock); 2391 mutex_init(&c->sync_lock);
2414 INIT_DELAYED_WORK(&c->timeout.wq, gk20a_channel_timeout_handler); 2392 INIT_DELAYED_WORK(&c->timeout.wq, gk20a_channel_timeout_handler);
@@ -2428,39 +2406,6 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
2428 return 0; 2406 return 0;
2429} 2407}
2430 2408
2431int gk20a_channel_finish(struct channel_gk20a *ch, unsigned long timeout)
2432{
2433 int err = 0;
2434 struct gk20a_fence *fence;
2435
2436 if (!ch->cmds_pending)
2437 return 0;
2438
2439 mutex_lock(&ch->last_submit.fence_lock);
2440 fence = ch->last_submit.post_fence;
2441 if (!fence) {
2442 mutex_unlock(&ch->last_submit.fence_lock);
2443 return -EINVAL;
2444 }
2445 mutex_unlock(&ch->last_submit.fence_lock);
2446
2447 /* Do not wait for a timedout channel */
2448 if (ch->has_timedout)
2449 return -ETIMEDOUT;
2450
2451 gk20a_dbg_fn("waiting for channel to finish thresh:%d sema:%p",
2452 fence->syncpt_value, fence->semaphore);
2453
2454 err = gk20a_fence_wait(fence, timeout);
2455 if (WARN_ON(err))
2456 dev_warn(dev_from_gk20a(ch->g),
2457 "timed out waiting for gk20a channel to finish");
2458 else
2459 ch->cmds_pending = false;
2460
2461 return err;
2462}
2463
2464static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch, 2409static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
2465 ulong id, u32 offset, 2410 ulong id, u32 offset,
2466 u32 payload, long timeout) 2411 u32 payload, long timeout)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index 9b909962..a44321bc 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -152,13 +152,6 @@ struct channel_gk20a {
152 struct channel_gk20a_timeout timeout; 152 struct channel_gk20a_timeout timeout;
153 struct channel_gk20a_clean_up clean_up; 153 struct channel_gk20a_clean_up clean_up;
154 154
155 bool cmds_pending;
156 struct {
157 struct gk20a_fence *pre_fence;
158 struct gk20a_fence *post_fence;
159 struct mutex fence_lock;
160 } last_submit;
161
162#if defined(CONFIG_GK20A_CYCLE_STATS) 155#if defined(CONFIG_GK20A_CYCLE_STATS)
163 struct { 156 struct {
164 void *cyclestate_buffer; 157 void *cyclestate_buffer;
@@ -221,7 +214,6 @@ bool gk20a_channel_update_and_check_timeout(struct channel_gk20a *ch,
221void gk20a_disable_channel(struct channel_gk20a *ch); 214void gk20a_disable_channel(struct channel_gk20a *ch);
222void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt); 215void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt);
223void gk20a_channel_abort_clean_up(struct channel_gk20a *ch); 216void gk20a_channel_abort_clean_up(struct channel_gk20a *ch);
224int gk20a_channel_finish(struct channel_gk20a *ch, unsigned long timeout);
225void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error); 217void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error);
226void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events); 218void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events);
227int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 size, 219int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 size,