path: root/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
author	Deepak Nibade <dnibade@nvidia.com>	2016-01-12 10:30:10 -0500
committer	Terje Bergstrom <tbergstrom@nvidia.com>	2016-01-21 10:52:11 -0500
commit	cd09ac26c7e10fbc4ca96b54be3b1578614e3680 (patch)
tree	8cf5e6e78ae087d5610bdd3ed022bdbdefa974aa	/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent	3c6b40c762f0d4501c86f4d386a53165208a8677 (diff)
gpu: nvgpu: move resetup_ramfc() out of sync_lock
We currently have this sequence:
- acquire sync_lock
- sync_create
- resetup_ramfc()
- release sync_lock

but this can lead to a deadlock if resetup_ramfc() triggers the stack below:
- resetup_ramfc()
- channel_preempt()
- preemption fails
- trigger recovery
- channel_abort()
- acquire sync_lock

Fix this by moving resetup_ramfc() out of sync_lock. resetup_ramfc() is still protected by submit_lock, and hence the sync object cannot be freed between its allocation and the resetup.

Bug 200165811

Change-Id: Iebf74d950d6f6902b6d180c2cd8cd2d50493062c
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/931726
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
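To make the deadlock and the fix concrete, the sketch below models the locking order in plain user-space C. It is an illustration only, not the nvgpu code: pthread mutexes stand in for the kernel's struct mutex, and the struct channel layout, the sync_create callback and the preempt_failed flag are invented for the example; submit_lock, sync_lock, resetup_ramfc(), channel_abort() and new_sync_created follow the names used in this commit.

/* Illustration only; see the note above for which names are assumptions. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct channel {
	pthread_mutex_t submit_lock;
	pthread_mutex_t sync_lock;
	void *sync;
};

/* Recovery path: channel_abort() takes sync_lock again, so it must not
 * run while the submitting thread still holds that lock. */
static void channel_abort(struct channel *c)
{
	pthread_mutex_lock(&c->sync_lock);
	/* ... abort the channel, drop sync references ... */
	pthread_mutex_unlock(&c->sync_lock);
}

/* Stand-in for g->ops.fifo.resetup_ramfc(): a failed preemption
 * triggers recovery, which ends up in channel_abort(). */
static int resetup_ramfc(struct channel *c, bool preempt_failed)
{
	if (preempt_failed) {
		channel_abort(c);
		return -1;
	}
	return 0;
}

/* Fixed ordering: the sync object is created under sync_lock, but
 * resetup_ramfc() runs only after sync_lock is dropped.  submit_lock is
 * still held, so the fresh sync object cannot be freed between its
 * creation and the RAMFC resetup. */
static int submit(struct channel *c, void *(*sync_create)(struct channel *),
		  bool preempt_failed)
{
	bool new_sync_created = false;
	int err = 0;

	pthread_mutex_lock(&c->submit_lock);

	pthread_mutex_lock(&c->sync_lock);
	if (!c->sync) {
		c->sync = sync_create(c);
		new_sync_created = true;
	}
	pthread_mutex_unlock(&c->sync_lock);	/* drop before resetup */

	if (new_sync_created)
		err = resetup_ramfc(c, preempt_failed);

	pthread_mutex_unlock(&c->submit_lock);
	return err;
}

With the old ordering, resetup_ramfc() ran while sync_lock was still held, so a failed preemption reached channel_abort() and blocked on a mutex already owned by the same thread; dropping sync_lock first, while keeping submit_lock, removes that self-deadlock without letting the new sync object be freed before the resetup.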
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 0c28d0bb..45501d4f 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -1936,6 +1936,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 	bool skip_buffer_refcounting = (flags &
 			NVGPU_SUBMIT_GPFIFO_FLAGS_SKIP_BUFFER_REFCOUNTING);
 	bool need_sync_fence = false;
+	bool new_sync_created = false;
 
 	/*
 	 * If user wants to allocate sync_fence_fd always, then respect that;
@@ -2033,16 +2034,22 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 		c->sync = gk20a_channel_sync_create(c);
 		if (!c->sync) {
 			err = -ENOMEM;
+			mutex_unlock(&c->sync_lock);
 			mutex_unlock(&c->submit_lock);
 			goto clean_up;
 		}
-		if (g->ops.fifo.resetup_ramfc)
-			err = g->ops.fifo.resetup_ramfc(c);
-		if (err)
-			return err;
+		new_sync_created = true;
 	}
 	mutex_unlock(&c->sync_lock);
 
+	if (g->ops.fifo.resetup_ramfc && new_sync_created) {
+		err = g->ops.fifo.resetup_ramfc(c);
+		if (err) {
+			mutex_unlock(&c->submit_lock);
+			goto clean_up;
+		}
+	}
+
 	/*
 	 * optionally insert syncpt wait in the beginning of gpfifo submission
 	 * when user requested and the wait hasn't expired.