summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
diff options
context:
space:
mode:
author	Konsta Holtta <kholtta@nvidia.com>	2018-03-22 09:47:43 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-03-28 15:13:07 -0400
commit	d20a501dcbf2e29d5d319670485f273afa3aefad (patch)
tree	22d3443589f0cf3186ef2c4096d0f82569b5401b	/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent	9b70ad24934988141c585d9fd85c59f5c9d58a38 (diff)
gpu: nvgpu: simplify job semaphore release in abort
Instead of looping all jobs and releasing their semaphores separately,
do just one semaphore release. All the jobs are using the same sema
index, and the final, maximum value of it is known. Move also this
resetting into ch->sync->set_min_eq_max() to be consistent with
syncpoints.

Change-Id: I03601aae67db0a65750c8df6b43387c042d383bd
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1680362
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	44
1 file changed, 0 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 5befe086..8b144864 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -185,10 +185,6 @@ int gk20a_disable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
 
 void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
 {
-	struct channel_gk20a_job *job, *n;
-	bool released_job_semaphore = false;
-	bool pre_alloc_enabled = channel_gk20a_is_prealloc_enabled(ch);
-
 	/* synchronize with actual job cleanup */
 	nvgpu_mutex_acquire(&ch->joblist.cleanup_lock);
 
@@ -200,48 +196,8 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
 	ch->user_sync->set_safe_state(ch->user_sync);
 	nvgpu_mutex_release(&ch->sync_lock);
 
-	/* release all job semaphores (applies only to jobs that use
-	   semaphore synchronization) */
-	channel_gk20a_joblist_lock(ch);
-	if (pre_alloc_enabled) {
-		int tmp_get = ch->joblist.pre_alloc.get;
-		int put = ch->joblist.pre_alloc.put;
-
-		/*
-		 * ensure put is read before any subsequent reads.
-		 * see corresponding nvgpu_smp_wmb in gk20a_channel_add_job()
-		 */
-		nvgpu_smp_rmb();
-
-		while (tmp_get != put) {
-			job = &ch->joblist.pre_alloc.jobs[tmp_get];
-			if (job->post_fence->semaphore) {
-				nvgpu_semaphore_reset(
-					job->post_fence->semaphore,
-					ch->hw_sema);
-				released_job_semaphore = true;
-			}
-			tmp_get = (tmp_get + 1) % ch->joblist.pre_alloc.length;
-		}
-	} else {
-		nvgpu_list_for_each_entry_safe(job, n,
-				&ch->joblist.dynamic.jobs,
-				channel_gk20a_job, list) {
-			if (job->post_fence->semaphore) {
-				nvgpu_semaphore_reset(
-					job->post_fence->semaphore,
-					ch->hw_sema);
-				released_job_semaphore = true;
-			}
-		}
-	}
-	channel_gk20a_joblist_unlock(ch);
-
 	nvgpu_mutex_release(&ch->joblist.cleanup_lock);
 
-	if (released_job_semaphore)
-		nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq);
-
 	/*
 	 * When closing the channel, this scheduled update holds one ref which
 	 * is waited for before advancing with freeing.