path: root/drivers/gpu/nvgpu/common/fifo/channel.c
author		Srirangan <smadhavan@nvidia.com>	2018-08-29 06:30:44 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-31 21:56:57 -0400
commit		0dc9daf28e3fe6831bc535c8a45d28d974a11dad (patch)
tree		3045032cd440ad3f0119706149f002a68e9caf3c /drivers/gpu/nvgpu/common/fifo/channel.c
parent		7bf80a1c69a07f81225270e90a57a1c41d202859 (diff)
gpu: nvgpu: common: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single statement blocks. Fix errors due to single statement
if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: If5e4350a337b61b8a82870860a690d06b89c88c1
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1808972
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
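For illustration only (this snippet is not taken from the patch itself), every change in the diff below has the same shape: a single-statement if/else body gains braces so that it satisfies MISRA C:2012 Rule 15.6. A minimal sketch, where cond, do_work() and do_other_work() are hypothetical placeholders:

/*
 * Illustrative sketch only -- not code from this patch.
 */
static void do_work(void)       { /* ... */ }
static void do_other_work(void) { /* ... */ }

/* Non-compliant with Rule 15.6 (unbraced single-statement bodies):
 *
 *	if (cond)
 *		do_work();
 *	else
 *		do_other_work();
 */

/* Compliant: every if/else body is a brace-enclosed compound statement. */
static void misra_15_6_example(int cond)
{
	if (cond) {
		do_work();
	} else {
		do_other_work();
	}
}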
Diffstat (limited to 'drivers/gpu/nvgpu/common/fifo/channel.c')
-rw-r--r--	drivers/gpu/nvgpu/common/fifo/channel.c	251
1 file changed, 165 insertions(+), 86 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index a444766b..7c2bd4b4 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -83,8 +83,9 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 
 	if (g->aggressive_sync_destroy_thresh &&
 		(f->used_channels >
-		 g->aggressive_sync_destroy_thresh))
+		 g->aggressive_sync_destroy_thresh)) {
 		g->aggressive_sync_destroy = true;
+	}
 
 	return ch;
 }
@@ -109,8 +110,9 @@ static void free_channel(struct fifo_gk20a *f,
 	if (!nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
 		if (g->aggressive_sync_destroy_thresh &&
 			(f->used_channels <
-			 g->aggressive_sync_destroy_thresh))
+			 g->aggressive_sync_destroy_thresh)) {
 			g->aggressive_sync_destroy = false;
+		}
 	}
 }
 
@@ -193,10 +195,12 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
 
 	/* ensure no fences are pending */
 	nvgpu_mutex_acquire(&ch->sync_lock);
-	if (ch->sync)
+	if (ch->sync) {
 		ch->sync->set_min_eq_max(ch->sync);
-	if (ch->user_sync)
+	}
+	if (ch->user_sync) {
 		ch->user_sync->set_safe_state(ch->user_sync);
+	}
 	nvgpu_mutex_release(&ch->sync_lock);
 
 	nvgpu_mutex_release(&ch->joblist.cleanup_lock);
@@ -212,19 +216,22 @@ void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
 {
 	nvgpu_log_fn(ch->g, " ");
 
-	if (gk20a_is_channel_marked_as_tsg(ch))
+	if (gk20a_is_channel_marked_as_tsg(ch)) {
 		return gk20a_fifo_abort_tsg(ch->g, ch->tsgid, channel_preempt);
+	}
 
 	/* make sure new kickoffs are prevented */
 	ch->has_timedout = true;
 
 	ch->g->ops.fifo.disable_channel(ch);
 
-	if (channel_preempt && gk20a_is_channel_marked_as_tsg(ch))
+	if (channel_preempt && gk20a_is_channel_marked_as_tsg(ch)) {
 		ch->g->ops.fifo.preempt_channel(ch->g, ch->chid);
+	}
 
-	if (ch->g->ops.fifo.ch_abort_clean_up)
+	if (ch->g->ops.fifo.ch_abort_clean_up) {
 		ch->g->ops.fifo.ch_abort_clean_up(ch);
+	}
 }
 
 int gk20a_wait_channel_idle(struct channel_gk20a *ch)
@@ -239,8 +246,9 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
 		channel_gk20a_joblist_lock(ch);
 		channel_idle = channel_gk20a_joblist_is_empty(ch);
 		channel_gk20a_joblist_unlock(ch);
-		if (channel_idle)
+		if (channel_idle) {
 			break;
+		}
 
 		nvgpu_usleep_range(1000, 3000);
 	} while (!nvgpu_timeout_expired(&timeout));
@@ -268,8 +276,9 @@ void gk20a_wait_until_counter_is_N(
 		if (NVGPU_COND_WAIT(
 			c,
 			nvgpu_atomic_read(counter) == wait_value,
-			5000) == 0)
+			5000) == 0) {
 			break;
+		}
 
 		nvgpu_warn(ch->g,
 			"%s: channel %d, still waiting, %s left: %d, waiting for: %d",
@@ -299,8 +308,9 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 
 	trace_gk20a_free_channel(ch->chid);
 
-	if (g->os_channel.close)
+	if (g->os_channel.close) {
 		g->os_channel.close(ch);
+	}
 
 	/*
 	 * Disable channel/TSG and unbind here. This should not be executed if
@@ -311,10 +321,11 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 		/* abort channel and remove from runlist */
 		if (gk20a_is_channel_marked_as_tsg(ch)) {
 			err = gk20a_tsg_unbind_channel(ch);
-			if (err)
+			if (err) {
 				nvgpu_err(g,
 					"failed to unbind channel %d from TSG",
 					ch->chid);
+			}
 		} else {
 			/*
 			 * Channel is already unbound from TSG by User with
@@ -324,10 +335,11 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 		}
 	}
 	/* wait until there's only our ref to the channel */
-	if (!force)
+	if (!force) {
 		gk20a_wait_until_counter_is_N(
 			ch, &ch->ref_count, 1, &ch->ref_count_dec_wq,
 			__func__, "references");
+	}
 
 	/* wait until all pending interrupts for recently completed
 	 * jobs are handled */
@@ -349,10 +361,11 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	nvgpu_atomic_dec(&ch->ref_count);
 
 	/* wait until no more refs to the channel */
-	if (!force)
+	if (!force) {
 		gk20a_wait_until_counter_is_N(
 			ch, &ch->ref_count, 0, &ch->ref_count_dec_wq,
 			__func__, "references");
+	}
 
 	/* if engine reset was deferred, perform it now */
 	nvgpu_mutex_acquire(&f->deferred_reset_mutex);
@@ -368,8 +381,9 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	}
 	nvgpu_mutex_release(&f->deferred_reset_mutex);
 
-	if (!gk20a_channel_as_bound(ch))
+	if (!gk20a_channel_as_bound(ch)) {
 		goto unbind;
+	}
 
 	nvgpu_log_info(g, "freeing bound channel context, timeout=%ld",
 			timeout);
@@ -379,8 +393,9 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	g->ops.fecs_trace.unbind_channel(g, ch);
 #endif
 
-	if(g->ops.fifo.free_channel_ctx_header)
+	if (g->ops.fifo.free_channel_ctx_header) {
 		g->ops.fifo.free_channel_ctx_header(ch);
+	}
 
 	if (ch->usermode_submit_enabled) {
 		gk20a_channel_free_usermode_buffers(ch);
@@ -408,10 +423,11 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 		 * Set user managed syncpoint to safe state
 		 * But it's already done if channel has timedout
 		 */
-		if (ch->has_timedout)
+		if (ch->has_timedout) {
 			gk20a_channel_sync_destroy(ch->user_sync, false);
-		else
+		} else {
 			gk20a_channel_sync_destroy(ch->user_sync, true);
+		}
 		ch->user_sync = NULL;
 	}
 	nvgpu_mutex_release(&ch->sync_lock);
@@ -421,8 +437,9 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	 * we need to do this before releasing the address space,
 	 * as the semaphore pool might get freed after that point.
 	 */
-	if (ch->hw_sema)
+	if (ch->hw_sema) {
 		nvgpu_semaphore_free_hw_sema(ch);
+	}
 
 	/*
 	 * When releasing the channel we unbind the VM - so release the ref.
@@ -441,8 +458,9 @@ unbind:
 	if (ch->deterministic) {
 		nvgpu_rwsem_down_read(&g->deterministic_busy);
 		ch->deterministic = false;
-		if (!ch->deterministic_railgate_allowed)
+		if (!ch->deterministic_railgate_allowed) {
 			gk20a_idle(g);
+		}
 		ch->deterministic_railgate_allowed = false;
 
 		nvgpu_rwsem_up_read(&g->deterministic_busy);
@@ -462,8 +480,9 @@ unbind:
 		nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
 		nvgpu_list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list,
 				dbg_session_channel_data, ch_entry) {
-			if (ch_data->chid == ch->chid)
+			if (ch_data->chid == ch->chid) {
 				ch_data->unbind_single_channel(dbg_s, ch_data);
+			}
 		}
 		nvgpu_mutex_release(&dbg_s->ch_list_lock);
 	}
@@ -471,8 +490,9 @@ unbind:
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 	/* free pre-allocated resources, if applicable */
-	if (channel_gk20a_is_prealloc_enabled(ch))
+	if (channel_gk20a_is_prealloc_enabled(ch)) {
 		channel_gk20a_free_prealloc_resources(ch);
+	}
 
 #if GK20A_CHANNEL_REFCOUNT_TRACKING
 	memset(ch->ref_actions, 0, sizeof(ch->ref_actions));
@@ -576,13 +596,15 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
 		gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_get);
 		nvgpu_atomic_inc(&ch->ref_count);
 		ret = ch;
-	} else
+	} else {
 		ret = NULL;
+	}
 
 	nvgpu_spinlock_release(&ch->ref_obtain_lock);
 
-	if (ret)
+	if (ret) {
 		trace_gk20a_channel_get(ch->chid, caller);
+	}
 
 	return ret;
 }
@@ -694,8 +716,9 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	nvgpu_cond_init(&ch->notifier_wq);
 	nvgpu_cond_init(&ch->semaphore_wq);
 
-	if (g->os_channel.open)
+	if (g->os_channel.open) {
 		g->os_channel.open(ch);
+	}
 
 	/* Mark the channel alive, get-able, with 1 initial use
 	 * references. The initial reference will be decreased in
@@ -757,8 +780,9 @@ static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *c)
 	struct vm_gk20a *ch_vm = c->vm;
 	struct priv_cmd_queue *q = &c->priv_cmd_q;
 
-	if (q->size == 0)
+	if (q->size == 0) {
 		return;
+	}
 
 	nvgpu_dma_unmap_free(ch_vm, &q->mem);
 
@@ -784,16 +808,18 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 
 	/* if free space in the end is less than requested, increase the size
 	 * to make the real allocated space start from beginning. */
-	if (q->put + size > q->size)
+	if (q->put + size > q->size) {
 		size = orig_size + (q->size - q->put);
+	}
 
 	nvgpu_log_info(c->g, "ch %d: priv cmd queue get:put %d:%d",
 			c->chid, q->get, q->put);
 
 	free_count = (q->size - (q->put - q->get) - 1) % q->size;
 
-	if (size > free_count)
+	if (size > free_count) {
 		return -EAGAIN;
+	}
 
 	e->size = orig_size;
 	e->mem = &q->mem;
@@ -830,10 +856,11 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 void free_priv_cmdbuf(struct channel_gk20a *c,
 		struct priv_cmd_entry *e)
 {
-	if (channel_gk20a_is_prealloc_enabled(c))
+	if (channel_gk20a_is_prealloc_enabled(c)) {
 		memset(e, 0, sizeof(struct priv_cmd_entry));
-	else
+	} else {
 		nvgpu_kfree(c->g, e);
+	}
 }
 
 int channel_gk20a_alloc_job(struct channel_gk20a *c,
@@ -852,9 +879,9 @@ int channel_gk20a_alloc_job(struct channel_gk20a *c,
 		 */
 		nvgpu_smp_rmb();
 
-		if (CIRC_SPACE(put, get, c->joblist.pre_alloc.length))
+		if (CIRC_SPACE(put, get, c->joblist.pre_alloc.length)) {
 			*job_out = &c->joblist.pre_alloc.jobs[put];
-		else {
+		} else {
 			nvgpu_warn(c->g,
 				"out of job ringbuffer space");
 			err = -EAGAIN;
@@ -862,8 +889,9 @@ int channel_gk20a_alloc_job(struct channel_gk20a *c,
 	} else {
 		*job_out = nvgpu_kzalloc(c->g,
 				sizeof(struct channel_gk20a_job));
-		if (!*job_out)
+		if (!*job_out) {
 			err = -ENOMEM;
+		}
 	}
 
 	return err;
@@ -883,24 +911,27 @@ void channel_gk20a_free_job(struct channel_gk20a *c,
 		memset(job, 0, sizeof(*job));
 		job->wait_cmd = wait_cmd;
 		job->incr_cmd = incr_cmd;
-	} else
+	} else {
 		nvgpu_kfree(c->g, job);
+	}
 }
 
 void channel_gk20a_joblist_lock(struct channel_gk20a *c)
 {
-	if (channel_gk20a_is_prealloc_enabled(c))
+	if (channel_gk20a_is_prealloc_enabled(c)) {
 		nvgpu_mutex_acquire(&c->joblist.pre_alloc.read_lock);
-	else
+	} else {
 		nvgpu_spinlock_acquire(&c->joblist.dynamic.lock);
+	}
 }
 
 void channel_gk20a_joblist_unlock(struct channel_gk20a *c)
 {
-	if (channel_gk20a_is_prealloc_enabled(c))
+	if (channel_gk20a_is_prealloc_enabled(c)) {
 		nvgpu_mutex_release(&c->joblist.pre_alloc.read_lock);
-	else
+	} else {
 		nvgpu_spinlock_release(&c->joblist.dynamic.lock);
+	}
 }
 
 static struct channel_gk20a_job *channel_gk20a_joblist_peek(
@@ -915,9 +946,10 @@ static struct channel_gk20a_job *channel_gk20a_joblist_peek(
 			job = &c->joblist.pre_alloc.jobs[get];
 		}
 	} else {
-		if (!nvgpu_list_empty(&c->joblist.dynamic.jobs))
+		if (!nvgpu_list_empty(&c->joblist.dynamic.jobs)) {
 			job = nvgpu_list_first_entry(&c->joblist.dynamic.jobs,
 				channel_gk20a_job, list);
+		}
 	}
 
 	return job;
@@ -972,8 +1004,9 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 	size_t size;
 	struct priv_cmd_entry *entries = NULL;
 
-	if (channel_gk20a_is_prealloc_enabled(c) || !num_jobs)
+	if (channel_gk20a_is_prealloc_enabled(c) || !num_jobs) {
 		return -EINVAL;
+	}
 
 	/*
 	 * pre-allocate the job list.
@@ -981,9 +1014,10 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 	 * to make sure we don't hit an overflow condition
 	 */
 	size = sizeof(struct channel_gk20a_job);
-	if (num_jobs <= ULONG_MAX / size)
+	if (num_jobs <= ULONG_MAX / size) {
 		c->joblist.pre_alloc.jobs = nvgpu_vzalloc(c->g,
 					num_jobs * size);
+	}
 	if (!c->joblist.pre_alloc.jobs) {
 		err = -ENOMEM;
 		goto clean_up;
@@ -995,8 +1029,9 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 	 * to make sure we don't hit an overflow condition
 	 */
 	size = sizeof(struct priv_cmd_entry);
-	if (num_jobs <= ULONG_MAX / (size << 1))
+	if (num_jobs <= ULONG_MAX / (size << 1)) {
 		entries = nvgpu_vzalloc(c->g, (num_jobs << 1) * size);
+	}
 	if (!entries) {
 		err = -ENOMEM;
 		goto clean_up_joblist;
@@ -1010,8 +1045,9 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 
 	/* pre-allocate a fence pool */
 	err = gk20a_alloc_fence_pool(c, num_jobs);
-	if (err)
+	if (err) {
 		goto clean_up_priv_cmd;
+	}
 
 	c->joblist.pre_alloc.length = num_jobs;
 	c->joblist.pre_alloc.put = 0;
@@ -1064,8 +1100,9 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 	gpfifo_size = gpfifo_args->num_entries;
 	gpfifo_entry_size = nvgpu_get_gpfifo_entry_size();
 
-	if (gpfifo_args->flags & NVGPU_GPFIFO_FLAGS_SUPPORT_VPR)
+	if (gpfifo_args->flags & NVGPU_GPFIFO_FLAGS_SUPPORT_VPR) {
 		c->vpr = true;
+	}
 
 	if (gpfifo_args->flags & NVGPU_GPFIFO_FLAGS_SUPPORT_DETERMINISTIC) {
 		nvgpu_rwsem_down_read(&g->deterministic_busy);
@@ -1163,38 +1200,44 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 
 		if (g->ops.fifo.resetup_ramfc) {
 			err = g->ops.fifo.resetup_ramfc(c);
-			if (err)
+			if (err) {
 				goto clean_up_sync;
+			}
 		}
 	}
 
-	if (!nvgpu_is_timeouts_enabled(c->g) || !c->timeout.enabled)
+	if (!nvgpu_is_timeouts_enabled(c->g) || !c->timeout.enabled) {
 		acquire_timeout = 0;
-	else
+	} else {
 		acquire_timeout = c->timeout.limit_ms;
+	}
 
 	err = g->ops.fifo.setup_ramfc(c, gpfifo_gpu_va,
 			c->gpfifo.entry_num,
 			acquire_timeout, gpfifo_args->flags);
-	if (err)
+	if (err) {
 		goto clean_up_sync;
+	}
 
 	/* TBD: setup engine contexts */
 
 	if (gpfifo_args->num_inflight_jobs) {
 		err = channel_gk20a_prealloc_resources(c,
 				gpfifo_args->num_inflight_jobs);
-		if (err)
+		if (err) {
 			goto clean_up_sync;
+		}
 	}
 
 	err = channel_gk20a_alloc_priv_cmdbuf(c);
-	if (err)
+	if (err) {
 		goto clean_up_prealloc;
+	}
 
 	err = channel_gk20a_update_runlist(c, true);
-	if (err)
+	if (err) {
 		goto clean_up_priv_cmd;
+	}
 
 	g->ops.fifo.bind_channel(c);
 
@@ -1204,8 +1247,9 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 clean_up_priv_cmd:
 	channel_gk20a_free_priv_cmdbuf(c);
 clean_up_prealloc:
-	if (gpfifo_args->num_inflight_jobs)
+	if (gpfifo_args->num_inflight_jobs) {
 		channel_gk20a_free_prealloc_resources(c);
+	}
 clean_up_sync:
 	if (c->sync) {
 		gk20a_channel_sync_destroy(c->sync, false);
@@ -1235,10 +1279,12 @@ clean_up_idle:
 
 void gk20a_channel_free_usermode_buffers(struct channel_gk20a *c)
 {
-	if (nvgpu_mem_is_valid(&c->usermode_userd))
+	if (nvgpu_mem_is_valid(&c->usermode_userd)) {
 		nvgpu_dma_free(c->g, &c->usermode_userd);
-	if (nvgpu_mem_is_valid(&c->usermode_gpfifo))
+	}
+	if (nvgpu_mem_is_valid(&c->usermode_gpfifo)) {
 		nvgpu_dma_free(c->g, &c->usermode_gpfifo);
+	}
 }
 
 /* Update with this periodically to determine how the gpfifo is draining. */
@@ -1247,8 +1293,9 @@ static inline u32 update_gp_get(struct gk20a *g,
 {
 	u32 new_get = g->ops.fifo.userd_gp_get(g, c);
 
-	if (new_get < c->gpfifo.get)
+	if (new_get < c->gpfifo.get) {
 		c->gpfifo.wrap = !c->gpfifo.wrap;
+	}
 	c->gpfifo.get = new_get;
 	return new_get;
 }
@@ -1314,11 +1361,13 @@ static void __gk20a_channel_timeout_start(struct channel_gk20a *ch)
  */
 static void gk20a_channel_timeout_start(struct channel_gk20a *ch)
 {
-	if (!nvgpu_is_timeouts_enabled(ch->g))
+	if (!nvgpu_is_timeouts_enabled(ch->g)) {
 		return;
+	}
 
-	if (!ch->timeout.enabled)
+	if (!ch->timeout.enabled) {
 		return;
+	}
 
 	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 
@@ -1384,12 +1433,14 @@ void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
 
-		if (!gk20a_channel_get(ch))
+		if (!gk20a_channel_get(ch)) {
 			continue;
+		}
 
 		nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
-		if (ch->timeout.running)
+		if (ch->timeout.running) {
 			__gk20a_channel_timeout_start(ch);
+		}
 		nvgpu_raw_spinlock_release(&ch->timeout.lock);
 
 		gk20a_channel_put(ch);
@@ -1441,8 +1492,9 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 		ch->chid);
 
 	/* force reset calls gk20a_debug_dump but not this */
-	if (ch->timeout.debug_dump)
+	if (ch->timeout.debug_dump) {
 		gk20a_gr_debug_dump(g);
+	}
 
 	g->ops.fifo.force_reset_ch(ch,
 		NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT,
@@ -1468,8 +1520,9 @@ static void gk20a_channel_timeout_check(struct channel_gk20a *ch)
 	running = ch->timeout.running;
 	nvgpu_raw_spinlock_release(&ch->timeout.lock);
 
-	if (running)
+	if (running) {
 		gk20a_channel_timeout_handler(ch);
+	}
 }
 
 /**
@@ -1624,8 +1677,9 @@ static int gk20a_channel_poll_worker(void *arg)
 				__gk20a_channel_worker_pending(g, get),
 				watchdog_interval);
 
-		if (ret == 0)
+		if (ret == 0) {
 			gk20a_channel_worker_process(g, &get);
+		}
 
 		if (nvgpu_timeout_peek_expired(&timeout)) {
 			gk20a_channel_poll_timeouts(g);
@@ -1641,8 +1695,9 @@ static int __nvgpu_channel_worker_start(struct gk20a *g)
 	char thread_name[64];
 	int err = 0;
 
-	if (nvgpu_thread_is_running(&g->channel_worker.poll_task))
+	if (nvgpu_thread_is_running(&g->channel_worker.poll_task)) {
 		return err;
+	}
 
 	nvgpu_mutex_acquire(&g->channel_worker.start_lock);
 
@@ -1683,8 +1738,9 @@ int nvgpu_channel_worker_init(struct gk20a *g)
 	nvgpu_init_list_node(&g->channel_worker.items);
 	nvgpu_spinlock_init(&g->channel_worker.items_lock);
 	err = nvgpu_mutex_init(&g->channel_worker.start_lock);
-	if (err)
+	if (err) {
 		goto error_check;
+	}
 
 	err = __nvgpu_channel_worker_start(g);
 error_check:
@@ -1758,15 +1814,17 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 	struct priv_cmd_queue *q = &c->priv_cmd_q;
 	struct gk20a *g = c->g;
 
-	if (!e)
+	if (!e) {
 		return 0;
+	}
 
 	if (e->valid) {
 		/* read the entry's valid flag before reading its contents */
 		nvgpu_smp_rmb();
-		if ((q->get != e->off) && e->off != 0)
+		if ((q->get != e->off) && e->off != 0) {
 			nvgpu_err(g, "requests out-of-order, ch=%d",
 				c->chid);
+		}
 		q->get = e->off + e->size;
 	}
 
@@ -1787,8 +1845,9 @@ int gk20a_channel_add_job(struct channel_gk20a *c,
 	if (!skip_buffer_refcounting) {
 		err = nvgpu_vm_get_buffers(vm, &mapped_buffers,
 					&num_mapped_buffers);
-		if (err)
+		if (err) {
 			return err;
+		}
 	}
 
 	/*
@@ -1803,8 +1862,9 @@ int gk20a_channel_add_job(struct channel_gk20a *c,
 
 		gk20a_channel_timeout_start(c);
 
-		if (!pre_alloc_enabled)
+		if (!pre_alloc_enabled) {
 			channel_gk20a_joblist_lock(c);
+		}
 
 		/*
 		 * ensure all pending write complete before adding to the list.
@@ -1814,8 +1874,9 @@ int gk20a_channel_add_job(struct channel_gk20a *c,
 		nvgpu_smp_wmb();
 		channel_gk20a_joblist_add(c, job);
 
-		if (!pre_alloc_enabled)
+		if (!pre_alloc_enabled) {
 			channel_gk20a_joblist_unlock(c);
+		}
 	} else {
 		err = -ETIMEDOUT;
 		goto err_put_buffers;
@@ -1849,8 +1910,9 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 	bool watchdog_on = false;
 
 	c = gk20a_channel_get(c);
-	if (!c)
+	if (!c) {
 		return;
+	}
 
 	if (!c->g->power_on) { /* shutdown case */
 		gk20a_channel_put(c);
@@ -1864,8 +1926,9 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 	 * If !clean_all, we're in a condition where watchdog isn't supported
 	 * anyway (this would be a no-op).
 	 */
-	if (clean_all)
+	if (clean_all) {
 		watchdog_on = gk20a_channel_timeout_stop(c);
+	}
 
 	/* Synchronize with abort cleanup that needs the jobs. */
 	nvgpu_mutex_acquire(&c->joblist.cleanup_lock);
@@ -1901,8 +1964,9 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 			 * this - in that case, this is a no-op and the new
 			 * later timeout is still used.
 			 */
-			if (clean_all && watchdog_on)
+			if (clean_all && watchdog_on) {
 				gk20a_channel_timeout_continue(c);
+			}
 			break;
 		}
 
@@ -1910,8 +1974,9 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 
 		if (c->sync) {
 			if (c->has_os_fence_framework_support &&
-				g->os_channel.os_fence_framework_inst_exists(c))
+				g->os_channel.os_fence_framework_inst_exists(c)) {
 				g->os_channel.signal_os_fence_framework(c);
+			}
 
 			if (g->aggressive_sync_destroy_thresh) {
 				nvgpu_mutex_acquire(&c->sync_lock);
@@ -1926,9 +1991,10 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 			}
 		}
 
-		if (job->num_mapped_buffers)
+		if (job->num_mapped_buffers) {
 			nvgpu_vm_put_buffers(vm, job->mapped_buffers,
 				job->num_mapped_buffers);
+		}
 
 		/* Remove job from channel's job list before we close the
 		 * fences, to prevent other callers (gk20a_channel_abort) from
@@ -1964,8 +2030,9 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 		 * Deterministic channels have a channel-wide power reference;
 		 * for others, there's one per submit.
 		 */
-		if (!c->deterministic)
+		if (!c->deterministic) {
 			gk20a_idle(g);
+		}
 
 		if (!clean_all) {
 			/* Timeout isn't supported here so don't touch it. */
@@ -1975,8 +2042,9 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 
 	nvgpu_mutex_release(&c->joblist.cleanup_lock);
 
-	if (job_finished && g->os_channel.work_completion_signal)
+	if (job_finished && g->os_channel.work_completion_signal) {
 		g->os_channel.work_completion_signal(c);
+	}
 
 	gk20a_channel_put(c);
 }
@@ -2021,8 +2089,9 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
 
-		if (!gk20a_channel_get(ch))
+		if (!gk20a_channel_get(ch)) {
 			continue;
+		}
 
 		if (ch->deterministic && !ch->deterministic_railgate_allowed) {
 			/*
@@ -2058,16 +2127,18 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
 
-		if (!gk20a_channel_get(ch))
+		if (!gk20a_channel_get(ch)) {
 			continue;
+		}
 
 		/*
 		 * Deterministic state changes inside deterministic_busy lock,
 		 * which we took in deterministic_idle.
 		 */
 		if (ch->deterministic && !ch->deterministic_railgate_allowed) {
-			if (gk20a_busy(g))
+			if (gk20a_busy(g)) {
 				nvgpu_err(g, "cannot busy() again!");
+			}
 			/* Took this in idle() */
 			gk20a_channel_put(ch);
 		}
@@ -2103,17 +2174,21 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	nvgpu_init_list_node(&c->worker_item);
 
 	err = nvgpu_mutex_init(&c->ioctl_lock);
-	if (err)
+	if (err) {
 		return err;
+	}
 	err = nvgpu_mutex_init(&c->joblist.cleanup_lock);
-	if (err)
+	if (err) {
 		goto fail_1;
+	}
 	err = nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock);
-	if (err)
+	if (err) {
 		goto fail_2;
+	}
 	err = nvgpu_mutex_init(&c->sync_lock);
-	if (err)
+	if (err) {
 		goto fail_3;
+	}
 #if defined(CONFIG_GK20A_CYCLE_STATS)
 	err = nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex);
 	if (err)
@@ -2123,8 +2198,9 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 		goto fail_5;
 #endif
 	err = nvgpu_mutex_init(&c->dbg_s_lock);
-	if (err)
+	if (err) {
 		goto fail_6;
+	}
 
 	nvgpu_list_add(&c->free_chs, &g->fifo.free_chs);
 
@@ -2168,8 +2244,9 @@ int gk20a_channel_suspend(struct gk20a *g)
 			/* preempt the channel */
 			gk20a_fifo_preempt(g, ch);
 			/* wait for channel update notifiers */
-			if (g->os_channel.work_completion_cancel_sync)
+			if (g->os_channel.work_completion_cancel_sync) {
 				g->os_channel.work_completion_cancel_sync(ch);
+			}
 
 			channels_in_use = true;
 
@@ -2213,8 +2290,9 @@ int gk20a_channel_resume(struct gk20a *g)
 		}
 	}
 
-	if (channels_in_use)
+	if (channels_in_use) {
 		gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, true, true);
+	}
 
 	nvgpu_log_fn(g, "done");
 	return 0;
@@ -2259,8 +2337,9 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 			 * user-space managed
 			 * semaphore.
 			 */
-			if (!c->deterministic)
+			if (!c->deterministic) {
 				gk20a_channel_update(c);
+			}
 		}
 		gk20a_channel_put(c);
 	}