author     Deepak Nibade <dnibade@nvidia.com>    2017-01-24 08:30:42 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2017-02-22 07:15:02 -0500
commit     8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree       505dfd2ea2aca2f1cbdb254baee980862d21e04d /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent     1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using the Linux APIs for mutexes and spinlocks directly, use
the new APIs defined in <nvgpu/lock.h>.

Replace the Linux-specific mutex/spinlock declaration, init, lock and
unlock APIs with the new APIs, e.g. struct mutex is replaced by
struct nvgpu_mutex and mutex_lock() is replaced by nvgpu_mutex_acquire().

Also include <nvgpu/lock.h> instead of <linux/mutex.h> and
<linux/spinlock.h>.

Add explicit nvgpu/lock.h includes to the files below to fix compilation
failures:
gk20a/platform_gk20a.h
include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
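For reference, the conversion pattern applied throughout channel_gk20a.c looks roughly like the sketch below, presumably so that common nvgpu code no longer depends on Linux locking primitives directly. The <nvgpu/lock.h> header and the nvgpu_mutex_*/nvgpu_spinlock_*/nvgpu_raw_spinlock_* calls are taken from this patch itself; the example structure and functions are hypothetical, the struct nvgpu_spinlock and struct nvgpu_raw_spinlock type names are assumed by analogy with struct nvgpu_mutex, and on Linux the nvgpu lock types are presumably thin wrappers over the corresponding kernel primitives.

#include <nvgpu/lock.h>	/* replaces <linux/mutex.h> and <linux/spinlock.h> */

/* Hypothetical example state; channel_gk20a.c converts fields of these types. */
struct example_state {
	struct nvgpu_mutex list_lock;		/* was: struct mutex */
	struct nvgpu_spinlock ref_lock;		/* assumed type name; was a spinlock */
	struct nvgpu_raw_spinlock wdt_lock;	/* assumed type name; was a raw spinlock */
	int refs;
};

static void example_init(struct example_state *s)
{
	nvgpu_mutex_init(&s->list_lock);	/* was: mutex_init() */
	nvgpu_spinlock_init(&s->ref_lock);	/* was: spin_lock_init() */
	nvgpu_raw_spinlock_init(&s->wdt_lock);	/* was: raw_spin_lock_init() */
}

static void example_get_ref(struct example_state *s)
{
	nvgpu_spinlock_acquire(&s->ref_lock);	/* was: spin_lock() */
	s->refs++;
	nvgpu_spinlock_release(&s->ref_lock);	/* was: spin_unlock() */
}

static void example_try_deferred_work(struct example_state *s)
{
	/* was: mutex_trylock() / mutex_unlock() */
	if (nvgpu_mutex_tryacquire(&s->list_lock)) {
		/* ... do the deferred work ... */
		nvgpu_mutex_release(&s->list_lock);
	}
}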
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c  220
1 file changed, 110 insertions, 110 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 376a64b0..83a3a523 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -102,7 +102,7 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 
 platform = gk20a_get_platform(f->g->dev);
 
-mutex_lock(&f->free_chs_mutex);
+nvgpu_mutex_acquire(&f->free_chs_mutex);
 if (!list_empty(&f->free_chs)) {
 ch = list_first_entry(&f->free_chs, struct channel_gk20a,
 free_chs);
@@ -111,7 +111,7 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 WARN_ON(ch->referenceable);
 f->used_channels++;
 }
-mutex_unlock(&f->free_chs_mutex);
+nvgpu_mutex_release(&f->free_chs_mutex);
 
 if (platform->aggressive_sync_destroy_thresh &&
 (f->used_channels >
@@ -128,11 +128,11 @@ static void free_channel(struct fifo_gk20a *f,
 
 trace_gk20a_release_used_channel(ch->hw_chid);
 /* refcount is zero here and channel is in a freed/dead state */
-mutex_lock(&f->free_chs_mutex);
+nvgpu_mutex_acquire(&f->free_chs_mutex);
 /* add to head to increase visibility of timing-related bugs */
 list_add(&ch->free_chs, &f->free_chs);
 f->used_channels--;
-mutex_unlock(&f->free_chs_mutex);
+nvgpu_mutex_release(&f->free_chs_mutex);
 
 if (platform->aggressive_sync_destroy_thresh &&
 (f->used_channels <
@@ -494,10 +494,10 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
 gk20a_channel_cancel_job_clean_up(ch, true);
 
 /* ensure no fences are pending */
-mutex_lock(&ch->sync_lock);
+nvgpu_mutex_acquire(&ch->sync_lock);
 if (ch->sync)
 ch->sync->set_min_eq_max(ch->sync);
-mutex_unlock(&ch->sync_lock);
+nvgpu_mutex_release(&ch->sync_lock);
 
 /* release all job semaphores (applies only to jobs that use
 semaphore synchronization) */
@@ -595,7 +595,7 @@ void gk20a_disable_channel(struct channel_gk20a *ch)
 static void gk20a_free_cycle_stats_buffer(struct channel_gk20a *ch)
 {
 /* disable existing cyclestats buffer */
-mutex_lock(&ch->cyclestate.cyclestate_buffer_mutex);
+nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex);
 if (ch->cyclestate.cyclestate_buffer_handler) {
 dma_buf_vunmap(ch->cyclestate.cyclestate_buffer_handler,
 ch->cyclestate.cyclestate_buffer);
@@ -604,7 +604,7 @@ static void gk20a_free_cycle_stats_buffer(struct channel_gk20a *ch)
 ch->cyclestate.cyclestate_buffer = NULL;
 ch->cyclestate.cyclestate_buffer_size = 0;
 }
-mutex_unlock(&ch->cyclestate.cyclestate_buffer_mutex);
+nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex);
 }
 
 static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
@@ -654,12 +654,12 @@ static int gk20a_flush_cycle_stats_snapshot(struct channel_gk20a *ch)
 {
 int ret;
 
-mutex_lock(&ch->cs_client_mutex);
+nvgpu_mutex_acquire(&ch->cs_client_mutex);
 if (ch->cs_client)
 ret = gr_gk20a_css_flush(ch, ch->cs_client);
 else
 ret = -EBADF;
-mutex_unlock(&ch->cs_client_mutex);
+nvgpu_mutex_release(&ch->cs_client_mutex);
 
 return ret;
 }
@@ -671,7 +671,7 @@ static int gk20a_attach_cycle_stats_snapshot(struct channel_gk20a *ch,
 {
 int ret;
 
-mutex_lock(&ch->cs_client_mutex);
+nvgpu_mutex_acquire(&ch->cs_client_mutex);
 if (ch->cs_client) {
 ret = -EEXIST;
 } else {
@@ -681,7 +681,7 @@ static int gk20a_attach_cycle_stats_snapshot(struct channel_gk20a *ch,
 perfmon_id_start,
 &ch->cs_client);
 }
-mutex_unlock(&ch->cs_client_mutex);
+nvgpu_mutex_release(&ch->cs_client_mutex);
 
 return ret;
 }
@@ -690,14 +690,14 @@ static int gk20a_free_cycle_stats_snapshot(struct channel_gk20a *ch)
 {
 int ret;
 
-mutex_lock(&ch->cs_client_mutex);
+nvgpu_mutex_acquire(&ch->cs_client_mutex);
 if (ch->cs_client) {
 ret = gr_gk20a_css_detach(ch, ch->cs_client);
 ch->cs_client = NULL;
 } else {
 ret = 0;
 }
-mutex_unlock(&ch->cs_client_mutex);
+nvgpu_mutex_release(&ch->cs_client_mutex);
 
 return ret;
 }
@@ -824,9 +824,9 @@ static int gk20a_init_error_notifier(struct channel_gk20a *ch,
 memset(ch->error_notifier, 0, sizeof(struct nvgpu_notification));
 
 /* set channel notifiers pointer */
-mutex_lock(&ch->error_notifier_mutex);
+nvgpu_mutex_acquire(&ch->error_notifier_mutex);
 ch->error_notifier_ref = dmabuf;
-mutex_unlock(&ch->error_notifier_mutex);
+nvgpu_mutex_release(&ch->error_notifier_mutex);
 
 return 0;
 }
@@ -857,14 +857,14 @@ void gk20a_set_error_notifier_locked(struct channel_gk20a *ch, __u32 error)
 
 void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error)
 {
-mutex_lock(&ch->error_notifier_mutex);
+nvgpu_mutex_acquire(&ch->error_notifier_mutex);
 gk20a_set_error_notifier_locked(ch, error);
-mutex_unlock(&ch->error_notifier_mutex);
+nvgpu_mutex_release(&ch->error_notifier_mutex);
 }
 
 static void gk20a_free_error_notifiers(struct channel_gk20a *ch)
 {
-mutex_lock(&ch->error_notifier_mutex);
+nvgpu_mutex_acquire(&ch->error_notifier_mutex);
 if (ch->error_notifier_ref) {
 dma_buf_vunmap(ch->error_notifier_ref, ch->error_notifier_va);
 dma_buf_put(ch->error_notifier_ref);
@@ -872,7 +872,7 @@ static void gk20a_free_error_notifiers(struct channel_gk20a *ch)
 ch->error_notifier = NULL;
 ch->error_notifier_va = NULL;
 }
-mutex_unlock(&ch->error_notifier_mutex);
+nvgpu_mutex_release(&ch->error_notifier_mutex);
 }
 
 static void gk20a_wait_until_counter_is_N(
@@ -927,16 +927,16 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 nvgpu_wait_for_deferred_interrupts(g);
 
 /* prevent new refs */
-spin_lock(&ch->ref_obtain_lock);
+nvgpu_spinlock_acquire(&ch->ref_obtain_lock);
 if (!ch->referenceable) {
-spin_unlock(&ch->ref_obtain_lock);
+nvgpu_spinlock_release(&ch->ref_obtain_lock);
 gk20a_err(dev_from_gk20a(ch->g),
 "Extra %s() called to channel %u",
 __func__, ch->hw_chid);
 return;
 }
 ch->referenceable = false;
-spin_unlock(&ch->ref_obtain_lock);
+nvgpu_spinlock_release(&ch->ref_obtain_lock);
 
 /* matches with the initial reference in gk20a_open_new_channel() */
 atomic_dec(&ch->ref_count);
@@ -948,18 +948,18 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 __func__, "references");
 
 /* if engine reset was deferred, perform it now */
-mutex_lock(&f->deferred_reset_mutex);
+nvgpu_mutex_acquire(&f->deferred_reset_mutex);
 if (g->fifo.deferred_reset_pending) {
 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was"
 " deferred, running now");
 /* if lock is already taken, a reset is taking place
 so no need to repeat */
-if (mutex_trylock(&g->fifo.gr_reset_mutex)) {
+if (nvgpu_mutex_tryacquire(&g->fifo.gr_reset_mutex)) {
 gk20a_fifo_deferred_reset(g, ch);
-mutex_unlock(&g->fifo.gr_reset_mutex);
+nvgpu_mutex_release(&g->fifo.gr_reset_mutex);
 }
 }
-mutex_unlock(&f->deferred_reset_mutex);
+nvgpu_mutex_release(&f->deferred_reset_mutex);
 
 if (!gk20a_channel_as_bound(ch))
 goto unbind;
@@ -991,12 +991,12 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 channel_gk20a_free_priv_cmdbuf(ch);
 
 /* sync must be destroyed before releasing channel vm */
-mutex_lock(&ch->sync_lock);
+nvgpu_mutex_acquire(&ch->sync_lock);
 if (ch->sync) {
 gk20a_channel_sync_destroy(ch->sync);
 ch->sync = NULL;
 }
-mutex_unlock(&ch->sync_lock);
+nvgpu_mutex_release(&ch->sync_lock);
 
 /*
 * free the channel used semaphore index.
@@ -1011,10 +1011,10 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 */
 gk20a_vm_put(ch_vm);
 
-spin_lock(&ch->update_fn_lock);
+nvgpu_spinlock_acquire(&ch->update_fn_lock);
 ch->update_fn = NULL;
 ch->update_fn_data = NULL;
-spin_unlock(&ch->update_fn_lock);
+nvgpu_spinlock_release(&ch->update_fn_lock);
 cancel_work_sync(&ch->update_fn_work);
 cancel_delayed_work_sync(&ch->clean_up.wq);
 cancel_delayed_work_sync(&ch->timeout.wq);
@@ -1037,21 +1037,21 @@ unbind:
 WARN_ON(ch->sync);
 
 /* unlink all debug sessions */
-mutex_lock(&g->dbg_sessions_lock);
+nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
 list_for_each_entry_safe(session_data, tmp_s,
 &ch->dbg_s_list, dbg_s_entry) {
 dbg_s = session_data->dbg_s;
-mutex_lock(&dbg_s->ch_list_lock);
+nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
 list_for_each_entry_safe(ch_data, tmp,
 &dbg_s->ch_list, ch_entry) {
 if (ch_data->chid == ch->hw_chid)
 dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
 }
-mutex_unlock(&dbg_s->ch_list_lock);
+nvgpu_mutex_release(&dbg_s->ch_list_lock);
 }
 
-mutex_unlock(&g->dbg_sessions_lock);
+nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 /* free pre-allocated resources, if applicable */
 if (channel_gk20a_is_prealloc_enabled(ch))
@@ -1079,7 +1079,7 @@ static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
 unsigned long prev_jiffies = 0;
 struct device *dev = dev_from_gk20a(ch->g);
 
-spin_lock(&ch->ref_actions_lock);
+nvgpu_spinlock_acquire(&ch->ref_actions_lock);
 
 dev_info(dev, "ch %d: refs %d. Actions, most recent last:\n",
 ch->hw_chid, atomic_read(&ch->ref_count));
@@ -1109,7 +1109,7 @@ static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
 get = (get + 1) % GK20A_CHANNEL_REFCOUNT_TRACKING;
 }
 
-spin_unlock(&ch->ref_actions_lock);
+nvgpu_spinlock_release(&ch->ref_actions_lock);
 #endif
 }
 
@@ -1119,7 +1119,7 @@ static void gk20a_channel_save_ref_source(struct channel_gk20a *ch,
 #if GK20A_CHANNEL_REFCOUNT_TRACKING
 struct channel_gk20a_ref_action *act;
 
-spin_lock(&ch->ref_actions_lock);
+nvgpu_spinlock_acquire(&ch->ref_actions_lock);
 
 act = &ch->ref_actions[ch->ref_actions_put];
 act->type = type;
@@ -1132,7 +1132,7 @@ static void gk20a_channel_save_ref_source(struct channel_gk20a *ch,
 ch->ref_actions_put = (ch->ref_actions_put + 1) %
 GK20A_CHANNEL_REFCOUNT_TRACKING;
 
-spin_unlock(&ch->ref_actions_lock);
+nvgpu_spinlock_release(&ch->ref_actions_lock);
 #endif
 }
 
@@ -1152,7 +1152,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
 const char *caller) {
 struct channel_gk20a *ret;
 
-spin_lock(&ch->ref_obtain_lock);
+nvgpu_spinlock_acquire(&ch->ref_obtain_lock);
 
 if (likely(ch->referenceable)) {
 gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_get);
@@ -1161,7 +1161,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
 } else
 ret = NULL;
 
-spin_unlock(&ch->ref_obtain_lock);
+nvgpu_spinlock_release(&ch->ref_obtain_lock);
 
 if (ret)
 trace_gk20a_channel_get(ch->hw_chid, caller);
@@ -1250,10 +1250,10 @@ static void gk20a_channel_update_runcb_fn(struct work_struct *work)
 void (*update_fn)(struct channel_gk20a *, void *);
 void *update_fn_data;
 
-spin_lock(&ch->update_fn_lock);
+nvgpu_spinlock_acquire(&ch->update_fn_lock);
 update_fn = ch->update_fn;
 update_fn_data = ch->update_fn_data;
-spin_unlock(&ch->update_fn_lock);
+nvgpu_spinlock_release(&ch->update_fn_lock);
 
 if (update_fn)
 update_fn(ch, update_fn_data);
@@ -1268,10 +1268,10 @@ struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
 struct channel_gk20a *ch = gk20a_open_new_channel(g, runlist_id, is_privileged_channel);
 
 if (ch) {
-spin_lock(&ch->update_fn_lock);
+nvgpu_spinlock_acquire(&ch->update_fn_lock);
 ch->update_fn = update_fn;
 ch->update_fn_data = update_fn_data;
-spin_unlock(&ch->update_fn_lock);
+nvgpu_spinlock_release(&ch->update_fn_lock);
 }
 
 return ch;
@@ -1325,13 +1325,13 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 ch->tgid = current->tgid; /* process granularity for FECS traces */
 
 /* unhook all events created on this channel */
-mutex_lock(&ch->event_id_list_lock);
+nvgpu_mutex_acquire(&ch->event_id_list_lock);
 list_for_each_entry_safe(event_id_data, event_id_data_temp,
 &ch->event_id_list,
 event_id_node) {
 list_del_init(&event_id_data->event_id_node);
 }
-mutex_unlock(&ch->event_id_list_lock);
+nvgpu_mutex_release(&ch->event_id_list_lock);
 
 /* By default, channel is regular (non-TSG) channel */
 ch->tsgid = NVGPU_INVALID_TSG_ID;
@@ -1357,7 +1357,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 
 ch->update_fn = NULL;
 ch->update_fn_data = NULL;
-spin_lock_init(&ch->update_fn_lock);
+nvgpu_spinlock_init(&ch->update_fn_lock);
 INIT_WORK(&ch->update_fn_work, gk20a_channel_update_runcb_fn);
 
 /* Mark the channel alive, get-able, with 1 initial use
@@ -1652,17 +1652,17 @@ static void channel_gk20a_free_job(struct channel_gk20a *c,
 void channel_gk20a_joblist_lock(struct channel_gk20a *c)
 {
 if (channel_gk20a_is_prealloc_enabled(c))
-mutex_lock(&c->joblist.pre_alloc.read_lock);
+nvgpu_mutex_acquire(&c->joblist.pre_alloc.read_lock);
 else
-spin_lock(&c->joblist.dynamic.lock);
+nvgpu_spinlock_acquire(&c->joblist.dynamic.lock);
 }
 
 void channel_gk20a_joblist_unlock(struct channel_gk20a *c)
 {
 if (channel_gk20a_is_prealloc_enabled(c))
-mutex_unlock(&c->joblist.pre_alloc.read_lock);
+nvgpu_mutex_release(&c->joblist.pre_alloc.read_lock);
 else
-spin_unlock(&c->joblist.dynamic.lock);
+nvgpu_spinlock_release(&c->joblist.dynamic.lock);
 }
 
 static struct channel_gk20a_job *channel_gk20a_joblist_peek(
@@ -1871,14 +1871,14 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 channel_gk20a_setup_userd(c);
 
 if (!platform->aggressive_sync_destroy_thresh) {
-mutex_lock(&c->sync_lock);
+nvgpu_mutex_acquire(&c->sync_lock);
 c->sync = gk20a_channel_sync_create(c);
 if (!c->sync) {
 err = -ENOMEM;
-mutex_unlock(&c->sync_lock);
+nvgpu_mutex_release(&c->sync_lock);
 goto clean_up_unmap;
 }
-mutex_unlock(&c->sync_lock);
+nvgpu_mutex_release(&c->sync_lock);
 
 if (g->ops.fifo.resetup_ramfc) {
 err = g->ops.fifo.resetup_ramfc(c);
@@ -2085,16 +2085,16 @@ static void gk20a_channel_timeout_start(struct channel_gk20a *ch)
 if (!ch->wdt_enabled)
 return;
 
-raw_spin_lock(&ch->timeout.lock);
+nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 
 if (ch->timeout.initialized) {
-raw_spin_unlock(&ch->timeout.lock);
+nvgpu_raw_spinlock_release(&ch->timeout.lock);
 return;
 }
 
 ch->timeout.gp_get = gk20a_userd_gp_get(ch->g, ch);
 ch->timeout.initialized = true;
-raw_spin_unlock(&ch->timeout.lock);
+nvgpu_raw_spinlock_release(&ch->timeout.lock);
 
 schedule_delayed_work(&ch->timeout.wq,
 msecs_to_jiffies(gk20a_get_channel_watchdog_timeout(ch)));
@@ -2102,18 +2102,18 @@ static void gk20a_channel_timeout_start(struct channel_gk20a *ch)
 
 static void gk20a_channel_timeout_stop(struct channel_gk20a *ch)
 {
-raw_spin_lock(&ch->timeout.lock);
+nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 if (!ch->timeout.initialized) {
-raw_spin_unlock(&ch->timeout.lock);
+nvgpu_raw_spinlock_release(&ch->timeout.lock);
 return;
 }
-raw_spin_unlock(&ch->timeout.lock);
+nvgpu_raw_spinlock_release(&ch->timeout.lock);
 
 cancel_delayed_work_sync(&ch->timeout.wq);
 
-raw_spin_lock(&ch->timeout.lock);
+nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 ch->timeout.initialized = false;
-raw_spin_unlock(&ch->timeout.lock);
+nvgpu_raw_spinlock_release(&ch->timeout.lock);
 }
 
 void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
@@ -2125,13 +2125,13 @@ void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
 struct channel_gk20a *ch = &f->channel[chid];
 
 if (gk20a_channel_get(ch)) {
-raw_spin_lock(&ch->timeout.lock);
+nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 if (!ch->timeout.initialized) {
-raw_spin_unlock(&ch->timeout.lock);
+nvgpu_raw_spinlock_release(&ch->timeout.lock);
 gk20a_channel_put(ch);
 continue;
 }
-raw_spin_unlock(&ch->timeout.lock);
+nvgpu_raw_spinlock_release(&ch->timeout.lock);
 
 cancel_delayed_work_sync(&ch->timeout.wq);
 if (!ch->has_timedout)
@@ -2164,13 +2164,13 @@ static void gk20a_channel_timeout_handler(struct work_struct *work)
 }
 
 /* Need global lock since multiple channels can timeout at a time */
-mutex_lock(&g->ch_wdt_lock);
+nvgpu_mutex_acquire(&g->ch_wdt_lock);
 
 /* Get timed out job and reset the timer */
-raw_spin_lock(&ch->timeout.lock);
+nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 gp_get = ch->timeout.gp_get;
 ch->timeout.initialized = false;
-raw_spin_unlock(&ch->timeout.lock);
+nvgpu_raw_spinlock_release(&ch->timeout.lock);
 
 if (gk20a_userd_gp_get(ch->g, ch) != gp_get) {
 gk20a_channel_timeout_start(ch);
@@ -2187,7 +2187,7 @@ static void gk20a_channel_timeout_handler(struct work_struct *work)
 NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT, true);
 
 fail_unlock:
-mutex_unlock(&g->ch_wdt_lock);
+nvgpu_mutex_release(&g->ch_wdt_lock);
 gk20a_channel_put(ch);
 gk20a_idle(dev_from_gk20a(g));
 }
@@ -2216,17 +2216,17 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 
 static void gk20a_channel_schedule_job_clean_up(struct channel_gk20a *c)
 {
-mutex_lock(&c->clean_up.lock);
+nvgpu_mutex_acquire(&c->clean_up.lock);
 
 if (c->clean_up.scheduled) {
-mutex_unlock(&c->clean_up.lock);
+nvgpu_mutex_release(&c->clean_up.lock);
 return;
 }
 
 c->clean_up.scheduled = true;
 schedule_delayed_work(&c->clean_up.wq, 1);
 
-mutex_unlock(&c->clean_up.lock);
+nvgpu_mutex_release(&c->clean_up.lock);
 }
 
 static void gk20a_channel_cancel_job_clean_up(struct channel_gk20a *c,
@@ -2235,9 +2235,9 @@ static void gk20a_channel_cancel_job_clean_up(struct channel_gk20a *c,
 if (wait_for_completion)
 cancel_delayed_work_sync(&c->clean_up.wq);
 
-mutex_lock(&c->clean_up.lock);
+nvgpu_mutex_acquire(&c->clean_up.lock);
 c->clean_up.scheduled = false;
-mutex_unlock(&c->clean_up.lock);
+nvgpu_mutex_release(&c->clean_up.lock);
 }
 
 static int gk20a_channel_add_job(struct channel_gk20a *c,
@@ -2353,13 +2353,13 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 c->sync->signal_timeline(c->sync);
 
 if (platform->aggressive_sync_destroy_thresh) {
-mutex_lock(&c->sync_lock);
+nvgpu_mutex_acquire(&c->sync_lock);
 if (atomic_dec_and_test(&c->sync->refcount) &&
 platform->aggressive_sync_destroy) {
 gk20a_channel_sync_destroy(c->sync);
 c->sync = NULL;
 }
-mutex_unlock(&c->sync_lock);
+nvgpu_mutex_release(&c->sync_lock);
 }
 }
 
@@ -2563,18 +2563,18 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
 need_sync_fence = true;
 
 if (platform->aggressive_sync_destroy_thresh) {
-mutex_lock(&c->sync_lock);
+nvgpu_mutex_acquire(&c->sync_lock);
 if (!c->sync) {
 c->sync = gk20a_channel_sync_create(c);
 if (!c->sync) {
 err = -ENOMEM;
-mutex_unlock(&c->sync_lock);
+nvgpu_mutex_release(&c->sync_lock);
 goto fail;
 }
 new_sync_created = true;
 }
 atomic_inc(&c->sync->refcount);
-mutex_unlock(&c->sync_lock);
+nvgpu_mutex_release(&c->sync_lock);
 }
 
 if (g->ops.fifo.resetup_ramfc && new_sync_created) {
@@ -2920,31 +2920,31 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 c->g = NULL;
 c->hw_chid = chid;
 atomic_set(&c->bound, false);
-spin_lock_init(&c->ref_obtain_lock);
+nvgpu_spinlock_init(&c->ref_obtain_lock);
 atomic_set(&c->ref_count, 0);
 c->referenceable = false;
 init_waitqueue_head(&c->ref_count_dec_wq);
 #if GK20A_CHANNEL_REFCOUNT_TRACKING
-spin_lock_init(&c->ref_actions_lock);
+nvgpu_spinlock_init(&c->ref_actions_lock);
 #endif
-mutex_init(&c->ioctl_lock);
-mutex_init(&c->error_notifier_mutex);
-spin_lock_init(&c->joblist.dynamic.lock);
-mutex_init(&c->joblist.pre_alloc.read_lock);
-raw_spin_lock_init(&c->timeout.lock);
-mutex_init(&c->sync_lock);
+nvgpu_mutex_init(&c->ioctl_lock);
+nvgpu_mutex_init(&c->error_notifier_mutex);
+nvgpu_spinlock_init(&c->joblist.dynamic.lock);
+nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock);
+nvgpu_raw_spinlock_init(&c->timeout.lock);
+nvgpu_mutex_init(&c->sync_lock);
 INIT_DELAYED_WORK(&c->timeout.wq, gk20a_channel_timeout_handler);
 INIT_DELAYED_WORK(&c->clean_up.wq, gk20a_channel_clean_up_runcb_fn);
-mutex_init(&c->clean_up.lock);
+nvgpu_mutex_init(&c->clean_up.lock);
 INIT_LIST_HEAD(&c->joblist.dynamic.jobs);
 #if defined(CONFIG_GK20A_CYCLE_STATS)
-mutex_init(&c->cyclestate.cyclestate_buffer_mutex);
-mutex_init(&c->cs_client_mutex);
+nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex);
+nvgpu_mutex_init(&c->cs_client_mutex);
 #endif
 INIT_LIST_HEAD(&c->dbg_s_list);
 INIT_LIST_HEAD(&c->event_id_list);
-mutex_init(&c->event_id_list_lock);
-mutex_init(&c->dbg_s_lock);
+nvgpu_mutex_init(&c->event_id_list_lock);
+nvgpu_mutex_init(&c->dbg_s_lock);
 list_add(&c->free_chs, &g->fifo.free_chs);
 
 return 0;
@@ -3102,7 +3102,7 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
 
 poll_wait(filep, &event_id_data->event_id_wq, wait);
 
-mutex_lock(&event_id_data->lock);
+nvgpu_mutex_acquire(&event_id_data->lock);
 
 if (event_id_data->is_tsg) {
 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
@@ -3127,7 +3127,7 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
 }
 }
 
-mutex_unlock(&event_id_data->lock);
+nvgpu_mutex_release(&event_id_data->lock);
 
 return mask;
 }
@@ -3140,15 +3140,15 @@ static int gk20a_event_id_release(struct inode *inode, struct file *filp)
 if (event_id_data->is_tsg) {
 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
 
-mutex_lock(&tsg->event_id_list_lock);
+nvgpu_mutex_acquire(&tsg->event_id_list_lock);
 list_del_init(&event_id_data->event_id_node);
-mutex_unlock(&tsg->event_id_list_lock);
+nvgpu_mutex_release(&tsg->event_id_list_lock);
 } else {
 struct channel_gk20a *ch = g->fifo.channel + event_id_data->id;
 
-mutex_lock(&ch->event_id_list_lock);
+nvgpu_mutex_acquire(&ch->event_id_list_lock);
 list_del_init(&event_id_data->event_id_node);
-mutex_unlock(&ch->event_id_list_lock);
+nvgpu_mutex_release(&ch->event_id_list_lock);
 }
 
 kfree(event_id_data);
@@ -3170,7 +3170,7 @@ static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
 struct gk20a_event_id_data *local_event_id_data;
 bool event_found = false;
 
-mutex_lock(&ch->event_id_list_lock);
+nvgpu_mutex_acquire(&ch->event_id_list_lock);
 list_for_each_entry(local_event_id_data, &ch->event_id_list,
 event_id_node) {
 if (local_event_id_data->event_id == event_id) {
@@ -3178,7 +3178,7 @@ static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
 break;
 }
 }
-mutex_unlock(&ch->event_id_list_lock);
+nvgpu_mutex_release(&ch->event_id_list_lock);
 
 if (event_found) {
 *event_id_data = local_event_id_data;
@@ -3199,7 +3199,7 @@ void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
 if (err)
 return;
 
-mutex_lock(&event_id_data->lock);
+nvgpu_mutex_acquire(&event_id_data->lock);
 
 gk20a_dbg_info(
 "posting event for event_id=%d on ch=%d\n",
@@ -3208,7 +3208,7 @@ void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
 
 wake_up_interruptible_all(&event_id_data->event_id_wq);
 
-mutex_unlock(&event_id_data->lock);
+nvgpu_mutex_release(&event_id_data->lock);
 }
 
 static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
@@ -3253,12 +3253,12 @@ static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
 event_id_data->event_id = event_id;
 
 init_waitqueue_head(&event_id_data->event_id_wq);
-mutex_init(&event_id_data->lock);
+nvgpu_mutex_init(&event_id_data->lock);
 INIT_LIST_HEAD(&event_id_data->event_id_node);
 
-mutex_lock(&ch->event_id_list_lock);
+nvgpu_mutex_acquire(&ch->event_id_list_lock);
 list_add_tail(&event_id_data->event_id_node, &ch->event_id_list);
-mutex_unlock(&ch->event_id_list_lock);
+nvgpu_mutex_release(&ch->event_id_list_lock);
 
 fd_install(local_fd, file);
 file->private_data = event_id_data;
@@ -3569,7 +3569,7 @@ long gk20a_channel_ioctl(struct file *filp,
 
 /* protect our sanity for threaded userspace - most of the channel is
 * not thread safe */
-mutex_lock(&ch->ioctl_lock);
+nvgpu_mutex_acquire(&ch->ioctl_lock);
 
 /* this ioctl call keeps a ref to the file which keeps a ref to the
 * channel */
@@ -3660,12 +3660,12 @@ long gk20a_channel_ioctl(struct file *filp,
 
 /* waiting is thread-safe, not dropping this mutex could
 * deadlock in certain conditions */
-mutex_unlock(&ch->ioctl_lock);
+nvgpu_mutex_release(&ch->ioctl_lock);
 
 err = gk20a_channel_wait(ch,
 (struct nvgpu_wait_args *)buf);
 
-mutex_lock(&ch->ioctl_lock);
+nvgpu_mutex_acquire(&ch->ioctl_lock);
 
 gk20a_idle(dev);
 break;
@@ -3899,7 +3899,7 @@ long gk20a_channel_ioctl(struct file *filp,
 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
 
-mutex_unlock(&ch->ioctl_lock);
+nvgpu_mutex_release(&ch->ioctl_lock);
 
 gk20a_channel_put(ch);
 