author     Terje Bergstrom <tbergstrom@nvidia.com>  2017-10-16 17:58:17 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-10-17 17:05:19 -0400
commit     e039dcbc9dd7d0c47895bdbb49cdc3e1d11a3cae (patch)
tree       38de57b02173520e7a279775f82e3b48e3c1aa87 /drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
parent     8f55976d4952020f1e7f257087bb79cfeb64f193 (diff)
gpu: nvgpu: Use nvgpu_rwsem as TSG channel lock
Use the abstract nvgpu_rwsem as the TSG channel list lock instead of the
Linux-specific rw_semaphore.

JIRA NVGPU-259

Change-Id: I41a38b29d4651838b1962d69f102af1384e12cb6
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1579935
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
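For background, nvgpu_rwsem is nvgpu's OS-independent reader-writer semaphore
wrapper, so common code such as tsg_gk20a.c no longer calls Linux locking
primitives directly. A minimal sketch of the Linux backend, assuming it does
nothing more than wrap rw_semaphore (the real definitions live in the nvgpu
abstraction headers, not in this file; treat the struct layout here as
illustrative):

#include <linux/rwsem.h>

/* Illustrative wrapper: an opaque type the core code can hold without
 * knowing it is backed by a Linux rw_semaphore. */
struct nvgpu_rwsem {
	struct rw_semaphore rwsem;
};

static inline void nvgpu_rwsem_init(struct nvgpu_rwsem *rwsem)
{
	init_rwsem(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_down_read(struct nvgpu_rwsem *rwsem)
{
	down_read(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_up_read(struct nvgpu_rwsem *rwsem)
{
	up_read(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_down_write(struct nvgpu_rwsem *rwsem)
{
	down_write(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_up_write(struct nvgpu_rwsem *rwsem)
{
	up_write(&rwsem->rwsem);
}

With this shape, a non-Linux build can supply a different struct nvgpu_rwsem
and backend without touching any caller.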
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/tsg_gk20a.c')
 drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 6c1c2955..cde281ad 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -44,7 +44,7 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 	 * we first need to enable all channels with NEXT and CTX_RELOAD set,
 	 * and then rest of the channels should be enabled
 	 */
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
 		is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
@@ -62,7 +62,7 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 
 		g->ops.fifo.enable_channel(ch);
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	gk20a_fifo_enable_tsg_sched(g, tsg);
 
@@ -74,11 +74,11 @@ int gk20a_disable_tsg(struct tsg_gk20a *tsg)
 	struct gk20a *g = tsg->g;
 	struct channel_gk20a *ch;
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		g->ops.fifo.disable_channel(ch);
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	return 0;
 }
@@ -130,9 +130,9 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 		return -EINVAL;
 	}
 
-	down_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 	nvgpu_list_add_tail(&ch->ch_entry, &tsg->ch_list);
-	up_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
 	nvgpu_ref_get(&tsg->refcount);
 
@@ -158,9 +158,9 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
 		/* If channel unbind fails, channel is still part of runlist */
 		channel_gk20a_update_runlist(ch, false);
 
-		down_write(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 		nvgpu_list_del(&ch->ch_entry);
-		up_write(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 	}
 
 	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
@@ -186,7 +186,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
 	tsg->tsgid = tsgid;
 
 	nvgpu_init_list_node(&tsg->ch_list);
-	init_rwsem(&tsg->ch_list_lock);
+	nvgpu_rwsem_init(&tsg->ch_list_lock);
 
 	nvgpu_init_list_node(&tsg->event_id_list);
 	err = nvgpu_mutex_init(&tsg->event_id_list_lock);
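The read/write split above follows the usual rwsem discipline: paths that only
iterate over tsg->ch_list (enable/disable) take the semaphore for reading,
while bind/unbind, which mutate the list, take it for writing. A condensed,
hypothetical reader built from the calls in this diff (tsg_visit_channels is
illustrative, not part of the driver):

static void tsg_visit_channels(struct tsg_gk20a *tsg)
{
	struct channel_gk20a *ch;

	/* Readers may run concurrently; writers (bind/unbind) are excluded. */
	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
		/* Per-channel work; the list cannot change under us. */
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
}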