author		Terje Bergstrom <tbergstrom@nvidia.com>	2017-10-16 17:58:17 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-10-17 17:05:19 -0400
commit		e039dcbc9dd7d0c47895bdbb49cdc3e1d11a3cae (patch)
tree		38de57b02173520e7a279775f82e3b48e3c1aa87 /drivers/gpu/nvgpu
parent		8f55976d4952020f1e7f257087bb79cfeb64f193 (diff)
gpu: nvgpu: Use nvgpu_rwsem as TSG channel lock
Use the abstract nvgpu_rwsem as the TSG channel list lock instead of
the Linux-specific rw_semaphore.

JIRA NVGPU-259

Change-Id: I41a38b29d4651838b1962d69f102af1384e12cb6
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1579935
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
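For readers unfamiliar with the abstraction: nvgpu_rwsem is a thin portability wrapper, so on Linux builds this change is behaviorally a no-op. A minimal sketch of what the Linux backend might look like follows, assuming struct nvgpu_rwsem directly embeds the kernel's rw_semaphore; only the struct nvgpu_rwsem type and the nvgpu_rwsem_* entry points are taken from the patch itself, while the member name and inline-forwarding style are illustrative assumptions.

/*
 * Hypothetical Linux backend for the nvgpu_rwsem abstraction used by
 * this patch. The "rwsem" member name is an assumption for
 * illustration; the entry points match the calls introduced here.
 */
#include <linux/rwsem.h>

struct nvgpu_rwsem {
	struct rw_semaphore rwsem;
};

static inline void nvgpu_rwsem_init(struct nvgpu_rwsem *rwsem)
{
	init_rwsem(&rwsem->rwsem);
}

/* Shared (reader) side: many threads may iterate the channel list. */
static inline void nvgpu_rwsem_down_read(struct nvgpu_rwsem *rwsem)
{
	down_read(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_up_read(struct nvgpu_rwsem *rwsem)
{
	up_read(&rwsem->rwsem);
}

/* Exclusive (writer) side: used when binding/unbinding a channel. */
static inline void nvgpu_rwsem_down_write(struct nvgpu_rwsem *rwsem)
{
	down_write(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_up_write(struct nvgpu_rwsem *rwsem)
{
	up_write(&rwsem->rwsem);
}

The payoff is in the includes: common code such as tsg_gk20a.h now pulls in <nvgpu/rwsem.h> instead of Linux locking headers, so non-Linux builds of the driver can supply a different nvgpu_rwsem backend without touching the FIFO/TSG code.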
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	38
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/tsg_gk20a.c	18
-rw-r--r--	drivers/gpu/nvgpu/gk20a/tsg_gk20a.h	3
-rw-r--r--	drivers/gpu/nvgpu/gm20b/fifo_gm20b.c	4
-rw-r--r--	drivers/gpu/nvgpu/vgpu/fifo_vgpu.c	4
6 files changed, 36 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 9c41c7ef..b636679e 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1356,14 +1356,14 @@ bool gk20a_fifo_error_tsg(struct gk20a *g,
 	struct channel_gk20a *ch = NULL;
 	bool verbose = false;
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			verbose |= gk20a_fifo_error_ch(g, ch);
 			gk20a_channel_put(ch);
 		}
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	return verbose;
 
@@ -1386,14 +1386,14 @@ void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
 	nvgpu_err(g,
 		"TSG %d generated a mmu fault", tsg->tsgid);
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			gk20a_fifo_set_ctx_mmu_error_ch(g, ch);
 			gk20a_channel_put(ch);
 		}
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 }
 
@@ -1409,7 +1409,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
 	if (preempt)
 		g->ops.fifo.preempt_tsg(g, tsgid);
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			ch->has_timedout = true;
@@ -1417,7 +1417,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
 			gk20a_channel_put(ch);
 		}
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }
 
 int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
@@ -1906,7 +1906,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
 		tsg = &g->fifo.tsg[ch->tsgid];
 
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
 		list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
 			if (gk20a_channel_get(ch_tsg)) {
@@ -1915,7 +1915,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 			}
 		}
 
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 		gk20a_fifo_recover_tsg(g, ch->tsgid, verbose);
 	} else {
 		gk20a_set_error_notifier(ch, err_code);
@@ -1971,9 +1971,9 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 		goto fail_enable_tsg;
 
 	/* Remove channel from TSG and re-enable rest of the channels */
-	down_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 	nvgpu_list_del(&ch->ch_entry);
-	up_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
 	g->ops.fifo.enable_tsg(tsg);
 
@@ -2084,7 +2084,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	*verbose = false;
 	*ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
 	/* check if there was some progress on any of the TSG channels.
 	 * fifo recovery is needed if at least one channel reached the
@@ -2140,7 +2140,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	 * of them has reached the timeout, there is nothing more to do:
 	 * timeout_accumulated_ms has been updated for all of them.
 	 */
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	return recover;
 }
 
@@ -2470,7 +2470,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g,
 			struct tsg_gk20a *tsg = &f->tsg[id];
 			struct channel_gk20a *ch = NULL;
 
-			down_read(&tsg->ch_list_lock);
+			nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 				if (gk20a_channel_get(ch)) {
 					gk20a_set_error_notifier(ch,
@@ -2478,7 +2478,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g,
 					gk20a_channel_put(ch);
 				}
 			}
-			up_read(&tsg->ch_list_lock);
+			nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 			gk20a_fifo_recover_tsg(g, id, true);
 		}
 	}
@@ -2599,7 +2599,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 		nvgpu_err(g,
 			"preempt TSG %d timeout", id);
 
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 			if (!gk20a_channel_get(ch))
 				continue;
@@ -2607,7 +2607,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 				NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
 			gk20a_channel_put(ch);
 		}
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 		gk20a_fifo_recover_tsg(g, id, true);
 	} else {
 		struct channel_gk20a *ch = &g->fifo.channel[id];
@@ -3095,7 +3095,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 			count++;
 			(*entries_left)--;
 
-			down_read(&tsg->ch_list_lock);
+			nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 			/* add runnable channels bound to this TSG */
 			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 				if (!test_bit(ch->chid,
@@ -3103,7 +3103,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 					continue;
 
 				if (!(*entries_left)) {
-					up_read(&tsg->ch_list_lock);
+					nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 					return NULL;
 				}
 
@@ -3117,7 +3117,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 				runlist_entry += runlist_entry_words;
 				(*entries_left)--;
 			}
-			up_read(&tsg->ch_list_lock);
+			nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 		}
 
 		/* append entries from higher level if this level is empty */
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 1ade6b6a..57c1c0bc 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -5091,7 +5091,7 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g,
 
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
 		tsg = &g->fifo.tsg[ch->tsgid];
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
 			if (gk20a_channel_get(ch_tsg)) {
 				gk20a_set_error_notifier(ch_tsg,
@@ -5099,7 +5099,7 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g,
 				gk20a_channel_put(ch_tsg);
 			}
 		}
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	} else {
 		gk20a_set_error_notifier(ch, error_notifier);
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 6c1c2955..cde281ad 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -44,7 +44,7 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 	 * we first need to enable all channels with NEXT and CTX_RELOAD set,
 	 * and then rest of the channels should be enabled
 	 */
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
 		is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
@@ -62,7 +62,7 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 
 		g->ops.fifo.enable_channel(ch);
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	gk20a_fifo_enable_tsg_sched(g, tsg);
 
@@ -74,11 +74,11 @@ int gk20a_disable_tsg(struct tsg_gk20a *tsg)
 	struct gk20a *g = tsg->g;
 	struct channel_gk20a *ch;
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		g->ops.fifo.disable_channel(ch);
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	return 0;
 }
@@ -130,9 +130,9 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 		return -EINVAL;
 	}
 
-	down_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 	nvgpu_list_add_tail(&ch->ch_entry, &tsg->ch_list);
-	up_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
 	nvgpu_ref_get(&tsg->refcount);
 
@@ -158,9 +158,9 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
 		/* If channel unbind fails, channel is still part of runlist */
 		channel_gk20a_update_runlist(ch, false);
 
-		down_write(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 		nvgpu_list_del(&ch->ch_entry);
-		up_write(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 	}
 
 	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
@@ -186,7 +186,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
 	tsg->tsgid = tsgid;
 
 	nvgpu_init_list_node(&tsg->ch_list);
-	init_rwsem(&tsg->ch_list_lock);
+	nvgpu_rwsem_init(&tsg->ch_list_lock);
 
 	nvgpu_init_list_node(&tsg->event_id_list);
 	err = nvgpu_mutex_init(&tsg->event_id_list_lock);
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
index 3f6ef4de..51bc1086 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
@@ -24,6 +24,7 @@
 
 #include <nvgpu/lock.h>
 #include <nvgpu/kref.h>
+#include <nvgpu/rwsem.h>
 
 #define NVGPU_INVALID_TSG_ID (-1)
 
@@ -46,7 +47,7 @@ struct tsg_gk20a {
 
 	struct nvgpu_list_node ch_list;
 	int num_active_channels;
-	struct rw_semaphore ch_list_lock;
+	struct nvgpu_rwsem ch_list_lock;
 
 	unsigned int timeslice_us;
 	unsigned int timeslice_timeout;
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index d6429871..f4ddd92f 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -210,13 +210,13 @@ void gm20b_fifo_tsg_verify_status_ctx_reload(struct channel_gk20a *ch)
 
 	/* If CTX_RELOAD is set on a channel, move it to some other channel */
 	if (gk20a_fifo_channel_status_is_ctx_reload(ch->g, ch->chid)) {
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		nvgpu_list_for_each_entry(temp_ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 			if (temp_ch->chid != ch->chid) {
 				gm20b_fifo_set_ctx_reload(temp_ch);
 				break;
 			}
 		}
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	}
 }
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 582894b9..eac720ca 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -712,7 +712,7 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
 		tsg = &g->fifo.tsg[ch->tsgid];
 
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
 		list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
 			if (gk20a_channel_get(ch_tsg)) {
@@ -722,7 +722,7 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
 			}
 		}
 
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	} else {
 		gk20a_set_error_notifier(ch, err_code);
 		ch->has_timedout = true;