author     Deepak Nibade <dnibade@nvidia.com>                   2017-12-06 07:39:59 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-12-08 14:58:07 -0500
commit     6ec7da5eba5481e5ff106d7c616d84ac1c847d21 (patch)
tree       175742a684660ed0e5600c269e08d4935159ff14
parent     d4c51a7321a506a73ad6c9c64b3a443ce98c1700 (diff)
gpu: nvgpu: use nvgpu list APIs instead of linux APIs
Use the nvgpu-specific list API nvgpu_list_for_each_entry() instead of
calling the Linux-specific list_for_each_entry()

Jira NVGPU-444

Change-Id: I3c1fd495ed9e8bebab1f23b6769944373b46059b
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1612442
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
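The visible difference at every converted call site is one extra argument: the Linux list_for_each_entry() derives the containing struct type with the GCC typeof() extension, while the nvgpu wrappers take the struct tag explicitly, which keeps the OS-abstraction layer free of compiler extensions. What follows is a minimal, self-contained sketch of such typeof()-free iterators, inferred from the call sites in this change; the names, layout, and the demo struct are illustrative, not quoted from the real include/nvgpu/list.h.

#include <stddef.h>
#include <stdio.h>

/* Doubly linked node embedded in a containing struct (illustrative). */
struct nvgpu_list_node {
	struct nvgpu_list_node *prev, *next;
};

/*
 * Recover the containing struct from an embedded node. The struct tag
 * is passed in explicitly instead of being derived via typeof().
 */
#define nvgpu_list_entry(node, type, member) \
	((struct type *)((char *)(node) - offsetof(struct type, member)))

#define nvgpu_list_for_each_entry(pos, head, type, member) \
	for ((pos) = nvgpu_list_entry((head)->next, type, member); \
	     &(pos)->member != (head); \
	     (pos) = nvgpu_list_entry((pos)->member.next, type, member))

/*
 * Safe variant: caches the next entry in tmp so pos may be unlinked
 * inside the loop body, mirroring list_for_each_entry_safe().
 */
#define nvgpu_list_for_each_entry_safe(pos, tmp, head, type, member) \
	for ((pos) = nvgpu_list_entry((head)->next, type, member), \
	     (tmp) = nvgpu_list_entry((pos)->member.next, type, member); \
	     &(pos)->member != (head); \
	     (pos) = (tmp), \
	     (tmp) = nvgpu_list_entry((tmp)->member.next, type, member))

/* Hypothetical stand-in for channel_gk20a with its ch_entry node. */
struct demo_channel {
	int chid;
	struct nvgpu_list_node ch_entry;
};

int main(void)
{
	struct nvgpu_list_node head = { &head, &head };
	struct demo_channel a = { .chid = 1 }, b = { .chid = 2 }, *ch;

	/* Hand-link head -> a -> b -> head. */
	head.next = &a.ch_entry;
	a.ch_entry.prev = &head;
	a.ch_entry.next = &b.ch_entry;
	b.ch_entry.prev = &a.ch_entry;
	b.ch_entry.next = &head;
	head.prev = &b.ch_entry;

	nvgpu_list_for_each_entry(ch, &head, demo_channel, ch_entry)
		printf("chid %d\n", ch->chid);
	return 0;
}

Each converted call site in the diff supplies exactly such a tag: channel_gk20a, dbg_session_data, dbg_session_channel_data, or gk20a_event_id_data.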
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_channel.c   |  4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/fifo_vgpu.c  |  6
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c    |  9
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c          |  8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c             | 26
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c               |  9
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c               |  3
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.c             |  4
8 files changed, 42 insertions, 27 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index b0d1ccff..5319b829 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -791,8 +791,8 @@ static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
 	bool event_found = false;
 
 	nvgpu_mutex_acquire(&ch->event_id_list_lock);
-	list_for_each_entry(local_event_id_data, &ch->event_id_list,
-			event_id_node) {
+	nvgpu_list_for_each_entry(local_event_id_data, &ch->event_id_list,
+			gk20a_event_id_data, event_id_node) {
 		if (local_event_id_data->event_id == event_id) {
 			event_found = true;
 			break;
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/fifo_vgpu.c
index 7a2a02e9..e06b41da 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/fifo_vgpu.c
@@ -692,7 +692,8 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
-	list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
+			channel_gk20a, ch_entry) {
 		if (gk20a_channel_get(ch_tsg)) {
 			nvgpu_set_error_notifier(ch_tsg, err_code);
 			ch_tsg->has_timedout = true;
@@ -747,7 +748,8 @@ static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
-	list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
+			channel_gk20a, ch_entry) {
 		if (gk20a_channel_get(ch_tsg)) {
 			vgpu_fifo_set_ctx_mmu_error_ch(g, ch_tsg);
 			gk20a_channel_put(ch_tsg);
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c
index 33551d17..4d36e66b 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/gr_vgpu.c
@@ -1132,7 +1132,8 @@ static int vgpu_gr_suspend_resume_contexts(struct gk20a *g,
 	}
 
 	n = 0;
-	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry)
+	nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
+			dbg_session_channel_data, ch_entry)
 		n++;
 
 	if (oob_size < n * sizeof(u16)) {
@@ -1145,7 +1146,8 @@ static int vgpu_gr_suspend_resume_contexts(struct gk20a *g,
 	p = &msg.params.suspend_contexts;
 	p->num_channels = n;
 	n = 0;
-	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry)
+	nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
+			dbg_session_channel_data, ch_entry)
 		oob[n++] = (u16)ch_data->chid;
 
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -1155,7 +1157,8 @@ static int vgpu_gr_suspend_resume_contexts(struct gk20a *g,
 	}
 
 	if (p->resident_chid != (u16)~0) {
-		list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
+		nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
+				dbg_session_channel_data, ch_entry) {
 			if (ch_data->chid == p->resident_chid) {
 				channel_fd = ch_data->channel_fd;
 				break;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index b8fe640c..e10be3c9 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -521,12 +521,12 @@ unbind:
 	/* unlink all debug sessions */
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
-	list_for_each_entry_safe(session_data, tmp_s,
-		&ch->dbg_s_list, dbg_s_entry) {
+	nvgpu_list_for_each_entry_safe(session_data, tmp_s,
+		&ch->dbg_s_list, dbg_session_data, dbg_s_entry) {
 		dbg_s = session_data->dbg_s;
 		nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
-		list_for_each_entry_safe(ch_data, tmp,
-			&dbg_s->ch_list, ch_entry) {
+		nvgpu_list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list,
+			dbg_session_channel_data, ch_entry) {
 			if (ch_data->chid == ch->chid)
 				ch_data->unbind_single_channel(dbg_s, ch_data);
 		}
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c3fd05d1..c5c06df9 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1378,7 +1378,7 @@ bool gk20a_fifo_error_tsg(struct gk20a *g,
 	bool verbose = false;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			verbose |= gk20a_fifo_error_ch(g, ch);
 			gk20a_channel_put(ch);
@@ -1408,7 +1408,7 @@ void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
1408 "TSG %d generated a mmu fault", tsg->tsgid); 1408 "TSG %d generated a mmu fault", tsg->tsgid);
1409 1409
1410 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 1410 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
1411 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 1411 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
1412 if (gk20a_channel_get(ch)) { 1412 if (gk20a_channel_get(ch)) {
1413 gk20a_fifo_set_ctx_mmu_error_ch(g, ch); 1413 gk20a_fifo_set_ctx_mmu_error_ch(g, ch);
1414 gk20a_channel_put(ch); 1414 gk20a_channel_put(ch);
@@ -1431,7 +1431,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
 		g->ops.fifo.preempt_tsg(g, tsgid);
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			ch->has_timedout = true;
 			gk20a_channel_abort_clean_up(ch);
@@ -1932,7 +1932,8 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
-	list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
+			channel_gk20a, ch_entry) {
 		if (gk20a_channel_get(ch_tsg)) {
 			nvgpu_set_error_notifier(ch_tsg, err_code);
 			gk20a_channel_put(ch_tsg);
@@ -2127,7 +2128,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	 * fifo recovery is needed if at least one channel reached the
 	 * maximum timeout without progress (update in gpfifo pointers).
 	 */
-	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			recover = gk20a_channel_update_and_check_timeout(ch,
 					*ms, &progress);
@@ -2146,7 +2147,8 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 				tsg->tsgid, ch->chid);
 			gk20a_channel_put(ch);
 			*ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
-			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+			nvgpu_list_for_each_entry(ch, &tsg->ch_list,
+					channel_gk20a, ch_entry) {
 				if (gk20a_channel_get(ch)) {
 					ch->timeout_accumulated_ms = *ms;
 					gk20a_channel_put(ch);
@@ -2163,7 +2165,8 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 				tsg->tsgid, ch->chid);
 			*ms = ch->timeout_accumulated_ms;
 			gk20a_channel_put(ch);
-			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+			nvgpu_list_for_each_entry(ch, &tsg->ch_list,
+					channel_gk20a, ch_entry) {
 				if (gk20a_channel_get(ch)) {
 					nvgpu_set_error_notifier(ch,
 						NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
@@ -2490,7 +2493,8 @@ static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g,
 	struct channel_gk20a *ch = NULL;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list,
+			channel_gk20a, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			nvgpu_set_error_notifier(ch,
 					error_notifier);
@@ -2650,7 +2654,8 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
2650 "preempt TSG %d timeout", id); 2654 "preempt TSG %d timeout", id);
2651 2655
2652 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 2656 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
2653 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2657 nvgpu_list_for_each_entry(ch, &tsg->ch_list,
2658 channel_gk20a, ch_entry) {
2654 if (!gk20a_channel_get(ch)) 2659 if (!gk20a_channel_get(ch))
2655 continue; 2660 continue;
2656 nvgpu_set_error_notifier(ch, 2661 nvgpu_set_error_notifier(ch,
@@ -3147,7 +3152,8 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 
 			nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 			/* add runnable channels bound to this TSG */
-			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+			nvgpu_list_for_each_entry(ch, &tsg->ch_list,
+					channel_gk20a, ch_entry) {
 				if (!test_bit(ch->chid,
 					runlist->active_channels))
 					continue;
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index bc8d3ea8..3b888559 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -5106,7 +5106,8 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g,
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
 		tsg = &g->fifo.tsg[ch->tsgid];
 		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-		list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
+		nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
+				channel_gk20a, ch_entry) {
 			if (gk20a_channel_get(ch_tsg)) {
 				nvgpu_set_error_notifier(ch_tsg,
 						error_notifier);
@@ -8384,7 +8385,8 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
 
 	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
 
-	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
+			dbg_session_channel_data, ch_entry) {
 		ch = g->fifo.channel + ch_data->chid;
 
 		ctx_resident = gr_gk20a_suspend_context(ch);
@@ -8424,7 +8426,8 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
 		goto clean_up;
 	}
 
-	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
+			dbg_session_channel_data, ch_entry) {
 		ch = g->fifo.channel + ch_data->chid;
 
 		ctx_resident = gr_gk20a_resume_context(ch);
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 578432e5..3b63626c 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -2084,7 +2084,8 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
 
 	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
 
-	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
+			dbg_session_channel_data, ch_entry) {
 		ch = g->fifo.channel + ch_data->chid;
 
 		ctx_resident = gr_gp10b_suspend_context(ch,
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index ae2b6cfc..d90c622d 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -560,7 +560,7 @@ static void gv11b_reset_eng_faulted_tsg(struct tsg_gk20a *tsg)
 	struct channel_gk20a *ch;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		gv11b_reset_eng_faulted_ch(g, ch->chid);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
@@ -581,7 +581,7 @@ static void gv11b_reset_pbdma_faulted_tsg(struct tsg_gk20a *tsg)
 	struct channel_gk20a *ch;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		gv11b_reset_pbdma_faulted_ch(g, ch->chid);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);