summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2017-12-06 07:39:59 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2017-12-08 14:58:07 -0500
commit6ec7da5eba5481e5ff106d7c616d84ac1c847d21 (patch)
tree175742a684660ed0e5600c269e08d4935159ff14 /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parentd4c51a7321a506a73ad6c9c64b3a443ce98c1700 (diff)
gpu: nvgpu: use nvgpu list APIs instead of linux APIs
Use nvgpu specific list APIs nvgpu_list_for_each_entry() instead of calling Linux specific list APIs list_for_each_entry() Jira NVGPU-444 Change-Id: I3c1fd495ed9e8bebab1f23b6769944373b46059b Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1612442 Reviewed-by: Konsta Holtta <kholtta@nvidia.com> Reviewed-by: Alex Waterman <alexw@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c26
1 file changed, 16 insertions, 10 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c3fd05d1..c5c06df9 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1378,7 +1378,7 @@ bool gk20a_fifo_error_tsg(struct gk20a *g,
1378 bool verbose = false; 1378 bool verbose = false;
1379 1379
1380 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 1380 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
1381 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 1381 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
1382 if (gk20a_channel_get(ch)) { 1382 if (gk20a_channel_get(ch)) {
1383 verbose |= gk20a_fifo_error_ch(g, ch); 1383 verbose |= gk20a_fifo_error_ch(g, ch);
1384 gk20a_channel_put(ch); 1384 gk20a_channel_put(ch);
@@ -1408,7 +1408,7 @@ void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
1408 "TSG %d generated a mmu fault", tsg->tsgid); 1408 "TSG %d generated a mmu fault", tsg->tsgid);
1409 1409
1410 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 1410 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
1411 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 1411 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
1412 if (gk20a_channel_get(ch)) { 1412 if (gk20a_channel_get(ch)) {
1413 gk20a_fifo_set_ctx_mmu_error_ch(g, ch); 1413 gk20a_fifo_set_ctx_mmu_error_ch(g, ch);
1414 gk20a_channel_put(ch); 1414 gk20a_channel_put(ch);
@@ -1431,7 +1431,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
1431 g->ops.fifo.preempt_tsg(g, tsgid); 1431 g->ops.fifo.preempt_tsg(g, tsgid);
1432 1432
1433 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 1433 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
1434 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 1434 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
1435 if (gk20a_channel_get(ch)) { 1435 if (gk20a_channel_get(ch)) {
1436 ch->has_timedout = true; 1436 ch->has_timedout = true;
1437 gk20a_channel_abort_clean_up(ch); 1437 gk20a_channel_abort_clean_up(ch);
@@ -1932,7 +1932,8 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
1932 1932
1933 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 1933 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
1934 1934
1935 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { 1935 nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
1936 channel_gk20a, ch_entry) {
1936 if (gk20a_channel_get(ch_tsg)) { 1937 if (gk20a_channel_get(ch_tsg)) {
1937 nvgpu_set_error_notifier(ch_tsg, err_code); 1938 nvgpu_set_error_notifier(ch_tsg, err_code);
1938 gk20a_channel_put(ch_tsg); 1939 gk20a_channel_put(ch_tsg);
@@ -2127,7 +2128,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
2127 * fifo recovery is needed if at least one channel reached the 2128 * fifo recovery is needed if at least one channel reached the
2128 * maximum timeout without progress (update in gpfifo pointers). 2129 * maximum timeout without progress (update in gpfifo pointers).
2129 */ 2130 */
2130 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2131 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
2131 if (gk20a_channel_get(ch)) { 2132 if (gk20a_channel_get(ch)) {
2132 recover = gk20a_channel_update_and_check_timeout(ch, 2133 recover = gk20a_channel_update_and_check_timeout(ch,
2133 *ms, &progress); 2134 *ms, &progress);
@@ -2146,7 +2147,8 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
2146 tsg->tsgid, ch->chid); 2147 tsg->tsgid, ch->chid);
2147 gk20a_channel_put(ch); 2148 gk20a_channel_put(ch);
2148 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; 2149 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
2149 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2150 nvgpu_list_for_each_entry(ch, &tsg->ch_list,
2151 channel_gk20a, ch_entry) {
2150 if (gk20a_channel_get(ch)) { 2152 if (gk20a_channel_get(ch)) {
2151 ch->timeout_accumulated_ms = *ms; 2153 ch->timeout_accumulated_ms = *ms;
2152 gk20a_channel_put(ch); 2154 gk20a_channel_put(ch);
@@ -2163,7 +2165,8 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
2163 tsg->tsgid, ch->chid); 2165 tsg->tsgid, ch->chid);
2164 *ms = ch->timeout_accumulated_ms; 2166 *ms = ch->timeout_accumulated_ms;
2165 gk20a_channel_put(ch); 2167 gk20a_channel_put(ch);
2166 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2168 nvgpu_list_for_each_entry(ch, &tsg->ch_list,
2169 channel_gk20a, ch_entry) {
2167 if (gk20a_channel_get(ch)) { 2170 if (gk20a_channel_get(ch)) {
2168 nvgpu_set_error_notifier(ch, 2171 nvgpu_set_error_notifier(ch,
2169 NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); 2172 NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
@@ -2490,7 +2493,8 @@ static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g,
2490 struct channel_gk20a *ch = NULL; 2493 struct channel_gk20a *ch = NULL;
2491 2494
2492 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 2495 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
2493 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2496 nvgpu_list_for_each_entry(ch, &tsg->ch_list,
2497 channel_gk20a, ch_entry) {
2494 if (gk20a_channel_get(ch)) { 2498 if (gk20a_channel_get(ch)) {
2495 nvgpu_set_error_notifier(ch, 2499 nvgpu_set_error_notifier(ch,
2496 error_notifier); 2500 error_notifier);
@@ -2650,7 +2654,8 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
2650 "preempt TSG %d timeout", id); 2654 "preempt TSG %d timeout", id);
2651 2655
2652 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 2656 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
2653 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2657 nvgpu_list_for_each_entry(ch, &tsg->ch_list,
2658 channel_gk20a, ch_entry) {
2654 if (!gk20a_channel_get(ch)) 2659 if (!gk20a_channel_get(ch))
2655 continue; 2660 continue;
2656 nvgpu_set_error_notifier(ch, 2661 nvgpu_set_error_notifier(ch,
@@ -3147,7 +3152,8 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
3147 3152
3148 nvgpu_rwsem_down_read(&tsg->ch_list_lock); 3153 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
3149 /* add runnable channels bound to this TSG */ 3154 /* add runnable channels bound to this TSG */
3150 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 3155 nvgpu_list_for_each_entry(ch, &tsg->ch_list,
3156 channel_gk20a, ch_entry) {
3151 if (!test_bit(ch->chid, 3157 if (!test_bit(ch->chid,
3152 runlist->active_channels)) 3158 runlist->active_channels))
3153 continue; 3159 continue;