diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2017-10-16 17:58:17 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-10-17 17:05:19 -0400 |
commit | e039dcbc9dd7d0c47895bdbb49cdc3e1d11a3cae (patch) | |
tree | 38de57b02173520e7a279775f82e3b48e3c1aa87 /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |
parent | 8f55976d4952020f1e7f257087bb79cfeb64f193 (diff) |
gpu: nvgpu: Use nvgpu_rwsem as TSG channel lock
Use abstract nvgpu_rwsem as TSG channel list lock instead of the Linux
specific rw_semaphore.
JIRA NVGPU-259
Change-Id: I41a38b29d4651838b1962d69f102af1384e12cb6
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1579935
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 38 |
1 file changed, 19 insertions, 19 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 9c41c7ef..b636679e 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |||
@@ -1356,14 +1356,14 @@ bool gk20a_fifo_error_tsg(struct gk20a *g, | |||
1356 | struct channel_gk20a *ch = NULL; | 1356 | struct channel_gk20a *ch = NULL; |
1357 | bool verbose = false; | 1357 | bool verbose = false; |
1358 | 1358 | ||
1359 | down_read(&tsg->ch_list_lock); | 1359 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); |
1360 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 1360 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
1361 | if (gk20a_channel_get(ch)) { | 1361 | if (gk20a_channel_get(ch)) { |
1362 | verbose |= gk20a_fifo_error_ch(g, ch); | 1362 | verbose |= gk20a_fifo_error_ch(g, ch); |
1363 | gk20a_channel_put(ch); | 1363 | gk20a_channel_put(ch); |
1364 | } | 1364 | } |
1365 | } | 1365 | } |
1366 | up_read(&tsg->ch_list_lock); | 1366 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
1367 | 1367 | ||
1368 | return verbose; | 1368 | return verbose; |
1369 | 1369 | ||
@@ -1386,14 +1386,14 @@ void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g, | |||
1386 | nvgpu_err(g, | 1386 | nvgpu_err(g, |
1387 | "TSG %d generated a mmu fault", tsg->tsgid); | 1387 | "TSG %d generated a mmu fault", tsg->tsgid); |
1388 | 1388 | ||
1389 | down_read(&tsg->ch_list_lock); | 1389 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); |
1390 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 1390 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
1391 | if (gk20a_channel_get(ch)) { | 1391 | if (gk20a_channel_get(ch)) { |
1392 | gk20a_fifo_set_ctx_mmu_error_ch(g, ch); | 1392 | gk20a_fifo_set_ctx_mmu_error_ch(g, ch); |
1393 | gk20a_channel_put(ch); | 1393 | gk20a_channel_put(ch); |
1394 | } | 1394 | } |
1395 | } | 1395 | } |
1396 | up_read(&tsg->ch_list_lock); | 1396 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
1397 | 1397 | ||
1398 | } | 1398 | } |
1399 | 1399 | ||
@@ -1409,7 +1409,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt) | |||
1409 | if (preempt) | 1409 | if (preempt) |
1410 | g->ops.fifo.preempt_tsg(g, tsgid); | 1410 | g->ops.fifo.preempt_tsg(g, tsgid); |
1411 | 1411 | ||
1412 | down_read(&tsg->ch_list_lock); | 1412 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); |
1413 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 1413 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
1414 | if (gk20a_channel_get(ch)) { | 1414 | if (gk20a_channel_get(ch)) { |
1415 | ch->has_timedout = true; | 1415 | ch->has_timedout = true; |
@@ -1417,7 +1417,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt) | |||
1417 | gk20a_channel_put(ch); | 1417 | gk20a_channel_put(ch); |
1418 | } | 1418 | } |
1419 | } | 1419 | } |
1420 | up_read(&tsg->ch_list_lock); | 1420 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
1421 | } | 1421 | } |
1422 | 1422 | ||
1423 | int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch) | 1423 | int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch) |
@@ -1906,7 +1906,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, | |||
1906 | if (gk20a_is_channel_marked_as_tsg(ch)) { | 1906 | if (gk20a_is_channel_marked_as_tsg(ch)) { |
1907 | tsg = &g->fifo.tsg[ch->tsgid]; | 1907 | tsg = &g->fifo.tsg[ch->tsgid]; |
1908 | 1908 | ||
1909 | down_read(&tsg->ch_list_lock); | 1909 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); |
1910 | 1910 | ||
1911 | list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { | 1911 | list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { |
1912 | if (gk20a_channel_get(ch_tsg)) { | 1912 | if (gk20a_channel_get(ch_tsg)) { |
@@ -1915,7 +1915,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, | |||
1915 | } | 1915 | } |
1916 | } | 1916 | } |
1917 | 1917 | ||
1918 | up_read(&tsg->ch_list_lock); | 1918 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
1919 | gk20a_fifo_recover_tsg(g, ch->tsgid, verbose); | 1919 | gk20a_fifo_recover_tsg(g, ch->tsgid, verbose); |
1920 | } else { | 1920 | } else { |
1921 | gk20a_set_error_notifier(ch, err_code); | 1921 | gk20a_set_error_notifier(ch, err_code); |
@@ -1971,9 +1971,9 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch) | |||
1971 | goto fail_enable_tsg; | 1971 | goto fail_enable_tsg; |
1972 | 1972 | ||
1973 | /* Remove channel from TSG and re-enable rest of the channels */ | 1973 | /* Remove channel from TSG and re-enable rest of the channels */ |
1974 | down_write(&tsg->ch_list_lock); | 1974 | nvgpu_rwsem_down_write(&tsg->ch_list_lock); |
1975 | nvgpu_list_del(&ch->ch_entry); | 1975 | nvgpu_list_del(&ch->ch_entry); |
1976 | up_write(&tsg->ch_list_lock); | 1976 | nvgpu_rwsem_up_write(&tsg->ch_list_lock); |
1977 | 1977 | ||
1978 | g->ops.fifo.enable_tsg(tsg); | 1978 | g->ops.fifo.enable_tsg(tsg); |
1979 | 1979 | ||
@@ -2084,7 +2084,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, | |||
2084 | *verbose = false; | 2084 | *verbose = false; |
2085 | *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; | 2085 | *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; |
2086 | 2086 | ||
2087 | down_read(&tsg->ch_list_lock); | 2087 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); |
2088 | 2088 | ||
2089 | /* check if there was some progress on any of the TSG channels. | 2089 | /* check if there was some progress on any of the TSG channels. |
2090 | * fifo recovery is needed if at least one channel reached the | 2090 | * fifo recovery is needed if at least one channel reached the |
@@ -2140,7 +2140,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, | |||
2140 | * of them has reached the timeout, there is nothing more to do: | 2140 | * of them has reached the timeout, there is nothing more to do: |
2141 | * timeout_accumulated_ms has been updated for all of them. | 2141 | * timeout_accumulated_ms has been updated for all of them. |
2142 | */ | 2142 | */ |
2143 | up_read(&tsg->ch_list_lock); | 2143 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
2144 | return recover; | 2144 | return recover; |
2145 | } | 2145 | } |
2146 | 2146 | ||
@@ -2470,7 +2470,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, | |||
2470 | struct tsg_gk20a *tsg = &f->tsg[id]; | 2470 | struct tsg_gk20a *tsg = &f->tsg[id]; |
2471 | struct channel_gk20a *ch = NULL; | 2471 | struct channel_gk20a *ch = NULL; |
2472 | 2472 | ||
2473 | down_read(&tsg->ch_list_lock); | 2473 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); |
2474 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 2474 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
2475 | if (gk20a_channel_get(ch)) { | 2475 | if (gk20a_channel_get(ch)) { |
2476 | gk20a_set_error_notifier(ch, | 2476 | gk20a_set_error_notifier(ch, |
@@ -2478,7 +2478,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, | |||
2478 | gk20a_channel_put(ch); | 2478 | gk20a_channel_put(ch); |
2479 | } | 2479 | } |
2480 | } | 2480 | } |
2481 | up_read(&tsg->ch_list_lock); | 2481 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
2482 | gk20a_fifo_recover_tsg(g, id, true); | 2482 | gk20a_fifo_recover_tsg(g, id, true); |
2483 | } | 2483 | } |
2484 | } | 2484 | } |
@@ -2599,7 +2599,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, | |||
2599 | nvgpu_err(g, | 2599 | nvgpu_err(g, |
2600 | "preempt TSG %d timeout", id); | 2600 | "preempt TSG %d timeout", id); |
2601 | 2601 | ||
2602 | down_read(&tsg->ch_list_lock); | 2602 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); |
2603 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 2603 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
2604 | if (!gk20a_channel_get(ch)) | 2604 | if (!gk20a_channel_get(ch)) |
2605 | continue; | 2605 | continue; |
@@ -2607,7 +2607,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, | |||
2607 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | 2607 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); |
2608 | gk20a_channel_put(ch); | 2608 | gk20a_channel_put(ch); |
2609 | } | 2609 | } |
2610 | up_read(&tsg->ch_list_lock); | 2610 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
2611 | gk20a_fifo_recover_tsg(g, id, true); | 2611 | gk20a_fifo_recover_tsg(g, id, true); |
2612 | } else { | 2612 | } else { |
2613 | struct channel_gk20a *ch = &g->fifo.channel[id]; | 2613 | struct channel_gk20a *ch = &g->fifo.channel[id]; |
@@ -3095,7 +3095,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, | |||
3095 | count++; | 3095 | count++; |
3096 | (*entries_left)--; | 3096 | (*entries_left)--; |
3097 | 3097 | ||
3098 | down_read(&tsg->ch_list_lock); | 3098 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); |
3099 | /* add runnable channels bound to this TSG */ | 3099 | /* add runnable channels bound to this TSG */ |
3100 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 3100 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
3101 | if (!test_bit(ch->chid, | 3101 | if (!test_bit(ch->chid, |
@@ -3103,7 +3103,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, | |||
3103 | continue; | 3103 | continue; |
3104 | 3104 | ||
3105 | if (!(*entries_left)) { | 3105 | if (!(*entries_left)) { |
3106 | up_read(&tsg->ch_list_lock); | 3106 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
3107 | return NULL; | 3107 | return NULL; |
3108 | } | 3108 | } |
3109 | 3109 | ||
@@ -3117,7 +3117,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f, | |||
3117 | runlist_entry += runlist_entry_words; | 3117 | runlist_entry += runlist_entry_words; |
3118 | (*entries_left)--; | 3118 | (*entries_left)--; |
3119 | } | 3119 | } |
3120 | up_read(&tsg->ch_list_lock); | 3120 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
3121 | } | 3121 | } |
3122 | 3122 | ||
3123 | /* append entries from higher level if this level is empty */ | 3123 | /* append entries from higher level if this level is empty */ |