summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
diff options
context:
space:
mode:
authorKonsta Holtta <kholtta@nvidia.com>2016-12-08 06:02:16 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2016-12-20 18:15:51 -0500
commit339a67b2e84cea20a59334b0640b9ab7e0d75ca9 (patch)
treefdefebf92b85a7b7b1ccea562f6b000fecf056a4 /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent75e52218cec5ccfbb8ec61cb8ba5e41f5e5ec7e5 (diff)
gpu: nvgpu: replace tsg list mutex with rwsem
Lock only for modifications to the tsg channel list, and allow multiple concurrent readers. Bug 1848834 Bug 1814773 Change-Id: Ie3938d4239cfe36a14211f4649ce72b7fc3e2fa4 Signed-off-by: Konsta Holtta <kholtta@nvidia.com> Reviewed-on: http://git-master/r/1269579 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 1818a57e..488ae309 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1261,7 +1261,7 @@ bool gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
1261 gk20a_err(dev_from_gk20a(g), 1261 gk20a_err(dev_from_gk20a(g),
1262 "TSG %d generated a mmu fault", tsg->tsgid); 1262 "TSG %d generated a mmu fault", tsg->tsgid);
1263 1263
1264 mutex_lock(&tsg->ch_list_lock); 1264 down_read(&tsg->ch_list_lock);
1265 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 1265 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
1266 if (gk20a_channel_get(ch)) { 1266 if (gk20a_channel_get(ch)) {
1267 if (!gk20a_fifo_set_ctx_mmu_error(g, ch)) 1267 if (!gk20a_fifo_set_ctx_mmu_error(g, ch))
@@ -1269,7 +1269,7 @@ bool gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
1269 gk20a_channel_put(ch); 1269 gk20a_channel_put(ch);
1270 } 1270 }
1271 } 1271 }
1272 mutex_unlock(&tsg->ch_list_lock); 1272 up_read(&tsg->ch_list_lock);
1273 1273
1274 return ret; 1274 return ret;
1275} 1275}
@@ -1286,7 +1286,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
1286 if (preempt) 1286 if (preempt)
1287 g->ops.fifo.preempt_tsg(g, tsgid); 1287 g->ops.fifo.preempt_tsg(g, tsgid);
1288 1288
1289 mutex_lock(&tsg->ch_list_lock); 1289 down_read(&tsg->ch_list_lock);
1290 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 1290 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
1291 if (gk20a_channel_get(ch)) { 1291 if (gk20a_channel_get(ch)) {
1292 ch->has_timedout = true; 1292 ch->has_timedout = true;
@@ -1294,7 +1294,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
1294 gk20a_channel_put(ch); 1294 gk20a_channel_put(ch);
1295 } 1295 }
1296 } 1296 }
1297 mutex_unlock(&tsg->ch_list_lock); 1297 up_read(&tsg->ch_list_lock);
1298} 1298}
1299 1299
1300int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch) 1300int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
@@ -1793,7 +1793,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
1793 if (gk20a_is_channel_marked_as_tsg(ch)) { 1793 if (gk20a_is_channel_marked_as_tsg(ch)) {
1794 tsg = &g->fifo.tsg[ch->tsgid]; 1794 tsg = &g->fifo.tsg[ch->tsgid];
1795 1795
1796 mutex_lock(&tsg->ch_list_lock); 1796 down_read(&tsg->ch_list_lock);
1797 1797
1798 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { 1798 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
1799 if (gk20a_channel_get(ch_tsg)) { 1799 if (gk20a_channel_get(ch_tsg)) {
@@ -1802,7 +1802,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
1802 } 1802 }
1803 } 1803 }
1804 1804
1805 mutex_unlock(&tsg->ch_list_lock); 1805 up_read(&tsg->ch_list_lock);
1806 gk20a_fifo_recover_tsg(g, ch->tsgid, verbose); 1806 gk20a_fifo_recover_tsg(g, ch->tsgid, verbose);
1807 } else { 1807 } else {
1808 gk20a_set_error_notifier(ch, err_code); 1808 gk20a_set_error_notifier(ch, err_code);
@@ -1910,7 +1910,7 @@ static bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
1910 *verbose = false; 1910 *verbose = false;
1911 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; 1911 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
1912 1912
1913 mutex_lock(&tsg->ch_list_lock); 1913 down_read(&tsg->ch_list_lock);
1914 1914
1915 /* check if there was some progress on any of the TSG channels. 1915 /* check if there was some progress on any of the TSG channels.
1916 * fifo recovery is needed if at least one channel reached the 1916 * fifo recovery is needed if at least one channel reached the
@@ -1966,7 +1966,7 @@ static bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
1966 * of them has reached the timeout, there is nothing more to do: 1966 * of them has reached the timeout, there is nothing more to do:
1967 * timeout_accumulated_ms has been updated for all of them. 1967 * timeout_accumulated_ms has been updated for all of them.
1968 */ 1968 */
1969 mutex_unlock(&tsg->ch_list_lock); 1969 up_read(&tsg->ch_list_lock);
1970 return recover; 1970 return recover;
1971} 1971}
1972 1972
@@ -2256,7 +2256,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
2256 struct tsg_gk20a *tsg = &f->tsg[id]; 2256 struct tsg_gk20a *tsg = &f->tsg[id];
2257 struct channel_gk20a *ch = NULL; 2257 struct channel_gk20a *ch = NULL;
2258 2258
2259 mutex_lock(&tsg->ch_list_lock); 2259 down_read(&tsg->ch_list_lock);
2260 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2260 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
2261 if (gk20a_channel_get(ch)) { 2261 if (gk20a_channel_get(ch)) {
2262 gk20a_set_error_notifier(ch, 2262 gk20a_set_error_notifier(ch,
@@ -2264,7 +2264,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
2264 gk20a_channel_put(ch); 2264 gk20a_channel_put(ch);
2265 } 2265 }
2266 } 2266 }
2267 mutex_unlock(&tsg->ch_list_lock); 2267 up_read(&tsg->ch_list_lock);
2268 gk20a_fifo_recover_tsg(g, id, true); 2268 gk20a_fifo_recover_tsg(g, id, true);
2269 } 2269 }
2270 } 2270 }
@@ -2395,7 +2395,7 @@ static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
2395 gk20a_err(dev_from_gk20a(g), 2395 gk20a_err(dev_from_gk20a(g),
2396 "preempt TSG %d timeout\n", id); 2396 "preempt TSG %d timeout\n", id);
2397 2397
2398 mutex_lock(&tsg->ch_list_lock); 2398 down_read(&tsg->ch_list_lock);
2399 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2399 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
2400 if (!gk20a_channel_get(ch)) 2400 if (!gk20a_channel_get(ch))
2401 continue; 2401 continue;
@@ -2403,7 +2403,7 @@ static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
2403 NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); 2403 NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
2404 gk20a_channel_put(ch); 2404 gk20a_channel_put(ch);
2405 } 2405 }
2406 mutex_unlock(&tsg->ch_list_lock); 2406 up_read(&tsg->ch_list_lock);
2407 gk20a_fifo_recover_tsg(g, id, true); 2407 gk20a_fifo_recover_tsg(g, id, true);
2408 } else { 2408 } else {
2409 struct channel_gk20a *ch = &g->fifo.channel[id]; 2409 struct channel_gk20a *ch = &g->fifo.channel[id];
@@ -2797,7 +2797,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
2797 count++; 2797 count++;
2798 (*entries_left)--; 2798 (*entries_left)--;
2799 2799
2800 mutex_lock(&tsg->ch_list_lock); 2800 down_read(&tsg->ch_list_lock);
2801 /* add runnable channels bound to this TSG */ 2801 /* add runnable channels bound to this TSG */
2802 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2802 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
2803 if (!test_bit(ch->hw_chid, 2803 if (!test_bit(ch->hw_chid,
@@ -2805,7 +2805,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
2805 continue; 2805 continue;
2806 2806
2807 if (!(*entries_left)) { 2807 if (!(*entries_left)) {
2808 mutex_unlock(&tsg->ch_list_lock); 2808 up_read(&tsg->ch_list_lock);
2809 return NULL; 2809 return NULL;
2810 } 2810 }
2811 2811
@@ -2819,7 +2819,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
2819 runlist_entry += runlist_entry_words; 2819 runlist_entry += runlist_entry_words;
2820 (*entries_left)--; 2820 (*entries_left)--;
2821 } 2821 }
2822 mutex_unlock(&tsg->ch_list_lock); 2822 up_read(&tsg->ch_list_lock);
2823 } 2823 }
2824 2824
2825 /* append entries from higher level if this level is empty */ 2825 /* append entries from higher level if this level is empty */