summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu
diff options
context:
space:
mode:
author: Konsta Holtta <kholtta@nvidia.com> 2016-12-08 06:02:16 -0500
committer: mobile promotions <svcmobile_promotions@nvidia.com> 2016-12-20 18:15:51 -0500
commit: 339a67b2e84cea20a59334b0640b9ab7e0d75ca9 (patch)
tree: fdefebf92b85a7b7b1ccea562f6b000fecf056a4 /drivers/gpu/nvgpu
parent: 75e52218cec5ccfbb8ec61cb8ba5e41f5e5ec7e5 (diff)
gpu: nvgpu: replace tsg list mutex with rwsem
Lock only for modifications to the TSG channel list, and allow multiple
concurrent readers.

Bug 1848834
Bug 1814773

Change-Id: Ie3938d4239cfe36a14211f4649ce72b7fc3e2fa4
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/1269579
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c  30
-rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.c   18
-rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.h    2
-rw-r--r--  drivers/gpu/nvgpu/vgpu/fifo_vgpu.c     4
4 files changed, 27 insertions, 27 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 1818a57e..488ae309 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1261,7 +1261,7 @@ bool gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
1261 gk20a_err(dev_from_gk20a(g), 1261 gk20a_err(dev_from_gk20a(g),
1262 "TSG %d generated a mmu fault", tsg->tsgid); 1262 "TSG %d generated a mmu fault", tsg->tsgid);
1263 1263
1264 mutex_lock(&tsg->ch_list_lock); 1264 down_read(&tsg->ch_list_lock);
1265 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 1265 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
1266 if (gk20a_channel_get(ch)) { 1266 if (gk20a_channel_get(ch)) {
1267 if (!gk20a_fifo_set_ctx_mmu_error(g, ch)) 1267 if (!gk20a_fifo_set_ctx_mmu_error(g, ch))
@@ -1269,7 +1269,7 @@ bool gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
1269 gk20a_channel_put(ch); 1269 gk20a_channel_put(ch);
1270 } 1270 }
1271 } 1271 }
1272 mutex_unlock(&tsg->ch_list_lock); 1272 up_read(&tsg->ch_list_lock);
1273 1273
1274 return ret; 1274 return ret;
1275} 1275}
@@ -1286,7 +1286,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
1286 if (preempt) 1286 if (preempt)
1287 g->ops.fifo.preempt_tsg(g, tsgid); 1287 g->ops.fifo.preempt_tsg(g, tsgid);
1288 1288
1289 mutex_lock(&tsg->ch_list_lock); 1289 down_read(&tsg->ch_list_lock);
1290 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 1290 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
1291 if (gk20a_channel_get(ch)) { 1291 if (gk20a_channel_get(ch)) {
1292 ch->has_timedout = true; 1292 ch->has_timedout = true;
@@ -1294,7 +1294,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
1294 gk20a_channel_put(ch); 1294 gk20a_channel_put(ch);
1295 } 1295 }
1296 } 1296 }
1297 mutex_unlock(&tsg->ch_list_lock); 1297 up_read(&tsg->ch_list_lock);
1298} 1298}
1299 1299
1300int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch) 1300int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
@@ -1793,7 +1793,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
1793 if (gk20a_is_channel_marked_as_tsg(ch)) { 1793 if (gk20a_is_channel_marked_as_tsg(ch)) {
1794 tsg = &g->fifo.tsg[ch->tsgid]; 1794 tsg = &g->fifo.tsg[ch->tsgid];
1795 1795
1796 mutex_lock(&tsg->ch_list_lock); 1796 down_read(&tsg->ch_list_lock);
1797 1797
1798 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { 1798 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
1799 if (gk20a_channel_get(ch_tsg)) { 1799 if (gk20a_channel_get(ch_tsg)) {
@@ -1802,7 +1802,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
1802 } 1802 }
1803 } 1803 }
1804 1804
1805 mutex_unlock(&tsg->ch_list_lock); 1805 up_read(&tsg->ch_list_lock);
1806 gk20a_fifo_recover_tsg(g, ch->tsgid, verbose); 1806 gk20a_fifo_recover_tsg(g, ch->tsgid, verbose);
1807 } else { 1807 } else {
1808 gk20a_set_error_notifier(ch, err_code); 1808 gk20a_set_error_notifier(ch, err_code);
@@ -1910,7 +1910,7 @@ static bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
1910 *verbose = false; 1910 *verbose = false;
1911 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000; 1911 *ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
1912 1912
1913 mutex_lock(&tsg->ch_list_lock); 1913 down_read(&tsg->ch_list_lock);
1914 1914
1915 /* check if there was some progress on any of the TSG channels. 1915 /* check if there was some progress on any of the TSG channels.
1916 * fifo recovery is needed if at least one channel reached the 1916 * fifo recovery is needed if at least one channel reached the
@@ -1966,7 +1966,7 @@ static bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
1966 * of them has reached the timeout, there is nothing more to do: 1966 * of them has reached the timeout, there is nothing more to do:
1967 * timeout_accumulated_ms has been updated for all of them. 1967 * timeout_accumulated_ms has been updated for all of them.
1968 */ 1968 */
1969 mutex_unlock(&tsg->ch_list_lock); 1969 up_read(&tsg->ch_list_lock);
1970 return recover; 1970 return recover;
1971} 1971}
1972 1972
@@ -2256,7 +2256,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
2256 struct tsg_gk20a *tsg = &f->tsg[id]; 2256 struct tsg_gk20a *tsg = &f->tsg[id];
2257 struct channel_gk20a *ch = NULL; 2257 struct channel_gk20a *ch = NULL;
2258 2258
2259 mutex_lock(&tsg->ch_list_lock); 2259 down_read(&tsg->ch_list_lock);
2260 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2260 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
2261 if (gk20a_channel_get(ch)) { 2261 if (gk20a_channel_get(ch)) {
2262 gk20a_set_error_notifier(ch, 2262 gk20a_set_error_notifier(ch,
@@ -2264,7 +2264,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
2264 gk20a_channel_put(ch); 2264 gk20a_channel_put(ch);
2265 } 2265 }
2266 } 2266 }
2267 mutex_unlock(&tsg->ch_list_lock); 2267 up_read(&tsg->ch_list_lock);
2268 gk20a_fifo_recover_tsg(g, id, true); 2268 gk20a_fifo_recover_tsg(g, id, true);
2269 } 2269 }
2270 } 2270 }
@@ -2395,7 +2395,7 @@ static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
2395 gk20a_err(dev_from_gk20a(g), 2395 gk20a_err(dev_from_gk20a(g),
2396 "preempt TSG %d timeout\n", id); 2396 "preempt TSG %d timeout\n", id);
2397 2397
2398 mutex_lock(&tsg->ch_list_lock); 2398 down_read(&tsg->ch_list_lock);
2399 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2399 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
2400 if (!gk20a_channel_get(ch)) 2400 if (!gk20a_channel_get(ch))
2401 continue; 2401 continue;
@@ -2403,7 +2403,7 @@ static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
2403 NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); 2403 NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
2404 gk20a_channel_put(ch); 2404 gk20a_channel_put(ch);
2405 } 2405 }
2406 mutex_unlock(&tsg->ch_list_lock); 2406 up_read(&tsg->ch_list_lock);
2407 gk20a_fifo_recover_tsg(g, id, true); 2407 gk20a_fifo_recover_tsg(g, id, true);
2408 } else { 2408 } else {
2409 struct channel_gk20a *ch = &g->fifo.channel[id]; 2409 struct channel_gk20a *ch = &g->fifo.channel[id];
@@ -2797,7 +2797,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
2797 count++; 2797 count++;
2798 (*entries_left)--; 2798 (*entries_left)--;
2799 2799
2800 mutex_lock(&tsg->ch_list_lock); 2800 down_read(&tsg->ch_list_lock);
2801 /* add runnable channels bound to this TSG */ 2801 /* add runnable channels bound to this TSG */
2802 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 2802 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
2803 if (!test_bit(ch->hw_chid, 2803 if (!test_bit(ch->hw_chid,
@@ -2805,7 +2805,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
2805 continue; 2805 continue;
2806 2806
2807 if (!(*entries_left)) { 2807 if (!(*entries_left)) {
2808 mutex_unlock(&tsg->ch_list_lock); 2808 up_read(&tsg->ch_list_lock);
2809 return NULL; 2809 return NULL;
2810 } 2810 }
2811 2811
@@ -2819,7 +2819,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
2819 runlist_entry += runlist_entry_words; 2819 runlist_entry += runlist_entry_words;
2820 (*entries_left)--; 2820 (*entries_left)--;
2821 } 2821 }
2822 mutex_unlock(&tsg->ch_list_lock); 2822 up_read(&tsg->ch_list_lock);
2823 } 2823 }
2824 2824
2825 /* append entries from higher level if this level is empty */ 2825 /* append entries from higher level if this level is empty */
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 0541f430..8e6f763a 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -43,11 +43,11 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
43 struct gk20a *g = tsg->g; 43 struct gk20a *g = tsg->g;
44 struct channel_gk20a *ch; 44 struct channel_gk20a *ch;
45 45
46 mutex_lock(&tsg->ch_list_lock); 46 down_read(&tsg->ch_list_lock);
47 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 47 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
48 g->ops.fifo.enable_channel(ch); 48 g->ops.fifo.enable_channel(ch);
49 } 49 }
50 mutex_unlock(&tsg->ch_list_lock); 50 up_read(&tsg->ch_list_lock);
51 51
52 return 0; 52 return 0;
53} 53}
@@ -57,11 +57,11 @@ int gk20a_disable_tsg(struct tsg_gk20a *tsg)
57 struct gk20a *g = tsg->g; 57 struct gk20a *g = tsg->g;
58 struct channel_gk20a *ch; 58 struct channel_gk20a *ch;
59 59
60 mutex_lock(&tsg->ch_list_lock); 60 down_read(&tsg->ch_list_lock);
61 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 61 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
62 g->ops.fifo.disable_channel(ch); 62 g->ops.fifo.disable_channel(ch);
63 } 63 }
64 mutex_unlock(&tsg->ch_list_lock); 64 up_read(&tsg->ch_list_lock);
65 65
66 return 0; 66 return 0;
67} 67}
@@ -126,9 +126,9 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
126 return -EINVAL; 126 return -EINVAL;
127 } 127 }
128 128
129 mutex_lock(&tsg->ch_list_lock); 129 down_write(&tsg->ch_list_lock);
130 list_add_tail(&ch->ch_entry, &tsg->ch_list); 130 list_add_tail(&ch->ch_entry, &tsg->ch_list);
131 mutex_unlock(&tsg->ch_list_lock); 131 up_write(&tsg->ch_list_lock);
132 132
133 kref_get(&tsg->refcount); 133 kref_get(&tsg->refcount);
134 134
@@ -144,9 +144,9 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
144 struct fifo_gk20a *f = &ch->g->fifo; 144 struct fifo_gk20a *f = &ch->g->fifo;
145 struct tsg_gk20a *tsg = &f->tsg[ch->tsgid]; 145 struct tsg_gk20a *tsg = &f->tsg[ch->tsgid];
146 146
147 mutex_lock(&tsg->ch_list_lock); 147 down_write(&tsg->ch_list_lock);
148 list_del_init(&ch->ch_entry); 148 list_del_init(&ch->ch_entry);
149 mutex_unlock(&tsg->ch_list_lock); 149 up_write(&tsg->ch_list_lock);
150 150
151 kref_put(&tsg->refcount, gk20a_tsg_release); 151 kref_put(&tsg->refcount, gk20a_tsg_release);
152 152
@@ -168,7 +168,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
168 tsg->tsgid = tsgid; 168 tsg->tsgid = tsgid;
169 169
170 INIT_LIST_HEAD(&tsg->ch_list); 170 INIT_LIST_HEAD(&tsg->ch_list);
171 mutex_init(&tsg->ch_list_lock); 171 init_rwsem(&tsg->ch_list_lock);
172 172
173 INIT_LIST_HEAD(&tsg->event_id_list); 173 INIT_LIST_HEAD(&tsg->event_id_list);
174 mutex_init(&tsg->event_id_list_lock); 174 mutex_init(&tsg->event_id_list_lock);
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
index dbfb068b..8509398c 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
@@ -41,7 +41,7 @@ struct tsg_gk20a {
41 41
42 struct list_head ch_list; 42 struct list_head ch_list;
43 int num_active_channels; 43 int num_active_channels;
44 struct mutex ch_list_lock; 44 struct rw_semaphore ch_list_lock;
45 45
46 unsigned int timeslice_us; 46 unsigned int timeslice_us;
47 unsigned int timeslice_timeout; 47 unsigned int timeslice_timeout;
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index c3669990..5ffc6a00 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -651,7 +651,7 @@ static int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
651 if (gk20a_is_channel_marked_as_tsg(ch)) { 651 if (gk20a_is_channel_marked_as_tsg(ch)) {
652 tsg = &g->fifo.tsg[ch->tsgid]; 652 tsg = &g->fifo.tsg[ch->tsgid];
653 653
654 mutex_lock(&tsg->ch_list_lock); 654 down_read(&tsg->ch_list_lock);
655 655
656 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { 656 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
657 if (gk20a_channel_get(ch_tsg)) { 657 if (gk20a_channel_get(ch_tsg)) {
@@ -661,7 +661,7 @@ static int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
661 } 661 }
662 } 662 }
663 663
664 mutex_unlock(&tsg->ch_list_lock); 664 up_read(&tsg->ch_list_lock);
665 } else { 665 } else {
666 gk20a_set_error_notifier(ch, err_code); 666 gk20a_set_error_notifier(ch, err_code);
667 ch->has_timedout = true; 667 ch->has_timedout = true;