author		Konsta Holtta <kholtta@nvidia.com>	2018-11-13 08:36:19 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2019-02-05 12:04:20 -0500
commit		3794afbeb177ed0932d166d30bb2af9d9859dff9 (patch)
tree		ddec116ed6ebee65f14108aaa54b84e3869589d4 /drivers/gpu/nvgpu/common/fifo
parent		ed6e3960903b9e2ba82ded06d1abfe3dec0ac865 (diff)
gpu: nvgpu: add safe channel id lookup
Add gk20a_channel_from_id() to retrieve a channel, given a raw channel
ID, with a reference taken (or NULL if the channel was dead). This makes
it harder to mistakenly use a dead channel and thus uncovers bugs sooner.
Convert code to use the new lookup where applicable; work remains to
convert complex uses where a reference should have been taken but hasn't.

The channel ID is also validated against FIFO_INVAL_CHANNEL_ID, and NULL
is returned for such IDs. This is often useful and does not hurt when
unnecessary. It does not, however, prevent the case where a channel is
closed and reopened while someone holds a stale channel number; in all
such conditions the caller should already hold a reference.

The only conditions where a channel can safely be looked up by ID and
used without taking a reference are when initializing or deinitializing
the list of channels.

Jira NVGPU-1460

Change-Id: I0a30968d17c1e0784d315a676bbe69c03a73481c
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1955400
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
(cherry picked from commit 7df3d587502c2de997dfbe8ea8ddc114d0a0481e in dev-kernel)
Reviewed-on: https://git-master.nvidia.com/r/2008515
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
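The caller-side pattern this enables follows directly from the message
above: look the channel up by raw ID, bail out on NULL, and drop the
reference when done. A minimal sketch, not part of the patch:
example_poll_one_channel() and do_something_with() are hypothetical
stand-ins, and gk20a_channel_from_id() is assumed to be a thin wrapper
that passes the caller name to _gk20a_channel_from_id(), mirroring the
gk20a_channel_get()/_gk20a_channel_get() pair visible in this diff.

	static void example_poll_one_channel(struct gk20a *g, u32 chid)
	{
		/*
		 * Returns NULL for FIFO_INVAL_CHANNEL_ID or for a dead
		 * channel; otherwise a reference has been taken.
		 */
		struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);

		if (ch == NULL) {
			return;
		}

		do_something_with(ch);	/* ref held: channel cannot be freed here */

		gk20a_channel_put(ch);	/* balance the reference taken by the lookup */
	}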
Diffstat (limited to 'drivers/gpu/nvgpu/common/fifo')
-rw-r--r--	drivers/gpu/nvgpu/common/fifo/channel.c	50
1 file changed, 33 insertions, 17 deletions
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index 54fa4747..45c02a75 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -626,6 +626,18 @@ void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller)
 	WARN_ON(nvgpu_atomic_read(&ch->ref_count) == 0 && ch->referenceable);
 }
 
+struct channel_gk20a *_gk20a_channel_from_id(struct gk20a *g, u32 chid,
+		const char *caller)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (chid == FIFO_INVAL_CHANNEL_ID) {
+		return NULL;
+	}
+
+	return _gk20a_channel_get(&g->fifo.channel[chid], caller);
+}
+
 void gk20a_channel_close(struct channel_gk20a *ch)
 {
 	gk20a_free_channel(ch, false);
@@ -1450,11 +1462,10 @@ void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
 	u32 chid;
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		struct channel_gk20a *ch = &f->channel[chid];
+		struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
 
-		if (gk20a_channel_get(ch) == NULL) {
+		if (ch == NULL)
 			continue;
-		}
 
 		nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 		if (ch->timeout.running) {
@@ -1553,9 +1564,9 @@ static void gk20a_channel_poll_timeouts(struct gk20a *g)
 
 
 	for (chid = 0; chid < g->fifo.num_channels; chid++) {
-		struct channel_gk20a *ch = &g->fifo.channel[chid];
+		struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
 
-		if (gk20a_channel_get(ch)) {
+		if (ch != NULL) {
 			gk20a_channel_timeout_check(ch);
 			gk20a_channel_put(ch);
 		}
@@ -2107,9 +2118,9 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
 	nvgpu_rwsem_down_write(&g->deterministic_busy);
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		struct channel_gk20a *ch = &f->channel[chid];
+		struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
 
-		if (gk20a_channel_get(ch) == NULL) {
+		if (ch == NULL) {
 			continue;
 		}
 
@@ -2145,9 +2156,9 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
 	u32 chid;
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		struct channel_gk20a *ch = &f->channel[chid];
+		struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
 
-		if (gk20a_channel_get(ch) == NULL) {
+		if (ch == NULL) {
 			continue;
 		}
 
@@ -2256,8 +2267,9 @@ int gk20a_channel_suspend(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		struct channel_gk20a *ch = &f->channel[chid];
-		if (gk20a_channel_get(ch) != NULL) {
+		struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+
+		if (ch != NULL) {
 			nvgpu_log_info(g, "suspend channel %d", chid);
 			/* disable channel */
 			gk20a_disable_channel_tsg(g, ch);
@@ -2280,9 +2292,11 @@ int gk20a_channel_suspend(struct gk20a *g)
 		gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, false, true);
 
 		for (chid = 0; chid < f->num_channels; chid++) {
-			if (gk20a_channel_get(&f->channel[chid])) {
-				g->ops.fifo.unbind_channel(&f->channel[chid]);
-				gk20a_channel_put(&f->channel[chid]);
+			struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+
+			if (ch != NULL) {
+				g->ops.fifo.unbind_channel(ch);
+				gk20a_channel_put(ch);
 			}
 		}
 	}
@@ -2301,12 +2315,14 @@ int gk20a_channel_resume(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		if (gk20a_channel_get(&f->channel[chid])) {
+		struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+
+		if (ch != NULL) {
 			nvgpu_log_info(g, "resume channel %d", chid);
-			g->ops.fifo.bind_channel(&f->channel[chid]);
+			g->ops.fifo.bind_channel(ch);
 			channels_in_use = true;
 			active_runlist_ids |= BIT(f->channel[chid].runlist_id);
-			gk20a_channel_put(&f->channel[chid]);
+			gk20a_channel_put(ch);
 		}
 	}
 