summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  drivers/gpu/nvgpu/common/fifo/channel.c        50
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c           18
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c              7
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c              9
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/channel.h       5
-rw-r--r--  drivers/gpu/nvgpu/vgpu/fifo_vgpu.c              3
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gr_vgpu.c                4
-rw-r--r--  drivers/gpu/nvgpu/vgpu/vgpu.c                   2
8 files changed, 59 insertions, 39 deletions
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index 54fa4747..45c02a75 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -626,6 +626,18 @@ void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller)
626 WARN_ON(nvgpu_atomic_read(&ch->ref_count) == 0 && ch->referenceable); 626 WARN_ON(nvgpu_atomic_read(&ch->ref_count) == 0 && ch->referenceable);
627} 627}
628 628
629struct channel_gk20a *_gk20a_channel_from_id(struct gk20a *g, u32 chid,
630 const char *caller)
631{
632 nvgpu_log_fn(g, " ");
633
634 if (chid == FIFO_INVAL_CHANNEL_ID) {
635 return NULL;
636 }
637
638 return _gk20a_channel_get(&g->fifo.channel[chid], caller);
639}
640
629void gk20a_channel_close(struct channel_gk20a *ch) 641void gk20a_channel_close(struct channel_gk20a *ch)
630{ 642{
631 gk20a_free_channel(ch, false); 643 gk20a_free_channel(ch, false);
@@ -1450,11 +1462,10 @@ void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
1450 u32 chid; 1462 u32 chid;
1451 1463
1452 for (chid = 0; chid < f->num_channels; chid++) { 1464 for (chid = 0; chid < f->num_channels; chid++) {
1453 struct channel_gk20a *ch = &f->channel[chid]; 1465 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
1454 1466
1455 if (gk20a_channel_get(ch) == NULL) { 1467 if (ch == NULL)
1456 continue; 1468 continue;
1457 }
1458 1469
1459 nvgpu_raw_spinlock_acquire(&ch->timeout.lock); 1470 nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
1460 if (ch->timeout.running) { 1471 if (ch->timeout.running) {
@@ -1553,9 +1564,9 @@ static void gk20a_channel_poll_timeouts(struct gk20a *g)
1553 1564
1554 1565
1555 for (chid = 0; chid < g->fifo.num_channels; chid++) { 1566 for (chid = 0; chid < g->fifo.num_channels; chid++) {
1556 struct channel_gk20a *ch = &g->fifo.channel[chid]; 1567 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
1557 1568
1558 if (gk20a_channel_get(ch)) { 1569 if (ch != NULL) {
1559 gk20a_channel_timeout_check(ch); 1570 gk20a_channel_timeout_check(ch);
1560 gk20a_channel_put(ch); 1571 gk20a_channel_put(ch);
1561 } 1572 }
@@ -2107,9 +2118,9 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
2107 nvgpu_rwsem_down_write(&g->deterministic_busy); 2118 nvgpu_rwsem_down_write(&g->deterministic_busy);
2108 2119
2109 for (chid = 0; chid < f->num_channels; chid++) { 2120 for (chid = 0; chid < f->num_channels; chid++) {
2110 struct channel_gk20a *ch = &f->channel[chid]; 2121 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
2111 2122
2112 if (gk20a_channel_get(ch) == NULL) { 2123 if (ch == NULL) {
2113 continue; 2124 continue;
2114 } 2125 }
2115 2126
@@ -2145,9 +2156,9 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
2145 u32 chid; 2156 u32 chid;
2146 2157
2147 for (chid = 0; chid < f->num_channels; chid++) { 2158 for (chid = 0; chid < f->num_channels; chid++) {
2148 struct channel_gk20a *ch = &f->channel[chid]; 2159 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
2149 2160
2150 if (gk20a_channel_get(ch) == NULL) { 2161 if (ch == NULL) {
2151 continue; 2162 continue;
2152 } 2163 }
2153 2164
@@ -2256,8 +2267,9 @@ int gk20a_channel_suspend(struct gk20a *g)
2256 nvgpu_log_fn(g, " "); 2267 nvgpu_log_fn(g, " ");
2257 2268
2258 for (chid = 0; chid < f->num_channels; chid++) { 2269 for (chid = 0; chid < f->num_channels; chid++) {
2259 struct channel_gk20a *ch = &f->channel[chid]; 2270 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
2260 if (gk20a_channel_get(ch) != NULL) { 2271
2272 if (ch != NULL) {
2261 nvgpu_log_info(g, "suspend channel %d", chid); 2273 nvgpu_log_info(g, "suspend channel %d", chid);
2262 /* disable channel */ 2274 /* disable channel */
2263 gk20a_disable_channel_tsg(g, ch); 2275 gk20a_disable_channel_tsg(g, ch);
@@ -2280,9 +2292,11 @@ int gk20a_channel_suspend(struct gk20a *g)
2280 gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, false, true); 2292 gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, false, true);
2281 2293
2282 for (chid = 0; chid < f->num_channels; chid++) { 2294 for (chid = 0; chid < f->num_channels; chid++) {
2283 if (gk20a_channel_get(&f->channel[chid])) { 2295 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
2284 g->ops.fifo.unbind_channel(&f->channel[chid]); 2296
2285 gk20a_channel_put(&f->channel[chid]); 2297 if (ch != NULL) {
2298 g->ops.fifo.unbind_channel(ch);
2299 gk20a_channel_put(ch);
2286 } 2300 }
2287 } 2301 }
2288 } 2302 }
@@ -2301,12 +2315,14 @@ int gk20a_channel_resume(struct gk20a *g)
2301 nvgpu_log_fn(g, " "); 2315 nvgpu_log_fn(g, " ");
2302 2316
2303 for (chid = 0; chid < f->num_channels; chid++) { 2317 for (chid = 0; chid < f->num_channels; chid++) {
2304 if (gk20a_channel_get(&f->channel[chid])) { 2318 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
2319
2320 if (ch != NULL) {
2305 nvgpu_log_info(g, "resume channel %d", chid); 2321 nvgpu_log_info(g, "resume channel %d", chid);
2306 g->ops.fifo.bind_channel(&f->channel[chid]); 2322 g->ops.fifo.bind_channel(ch);
2307 channels_in_use = true; 2323 channels_in_use = true;
2308 active_runlist_ids |= BIT(f->channel[chid].runlist_id); 2324 active_runlist_ids |= BIT(f->channel[chid].runlist_id);
2309 gk20a_channel_put(&f->channel[chid]); 2325 gk20a_channel_put(ch);
2310 } 2326 }
2311 } 2327 }
2312 2328
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 9691d51b..f4a4591d 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1172,7 +1172,7 @@ gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr)
1172 struct channel_gk20a *ch; 1172 struct channel_gk20a *ch;
1173 u64 ch_inst_ptr; 1173 u64 ch_inst_ptr;
1174 1174
1175 ch = gk20a_channel_get(&f->channel[ci]); 1175 ch = gk20a_channel_from_id(g, ci);
1176 /* only alive channels are searched */ 1176 /* only alive channels are searched */
1177 if (!ch) { 1177 if (!ch) {
1178 continue; 1178 continue;
@@ -1959,9 +1959,9 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose, int rc_type)
1959 gk20a_fifo_recover(g, engines, chid, false, true, verbose, 1959 gk20a_fifo_recover(g, engines, chid, false, true, verbose,
1960 rc_type); 1960 rc_type);
1961 } else { 1961 } else {
1962 struct channel_gk20a *ch = &g->fifo.channel[chid]; 1962 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
1963 1963
1964 if (gk20a_channel_get(ch)) { 1964 if (ch != NULL) {
1965 gk20a_channel_abort(ch, false); 1965 gk20a_channel_abort(ch, false);
1966 1966
1967 if (gk20a_fifo_error_ch(g, ch)) { 1967 if (gk20a_fifo_error_ch(g, ch)) {
@@ -2710,9 +2710,9 @@ static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g,
2710 id = fifo_pbdma_status_id_v(status); 2710 id = fifo_pbdma_status_id_v(status);
2711 if (fifo_pbdma_status_id_type_v(status) 2711 if (fifo_pbdma_status_id_type_v(status)
2712 == fifo_pbdma_status_id_type_chid_v()) { 2712 == fifo_pbdma_status_id_type_chid_v()) {
2713 struct channel_gk20a *ch = &f->channel[id]; 2713 struct channel_gk20a *ch = gk20a_channel_from_id(g, id);
2714 2714
2715 if (gk20a_channel_get(ch)) { 2715 if (ch != NULL) {
2716 g->ops.fifo.set_error_notifier(ch, error_notifier); 2716 g->ops.fifo.set_error_notifier(ch, error_notifier);
2717 gk20a_fifo_recover_ch(g, id, true, RC_TYPE_PBDMA_FAULT); 2717 gk20a_fifo_recover_ch(g, id, true, RC_TYPE_PBDMA_FAULT);
2718 gk20a_channel_put(ch); 2718 gk20a_channel_put(ch);
@@ -2924,12 +2924,12 @@ void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
2924 gk20a_fifo_recover_tsg(g, id, true, 2924 gk20a_fifo_recover_tsg(g, id, true,
2925 RC_TYPE_PREEMPT_TIMEOUT); 2925 RC_TYPE_PREEMPT_TIMEOUT);
2926 } else { 2926 } else {
2927 struct channel_gk20a *ch = &g->fifo.channel[id]; 2927 struct channel_gk20a *ch = gk20a_channel_from_id(g, id);
2928 2928
2929 nvgpu_err(g, 2929 nvgpu_err(g,
2930 "preempt channel %d timeout", id); 2930 "preempt channel %d timeout", id);
2931 2931
2932 if (gk20a_channel_get(ch)) { 2932 if (ch != NULL) {
2933 g->ops.fifo.set_error_notifier(ch, 2933 g->ops.fifo.set_error_notifier(ch,
2934 NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); 2934 NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
2935 gk20a_fifo_recover_ch(g, id, true, 2935 gk20a_fifo_recover_ch(g, id, true,
@@ -4031,8 +4031,8 @@ void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
4031 } 4031 }
4032 4032
4033 for (chid = 0; chid < f->num_channels; chid++) { 4033 for (chid = 0; chid < f->num_channels; chid++) {
4034 struct channel_gk20a *ch = &f->channel[chid]; 4034 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
4035 if (gk20a_channel_get(ch)) { 4035 if (ch != NULL) {
4036 ch_state[chid] = 4036 ch_state[chid] =
4037 nvgpu_kmalloc(g, sizeof(struct ch_state) + 4037 nvgpu_kmalloc(g, sizeof(struct ch_state) +
4038 ram_in_alloc_size_v()); 4038 ram_in_alloc_size_v());
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 8cb66279..928d80cb 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -5581,15 +5581,16 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(
5581 if (gr->chid_tlb[i].curr_ctx == curr_ctx) { 5581 if (gr->chid_tlb[i].curr_ctx == curr_ctx) {
5582 chid = gr->chid_tlb[i].chid; 5582 chid = gr->chid_tlb[i].chid;
5583 tsgid = gr->chid_tlb[i].tsgid; 5583 tsgid = gr->chid_tlb[i].tsgid;
5584 ret = gk20a_channel_get(&f->channel[chid]); 5584 ret = gk20a_channel_from_id(g, chid);
5585 goto unlock; 5585 goto unlock;
5586 } 5586 }
5587 } 5587 }
5588 5588
5589 /* slow path */ 5589 /* slow path */
5590 for (chid = 0; chid < f->num_channels; chid++) { 5590 for (chid = 0; chid < f->num_channels; chid++) {
5591 struct channel_gk20a *ch = &f->channel[chid]; 5591 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
5592 if (gk20a_channel_get(ch) == NULL) { 5592
5593 if (ch == NULL) {
5593 continue; 5594 continue;
5594 } 5595 }
5595 5596
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index f70a5a00..a3655146 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -1961,8 +1961,8 @@ static int gr_gp10b_get_cilp_preempt_pending_chid(struct gk20a *g, int *__chid)
1961 1961
1962 chid = g->gr.cilp_preempt_pending_chid; 1962 chid = g->gr.cilp_preempt_pending_chid;
1963 1963
1964 ch = gk20a_channel_get(gk20a_fifo_channel_from_chid(g, chid)); 1964 ch = gk20a_channel_from_id(g, chid);
1965 if (!ch) { 1965 if (ch == NULL) {
1966 return ret; 1966 return ret;
1967 } 1967 }
1968 1968
@@ -2014,9 +2014,8 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
2014 goto clean_up; 2014 goto clean_up;
2015 } 2015 }
2016 2016
2017 ch = gk20a_channel_get( 2017 ch = gk20a_channel_from_id(g, chid);
2018 gk20a_fifo_channel_from_chid(g, chid)); 2018 if (ch == NULL) {
2019 if (!ch) {
2020 goto clean_up; 2019 goto clean_up;
2021 } 2020 }
2022 2021
diff --git a/drivers/gpu/nvgpu/include/nvgpu/channel.h b/drivers/gpu/nvgpu/include/nvgpu/channel.h
index 6cdcb973..1851b9e2 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/channel.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/channel.h
@@ -393,6 +393,11 @@ struct channel_gk20a *__must_check _gk20a_channel_get(struct channel_gk20a *ch,
393void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller); 393void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller);
394#define gk20a_channel_put(ch) _gk20a_channel_put(ch, __func__) 394#define gk20a_channel_put(ch) _gk20a_channel_put(ch, __func__)
395 395
396/* returns NULL if could not take a ref to the channel */
397struct channel_gk20a *__must_check _gk20a_channel_from_id(struct gk20a *g,
398 u32 chid, const char *caller);
399#define gk20a_channel_from_id(g, chid) _gk20a_channel_from_id(g, chid, __func__)
400
396int gk20a_wait_channel_idle(struct channel_gk20a *ch); 401int gk20a_wait_channel_idle(struct channel_gk20a *ch);
397 402
398/* runlist_id -1 is synonym for ENGINE_GR_GK20A runlist id */ 403/* runlist_id -1 is synonym for ENGINE_GR_GK20A runlist id */
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index aa5abec9..8821e799 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -721,8 +721,7 @@ static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
721 721
722int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info) 722int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
723{ 723{
724 struct fifo_gk20a *f = &g->fifo; 724 struct channel_gk20a *ch = gk20a_channel_from_id(g, info->chid);
725 struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
726 725
727 nvgpu_log_fn(g, " "); 726 nvgpu_log_fn(g, " ");
728 if (!ch) 727 if (!ch)
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 9fafa52f..6a86c9a0 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -953,10 +953,10 @@ int vgpu_init_gr_support(struct gk20a *g)
953 953
954int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info) 954int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
955{ 955{
956 struct fifo_gk20a *f = &g->fifo; 956 struct channel_gk20a *ch = gk20a_channel_from_id(g, info->chid);
957 struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
958 957
959 nvgpu_log_fn(g, " "); 958 nvgpu_log_fn(g, " ");
959
960 if (!ch) 960 if (!ch)
961 return 0; 961 return 0;
962 962
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index 266b801e..07361afe 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -119,7 +119,7 @@ static void vgpu_handle_channel_event(struct gk20a *g,
119 119
120static void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid) 120static void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
121{ 121{
122 struct channel_gk20a *ch = gk20a_channel_get(&g->fifo.channel[chid]); 122 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
123 123
124 if (ch == NULL) { 124 if (ch == NULL) {
125 nvgpu_err(g, "invalid channel id %d", chid); 125 nvgpu_err(g, "invalid channel id %d", chid);