summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gv11b
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b')
-rw-r--r--  drivers/gpu/nvgpu/gv11b/acr_gv11b.c    3
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.c  18
-rw-r--r--  drivers/gpu/nvgpu/gv11b/gr_gv11b.c     9
3 files changed, 20 insertions, 10 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
index 696eb015..d0928335 100644
--- a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
@@ -144,9 +144,10 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
144 goto err_release_acr_fw; 144 goto err_release_acr_fw;
145 } 145 }
146 146
147 for (index = 0; index < 9; index++) 147 for (index = 0; index < 9; index++) {
148 gv11b_dbg_pmu(g, "acr_ucode_header_t210_load %u\n", 148 gv11b_dbg_pmu(g, "acr_ucode_header_t210_load %u\n",
149 acr_ucode_header_t210_load[index]); 149 acr_ucode_header_t210_load[index]);
150 }
150 151
151 acr_dmem = (u64 *) 152 acr_dmem = (u64 *)
152 &(((u8 *)acr_ucode_data_t210_load)[ 153 &(((u8 *)acr_ucode_data_t210_load)[
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index d1bd7111..56012dd7 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -808,14 +808,16 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
808 runlist_served_pbdmas = f->runlist_info[runlist_id].pbdma_bitmask; 808 runlist_served_pbdmas = f->runlist_info[runlist_id].pbdma_bitmask;
809 runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask; 809 runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask;
810 810
811 for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) 811 for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) {
812 ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id); 812 ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id);
813 }
813 814
814 f->runlist_info[runlist_id].reset_eng_bitmask = 0; 815 f->runlist_info[runlist_id].reset_eng_bitmask = 0;
815 816
816 for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines) 817 for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines) {
817 ret |= gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id, 818 ret |= gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id,
818 &f->runlist_info[runlist_id].reset_eng_bitmask); 819 &f->runlist_info[runlist_id].reset_eng_bitmask);
820 }
819 return ret; 821 return ret;
820} 822}
821 823
@@ -1028,9 +1030,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
1028 u32 num_runlists = 0; 1030 u32 num_runlists = 0;
1029 1031
1030 nvgpu_log_fn(g, "acquire runlist_lock for all runlists"); 1032 nvgpu_log_fn(g, "acquire runlist_lock for all runlists");
1031 for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) 1033 for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
1032 nvgpu_mutex_acquire(&f->runlist_info[rlid]. 1034 nvgpu_mutex_acquire(&f->runlist_info[rlid].
1033 runlist_lock); 1035 runlist_lock);
1036 }
1034 1037
1035 /* get runlist id and tsg */ 1038 /* get runlist id and tsg */
1036 if (id_type == ID_TYPE_TSG) { 1039 if (id_type == ID_TYPE_TSG) {
@@ -1206,9 +1209,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
1206 nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock); 1209 nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
1207 } else { 1210 } else {
1208 nvgpu_log_fn(g, "release runlist_lock for all runlists"); 1211 nvgpu_log_fn(g, "release runlist_lock for all runlists");
1209 for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) 1212 for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
1210 nvgpu_mutex_release(&f->runlist_info[rlid]. 1213 nvgpu_mutex_release(&f->runlist_info[rlid].
1211 runlist_lock); 1214 runlist_lock);
1215 }
1212 } 1216 }
1213} 1217}
1214 1218
@@ -1756,9 +1760,10 @@ void gv11b_fifo_init_eng_method_buffers(struct gk20a *g,
1756 break; 1760 break;
1757 } 1761 }
1758 if (err) { 1762 if (err) {
1759 for (i = (runque - 1); i >= 0; i--) 1763 for (i = (runque - 1); i >= 0; i--) {
1760 nvgpu_dma_unmap_free(vm, 1764 nvgpu_dma_unmap_free(vm,
1761 &tsg->eng_method_buffers[i]); 1765 &tsg->eng_method_buffers[i]);
1766 }
1762 1767
1763 nvgpu_kfree(g, tsg->eng_method_buffers); 1768 nvgpu_kfree(g, tsg->eng_method_buffers);
1764 tsg->eng_method_buffers = NULL; 1769 tsg->eng_method_buffers = NULL;
@@ -1778,8 +1783,9 @@ void gv11b_fifo_deinit_eng_method_buffers(struct gk20a *g,
1778 if (tsg->eng_method_buffers == NULL) 1783 if (tsg->eng_method_buffers == NULL)
1779 return; 1784 return;
1780 1785
1781 for (runque = 0; runque < g->fifo.num_pbdma; runque++) 1786 for (runque = 0; runque < g->fifo.num_pbdma; runque++) {
1782 nvgpu_dma_unmap_free(vm, &tsg->eng_method_buffers[runque]); 1787 nvgpu_dma_unmap_free(vm, &tsg->eng_method_buffers[runque]);
1788 }
1783 1789
1784 nvgpu_kfree(g, tsg->eng_method_buffers); 1790 nvgpu_kfree(g, tsg->eng_method_buffers);
1785 tsg->eng_method_buffers = NULL; 1791 tsg->eng_method_buffers = NULL;
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
index 9e36071f..791c0d6f 100644
--- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -2801,8 +2801,9 @@ int gr_gv11b_load_smid_config(struct gk20a *g)
2801 gk20a_writel(g, gr_cwd_gpc_tpc_id_r(i), reg); 2801 gk20a_writel(g, gr_cwd_gpc_tpc_id_r(i), reg);
2802 } 2802 }
2803 2803
2804 for (i = 0; i < gr_cwd_sm_id__size_1_v(); i++) 2804 for (i = 0; i < gr_cwd_sm_id__size_1_v(); i++) {
2805 gk20a_writel(g, gr_cwd_sm_id_r(i), tpc_sm_id[i]); 2805 gk20a_writel(g, gr_cwd_sm_id_r(i), tpc_sm_id[i]);
2806 }
2806 nvgpu_kfree(g, tpc_sm_id); 2807 nvgpu_kfree(g, tpc_sm_id);
2807 2808
2808 return 0; 2809 return 0;
@@ -4894,11 +4895,12 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
4894 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) 4895 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC)
4895 for (tpc_num = 0; 4896 for (tpc_num = 0;
4896 tpc_num < g->gr.gpc_tpc_count[gpc_num]; 4897 tpc_num < g->gr.gpc_tpc_count[gpc_num];
4897 tpc_num++) 4898 tpc_num++) {
4898 priv_addr_table[t++] = 4899 priv_addr_table[t++] =
4899 pri_tpc_addr(g, 4900 pri_tpc_addr(g,
4900 pri_tpccs_addr_mask(addr), 4901 pri_tpccs_addr_mask(addr),
4901 gpc_num, tpc_num); 4902 gpc_num, tpc_num);
4903 }
4902 4904
4903 else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) { 4905 else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) {
4904 err = gr_gk20a_split_ppc_broadcast_addr(g, 4906 err = gr_gk20a_split_ppc_broadcast_addr(g,
@@ -4998,11 +5000,12 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
4998 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) 5000 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC)
4999 for (tpc_num = 0; 5001 for (tpc_num = 0;
5000 tpc_num < g->gr.gpc_tpc_count[gpc_num]; 5002 tpc_num < g->gr.gpc_tpc_count[gpc_num];
5001 tpc_num++) 5003 tpc_num++) {
5002 priv_addr_table[t++] = 5004 priv_addr_table[t++] =
5003 pri_tpc_addr(g, 5005 pri_tpc_addr(g,
5004 pri_tpccs_addr_mask(addr), 5006 pri_tpccs_addr_mask(addr),
5005 gpc_num, tpc_num); 5007 gpc_num, tpc_num);
5008 }
5006 else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) 5009 else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC)
5007 err = gr_gk20a_split_ppc_broadcast_addr(g, 5010 err = gr_gk20a_split_ppc_broadcast_addr(g,
5008 addr, gpc_num, priv_addr_table, &t); 5011 addr, gpc_num, priv_addr_table, &t);