path: root/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-12-15 12:04:15 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-01-17 15:29:09 -0500
commit	2f6698b863c9cc1db6455637b7c72e812b470b93 (patch)
tree	d0c8abf32d6994b9f54bf5eddafd8316e038c829 /drivers/gpu/nvgpu/gm20b/gr_gm20b.c
parent	6a73114788ffafe4c53771c707ecbd9c9ea0a117 (diff)
gpu: nvgpu: Make graphics context property of TSG
Move graphics context ownership to TSG instead of channel. Combine
channel_ctx_gk20a and gr_ctx_desc into one structure, because the split
between them was arbitrary. Move the context header to be a property of
the channel.

Bug 1842197

Change-Id: I410e3262f80b318d8528bcbec270b63a2d8d2ff9
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1639532
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Tested-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
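The hunks below apply one mechanical pattern: instead of reading the graphics
context out of the channel (the old &c->ch_ctx and ch_ctx->gr_ctx chain), each
function resolves the channel's TSG and uses the gr_ctx embedded there. A
minimal sketch of the new access pattern, condensed from the hunks in this
diff (all identifiers appear in the diff itself; error handling abbreviated):

	struct tsg_gk20a *tsg;
	struct nvgpu_gr_ctx *gr_ctx;

	/* A channel no longer owns its graphics context; its TSG does. */
	tsg = tsg_gk20a_from_ch(c);
	if (!tsg)
		return -EINVAL;	/* bare channel: no TSG, no context */

	/* gr_ctx is embedded in the TSG, not a pointer on the channel. */
	gr_ctx = &tsg->gr_ctx;

Void functions such as gr_gm20b_update_ctxsw_preemption_mode() return silently
on a NULL TSG instead of returning -EINVAL.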
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/gr_gm20b.c')
-rw-r--r--	drivers/gpu/nvgpu/gm20b/gr_gm20b.c	50
1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 36fad8b3..a2434320 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -124,7 +124,7 @@ int gr_gm20b_calc_global_ctx_buffer_size(struct gk20a *g)
 }
 
 void gr_gm20b_commit_global_attrib_cb(struct gk20a *g,
-			struct channel_ctx_gk20a *ch_ctx,
+			struct nvgpu_gr_ctx *ch_ctx,
 			u64 addr, bool patch)
 {
 	gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_setup_attrib_cb_base_r(),
@@ -141,7 +141,7 @@ void gr_gm20b_commit_global_attrib_cb(struct gk20a *g,
 }
 
 void gr_gm20b_commit_global_bundle_cb(struct gk20a *g,
-			struct channel_ctx_gk20a *ch_ctx,
+			struct nvgpu_gr_ctx *ch_ctx,
 			u64 addr, u64 size, bool patch)
 {
 	u32 data;
@@ -180,7 +180,8 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
 			struct channel_gk20a *c, bool patch)
 {
 	struct gr_gk20a *gr = &g->gr;
-	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
+	struct tsg_gk20a *tsg;
+	struct nvgpu_gr_ctx *ch_ctx;
 	u32 attrib_offset_in_chunk = 0;
 	u32 alpha_offset_in_chunk = 0;
 	u32 pd_ab_max_output;
@@ -193,6 +194,12 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
+	tsg = tsg_gk20a_from_ch(c);
+	if (!tsg)
+		return -EINVAL;
+
+	ch_ctx = &tsg->gr_ctx;
+
 	gr_gk20a_ctx_patch_write(g, ch_ctx, gr_ds_tga_constraintlogic_r(),
 		gr_ds_tga_constraintlogic_beta_cbsize_f(gr->attrib_cb_default_size) |
 		gr_ds_tga_constraintlogic_alpha_cbsize_f(gr->alpha_cb_default_size),
@@ -257,7 +264,7 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
 }
 
 void gr_gm20b_commit_global_pagepool(struct gk20a *g,
-			struct channel_ctx_gk20a *ch_ctx,
+			struct nvgpu_gr_ctx *ch_ctx,
 			u64 addr, u32 size, bool patch)
 {
 	gr_gk20a_commit_global_pagepool(g, ch_ctx, addr, size, patch);
@@ -845,7 +852,7 @@ u32 gr_gm20b_pagepool_default_size(struct gk20a *g)
 }
 
 int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
-			  struct gr_ctx_desc **gr_ctx, struct vm_gk20a *vm,
+			  struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm,
 			  u32 class,
 			  u32 flags)
 {
@@ -858,7 +865,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
 		return err;
 
 	if (class == MAXWELL_COMPUTE_B)
-		(*gr_ctx)->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
+		gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
 
 	gk20a_dbg_fn("done");
 
@@ -866,15 +873,21 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
 }
 
 void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
-		struct channel_ctx_gk20a *ch_ctx,
+		struct channel_gk20a *c,
 		struct nvgpu_mem *mem)
 {
-	struct gr_ctx_desc *gr_ctx = ch_ctx->gr_ctx;
+	struct tsg_gk20a *tsg;
+	struct nvgpu_gr_ctx *gr_ctx;
 	u32 cta_preempt_option =
 		ctxsw_prog_main_image_preemption_options_control_cta_enabled_f();
 
 	gk20a_dbg_fn("");
 
+	tsg = tsg_gk20a_from_ch(c);
+	if (!tsg)
+		return;
+
+	gr_ctx = &tsg->gr_ctx;
 	if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
 		gk20a_dbg_info("CTA: %x", cta_preempt_option);
 		nvgpu_mem_wr(g, mem,
@@ -1026,16 +1039,22 @@ int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
 int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
 				       bool enable)
 {
-	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
+	struct tsg_gk20a *tsg;
+	struct nvgpu_gr_ctx *gr_ctx;
 	struct nvgpu_mem *mem;
 	u32 v;
 
 	gk20a_dbg_fn("");
 
-	if (!ch_ctx || !ch_ctx->gr_ctx || c->vpr)
+	tsg = tsg_gk20a_from_ch(c);
+	if (!tsg)
+		return -EINVAL;
+
+	gr_ctx = &tsg->gr_ctx;
+	mem = &gr_ctx->mem;
+	if (!nvgpu_mem_is_valid(mem) || c->vpr)
 		return -EINVAL;
 
-	mem = &ch_ctx->gr_ctx->mem;
 
 	if (nvgpu_mem_begin(c->g, mem))
 		return -ENOMEM;
@@ -1289,12 +1308,19 @@ int gm20b_gr_update_sm_error_state(struct gk20a *g,
 {
 	u32 gpc, tpc, offset;
 	struct gr_gk20a *gr = &g->gr;
-	struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
+	struct tsg_gk20a *tsg;
+	struct nvgpu_gr_ctx *ch_ctx;
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
 				GPU_LIT_TPC_IN_GPC_STRIDE);
 	int err = 0;
 
+	tsg = tsg_gk20a_from_ch(ch);
+	if (!tsg)
+		return -EINVAL;
+
+	ch_ctx = &tsg->gr_ctx;
+
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
 	gr->sm_error_states[sm_id].hww_global_esr =