Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/gr_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.c  48
1 file changed, 24 insertions, 24 deletions
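
The diff below converts this file's debug prints from the legacy gk20a_dbg_fn()/gk20a_dbg_info() helpers to the nvgpu_log_fn()/nvgpu_log_info() API, which takes the struct gk20a pointer explicitly (presumably so output can be attributed to and filtered per GPU instance). A minimal sketch of the call-site pattern; the wrapper function names are hypothetical, only the logging calls are taken from the diff:

/* before: device-implicit debug helpers */
static void example_old(struct gk20a *g)
{
	gk20a_dbg_fn("");
	gk20a_dbg_info("initialize gpc mmu");
}

/* after: nvgpu_log_* variants carrying the struct gk20a pointer */
static void example_new(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");
	nvgpu_log_info(g, "initialize gpc mmu");
}

Where no struct gk20a *g is in scope, the pointer is reached through the channel instead, e.g. nvgpu_log_fn(c->g, " ") in gr_gm20b_update_pc_sampling() below.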
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 1c966c22..331c3af9 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -47,7 +47,7 @@ void gr_gm20b_init_gpc_mmu(struct gk20a *g)
 {
 	u32 temp;
 
-	gk20a_dbg_info("initialize gpc mmu");
+	nvgpu_log_info(g, "initialize gpc mmu");
 
 	if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
 		/* Bypass MMU check for non-secure boot. For
@@ -168,7 +168,7 @@ void gr_gm20b_commit_global_bundle_cb(struct gk20a *g,
 
 	data = min_t(u32, data, g->gr.min_gpm_fifo_depth);
 
-	gk20a_dbg_info("bundle cb token limit : %d, state limit : %d",
+	nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d",
 		g->gr.bundle_cb_token_limit, data);
 
 	gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg2_r(),
@@ -193,7 +193,7 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
 	u32 num_pes_per_gpc = nvgpu_get_litter_value(g,
 					GPU_LIT_NUM_PES_PER_GPC);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -280,20 +280,20 @@ void gr_gm20b_set_rd_coalesce(struct gk20a *g, u32 data)
 {
 	u32 val;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	val = gk20a_readl(g, gr_gpcs_tpcs_tex_m_dbg2_r());
 	val = set_field(val, gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_m(),
 			gr_gpcs_tpcs_tex_m_dbg2_lg_rd_coalesce_en_f(data));
 	gk20a_writel(g, gr_gpcs_tpcs_tex_m_dbg2_r(), val);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr,
 				  u32 class_num, u32 offset, u32 data)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (class_num == MAXWELL_COMPUTE_B) {
 		switch (offset << 2) {
@@ -341,7 +341,7 @@ void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 	/* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF)
 		return; */
 
@@ -390,7 +390,7 @@ void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (cb_size > gr->attrib_cb_size)
 		cb_size = gr->attrib_cb_size;
@@ -665,7 +665,7 @@ int gr_gm20b_init_fs_state(struct gk20a *g)
 {
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = gr_gk20a_init_fs_state(g);
 	if (err)
@@ -762,7 +762,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
 			gr_fecs_falcon_hwcfg_r();
 	u8 falcon_id_mask = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
 		gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
@@ -829,7 +829,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
 	gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(6), 0xffffffff);
 	gk20a_writel(g, gr_fecs_cpuctl_alias_r(),
 			gr_fecs_cpuctl_startcpu_f(1));
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -858,7 +858,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
 {
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
 	if (err)
@@ -867,7 +867,7 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
 	if (class == MAXWELL_COMPUTE_B)
 		gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -881,7 +881,7 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
 	u32 cta_preempt_option =
 		ctxsw_prog_main_image_preemption_options_control_cta_enabled_f();
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -889,13 +889,13 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
 
 	gr_ctx = &tsg->gr_ctx;
 	if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
-		gk20a_dbg_info("CTA: %x", cta_preempt_option);
+		nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
 		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_preemption_options_o(),
 			cta_preempt_option);
 	}
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 }
 
 int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
@@ -1044,7 +1044,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
 	struct nvgpu_mem *mem;
 	u32 v;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(c->g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -1066,7 +1066,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
 
 	nvgpu_mem_end(c->g, mem);
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(c->g, "done");
 
 	return 0;
 }
@@ -1220,19 +1220,19 @@ void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
 
 	/* Only for debug purpose */
 	for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
-		gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].valid_warps[0]: %llx\n",
 			sm_id, w_state[sm_id].valid_warps[0]);
-		gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].valid_warps[1]: %llx\n",
 			sm_id, w_state[sm_id].valid_warps[1]);
 
-		gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].trapped_warps[0]: %llx\n",
 			sm_id, w_state[sm_id].trapped_warps[0]);
-		gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].trapped_warps[1]: %llx\n",
 			sm_id, w_state[sm_id].trapped_warps[1]);
 
-		gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].paused_warps[0]: %llx\n",
 			sm_id, w_state[sm_id].paused_warps[0]);
-		gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n",
+		nvgpu_log_fn(g, "w_state[%d].paused_warps[1]: %llx\n",
 			sm_id, w_state[sm_id].paused_warps[1]);
 	}
 }