Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gr_vgpu.c	82
1 file changed, 43 insertions(+), 39 deletions(-)
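
This commit mechanically converts the vgpu graphics code from the device-agnostic debug macros to the per-device nvgpu logging API: gk20a_dbg_fn()/gk20a_dbg_info() become nvgpu_log_fn()/nvgpu_log_info(), which take the struct gk20a pointer as their first argument so messages can be attributed to a specific GPU instance. Functions that receive only a channel or TSG pointer gain a local g pulled from c->g or tsg->g. A minimal before/after sketch of the pattern, taken from one of the functions touched below (body elided):

    /* Before: the debug macro needs no device pointer. */
    static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
    {
    	struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx;

    	gk20a_dbg_fn("");
    	/* ... */
    }

    /* After: per-device logging; g is derived from the tsg. */
    static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
    {
    	struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx;
    	struct gk20a *g = tsg->g;

    	nvgpu_log_fn(g, " ");
    	/* ... */
    }
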
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 2f1280ac..1e633d5f 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -43,7 +43,7 @@ void vgpu_gr_detect_sm_arch(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	g->params.sm_arch_sm_version =
 		priv->constants.sm_arch_sm_version;
@@ -58,8 +58,9 @@ int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
+	struct gk20a *g = c->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX;
 	msg.handle = vgpu_get_handle(c->g);
@@ -76,7 +77,7 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX;
 	msg.handle = vgpu_get_handle(g);
@@ -94,7 +95,7 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
 	msg.handle = vgpu_get_handle(g);
@@ -109,7 +110,7 @@ int vgpu_gr_init_ctx_state(struct gk20a *g)
 	struct gr_gk20a *gr = &g->gr;
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	g->gr.ctx_vars.golden_image_size = priv->constants.golden_ctx_size;
 	g->gr.ctx_vars.zcull_ctxsw_image_size = priv->constants.zcull_ctx_size;
@@ -135,20 +136,20 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
 	u32 pagepool_buffer_size = g->ops.gr.pagepool_default_size(g) *
 		gr_scc_pagepool_total_pages_byte_granularity_v();
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
 
-	gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
+	nvgpu_log_info(g, "cb_buffer_size : %d", cb_buffer_size);
 	gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size;
 
-	gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
+	nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size);
 	gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
 
-	gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
+	nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size);
 	gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
 
-	gk20a_dbg_info("priv access map size : %d",
+	nvgpu_log_info(g, "priv access map size : %d",
 		gr->ctx_vars.priv_access_map_size);
 	gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size =
 		gr->ctx_vars.priv_access_map_size;
@@ -170,7 +171,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	u32 i;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -249,8 +250,9 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
 	u64 *g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
 	u64 *g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
 	u32 i;
+	struct gk20a *g = tsg->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (tsg->gr_ctx.global_ctx_buffer_mapped) {
 		/* server will unmap on channel close */
@@ -279,7 +281,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	struct gr_gk20a *gr = &g->gr;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (gr->ctx_vars.buffer_size == 0)
 		return 0;
@@ -328,7 +330,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (!tsg)
@@ -359,8 +361,9 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
 {
 	struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx;
+	struct gk20a *g = tsg->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (patch_ctx->mem.gpu_va) {
 		/* server will free on channel close */
@@ -375,8 +378,9 @@ static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
 {
 	struct nvgpu_gr_ctx *ch_ctx = &tsg->gr_ctx;
 	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
+	struct gk20a *g = tsg->g;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* check if hwpm was ever initialized. If not, nothing to do */
 	if (pm_ctx->mem.gpu_va == 0)
@@ -394,7 +398,7 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g,
 {
 	struct tsg_gk20a *tsg;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (gr_ctx->mem.gpu_va) {
 		struct tegra_vgpu_cmd_msg msg;
@@ -477,7 +481,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	struct tsg_gk20a *tsg = NULL;
 	int err = 0;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* an address space needs to have been bound at this point.*/
 	if (!gk20a_channel_as_bound(c)) {
@@ -577,7 +581,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	/* PM ctxt switch is off by default */
 	gr_ctx->pm_ctx.pm_mode = ctxsw_prog_main_image_pm_mode_no_ctxsw_f();
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 out:
 	/* 1. gr_ctx, patch_ctx and global ctx buffer mapping
@@ -595,7 +599,7 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 	u32 sm_per_tpc;
 	int err = -ENOMEM;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	gr->max_gpc_count = priv->constants.max_gpc_count;
 	gr->gpc_count = priv->constants.gpc_count;
@@ -658,7 +662,7 @@ int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 	struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL;
 	msg.handle = vgpu_get_handle(g);
@@ -677,7 +681,7 @@ int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
 	struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO;
 	msg.handle = vgpu_get_handle(g);
@@ -712,7 +716,7 @@ u32 vgpu_gr_get_max_fbps_count(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return priv->constants.num_fbps;
 }
@@ -721,7 +725,7 @@ u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return priv->constants.fbp_en_mask;
 }
@@ -730,7 +734,7 @@ u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return priv->constants.ltc_per_fbp;
 }
@@ -739,7 +743,7 @@ u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return priv->constants.max_lts_per_ltc;
 }
@@ -749,7 +753,7 @@ u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g)
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 	u32 i, max_fbps_count = priv->constants.num_fbps;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (g->gr.fbp_rop_l2_en_mask == NULL) {
 		g->gr.fbp_rop_l2_en_mask =
@@ -772,7 +776,7 @@ int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 	struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE;
 	msg.handle = vgpu_get_handle(g);
@@ -804,7 +808,7 @@ int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 		&msg.params.zbc_query_table;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE;
 	msg.handle = vgpu_get_handle(g);
@@ -840,7 +844,7 @@ int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 
 static void vgpu_remove_gr_support(struct gr_gk20a *gr)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(gr->g, " ");
 
 	gk20a_comptag_allocator_destroy(gr->g, &gr->comp_tags);
 
@@ -865,10 +869,10 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 	struct gr_gk20a *gr = &g->gr;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (gr->sw_ready) {
-		gk20a_dbg_fn("skip init");
+		nvgpu_log_fn(g, "skip init");
 		return 0;
 	}
 
@@ -907,7 +911,7 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 	gr->remove_support = vgpu_remove_gr_support;
 	gr->sw_ready = true;
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	return 0;
 
 clean_up:
@@ -918,7 +922,7 @@ clean_up:
 
 int vgpu_init_gr_support(struct gk20a *g)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return vgpu_gr_init_gr_setup_sw(g);
 }
@@ -928,7 +932,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = gk20a_channel_get(&f->channel[info->chid]);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 	if (!ch)
 		return 0;
 
@@ -985,7 +989,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
 int vgpu_gr_nonstall_isr(struct gk20a *g,
 			struct tegra_vgpu_gr_nonstall_intr_info *info)
 {
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	switch (info->type) {
 	case TEGRA_VGPU_GR_NONSTALL_INTR_SEMAPHORE:
@@ -1006,7 +1010,7 @@ int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
 	struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE;
 	msg.handle = vgpu_get_handle(g);
@@ -1026,7 +1030,7 @@ int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
 	struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE;
 	msg.handle = vgpu_get_handle(g);
@@ -1053,7 +1057,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 	struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(ch);
 	if (!tsg)