summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
diff options
context:
space:
mode:
author: Alex Waterman <alexw@nvidia.com>, 2017-03-15 19:42:12 -0400
committer: mobile promotions <svcmobile_promotions@nvidia.com>, 2017-04-06 21:14:48 -0400
commit: b69020bff5dfa69cad926c9374cdbe9a62509ffd (patch)
tree: 222f6b6bc23561a38004a257cbac401e431ff3be /drivers/gpu/nvgpu/gp10b/gr_gp10b.c
parent: fa4ecf5730a75269e85cc41c2ad2ee61307e72a9 (diff)
gpu: nvgpu: Rename gk20a_mem_* functions
Rename the functions used for mem_desc access to nvgpu_mem_*. JIRA NVGPU-12 Change-Id: Ibfdc1112d43f0a125e4487c250e3f977ffd2cd75 Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: http://git-master/r/1323325 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/gr_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c | 44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 95590e40..fc831e75 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -1039,51 +1039,51 @@ static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
1039{ 1039{
1040 struct mem_desc *mem = &gr_ctx->mem; 1040 struct mem_desc *mem = &gr_ctx->mem;
1041 1041
1042 if (gk20a_mem_begin(g, mem)) { 1042 if (nvgpu_mem_begin(g, mem)) {
1043 WARN_ON("Cannot map context"); 1043 WARN_ON("Cannot map context");
1044 return; 1044 return;
1045 } 1045 }
1046 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n", 1046 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n",
1047 gk20a_mem_rd(g, mem, 1047 nvgpu_mem_rd(g, mem,
1048 ctxsw_prog_main_image_magic_value_o()), 1048 ctxsw_prog_main_image_magic_value_o()),
1049 ctxsw_prog_main_image_magic_value_v_value_v()); 1049 ctxsw_prog_main_image_magic_value_v_value_v());
1050 1050
1051 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x\n", 1051 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x\n",
1052 gk20a_mem_rd(g, mem, 1052 nvgpu_mem_rd(g, mem,
1053 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o())); 1053 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));
1054 1054
1055 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x\n", 1055 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x\n",
1056 gk20a_mem_rd(g, mem, 1056 nvgpu_mem_rd(g, mem,
1057 ctxsw_prog_main_image_context_timestamp_buffer_ptr_o())); 1057 ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));
1058 1058
1059 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_control : %x\n", 1059 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_control : %x\n",
1060 gk20a_mem_rd(g, mem, 1060 nvgpu_mem_rd(g, mem,
1061 ctxsw_prog_main_image_context_timestamp_buffer_control_o())); 1061 ctxsw_prog_main_image_context_timestamp_buffer_control_o()));
1062 1062
1063 gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n", 1063 gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n",
1064 gk20a_mem_rd(g, mem, 1064 nvgpu_mem_rd(g, mem,
1065 ctxsw_prog_main_image_num_save_ops_o())); 1065 ctxsw_prog_main_image_num_save_ops_o()));
1066 gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n", 1066 gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n",
1067 gk20a_mem_rd(g, mem, 1067 nvgpu_mem_rd(g, mem,
1068 ctxsw_prog_main_image_num_wfi_save_ops_o())); 1068 ctxsw_prog_main_image_num_wfi_save_ops_o()));
1069 gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n", 1069 gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n",
1070 gk20a_mem_rd(g, mem, 1070 nvgpu_mem_rd(g, mem,
1071 ctxsw_prog_main_image_num_cta_save_ops_o())); 1071 ctxsw_prog_main_image_num_cta_save_ops_o()));
1072 gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n", 1072 gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n",
1073 gk20a_mem_rd(g, mem, 1073 nvgpu_mem_rd(g, mem,
1074 ctxsw_prog_main_image_num_gfxp_save_ops_o())); 1074 ctxsw_prog_main_image_num_gfxp_save_ops_o()));
1075 gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n", 1075 gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n",
1076 gk20a_mem_rd(g, mem, 1076 nvgpu_mem_rd(g, mem,
1077 ctxsw_prog_main_image_num_cilp_save_ops_o())); 1077 ctxsw_prog_main_image_num_cilp_save_ops_o()));
1078 gk20a_err(dev_from_gk20a(g), 1078 gk20a_err(dev_from_gk20a(g),
1079 "image gfx preemption option (GFXP is 1) %x\n", 1079 "image gfx preemption option (GFXP is 1) %x\n",
1080 gk20a_mem_rd(g, mem, 1080 nvgpu_mem_rd(g, mem,
1081 ctxsw_prog_main_image_graphics_preemption_options_o())); 1081 ctxsw_prog_main_image_graphics_preemption_options_o()));
1082 gk20a_err(dev_from_gk20a(g), 1082 gk20a_err(dev_from_gk20a(g),
1083 "image compute preemption option (CTA is 1) %x\n", 1083 "image compute preemption option (CTA is 1) %x\n",
1084 gk20a_mem_rd(g, mem, 1084 nvgpu_mem_rd(g, mem,
1085 ctxsw_prog_main_image_compute_preemption_options_o())); 1085 ctxsw_prog_main_image_compute_preemption_options_o()));
1086 gk20a_mem_end(g, mem); 1086 nvgpu_mem_end(g, mem);
1087} 1087}
1088 1088
1089static void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm, 1089static void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
@@ -1123,21 +1123,21 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
1123 1123
1124 if (gr_ctx->graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) { 1124 if (gr_ctx->graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) {
1125 gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); 1125 gk20a_dbg_info("GfxP: %x", gfxp_preempt_option);
1126 gk20a_mem_wr(g, mem, 1126 nvgpu_mem_wr(g, mem,
1127 ctxsw_prog_main_image_graphics_preemption_options_o(), 1127 ctxsw_prog_main_image_graphics_preemption_options_o(),
1128 gfxp_preempt_option); 1128 gfxp_preempt_option);
1129 } 1129 }
1130 1130
1131 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) { 1131 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) {
1132 gk20a_dbg_info("CILP: %x", cilp_preempt_option); 1132 gk20a_dbg_info("CILP: %x", cilp_preempt_option);
1133 gk20a_mem_wr(g, mem, 1133 nvgpu_mem_wr(g, mem,
1134 ctxsw_prog_main_image_compute_preemption_options_o(), 1134 ctxsw_prog_main_image_compute_preemption_options_o(),
1135 cilp_preempt_option); 1135 cilp_preempt_option);
1136 } 1136 }
1137 1137
1138 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CTA) { 1138 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CTA) {
1139 gk20a_dbg_info("CTA: %x", cta_preempt_option); 1139 gk20a_dbg_info("CTA: %x", cta_preempt_option);
1140 gk20a_mem_wr(g, mem, 1140 nvgpu_mem_wr(g, mem,
1141 ctxsw_prog_main_image_compute_preemption_options_o(), 1141 ctxsw_prog_main_image_compute_preemption_options_o(),
1142 cta_preempt_option); 1142 cta_preempt_option);
1143 } 1143 }
@@ -1147,7 +1147,7 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
1147 u32 size; 1147 u32 size;
1148 u32 cbes_reserve; 1148 u32 cbes_reserve;
1149 1149
1150 gk20a_mem_wr(g, mem, 1150 nvgpu_mem_wr(g, mem,
1151 ctxsw_prog_main_image_full_preemption_ptr_o(), 1151 ctxsw_prog_main_image_full_preemption_ptr_o(),
1152 gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va >> 8); 1152 gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va >> 8);
1153 1153
@@ -2077,7 +2077,7 @@ static int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
2077 2077
2078 gr_ctx->boosted_ctx = boost; 2078 gr_ctx->boosted_ctx = boost;
2079 2079
2080 if (gk20a_mem_begin(g, mem)) 2080 if (nvgpu_mem_begin(g, mem))
2081 return -ENOMEM; 2081 return -ENOMEM;
2082 2082
2083 err = gk20a_disable_channel_tsg(g, ch); 2083 err = gk20a_disable_channel_tsg(g, ch);
@@ -2096,7 +2096,7 @@ static int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
2096enable_ch: 2096enable_ch:
2097 gk20a_enable_channel_tsg(g, ch); 2097 gk20a_enable_channel_tsg(g, ch);
2098unmap_ctx: 2098unmap_ctx:
2099 gk20a_mem_end(g, mem); 2099 nvgpu_mem_end(g, mem);
2100 2100
2101 return err; 2101 return err;
2102} 2102}
@@ -2107,7 +2107,7 @@ static void gr_gp10b_update_boosted_ctx(struct gk20a *g, struct mem_desc *mem,
2107 2107
2108 v = ctxsw_prog_main_image_pmu_options_boost_clock_frequencies_f( 2108 v = ctxsw_prog_main_image_pmu_options_boost_clock_frequencies_f(
2109 gr_ctx->boosted_ctx); 2109 gr_ctx->boosted_ctx);
2110 gk20a_mem_wr(g, mem, ctxsw_prog_main_image_pmu_options_o(), v); 2110 nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_pmu_options_o(), v);
2111} 2111}
2112 2112
2113static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch, 2113static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
@@ -2164,7 +2164,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
2164 } 2164 }
2165 } 2165 }
2166 2166
2167 if (gk20a_mem_begin(g, mem)) 2167 if (nvgpu_mem_begin(g, mem))
2168 return -ENOMEM; 2168 return -ENOMEM;
2169 2169
2170 err = gk20a_disable_channel_tsg(g, ch); 2170 err = gk20a_disable_channel_tsg(g, ch);
@@ -2191,7 +2191,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
2191enable_ch: 2191enable_ch:
2192 gk20a_enable_channel_tsg(g, ch); 2192 gk20a_enable_channel_tsg(g, ch);
2193unmap_ctx: 2193unmap_ctx:
2194 gk20a_mem_end(g, mem); 2194 nvgpu_mem_end(g, mem);
2195 2195
2196 return err; 2196 return err;
2197} 2197}