path: root/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
author    Konsta Holtta <kholtta@nvidia.com>    2018-06-25 08:47:15 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-06-28 14:13:35 -0400
commit    dd146d42fc910128b0e2987d12b83430bf97bae0 (patch)
tree      23d3f808eb6ea65dd8ab90398a22ff52fce09736 /drivers/gpu/nvgpu/gp10b/gr_gp10b.c
parent    2dda362e6395a6d486ba3d1a75e707933690023e (diff)
gpu: nvgpu: don't mem_{begin,end}() for gr
Now that GR buffers always have a kernel mapping, remove the unnecessary
calls to nvgpu_mem_begin() and nvgpu_mem_end() on these buffers:

- global ctx buffer mem in gr
- gr ctx mem in a tsg
- patch ctx mem in a gr ctx
- pm ctx mem in a gr ctx
- ctx_header mem in a channel (subctx header)

Change-Id: Id2a8ad108aef8db8b16dce5bae8003bbcd3b23e4
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1760599
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
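For context, nvgpu_mem_begin() sets up a temporary kernel mapping for CPU
accesses to an nvgpu_mem buffer and can fail; nvgpu_mem_end() tears that
mapping down. Below is a minimal, self-contained sketch of the pattern this
change removes. The stub types and helpers are hypothetical stand-ins for
the real nvgpu ones, modeling just enough to show why a permanent kernel
mapping makes the bracketing (and its per-caller error handling)
unnecessary.

/*
 * Sketch only: these stub types and helpers are hypothetical
 * stand-ins for the real nvgpu ones.
 */
#include <stdint.h>
#include <stdio.h>

struct gk20a { int unused; };
struct nvgpu_mem { uint32_t *cpu_va; };  /* kernel mapping, if any */

/* Old model: a CPU access needs a temporary mapping that can fail. */
static int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
{
        (void)g;
        return mem->cpu_va == NULL;  /* nonzero on mapping failure */
}

static void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
{
        (void)g;
        (void)mem;  /* would drop the temporary mapping */
}

static uint32_t nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, uint32_t off)
{
        (void)g;
        return mem->cpu_va[off / sizeof(uint32_t)];
}

int main(void)
{
        uint32_t backing[16] = { 0xdeadbeef };
        struct gk20a g = { 0 };
        struct nvgpu_mem mem = { backing };

        /* Before this change: every CPU read is bracketed, and every
         * caller must handle a mapping failure. */
        if (nvgpu_mem_begin(&g, &mem))
                return 1;
        printf("magic: %x\n", nvgpu_mem_rd(&g, &mem, 0));
        nvgpu_mem_end(&g, &mem);

        /* After: GR buffers always carry a kernel mapping, so the
         * bracketing and its error path can simply go away. */
        printf("magic: %x\n", nvgpu_mem_rd(&g, &mem, 0));
        return 0;
}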
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/gr_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c | 27
1 file changed, 2 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index be9a7cf6..6249992a 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -1115,10 +1115,6 @@ void gr_gp10b_dump_ctxsw_stats(struct gk20a *g, struct vm_gk20a *vm,
 {
         struct nvgpu_mem *mem = &gr_ctx->mem;
 
-        if (nvgpu_mem_begin(g, mem)) {
-                WARN_ON("Cannot map context");
-                return;
-        }
         nvgpu_err(g, "ctxsw_prog_main_image_magic_value_o : %x (expect %x)",
                 nvgpu_mem_rd(g, mem,
                         ctxsw_prog_main_image_magic_value_o()),
@@ -1159,7 +1155,6 @@ void gr_gp10b_dump_ctxsw_stats(struct gk20a *g, struct vm_gk20a *vm,
                 "image compute preemption option (CTA is 1) %x",
                 nvgpu_mem_rd(g, mem,
                         ctxsw_prog_main_image_compute_preemption_options_o()));
-        nvgpu_mem_end(g, mem);
 }
 
 void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
@@ -2175,12 +2170,9 @@ int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
         gr_ctx->boosted_ctx = boost;
         mem = &gr_ctx->mem;
 
-        if (nvgpu_mem_begin(g, mem))
-                return -ENOMEM;
-
         err = gk20a_disable_channel_tsg(g, ch);
         if (err)
-                goto unmap_ctx;
+                return err;
 
         err = gk20a_fifo_preempt(g, ch);
         if (err)
@@ -2193,8 +2185,6 @@ int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
 
 enable_ch:
         gk20a_enable_channel_tsg(g, ch);
-unmap_ctx:
-        nvgpu_mem_end(g, mem);
 
         return err;
 }
@@ -2217,8 +2207,6 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
         struct tsg_gk20a *tsg;
         struct vm_gk20a *vm;
         struct nvgpu_mem *mem;
-        struct ctx_header_desc *ctx = &ch->ctx_header;
-        struct nvgpu_mem *ctxheader = &ctx->mem;
         u32 class;
         int err = 0;
 
@@ -2263,15 +2251,9 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
                 }
         }
 
-        if (nvgpu_mem_begin(g, mem))
-                return -ENOMEM;
-
-        if (nvgpu_mem_begin(g, ctxheader))
-                goto unamp_ctx_header;
-
         err = gk20a_disable_channel_tsg(g, ch);
         if (err)
-                goto unmap_ctx;
+                return err;
 
         err = gk20a_fifo_preempt(g, ch);
         if (err)
@@ -2292,11 +2274,6 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 
 enable_ch:
         gk20a_enable_channel_tsg(g, ch);
-unmap_ctx:
-        nvgpu_mem_end(g, ctxheader);
-unamp_ctx_header:
-        nvgpu_mem_end(g, mem);
-
         return err;
 }
 
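A second effect, visible in the gr_gp10b_set_boosted_ctx() and
gr_gp10b_set_preemption_mode() hunks above: once there is no temporary
mapping to undo, the goto-based unwind labels (unmap_ctx and the misspelled
unamp_ctx_header, both from the original source) disappear, and early
failures return directly. Below is a compilable sketch of the resulting
control-flow shape, with hypothetical stubs standing in for the real
channel/TSG helpers named in the diff.

#include <stdio.h>

struct gk20a { int unused; };
struct channel_gk20a { int unused; };

/* Hypothetical stubs for the real helpers named in the diff. */
static int gk20a_disable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
{
        (void)g; (void)ch;
        return 0;
}

static int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
{
        (void)g; (void)ch;
        return 0;
}

static void gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
{
        (void)g; (void)ch;
}

/* Shape of the function after this change: with no nvgpu_mem_begin(),
 * failures before the preempt can return without any unwinding. */
static int set_preemption_mode_shape(struct gk20a *g, struct channel_gk20a *ch)
{
        int err;

        err = gk20a_disable_channel_tsg(g, ch);
        if (err)
                return err;

        err = gk20a_fifo_preempt(g, ch);
        if (err)
                goto enable_ch;

        /* ... update the preemption mode in the context image via
         * direct nvgpu_mem_rd()/nvgpu_mem_wr() accesses ... */

enable_ch:
        gk20a_enable_channel_tsg(g, ch);
        return err;
}

int main(void)
{
        struct gk20a g = { 0 };
        struct channel_gk20a ch = { 0 };

        return set_preemption_mode_shape(&g, &ch);
}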