path: root/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
author		Terje Bergstrom <tbergstrom@nvidia.com>	2015-03-23 16:17:57 -0400
committer	Ishan Mittal <imittal@nvidia.com>	2015-05-18 01:48:27 -0400
commit		3b8c8972efabf32346a90575c26e6718f0e844ea (patch)
tree		bd0806ad2de55e991e14fc6748d6bd7332ad302e /drivers/gpu/nvgpu/gm20b/acr_gm20b.c
parent		c5f2d00d04f5048e1414f1a2cbe702026528b4db (diff)
gpu: nvgpu: Use common allocator for ACR
Reduce amount of duplicate code around memory allocation by using
common helpers, and common data structure for storing results of
allocations.

Bug 1605769

Change-Id: Ib70db4dff782176ed7f92b6809c8415b8c35abe1
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/721120
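For reference, a minimal sketch of the pattern this change moves to: one common helper fills a shared descriptor, and both the CPU pointer and the IOVA are read back from that descriptor instead of being tracked in separate locals. This is an illustration only, not the driver's code; the struct name (mem_desc), the memset() usage, and the surrounding driver headers are assumptions inferred from the fields the diff touches.

/* Hedged sketch. Assumes the nvgpu-internal headers that declare
 * struct gk20a, struct mem_desc and gk20a_gmmu_alloc(), plus
 * <linux/string.h> for memset(). The caller owns *mem, mirroring
 * how prepare_ucode_blob() keeps the descriptor in plsfm->mem. */
static int example_alloc_blob(struct gk20a *g, struct mem_desc *mem, u32 size)
{
	int err;

	/* One helper replaces the open-coded dma_alloc_coherent() call. */
	err = gk20a_gmmu_alloc(g, size, mem);
	if (err)
		return err;

	/* CPU view of the buffer, formerly the raw nonwpr_addr pointer. */
	memset(mem->cpu_va, 0, size);

	/* GPU/SMMU view, formerly gk20a_mm_smmu_vaddr_translate(g, iova). */
	g->acr.ucode_blob_start = g->ops.mm.get_iova_addr(g, mem->sgt->sgl, 0);
	g->acr.ucode_blob_size = size;

	/* Teardown would call the matching release helper on *mem
	 * (assumed to be gk20a_gmmu_free(g, mem)). */
	return 0;
}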
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')
-rw-r--r--	drivers/gpu/nvgpu/gm20b/acr_gm20b.c	36
1 file changed, 16 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 02706bad..1e1d27eb 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -212,20 +212,17 @@ rel_sig:
 
 int prepare_ucode_blob(struct gk20a *g)
 {
-	struct device *d = dev_from_gk20a(g);
-	dma_addr_t iova;
-	u32 status;
-	void *nonwpr_addr;
+	int err;
 	struct ls_flcn_mgr lsfm_l, *plsfm;
 	struct pmu_gk20a *pmu = &g->pmu;
 
 	if (g->acr.ucode_blob_start) {
 		/*Recovery case, we do not need to form
 		non WPR blob of ucodes*/
-		status = gk20a_init_pmu(pmu);
-		if (status) {
+		err = gk20a_init_pmu(pmu);
+		if (err) {
 			gm20b_dbg_pmu("failed to set function pointers\n");
-			return status;
+			return err;
 		}
 		return 0;
 	}
@@ -236,28 +233,27 @@ int prepare_ucode_blob(struct gk20a *g)
 	gr_gk20a_init_ctxsw_ucode(g);
 
 	/* Discover all managed falcons*/
-	status = lsfm_discover_ucode_images(g, plsfm);
+	err = lsfm_discover_ucode_images(g, plsfm);
 	gm20b_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
-	if (status != 0)
-		return status;
+	if (err)
+		return err;
 
 	if (plsfm->managed_flcn_cnt) {
 		/* Generate WPR requirements*/
-		status = lsf_gen_wpr_requirements(g, plsfm);
-		if (status != 0)
-			return status;
+		err = lsf_gen_wpr_requirements(g, plsfm);
+		if (err)
+			return err;
 
 		/*Alloc memory to hold ucode blob contents*/
-		nonwpr_addr = dma_alloc_coherent(d, plsfm->wpr_size, &iova,
-					GFP_KERNEL);
-		if (nonwpr_addr == NULL)
-			return -ENOMEM;
+		err = gk20a_gmmu_alloc(g, plsfm->wpr_size, &plsfm->mem);
+		if (err)
+			return err;
 
 		gm20b_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
-		lsfm_init_wpr_contents(g, plsfm, nonwpr_addr);
-		g->acr.ucode_blob_start =
-			gk20a_mm_smmu_vaddr_translate(g, iova);
+		lsfm_init_wpr_contents(g, plsfm, plsfm->mem.cpu_va);
+		g->acr.ucode_blob_start = g->ops.mm.get_iova_addr(g,
+			plsfm->mem.sgt->sgl, 0);
 		g->acr.ucode_blob_size = plsfm->wpr_size;
 		gm20b_dbg_pmu("base reg carveout 2:%x\n",
 			readl(mc + MC_SECURITY_CARVEOUT2_BOM_0));