summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
diff options
context:
space:
mode:
authorMahantesh Kumbar <mkumbar@nvidia.com>2014-10-14 08:14:34 -0400
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:11:46 -0400
commit1b6e655724d2cdfa441a34119ffe3c7d2acd9596 (patch)
treeddcd1454b67634c282b8238e927fe784a5c3ebc7 /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent7c35b023a737a1765fc64af2427767e4235be793 (diff)
gk20a: Moved bind fecs to init_gr_support
-Moved bind fecs from work queue to init_gr_support. -It makes all CPU->FECS communication to happen before booting PMU, and after we boot PMU, only PMU talks to FECS. So it removes possibility to race between CPU and PMU talking to FECS. Bug 200032923 Change-Id: I01d6d7f61f5e3c0e788d9d77fcabe5a91fe86c84 Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com> Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-on: http://git-master/r/559733
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.c93
1 file changed, 1 insertion(+), 92 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 395afdd4..e60de70b 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2174,93 +2174,11 @@ void pmu_setup_hw(struct work_struct *work)
2174int gk20a_init_pmu_bind_fecs(struct gk20a *g) 2174int gk20a_init_pmu_bind_fecs(struct gk20a *g)
2175{ 2175{
2176 struct pmu_gk20a *pmu = &g->pmu; 2176 struct pmu_gk20a *pmu = &g->pmu;
2177 struct mm_gk20a *mm = &g->mm;
2178 struct vm_gk20a *vm = &mm->pmu.vm;
2179 struct device *d = dev_from_gk20a(g);
2180 struct pmu_cmd cmd; 2177 struct pmu_cmd cmd;
2181 u32 desc; 2178 u32 desc;
2182 int err; 2179 int err = 0;
2183 u32 size;
2184 struct sg_table *sgt_pg_buf;
2185 dma_addr_t iova;
2186
2187 gk20a_dbg_fn(""); 2180 gk20a_dbg_fn("");
2188 2181
2189 size = 0;
2190 err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
2191 if (err && (pmu->pmu_state == PMU_STATE_ELPG_BOOTED)) {
2192 gk20a_err(dev_from_gk20a(g),
2193 "fail to query fecs pg buffer size");
2194 return err;
2195 }
2196
2197 if (err) {
2198 gk20a_err(dev_from_gk20a(g),
2199 "fail to query fecs pg buffer size invalid boot state");
2200 return err;
2201 }
2202
2203 if (!pmu->pg_buf.cpuva) {
2204 pmu->pg_buf.cpuva = dma_alloc_coherent(d, size,
2205 &iova,
2206 GFP_KERNEL);
2207 if (!pmu->pg_buf.cpuva) {
2208 gk20a_err(d, "failed to allocate memory\n");
2209 return -ENOMEM;
2210 }
2211
2212 pmu->pg_buf.iova = iova;
2213 pmu->pg_buf.size = size;
2214
2215 err = gk20a_get_sgtable(d, &sgt_pg_buf,
2216 pmu->pg_buf.cpuva,
2217 pmu->pg_buf.iova,
2218 size);
2219 if (err) {
2220 gk20a_err(d, "failed to create sg table\n");
2221 goto err_free_pg_buf;
2222 }
2223
2224 pmu->pg_buf.pmu_va = gk20a_gmmu_map(vm,
2225 &sgt_pg_buf,
2226 size,
2227 0, /* flags */
2228 gk20a_mem_flag_none);
2229 if (!pmu->pg_buf.pmu_va) {
2230 gk20a_err(d, "failed to map fecs pg buffer");
2231 err = -ENOMEM;
2232 goto err_free_sgtable;
2233 }
2234
2235 gk20a_free_sgtable(&sgt_pg_buf);
2236 }
2237
2238 err = gr_gk20a_fecs_set_reglist_bind_inst(g, mm->pmu.inst_block.cpu_pa);
2239 if (err && (pmu->pmu_state == PMU_STATE_ELPG_BOOTED)) {
2240 gk20a_err(dev_from_gk20a(g),
2241 "fail to bind pmu inst to gr");
2242 return err;
2243 }
2244
2245 if (err) {
2246 gk20a_err(dev_from_gk20a(g),
2247 "fail to bind pmu inst to gr invalid boot state");
2248 return err;
2249 }
2250
2251 err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.pmu_va);
2252 if (err && (pmu->pmu_state == PMU_STATE_ELPG_BOOTED)) {
2253 gk20a_err(dev_from_gk20a(g),
2254 "fail to set pg buffer pmu va");
2255 return err;
2256 }
2257
2258 if (err) {
2259 gk20a_err(dev_from_gk20a(g),
2260 "fail to set pg buffer pmu va invalid boot state");
2261 return err;
2262 }
2263
2264 memset(&cmd, 0, sizeof(struct pmu_cmd)); 2182 memset(&cmd, 0, sizeof(struct pmu_cmd));
2265 cmd.hdr.unit_id = PMU_UNIT_PG; 2183 cmd.hdr.unit_id = PMU_UNIT_PG;
2266 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_eng_buf_load); 2184 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_eng_buf_load);
@@ -2278,15 +2196,6 @@ int gk20a_init_pmu_bind_fecs(struct gk20a *g)
2278 pmu_handle_pg_buf_config_msg, pmu, &desc, ~0); 2196 pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
2279 pmu->pmu_state = PMU_STATE_LOADING_PG_BUF; 2197 pmu->pmu_state = PMU_STATE_LOADING_PG_BUF;
2280 return err; 2198 return err;
2281
2282err_free_sgtable:
2283 gk20a_free_sgtable(&sgt_pg_buf);
2284err_free_pg_buf:
2285 dma_free_coherent(d, size,
2286 pmu->pg_buf.cpuva, pmu->pg_buf.iova);
2287 pmu->pg_buf.cpuva = NULL;
2288 pmu->pg_buf.iova = 0;
2289 return err;
2290} 2199}
2291 2200
2292static void pmu_setup_hw_load_zbc(struct gk20a *g) 2201static void pmu_setup_hw_load_zbc(struct gk20a *g)