author	Mahantesh Kumbar <mkumbar@nvidia.com>	2014-10-14 08:14:34 -0400
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-03-18 15:11:46 -0400
commit	1b6e655724d2cdfa441a34119ffe3c7d2acd9596 (patch)
tree	ddcd1454b67634c282b8238e927fe784a5c3ebc7 /drivers/gpu/nvgpu/gk20a
parent	7c35b023a737a1765fc64af2427767e4235be793 (diff)
gk20a: Moved bind fecs to init_gr_support
- Moved bind fecs from the work queue to init_gr_support.
- This makes all CPU->FECS communication happen before the PMU is
  booted; once the PMU is up, only the PMU talks to FECS. This removes
  the possibility of a race between the CPU and the PMU talking to FECS.

Bug 200032923

Change-Id: I01d6d7f61f5e3c0e788d9d77fcabe5a91fe86c84
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/559733
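[Editor's note] The change is about ordering, not new functionality. A minimal standalone C sketch of the guarantee it establishes (the stub functions below are hypothetical stand-ins written for this note, not driver APIs; only the gk20a_* names mentioned in the comments appear in the diff):

#include <stdio.h>

/* Stand-in for gk20a_init_gr_bind_fecs_elpg(): the last CPU->FECS
 * traffic, now issued during GR init instead of from a work queue. */
static int bind_fecs_reglist(void)
{
	printf("CPU -> FECS: bind PG reglist buffer\n");
	return 0;
}

/* Stand-in for gk20a_init_gr_support(). */
static int init_gr_support(void)
{
	/* ... other GR init ... */
	return bind_fecs_reglist();	/* CPU->FECS traffic finishes here */
}

/* Stand-in for PMU boot; afterwards only the PMU talks to FECS,
 * so there is no CPU/PMU race on the FECS interface. */
static int boot_pmu(void)
{
	printf("PMU booted; PMU <-> FECS only from now on\n");
	return 0;
}

int main(void)
{
	if (init_gr_support())	/* step 1: all CPU->FECS setup */
		return 1;
	return boot_pmu();	/* step 2: PMU boot, strictly afterwards */
}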
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	90
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	93
2 files changed, 91 insertions, 92 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 1dc5603f..817cb98d 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -55,6 +55,7 @@
 
 #define BLK_SIZE (256)
 
+static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g);
 static int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va);
 
 /* global ctx buffer */
@@ -4560,6 +4561,91 @@ clean_up:
 	return err;
 }
 
+static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
+{
+	struct pmu_gk20a *pmu = &g->pmu;
+	struct mm_gk20a *mm = &g->mm;
+	struct vm_gk20a *vm = &mm->pmu.vm;
+	struct device *d = dev_from_gk20a(g);
+	int err = 0;
+
+	u32 size;
+	struct sg_table *sgt_pg_buf;
+	dma_addr_t iova;
+
+	gk20a_dbg_fn("");
+
+	size = 0;
+
+	err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g),
+			"fail to query fecs pg buffer size");
+		return err;
+	}
+
+	if (!pmu->pg_buf.cpuva) {
+		pmu->pg_buf.cpuva = dma_alloc_coherent(d, size,
+						&iova,
+						GFP_KERNEL);
+		if (!pmu->pg_buf.cpuva) {
+			gk20a_err(d, "failed to allocate memory\n");
+			return -ENOMEM;
+		}
+
+		pmu->pg_buf.iova = iova;
+		pmu->pg_buf.size = size;
+
+		err = gk20a_get_sgtable(d, &sgt_pg_buf,
+					pmu->pg_buf.cpuva,
+					pmu->pg_buf.iova,
+					size);
+		if (err) {
+			gk20a_err(d, "failed to create sg table\n");
+			goto err_free_pg_buf;
+		}
+
+		pmu->pg_buf.pmu_va = gk20a_gmmu_map(vm,
+					&sgt_pg_buf,
+					size,
+					0, /* flags */
+					gk20a_mem_flag_none);
+		if (!pmu->pg_buf.pmu_va) {
+			gk20a_err(d, "failed to map fecs pg buffer");
+			err = -ENOMEM;
+			goto err_free_sgtable;
+		}
+
+		gk20a_free_sgtable(&sgt_pg_buf);
+	}
+
+
+	err = gr_gk20a_fecs_set_reglist_bind_inst(g, mm->pmu.inst_block.cpu_pa);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g),
+			"fail to bind pmu inst to gr");
+		return err;
+	}
+
+	err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.pmu_va);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g),
+			"fail to set pg buffer pmu va");
+		return err;
+	}
+
+	return err;
+
+err_free_sgtable:
+	gk20a_free_sgtable(&sgt_pg_buf);
+err_free_pg_buf:
+	dma_free_coherent(d, size,
+		pmu->pg_buf.cpuva, pmu->pg_buf.iova);
+	pmu->pg_buf.cpuva = NULL;
+	pmu->pg_buf.iova = 0;
+	return err;
+}
+
 int gk20a_init_gr_support(struct gk20a *g)
 {
 	u32 err;
@@ -4581,6 +4667,10 @@ int gk20a_init_gr_support(struct gk20a *g)
 	if (err)
 		return err;
 
+	err = gk20a_init_gr_bind_fecs_elpg(g);
+	if (err)
+		return err;
+
 	/* GR is inialized, signal possible waiters */
 	g->gr.initialized = true;
 	wake_up(&g->gr.init_wq);
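[Editor's note] The new gk20a_init_gr_bind_fecs_elpg() above uses the kernel's usual goto-based unwind: each failure jumps to the label that releases exactly what was set up before it, in reverse order. A compilable sketch of that shape, with malloc()/free() standing in for the DMA, sg-table and GMMU helpers (all names here are illustrative, not driver APIs):

#include <stdlib.h>

static void *g_buf;		/* ~ pmu->pg_buf.cpuva, kept after setup */
static void *g_mapping;		/* ~ pmu->pg_buf.pmu_va, kept after setup */

static int setup_pg_buffer(void)
{
	void *buf, *table, *mapping;
	int err;

	buf = malloc(256);		/* ~ dma_alloc_coherent() */
	if (!buf)
		return -1;

	table = malloc(64);		/* ~ gk20a_get_sgtable() */
	if (!table) {
		err = -1;
		goto err_free_buf;	/* only buf to undo */
	}

	mapping = malloc(16);		/* ~ gk20a_gmmu_map() */
	if (!mapping) {
		err = -1;
		goto err_free_table;	/* undo table, fall through to buf */
	}

	free(table);		/* scaffolding; dropped once mapped, like
				 * gk20a_free_sgtable() on the success path */
	g_buf = buf;
	g_mapping = mapping;
	return 0;

err_free_table:
	free(table);
err_free_buf:
	free(buf);
	return err;
}

int main(void)
{
	return setup_pg_buffer() ? 1 : 0;
}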
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 395afdd4..e60de70b 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2174,93 +2174,11 @@ void pmu_setup_hw(struct work_struct *work)
 int gk20a_init_pmu_bind_fecs(struct gk20a *g)
 {
 	struct pmu_gk20a *pmu = &g->pmu;
-	struct mm_gk20a *mm = &g->mm;
-	struct vm_gk20a *vm = &mm->pmu.vm;
-	struct device *d = dev_from_gk20a(g);
 	struct pmu_cmd cmd;
 	u32 desc;
-	int err;
-	u32 size;
-	struct sg_table *sgt_pg_buf;
-	dma_addr_t iova;
-
+	int err = 0;
 	gk20a_dbg_fn("");
 
-	size = 0;
-	err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
-	if (err && (pmu->pmu_state == PMU_STATE_ELPG_BOOTED)) {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to query fecs pg buffer size");
-		return err;
-	}
-
-	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to query fecs pg buffer size invalid boot state");
-		return err;
-	}
-
-	if (!pmu->pg_buf.cpuva) {
-		pmu->pg_buf.cpuva = dma_alloc_coherent(d, size,
-						&iova,
-						GFP_KERNEL);
-		if (!pmu->pg_buf.cpuva) {
-			gk20a_err(d, "failed to allocate memory\n");
-			return -ENOMEM;
-		}
-
-		pmu->pg_buf.iova = iova;
-		pmu->pg_buf.size = size;
-
-		err = gk20a_get_sgtable(d, &sgt_pg_buf,
-					pmu->pg_buf.cpuva,
-					pmu->pg_buf.iova,
-					size);
-		if (err) {
-			gk20a_err(d, "failed to create sg table\n");
-			goto err_free_pg_buf;
-		}
-
-		pmu->pg_buf.pmu_va = gk20a_gmmu_map(vm,
-					&sgt_pg_buf,
-					size,
-					0, /* flags */
-					gk20a_mem_flag_none);
-		if (!pmu->pg_buf.pmu_va) {
-			gk20a_err(d, "failed to map fecs pg buffer");
-			err = -ENOMEM;
-			goto err_free_sgtable;
-		}
-
-		gk20a_free_sgtable(&sgt_pg_buf);
-	}
-
-	err = gr_gk20a_fecs_set_reglist_bind_inst(g, mm->pmu.inst_block.cpu_pa);
-	if (err && (pmu->pmu_state == PMU_STATE_ELPG_BOOTED)) {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to bind pmu inst to gr");
-		return err;
-	}
-
-	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to bind pmu inst to gr invalid boot state");
-		return err;
-	}
-
-	err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.pmu_va);
-	if (err && (pmu->pmu_state == PMU_STATE_ELPG_BOOTED)) {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to set pg buffer pmu va");
-		return err;
-	}
-
-	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to set pg buffer pmu va invalid boot state");
-		return err;
-	}
-
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_eng_buf_load);
@@ -2278,15 +2196,6 @@ int gk20a_init_pmu_bind_fecs(struct gk20a *g)
 		pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
 	pmu->pmu_state = PMU_STATE_LOADING_PG_BUF;
 	return err;
-
-err_free_sgtable:
-	gk20a_free_sgtable(&sgt_pg_buf);
-err_free_pg_buf:
-	dma_free_coherent(d, size,
-		pmu->pg_buf.cpuva, pmu->pg_buf.iova);
-	pmu->pg_buf.cpuva = NULL;
-	pmu->pg_buf.iova = 0;
-	return err;
 }
 
 static void pmu_setup_hw_load_zbc(struct gk20a *g)