Diffstat (limited to 'drivers/gpu/nvgpu/gm20b')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c     55
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.h      2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_ctx_gm20b.c   3
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.c      96
-rw-r--r--  drivers/gpu/nvgpu/gm20b/pmu_gm20b.c     63
-rw-r--r--  drivers/gpu/nvgpu/gm20b/pmu_gm20b.h      2
6 files changed, 144 insertions, 77 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index ba47d235..a238c523 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -284,9 +284,17 @@ rel_sig:
 
 int prepare_ucode_blob(struct gk20a *g)
 {
+
 	int err;
 	struct ls_flcn_mgr lsfm_l, *plsfm;
 	struct pmu_gk20a *pmu = &g->pmu;
+	phys_addr_t wpr_addr;
+	u32 wprsize;
+	struct mm_gk20a *mm = &g->mm;
+	struct vm_gk20a *vm = &mm->pmu.vm;
+	struct mc_carveout_info inf;
+	struct sg_table *sgt;
+	struct page *page;
 
 	if (g->acr.ucode_blob.cpu_va) {
 		/*Recovery case, we do not need to form
@@ -304,22 +312,46 @@ int prepare_ucode_blob(struct gk20a *g)
 	gm20b_mm_mmu_vpr_info_fetch(g);
 	gr_gk20a_init_ctxsw_ucode(g);
 
+	mc_get_carveout_info(&inf, NULL, MC_SECURITY_CARVEOUT2);
+	gm20b_dbg_pmu("wpr carveout base:%llx\n", inf.base);
+	wpr_addr = (phys_addr_t)inf.base;
+	gm20b_dbg_pmu("wpr carveout size :%llx\n", inf.size);
+	wprsize = (u32)inf.size;
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		gk20a_err(dev_from_gk20a(g), "failed to allocate memory\n");
+		return -ENOMEM;
+	}
+	err = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g), "failed to allocate sg_table\n");
+		goto free_sgt;
+	}
+	page = phys_to_page(wpr_addr);
+	sg_set_page(sgt->sgl, page, wprsize, 0);
+	/* This bypasses SMMU for WPR during gmmu_map. */
+	sg_dma_address(sgt->sgl) = 0;
+
+	g->pmu.wpr_buf.gpu_va = gk20a_gmmu_map(vm, &sgt, wprsize,
+					0, gk20a_mem_flag_none);
+	gm20b_dbg_pmu("wpr mapped gpu va :%llx\n", g->pmu.wpr_buf.gpu_va);
+
 	/* Discover all managed falcons*/
 	err = lsfm_discover_ucode_images(g, plsfm);
 	gm20b_dbg_pmu(" Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
 	if (err)
-		return err;
+		goto free_sgt;
 
 	if (plsfm->managed_flcn_cnt && !g->acr.ucode_blob.cpu_va) {
 		/* Generate WPR requirements*/
 		err = lsf_gen_wpr_requirements(g, plsfm);
 		if (err)
-			return err;
+			goto free_sgt;
 
 		/*Alloc memory to hold ucode blob contents*/
 		err = gk20a_gmmu_alloc(g, plsfm->wpr_size, &g->acr.ucode_blob);
 		if (err)
-			return err;
+			goto free_sgt;
 
 		gm20b_dbg_pmu("managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
@@ -329,7 +361,9 @@ int prepare_ucode_blob(struct gk20a *g)
 	}
 	gm20b_dbg_pmu("prepare ucode blob return 0\n");
 	free_acr_resources(g, plsfm);
-	return 0;
+free_sgt:
+	kfree(sgt);
+	return err;
 }
 
 static u8 lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr *plsfm,
@@ -495,7 +529,8 @@ static int pmu_populate_loader_cfg(struct gk20a *g,
 
 static int flcn_populate_bl_dmem_desc(struct gk20a *g,
 	struct lsfm_managed_ucode_img *lsfm,
-	union flcn_bl_generic_desc *p_bl_gen_desc, u32 *p_bl_gen_desc_size)
+	union flcn_bl_generic_desc *p_bl_gen_desc, u32 *p_bl_gen_desc_size,
+	u32 falconid)
 {
 	struct mc_carveout_info inf;
 	struct flcn_ucode_img *p_img = &(lsfm->ucode_img);
@@ -520,7 +555,10 @@ static int flcn_populate_bl_dmem_desc(struct gk20a *g,
 	 */
 	addr_base = lsfm->lsb_header.ucode_off;
 	mc_get_carveout_info(&inf, NULL, MC_SECURITY_CARVEOUT2);
-	addr_base += inf.base;
+	if (falconid == LSF_FALCON_ID_GPCCS)
+		addr_base += g->pmu.wpr_buf.gpu_va;
+	else
+		addr_base += inf.base;
 	gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
 			lsfm->wpr_header.falcon_id);
 	addr_code = u64_lo32((addr_base +
@@ -555,7 +593,8 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
 		gm20b_dbg_pmu("non pmu. write flcn bl gen desc\n");
 		flcn_populate_bl_dmem_desc(g, pnode, &pnode->bl_gen_desc,
-				&pnode->bl_gen_desc_size);
+				&pnode->bl_gen_desc_size,
+				pnode->wpr_header.falcon_id);
 		return 0;
 	}
 
@@ -797,7 +836,7 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 		}
 		if (falcon_id == LSF_FALCON_ID_GPCCS) {
 			pnode->lsb_header.flags |=
-				NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE;
+				NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_FALSE;
 		}
 	}
 }
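
Aside: the prepare_ucode_blob() additions above use a standard kernel idiom for handing a fixed physical carveout to the GPU MMU: wrap the region in a one-entry scatterlist and zero its DMA address, so the gmmu_map path uses the pages' physical addresses directly instead of routing through the SMMU. A minimal sketch of that idiom, separate from the driver (the helper name is illustrative, not from the patch):

	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Wrap a physically contiguous carveout in a one-entry sg_table.
	 * sg_dma_address() == 0 is the cue for the mapping layer to use
	 * the pages' physical addresses directly, i.e. bypass the SMMU. */
	static struct sg_table *carveout_to_sgt(phys_addr_t base, u32 size)
	{
		struct sg_table *sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);

		if (!sgt)
			return NULL;
		if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
			kfree(sgt);
			return NULL;
		}
		sg_set_page(sgt->sgl, phys_to_page(base), size, 0);
		sg_dma_address(sgt->sgl) = 0;
		return sgt;
	}

In the diff itself the sg_table is released with kfree() on both the error and success paths once the WPR mapping has been created.
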
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.h b/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
index 3a5fa7d0..bd3b633a 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.h
@@ -50,8 +50,10 @@
  * Defines a common Light Secure Falcon identifier.
  */
 #define LSF_FALCON_ID_PMU	(0)
+#define LSF_FALCON_ID_RESERVED	(1)
 #define LSF_FALCON_ID_FECS	(2)
 #define LSF_FALCON_ID_GPCCS	(3)
+#define LSF_FALCON_ID_END	(4)
 #define LSF_FALCON_ID_INVALID	(0xFFFFFFFF)
 
 /*!
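
The two new identifiers make the LS falcon ID space explicit and contiguous: LSF_FALCON_ID_RESERVED documents the gap at 1, and LSF_FALCON_ID_END bounds the valid range. That is what lets the new load_lsfalcon_ucode hook (see pmu_gm20b.c below) take a bitmask of falcon IDs. A hedged sketch of building and walking such a mask (illustrative only, not from the patch):

	u32 falconidmask = (1 << LSF_FALCON_ID_FECS) |
			   (1 << LSF_FALCON_ID_GPCCS);
	u32 id;

	/* LSF_FALCON_ID_END bounds the loop over possible falcon IDs. */
	for (id = 0; id < LSF_FALCON_ID_END; id++)
		if (falconidmask & (1 << id))
			gm20b_dbg_pmu("will bootstrap falcon %u\n", id);
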
diff --git a/drivers/gpu/nvgpu/gm20b/gr_ctx_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_ctx_gm20b.c
index 2a654760..01cc1f16 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_ctx_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_ctx_gm20b.c
@@ -3,7 +3,7 @@
  *
  * GM20B Graphics Context
  *
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -69,4 +69,5 @@ static bool gr_gm20b_is_firmware_defined(void)
 void gm20b_init_gr_ctx(struct gpu_ops *gops) {
 	gops->gr_ctx.get_netlist_name = gr_gm20b_get_netlist_name;
 	gops->gr_ctx.is_fw_defined = gr_gm20b_is_firmware_defined;
+	gops->gr_ctx.use_dma_for_fw_bootstrap = true;
 }
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index e1204dad..6c7831d5 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -688,29 +688,9 @@ static void gr_gm20b_load_gpccs_with_bootloader(struct gk20a *g)
 			gr_fecs_falcon_hwcfg_r());
 }
 
-static int gr_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout, u32 val)
-{
-	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
-	unsigned long delay = GR_FECS_POLL_INTERVAL;
-	u32 reg;
-
-	gk20a_dbg_fn("");
-	reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
-	do {
-		reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
-		if (reg == val)
-			return 0;
-		udelay(delay);
-	} while (time_before(jiffies, end_jiffies) ||
-			!tegra_platform_is_silicon());
-
-	return -ETIMEDOUT;
-}
-
 static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
 {
-	u32 err;
-	unsigned long timeout = gk20a_get_gr_idle_timeout(g);
+	u32 err, flags;
 	u32 reg_offset = gr_gpcs_gpccs_falcon_hwcfg_r() -
 			gr_fecs_falcon_hwcfg_r();
 
@@ -723,63 +703,57 @@ static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
 			gr_gpccs_ctxsw_mailbox_value_f(0xc0de7777));
 	}
 
+	flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
+	g->ops.pmu.lsfloadedfalconid = 0;
 	if (g->ops.pmu.fecsbootstrapdone) {
-		gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), ~0x0);
-		gm20b_pmu_load_lsf(g, LSF_FALCON_ID_FECS);
-		err = gr_gm20b_ctx_wait_lsf_ready(g, timeout, 0x55AA55AA);
+		/* this must be recovery so bootstrap fecs and gpccs */
+		if (!g->ops.securegpccs) {
+			gr_gm20b_load_gpccs_with_bootloader(g);
+			err = g->ops.pmu.load_lsfalcon_ucode(g,
+					(1 << LSF_FALCON_ID_FECS));
+		} else {
+			/* bind WPR VA inst block */
+			gr_gk20a_load_falcon_bind_instblk(g);
+			err = g->ops.pmu.load_lsfalcon_ucode(g,
+					(1 << LSF_FALCON_ID_FECS) |
+					(1 << LSF_FALCON_ID_GPCCS));
+		}
 		if (err) {
-			gk20a_err(dev_from_gk20a(g), "Unable to recover FECS");
+			gk20a_err(dev_from_gk20a(g),
+					"Unable to recover GR falcon");
 			return err;
-		} else {
-			if (!g->ops.securegpccs) {
-				gr_gm20b_load_gpccs_with_bootloader(g);
-				gk20a_writel(g, gr_gpccs_dmactl_r(),
-					gr_gpccs_dmactl_require_ctx_f(0));
-				gk20a_writel(g, gr_gpccs_cpuctl_r(),
-					gr_gpccs_cpuctl_startcpu_f(1));
-			} else {
-				gk20a_writel(g,
-					gr_fecs_ctxsw_mailbox_clear_r(0), ~0x0);
-				gm20b_pmu_load_lsf(g, LSF_FALCON_ID_GPCCS);
-				err = gr_gm20b_ctx_wait_lsf_ready(g, timeout,
-						0x55AA55AA);
-				gk20a_writel(g, reg_offset +
-					gr_fecs_cpuctl_alias_r(),
-					gr_gpccs_cpuctl_startcpu_f(1));
-			}
 		}
+
 	} else {
+		/* cold boot or rg exit */
 		g->ops.pmu.fecsbootstrapdone = true;
 		if (!g->ops.securegpccs) {
 			gr_gm20b_load_gpccs_with_bootloader(g);
-			gk20a_writel(g, gr_gpccs_dmactl_r(),
-				gr_gpccs_dmactl_require_ctx_f(0));
-			gk20a_writel(g, gr_gpccs_cpuctl_r(),
-				gr_gpccs_cpuctl_startcpu_f(1));
 		} else {
-			pmu_wait_message_cond(&g->pmu,
-				gk20a_get_gr_idle_timeout(g),
-				&g->ops.pmu.lspmuwprinitdone, 1);
-			if (!g->ops.pmu.lspmuwprinitdone) {
-				gk20a_err(dev_from_gk20a(g),
-					"PMU WPR needed but not ready yet");
-				return -ETIMEDOUT;
-			}
-			gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), ~0x0);
-			gm20b_pmu_load_lsf(g, LSF_FALCON_ID_GPCCS);
-			err = gr_gm20b_ctx_wait_lsf_ready(g, timeout,
-					0x55AA55AA);
+			/* bind WPR VA inst block */
+			gr_gk20a_load_falcon_bind_instblk(g);
+			err = g->ops.pmu.load_lsfalcon_ucode(g,
+					(1 << LSF_FALCON_ID_GPCCS));
 			if (err) {
 				gk20a_err(dev_from_gk20a(g),
 					"Unable to boot GPCCS\n");
 				return err;
 			}
-			gk20a_writel(g, reg_offset +
-				gr_fecs_cpuctl_alias_r(),
-				gr_gpccs_cpuctl_startcpu_f(1));
 		}
 	}
 
+	/*start gpccs */
+	if (g->ops.securegpccs) {
+		gk20a_writel(g, reg_offset +
+			gr_fecs_cpuctl_alias_r(),
+			gr_gpccs_cpuctl_startcpu_f(1));
+	} else {
+		gk20a_writel(g, gr_gpccs_dmactl_r(),
+			gr_gpccs_dmactl_require_ctx_f(0));
+		gk20a_writel(g, gr_gpccs_cpuctl_r(),
+			gr_gpccs_cpuctl_startcpu_f(1));
+	}
+	/* start fecs */
 	gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), ~0x0);
 	gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(1), 0x1);
 	gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(6), 0xffffffff);
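
Taken together, the rewrite collapses four separate bootstrap paths into one decision about which falcons the PMU must (re)load, plus a shared start sequence at the end of the function. A condensed, illustrative sketch of the resulting flow (error handling elided; not verbatim driver code):

	u32 mask = g->ops.securegpccs ? (1 << LSF_FALCON_ID_GPCCS) : 0;

	if (g->ops.pmu.fecsbootstrapdone)	/* recovery */
		mask |= 1 << LSF_FALCON_ID_FECS;
	else					/* cold boot or rg exit */
		g->ops.pmu.fecsbootstrapdone = true;

	if (!g->ops.securegpccs)
		gr_gm20b_load_gpccs_with_bootloader(g);
	else
		gr_gk20a_load_falcon_bind_instblk(g);
	if (mask)
		err = g->ops.pmu.load_lsfalcon_ucode(g, mask);

	/* shared tail: start GPCCS (through the FECS cpuctl alias
	 * register when secure), then hand FECS its mailbox values */

The mailbox polling that the deleted gr_gm20b_ctx_wait_lsf_ready() used to do now lives behind the load_lsfalcon_ucode hook, implemented in pmu_gm20b.c below.
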
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index 28b40b1c..ac19e99c 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -18,6 +18,7 @@
 #include "gk20a/pmu_gk20a.h"
 #include "acr_gm20b.h"
 #include "pmu_gm20b.h"
+#include "hw_gr_gm20b.h"
 
 /*!
  * Structure/object which single register write need to be done during PG init
@@ -190,21 +191,40 @@ int gm20b_pmu_init_acr(struct gk20a *g)
 	return 0;
 }
 
-static void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
+void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
 			void *param, u32 handle, u32 status)
 {
 
 	gk20a_dbg_fn("");
 
 
-	if (msg->msg.acr.acrmsg.falconid == LSF_FALCON_ID_FECS)
-		gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON");
+	gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON");
 
 	gm20b_dbg_pmu("response code = %x\n", msg->msg.acr.acrmsg.falconid);
+	g->ops.pmu.lsfloadedfalconid = msg->msg.acr.acrmsg.falconid;
 	gk20a_dbg_fn("done");
 }
 
-void gm20b_pmu_load_lsf(struct gk20a *g, u8 falcon_id)
+static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout, u32 val)
+{
+	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+	unsigned long delay = GR_FECS_POLL_INTERVAL;
+	u32 reg;
+
+	gk20a_dbg_fn("");
+	reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
+	do {
+		reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
+		if (reg == val)
+			return 0;
+		udelay(delay);
+	} while (time_before(jiffies, end_jiffies) ||
+			!tegra_platform_is_silicon());
+
+	return -ETIMEDOUT;
+}
+
+void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
 {
 	struct pmu_gk20a *pmu = &g->pmu;
 	struct pmu_cmd cmd;
@@ -221,8 +241,7 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u8 falcon_id)
 		sizeof(struct pmu_acr_cmd_bootstrap_falcon);
 	cmd.cmd.acr.bootstrap_falcon.cmd_type =
 		PMU_ACR_CMD_ID_BOOTSTRAP_FALCON;
-	cmd.cmd.acr.bootstrap_falcon.flags =
-		PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
+	cmd.cmd.acr.bootstrap_falcon.flags = flags;
 	cmd.cmd.acr.bootstrap_falcon.falconid = falcon_id;
 	gm20b_dbg_pmu("cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n",
 		falcon_id);
@@ -234,13 +253,45 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u8 falcon_id)
 	return;
 }
 
+int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
+{
+	u32 err = 0;
+	u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
+	unsigned long timeout = gk20a_get_gr_idle_timeout(g);
+
+	/* GM20B PMU supports loading FECS only */
+	if (!(falconidmask == (1 << LSF_FALCON_ID_FECS)))
+		return -EINVAL;
+	/* check whether pmu is ready to bootstrap lsf if not wait for it */
+	if (!g->ops.pmu.lspmuwprinitdone) {
+		pmu_wait_message_cond(&g->pmu,
+				gk20a_get_gr_idle_timeout(g),
+				&g->ops.pmu.lspmuwprinitdone, 1);
+		/* check again if it still not ready indicate an error */
+		if (!g->ops.pmu.lspmuwprinitdone) {
+			gk20a_err(dev_from_gk20a(g),
+				"PMU not ready to load LSF");
+			return -ETIMEDOUT;
+		}
+	}
+	/* load FECS */
+	gk20a_writel(g,
+		gr_fecs_ctxsw_mailbox_clear_r(0), ~0x0);
+	gm20b_pmu_load_lsf(g, LSF_FALCON_ID_FECS, flags);
+	err = pmu_gm20b_ctx_wait_lsf_ready(g, timeout,
+			0x55AA55AA);
+	return err;
+}
+
 void gm20b_init_pmu_ops(struct gpu_ops *gops)
 {
 	if (gops->privsecurity) {
 		gm20b_init_secure_pmu(gops);
 		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
+		gops->pmu.load_lsfalcon_ucode = gm20b_load_falcon_ucode;
 	} else {
 		gk20a_init_pmu_ops(gops);
+		gops->pmu.load_lsfalcon_ucode = NULL;
 		gops->pmu.init_wpr_region = NULL;
 	}
 	gops->pmu.pmu_setup_elpg = gm20b_pmu_setup_elpg;
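
gm20b_load_falcon_ucode() above is the GM20B backend for the new load_lsfalcon_ucode hook: it accepts only a FECS-only mask (per the in-code comment, the GM20B PMU supports loading FECS only), waits for the LS PMU's WPR-init message if it has not arrived yet, posts PMU_ACR_CMD_ID_BOOTSTRAP_FALCON, and then polls the FECS mailbox for the 0x55AA55AA handshake. Callers should treat the hook as optional, since it is set to NULL when privsecurity is off. A minimal, hypothetical caller (not part of the patch):

	/* Hypothetical helper: request a FECS re-bootstrap through the
	 * ops table, if the secure-boot hook is installed. */
	static int bootstrap_fecs(struct gk20a *g)
	{
		if (!g->ops.pmu.load_lsfalcon_ucode)
			return -ENOSYS;
		return g->ops.pmu.load_lsfalcon_ucode(g,
				1 << LSF_FALCON_ID_FECS);
	}
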
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
index 93745498..68f342cc 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.h
@@ -17,7 +17,7 @@
 #define __PMU_GM20B_H_
 
 void gm20b_init_pmu_ops(struct gpu_ops *gops);
-void gm20b_pmu_load_lsf(struct gk20a *g, u8 falcon_id);
+void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags);
 int gm20b_pmu_init_acr(struct gk20a *g);
 
 #endif /*__PMU_GM20B_H_*/