Diffstat (limited to 'drivers/gpu/nvgpu/gv11b')
 drivers/gpu/nvgpu/gv11b/acr_gv11b.c | 206 ------------------------------
 drivers/gpu/nvgpu/gv11b/hal_gv11b.c |   9 ---
 2 files changed, 0 insertions(+), 215 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
index c7b854bf..f5ca144a 100644
--- a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
@@ -47,11 +47,6 @@
 #define gv11b_dbg_pmu(g, fmt, arg...) \
 	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
 
-static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
-{
-	dma_addr->lo |= u64_lo32(value);
-	dma_addr->hi |= u64_hi32(value);
-}
 /*Externs*/
 
 /*Forwards*/
@@ -68,173 +63,6 @@ int gv11b_alloc_blob_space(struct gk20a *g,
 	return err;
 }
 
-/*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
- * start and end are addresses of ucode blob in non-WPR region*/
-int gv11b_bootstrap_hs_flcn(struct gk20a *g)
-{
-	struct mm_gk20a *mm = &g->mm;
-	struct vm_gk20a *vm = mm->pmu.vm;
-	int err = 0;
-	u64 *acr_dmem;
-	u32 img_size_in_bytes = 0;
-	u32 status, size, index;
-	u64 start;
-	struct acr_desc *acr = &g->acr;
-	struct nvgpu_firmware *acr_fw = acr->acr_fw;
-	struct flcn_bl_dmem_desc_v1 *bl_dmem_desc = &acr->bl_dmem_desc_v1;
-	u32 *acr_ucode_header_t210_load;
-	u32 *acr_ucode_data_t210_load;
-
-	start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
-	size = acr->ucode_blob.size;
-
-	gv11b_dbg_pmu(g, "acr ucode blob start %llx\n", start);
-	gv11b_dbg_pmu(g, "acr ucode blob size %x\n", size);
-
-	gv11b_dbg_pmu(g, " ");
-
-	if (!acr_fw) {
-		/*First time init case*/
-		acr_fw = nvgpu_request_firmware(g,
-				GM20B_HSBIN_PMU_UCODE_IMAGE, 0);
-		if (!acr_fw) {
-			nvgpu_err(g, "pmu ucode get fail");
-			return -ENOENT;
-		}
-		acr->acr_fw = acr_fw;
-		acr->hsbin_hdr = (struct bin_hdr *)acr_fw->data;
-		acr->fw_hdr = (struct acr_fw_header *)(acr_fw->data +
-				acr->hsbin_hdr->header_offset);
-		acr_ucode_data_t210_load = (u32 *)(acr_fw->data +
-				acr->hsbin_hdr->data_offset);
-		acr_ucode_header_t210_load = (u32 *)(acr_fw->data +
-				acr->fw_hdr->hdr_offset);
-		img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256);
-
-		gv11b_dbg_pmu(g, "sig dbg offset %u\n",
-				acr->fw_hdr->sig_dbg_offset);
-		gv11b_dbg_pmu(g, "sig dbg size %u\n", acr->fw_hdr->sig_dbg_size);
-		gv11b_dbg_pmu(g, "sig prod offset %u\n",
-				acr->fw_hdr->sig_prod_offset);
-		gv11b_dbg_pmu(g, "sig prod size %u\n",
-				acr->fw_hdr->sig_prod_size);
-		gv11b_dbg_pmu(g, "patch loc %u\n", acr->fw_hdr->patch_loc);
-		gv11b_dbg_pmu(g, "patch sig %u\n", acr->fw_hdr->patch_sig);
-		gv11b_dbg_pmu(g, "header offset %u\n", acr->fw_hdr->hdr_offset);
-		gv11b_dbg_pmu(g, "header size %u\n", acr->fw_hdr->hdr_size);
-
-		/* Lets patch the signatures first.. */
-		if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load,
-			(u32 *)(acr_fw->data +
-					acr->fw_hdr->sig_prod_offset),
-			(u32 *)(acr_fw->data +
-					acr->fw_hdr->sig_dbg_offset),
-			(u32 *)(acr_fw->data +
-					acr->fw_hdr->patch_loc),
-			(u32 *)(acr_fw->data +
-					acr->fw_hdr->patch_sig)) < 0) {
-			nvgpu_err(g, "patch signatures fail");
-			err = -1;
-			goto err_release_acr_fw;
-		}
-		err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
-				&acr->acr_ucode);
-		if (err) {
-			err = -ENOMEM;
-			goto err_release_acr_fw;
-		}
-
-		for (index = 0; index < 9; index++) {
-			gv11b_dbg_pmu(g, "acr_ucode_header_t210_load %u\n",
-				acr_ucode_header_t210_load[index]);
-		}
-
-		acr_dmem = (u64 *)
-			&(((u8 *)acr_ucode_data_t210_load)[
-					acr_ucode_header_t210_load[2]]);
-		acr->acr_dmem_desc_v1 = (struct flcn_acr_desc_v1 *)((u8 *)(
-			acr->acr_ucode.cpu_va) + acr_ucode_header_t210_load[2]);
-		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_start =
-			(start);
-		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_size =
-			size;
-		((struct flcn_acr_desc_v1 *)acr_dmem)->regions.no_regions = 2;
-		((struct flcn_acr_desc_v1 *)acr_dmem)->wpr_offset = 0;
-
-		nvgpu_mem_wr_n(g, &acr->acr_ucode, 0,
-				acr_ucode_data_t210_load, img_size_in_bytes);
-		/*
-		 * In order to execute this binary, we will be using
-		 * a bootloader which will load this image into PMU IMEM/DMEM.
-		 * Fill up the bootloader descriptor for PMU HAL to use..
-		 * TODO: Use standard descriptor which the generic bootloader is
-		 * checked in.
-		 */
-		bl_dmem_desc->signature[0] = 0;
-		bl_dmem_desc->signature[1] = 0;
-		bl_dmem_desc->signature[2] = 0;
-		bl_dmem_desc->signature[3] = 0;
-		bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
-		flcn64_set_dma(&bl_dmem_desc->code_dma_base,
-			acr->acr_ucode.gpu_va);
-		bl_dmem_desc->non_sec_code_off = acr_ucode_header_t210_load[0];
-		bl_dmem_desc->non_sec_code_size = acr_ucode_header_t210_load[1];
-		bl_dmem_desc->sec_code_off = acr_ucode_header_t210_load[5];
-		bl_dmem_desc->sec_code_size = acr_ucode_header_t210_load[6];
-		bl_dmem_desc->code_entry_point = 0; /* Start at 0th offset */
-		flcn64_set_dma(&bl_dmem_desc->data_dma_base,
-			acr->acr_ucode.gpu_va +
-			acr_ucode_header_t210_load[2]);
-		bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
-	} else {
-		acr->acr_dmem_desc_v1->nonwpr_ucode_blob_size = 0;
-	}
-	status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
-	if (status != 0) {
-		err = status;
-		goto err_free_ucode_map;
-	}
-
-	return 0;
-err_free_ucode_map:
-	nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
-err_release_acr_fw:
-	nvgpu_release_firmware(g, acr_fw);
-	acr->acr_fw = NULL;
-
-	return err;
-}
-
-static int bl_bootstrap(struct nvgpu_pmu *pmu,
-	struct flcn_bl_dmem_desc_v1 *pbl_desc, u32 bl_sz)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct mm_gk20a *mm = &g->mm;
-	struct nvgpu_falcon_bl_info bl_info;
-
-	nvgpu_log_fn(g, " ");
-
-	gk20a_writel(g, pwr_falcon_itfen_r(),
-		gk20a_readl(g, pwr_falcon_itfen_r()) |
-		pwr_falcon_itfen_ctxen_enable_f());
-	gk20a_writel(g, pwr_pmu_new_instblk_r(),
-		pwr_pmu_new_instblk_ptr_f(
-		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
-		pwr_pmu_new_instblk_valid_f(1) |
-		(nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?
-		pwr_pmu_new_instblk_target_sys_coh_f() :
-		pwr_pmu_new_instblk_target_sys_ncoh_f()));
-
-	bl_info.bl_src = g->acr.hsbl_ucode.cpu_va;
-	bl_info.bl_desc = (u8 *)pbl_desc;
-	bl_info.bl_desc_size = sizeof(struct flcn_bl_dmem_desc_v1);
-	bl_info.bl_size = bl_sz;
-	bl_info.bl_start_tag = g->acr.pmu_hsbl_desc->bl_start_tag;
-	nvgpu_flcn_bl_bootstrap(&g->pmu_flcn, &bl_info);
-
-	return 0;
-}
-
 void gv11b_setup_apertures(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
@@ -263,37 +91,3 @@ void gv11b_setup_apertures(struct gk20a *g)
 		pwr_fbif_transcfg_mem_type_physical_f() |
 		pwr_fbif_transcfg_target_noncoherent_sysmem_f());
 }
-
-int gv11b_init_pmu_setup_hw1(struct gk20a *g,
-		void *desc, u32 bl_sz)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	int err;
-
-	nvgpu_log_fn(g, " ");
-
-	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	nvgpu_flcn_reset(pmu->flcn);
-	pmu->isr_enabled = true;
-	nvgpu_mutex_release(&pmu->isr_mutex);
-
-	if (g->ops.pmu.setup_apertures) {
-		g->ops.pmu.setup_apertures(g);
-	}
-	if (g->ops.pmu.update_lspmu_cmdline_args) {
-		g->ops.pmu.update_lspmu_cmdline_args(g);
-	}
-
-	/*disable irqs for hs falcon booting as we will poll for halt*/
-	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	g->ops.pmu.pmu_enable_irq(pmu, false);
-	pmu->isr_enabled = false;
-	nvgpu_mutex_release(&pmu->isr_mutex);
-	/*Clearing mailbox register used to reflect capabilities*/
-	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
-	err = bl_bootstrap(pmu, desc, bl_sz);
-	if (err) {
-		return err;
-	}
-	return 0;
-}
diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
index 9444002b..0d9f65bf 100644
--- a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
@@ -877,17 +877,10 @@ int gv11b_init_hal(struct gk20a *g)
 	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
 		/* Add in ops from gm20b acr */
 		gops->pmu.prepare_ucode = gp106_prepare_ucode_blob,
-		gops->pmu.pmu_setup_hw_and_bootstrap = gv11b_bootstrap_hs_flcn,
-		gops->pmu.get_wpr = gm20b_wpr_info,
-		gops->pmu.alloc_blob_space = gv11b_alloc_blob_space,
 		gops->pmu.pmu_populate_loader_cfg =
 			gp106_pmu_populate_loader_cfg,
 		gops->pmu.flcn_populate_bl_dmem_desc =
 			gp106_flcn_populate_bl_dmem_desc,
-		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
-		gops->pmu.falcon_clear_halt_interrupt_status =
-			clear_halt_interrupt_status,
-		gops->pmu.init_falcon_setup_hw = gv11b_init_pmu_setup_hw1,
 		gops->pmu.update_lspmu_cmdline_args =
 			gm20b_update_lspmu_cmdline_args;
 		gops->pmu.setup_apertures = gv11b_setup_apertures;
@@ -901,11 +894,9 @@ int gv11b_init_hal(struct gk20a *g)
 	} else {
 		/* Inherit from gk20a */
 		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
-		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,
 
 		gops->pmu.load_lsfalcon_ucode = NULL;
 		gops->pmu.init_wpr_region = NULL;
-		gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
 
 		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
 	}