summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
diff options
context:
space:
mode:
authorDeepak Goyal <dgoyal@nvidia.com>2017-08-30 05:33:25 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-08-31 04:55:06 -0400
commitc094ea161785a8c00bb2dc8c55e1a2bb8ffbcfc7 (patch)
treec5c257b2de9391a79a2cb32eda65c1d18dfa4d2a /drivers/gpu/nvgpu/gv11b/acr_gv11b.c
parent1ac8f6477df7bd1e1b1c5922b1916ae6450c07ad (diff)
gpu: nvgpu: gv11b: Secure boot support.
This patch adds Secure boot support for T194. JIRA GPUT19X-5 Change-Id: If78e5e0ecfa58bcac132716c7f2c155f21899027 Signed-off-by: Deepak Goyal <dgoyal@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1514558 Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com> Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/acr_gv11b.c')
-rw-r--r--drivers/gpu/nvgpu/gv11b/acr_gv11b.c285
1 files changed, 285 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
new file mode 100644
index 00000000..26c5a891
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
@@ -0,0 +1,285 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#ifdef CONFIG_DEBUG_FS
15#include <linux/debugfs.h>
16#endif
17
18#include <nvgpu/types.h>
19#include <linux/platform/tegra/mc.h>
20
21#include <nvgpu/dma.h>
22#include <nvgpu/gmmu.h>
23#include <nvgpu/timers.h>
24#include <nvgpu/nvgpu_common.h>
25#include <nvgpu/kmem.h>
26#include <nvgpu/nvgpu_mem.h>
27#include <nvgpu/acr/nvgpu_acr.h>
28#include <nvgpu/firmware.h>
29
30#include "gk20a/gk20a.h"
31#include "acr_gv11b.h"
32#include "pmu_gv11b.h"
33#include "gk20a/pmu_gk20a.h"
34#include "gm20b/mm_gm20b.h"
35#include "gm20b/acr_gm20b.h"
36#include "gp106/acr_gp106.h"
37
38#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
39
40/*Defines*/
41#define gv11b_dbg_pmu(fmt, arg...) \
42 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
43
44static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
45{
46 dma_addr->lo |= u64_lo32(value);
47 dma_addr->hi |= u64_hi32(value);
48}
49/*Externs*/
50
51/*Forwards*/
52
53/*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
54 * start and end are addresses of ucode blob in non-WPR region*/
55int gv11b_bootstrap_hs_flcn(struct gk20a *g)
56{
57 struct mm_gk20a *mm = &g->mm;
58 struct vm_gk20a *vm = mm->pmu.vm;
59 int err = 0;
60 u64 *acr_dmem;
61 u32 img_size_in_bytes = 0;
62 u32 status, size, index;
63 u64 start;
64 struct acr_desc *acr = &g->acr;
65 struct nvgpu_firmware *acr_fw = acr->acr_fw;
66 struct flcn_bl_dmem_desc_v1 *bl_dmem_desc = &acr->bl_dmem_desc_v1;
67 u32 *acr_ucode_header_t210_load;
68 u32 *acr_ucode_data_t210_load;
69
70 start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
71 size = acr->ucode_blob.size;
72
73 gv11b_dbg_pmu("acr ucode blob start %llx\n", start);
74 gv11b_dbg_pmu("acr ucode blob size %x\n", size);
75
76 gv11b_dbg_pmu("");
77
78 if (!acr_fw) {
79 /*First time init case*/
80 acr_fw = nvgpu_request_firmware(g,
81 GM20B_HSBIN_PMU_UCODE_IMAGE, 0);
82 if (!acr_fw) {
83 nvgpu_err(g, "pmu ucode get fail");
84 return -ENOENT;
85 }
86 acr->acr_fw = acr_fw;
87 acr->hsbin_hdr = (struct bin_hdr *)acr_fw->data;
88 acr->fw_hdr = (struct acr_fw_header *)(acr_fw->data +
89 acr->hsbin_hdr->header_offset);
90 acr_ucode_data_t210_load = (u32 *)(acr_fw->data +
91 acr->hsbin_hdr->data_offset);
92 acr_ucode_header_t210_load = (u32 *)(acr_fw->data +
93 acr->fw_hdr->hdr_offset);
94 img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256);
95
96 gv11b_dbg_pmu("sig dbg offset %u\n",
97 acr->fw_hdr->sig_dbg_offset);
98 gv11b_dbg_pmu("sig dbg size %u\n", acr->fw_hdr->sig_dbg_size);
99 gv11b_dbg_pmu("sig prod offset %u\n",
100 acr->fw_hdr->sig_prod_offset);
101 gv11b_dbg_pmu("sig prod size %u\n",
102 acr->fw_hdr->sig_prod_size);
103 gv11b_dbg_pmu("patch loc %u\n", acr->fw_hdr->patch_loc);
104 gv11b_dbg_pmu("patch sig %u\n", acr->fw_hdr->patch_sig);
105 gv11b_dbg_pmu("header offset %u\n", acr->fw_hdr->hdr_offset);
106 gv11b_dbg_pmu("header size %u\n", acr->fw_hdr->hdr_size);
107
108 /* Lets patch the signatures first.. */
109 if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load,
110 (u32 *)(acr_fw->data +
111 acr->fw_hdr->sig_prod_offset),
112 (u32 *)(acr_fw->data +
113 acr->fw_hdr->sig_dbg_offset),
114 (u32 *)(acr_fw->data +
115 acr->fw_hdr->patch_loc),
116 (u32 *)(acr_fw->data +
117 acr->fw_hdr->patch_sig)) < 0) {
118 nvgpu_err(g, "patch signatures fail");
119 err = -1;
120 goto err_release_acr_fw;
121 }
122 err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
123 &acr->acr_ucode);
124 if (err) {
125 err = -ENOMEM;
126 goto err_release_acr_fw;
127 }
128
129 for (index = 0; index < 9; index++)
130 gv11b_dbg_pmu("acr_ucode_header_t210_load %u\n",
131 acr_ucode_header_t210_load[index]);
132
133 acr_dmem = (u64 *)
134 &(((u8 *)acr_ucode_data_t210_load)[
135 acr_ucode_header_t210_load[2]]);
136 acr->acr_dmem_desc_v1 = (struct flcn_acr_desc_v1 *)((u8 *)(
137 acr->acr_ucode.cpu_va) + acr_ucode_header_t210_load[2]);
138 ((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_start =
139 (start);
140 ((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_size =
141 size;
142 ((struct flcn_acr_desc_v1 *)acr_dmem)->regions.no_regions = 2;
143 ((struct flcn_acr_desc_v1 *)acr_dmem)->wpr_offset = 0;
144
145 nvgpu_mem_wr_n(g, &acr->acr_ucode, 0,
146 acr_ucode_data_t210_load, img_size_in_bytes);
147 /*
148 * In order to execute this binary, we will be using
149 * a bootloader which will load this image into PMU IMEM/DMEM.
150 * Fill up the bootloader descriptor for PMU HAL to use..
151 * TODO: Use standard descriptor which the generic bootloader is
152 * checked in.
153 */
154 bl_dmem_desc->signature[0] = 0;
155 bl_dmem_desc->signature[1] = 0;
156 bl_dmem_desc->signature[2] = 0;
157 bl_dmem_desc->signature[3] = 0;
158 bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
159 flcn64_set_dma(&bl_dmem_desc->code_dma_base,
160 acr->acr_ucode.gpu_va);
161 bl_dmem_desc->non_sec_code_off = acr_ucode_header_t210_load[0];
162 bl_dmem_desc->non_sec_code_size = acr_ucode_header_t210_load[1];
163 bl_dmem_desc->sec_code_off = acr_ucode_header_t210_load[5];
164 bl_dmem_desc->sec_code_size = acr_ucode_header_t210_load[6];
165 bl_dmem_desc->code_entry_point = 0; /* Start at 0th offset */
166 flcn64_set_dma(&bl_dmem_desc->data_dma_base,
167 acr->acr_ucode.gpu_va +
168 acr_ucode_header_t210_load[2]);
169 bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
170 } else
171 acr->acr_dmem_desc->nonwpr_ucode_blob_size = 0;
172 status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
173 if (status != 0) {
174 err = status;
175 goto err_free_ucode_map;
176 }
177
178 return 0;
179err_free_ucode_map:
180 nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
181err_release_acr_fw:
182 nvgpu_release_firmware(g, acr_fw);
183 acr->acr_fw = NULL;
184
185 return err;
186}
187
/*
 * Copy the HS bootloader into the PMU falcon and start it.
 *
 * Enables the falcon context interface, binds the PMU instance block,
 * copies the bootloader DMEM descriptor to falcon DMEM offset 0, loads
 * the bootloader image at the top of IMEM, and bootstraps the falcon at
 * the bootloader's tagged virtual start address.
 *
 * @pmu:      PMU instance being booted.
 * @pbl_desc: bootloader DMEM interface descriptor handed to the BL.
 * @bl_sz:    size of the bootloader image in bytes.
 *
 * Always returns 0.
 */
static int bl_bootstrap(struct nvgpu_pmu *pmu,
	struct flcn_bl_dmem_desc_v1 *pbl_desc, u32 bl_sz)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct acr_desc *acr = &g->acr;
	struct mm_gk20a *mm = &g->mm;
	u32 virt_addr = 0;
	struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc;
	u32 dst;

	gk20a_dbg_fn("");

	/* Enable the falcon context interface before binding the
	 * instance block below. */
	gk20a_writel(g, pwr_falcon_itfen_r(),
			gk20a_readl(g, pwr_falcon_itfen_r()) |
			pwr_falcon_itfen_ctxen_enable_f());
	/* Bind the PMU instance block: pointer is in 4K units (>> 12),
	 * target is non-coherent sysmem. */
	gk20a_writel(g, pwr_pmu_new_instblk_r(),
			pwr_pmu_new_instblk_ptr_f(
			gk20a_mm_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
			pwr_pmu_new_instblk_valid_f(1) |
			pwr_pmu_new_instblk_target_sys_ncoh_f());

	/*copy bootloader interface structure to dmem*/
	nvgpu_flcn_copy_to_dmem(pmu->flcn, 0, (u8 *)pbl_desc,
		sizeof(struct flcn_bl_dmem_desc_v1), 0);

	/* copy bootloader to TOP of IMEM; hwcfg reports IMEM size in
	 * 256-byte blocks, hence the << 8 to get bytes. */
	dst = (pwr_falcon_hwcfg_imem_size_v(
			gk20a_readl(g, pwr_falcon_hwcfg_r())) << 8) - bl_sz;

	nvgpu_flcn_copy_to_imem(pmu->flcn, dst,
		(u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
		pmu_bl_gm10x_desc->bl_start_tag);

	gv11b_dbg_pmu("Before starting falcon with BL\n");

	/* Falcon virtual start address: IMEM tag is in 256-byte units. */
	virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8;

	nvgpu_flcn_bootstrap(pmu->flcn, virt_addr);

	return 0;
}
229
/*
 * Prepare PMU hardware state and launch the HS bootloader.
 *
 * Resets the PMU falcon, programs the FBIF DMA apertures (virtual plus
 * the three physical targets), copies the PMU command-line arguments to
 * DMEM, disables PMU interrupts (the HS boot is polled for halt instead),
 * clears the capabilities mailbox and finally runs bl_bootstrap().
 *
 * @g:     GPU driver context.
 * @desc:  bootloader DMEM descriptor (struct flcn_bl_dmem_desc_v1 *),
 *         passed through to bl_bootstrap().
 * @bl_sz: size of the bootloader image in bytes.
 *
 * Returns 0 on success, negative error code from bl_bootstrap() on
 * failure.
 */
int gv11b_init_pmu_setup_hw1(struct gk20a *g,
		void *desc, u32 bl_sz)
{

	struct nvgpu_pmu *pmu = &g->pmu;
	int err;

	gk20a_dbg_fn("");

	/* Reset under isr_mutex so the ISR state flag stays consistent
	 * with the falcon state. */
	nvgpu_mutex_acquire(&pmu->isr_mutex);
	nvgpu_flcn_reset(pmu->flcn);
	pmu->isr_enabled = true;
	nvgpu_mutex_release(&pmu->isr_mutex);

	/* setup apertures - virtual */
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT),
			pwr_fbif_transcfg_mem_type_virtual_f());
	/* setup apertures - physical */
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_VID),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_COH),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_coherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_NCOH),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());

	/*Copying pmu cmdline args*/
	g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu,
		g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));
	g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode(pmu, 1);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
		pmu, GK20A_PMU_TRACE_BUFSIZE);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
		pmu, GK20A_PMU_DMAIDX_VIRT);
	nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
	/*disable irqs for hs falcon booting as we will poll for halt*/
	nvgpu_mutex_acquire(&pmu->isr_mutex);
	pmu_enable_irq(pmu, false);
	pmu->isr_enabled = false;
	nvgpu_mutex_release(&pmu->isr_mutex);
	/*Clearing mailbox register used to reflect capabilities*/
	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
	err = bl_bootstrap(pmu, desc, bl_sz);
	if (err)
		return err;
	return 0;
}
285