summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gp106/sec2_gp106.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gp106/sec2_gp106.c')
-rw-r--r-- drivers/gpu/nvgpu/gp106/sec2_gp106.c | 116
1 files changed, 4 insertions, 112 deletions
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index f49d56c4..06f62a99 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -90,57 +90,6 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
90 return completion; 90 return completion;
91} 91}
92 92
93void sec2_copy_to_dmem(struct nvgpu_pmu *pmu,
94 u32 dst, u8 *src, u32 size, u8 port)
95{
96 struct gk20a *g = gk20a_from_pmu(pmu);
97 u32 i, words, bytes;
98 u32 data, addr_mask;
99 u32 *src_u32 = (u32*)src;
100
101 if (size == 0) {
102 nvgpu_err(g, "size is zero");
103 return;
104 }
105
106 if (dst & 0x3) {
107 nvgpu_err(g, "dst (0x%08x) not 4-byte aligned", dst);
108 return;
109 }
110
111 nvgpu_mutex_acquire(&pmu->pmu_copy_lock);
112
113 words = size >> 2;
114 bytes = size & 0x3;
115
116 addr_mask = psec_falcon_dmemc_offs_m() |
117 psec_falcon_dmemc_blk_m();
118
119 dst &= addr_mask;
120
121 gk20a_writel(g, psec_falcon_dmemc_r(port),
122 dst | psec_falcon_dmemc_aincw_f(1));
123
124 for (i = 0; i < words; i++)
125 gk20a_writel(g, psec_falcon_dmemd_r(port), src_u32[i]);
126
127 if (bytes > 0) {
128 data = 0;
129 for (i = 0; i < bytes; i++)
130 ((u8 *)&data)[i] = src[(words << 2) + i];
131 gk20a_writel(g, psec_falcon_dmemd_r(port), data);
132 }
133
134 data = gk20a_readl(g, psec_falcon_dmemc_r(port)) & addr_mask;
135 size = ALIGN(size, 4);
136 if (data != dst + size) {
137 nvgpu_err(g, "copy failed. bytes written %d, expected %d",
138 data - dst, size);
139 }
140 nvgpu_mutex_release(&pmu->pmu_copy_lock);
141 return;
142}
143
144int bl_bootstrap_sec2(struct nvgpu_pmu *pmu, 93int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
145 void *desc, u32 bl_sz) 94 void *desc, u32 bl_sz)
146{ 95{
@@ -184,7 +133,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
184 psec_falcon_dmemc_offs_f(0) | 133 psec_falcon_dmemc_offs_f(0) |
185 psec_falcon_dmemc_blk_f(0) | 134 psec_falcon_dmemc_blk_f(0) |
186 psec_falcon_dmemc_aincw_f(1)); 135 psec_falcon_dmemc_aincw_f(1));
187 sec2_copy_to_dmem(pmu, 0, (u8 *)desc, 136 nvgpu_flcn_copy_to_dmem(&g->sec2_flcn, 0, (u8 *)desc,
188 sizeof(struct flcn_bl_dmem_desc), 0); 137 sizeof(struct flcn_bl_dmem_desc), 0);
189 /*TODO This had to be copied to bl_desc_dmem_load_off, but since 138 /*TODO This had to be copied to bl_desc_dmem_load_off, but since
190 * this is 0, so ok for now*/ 139 * this is 0, so ok for now*/
@@ -225,61 +174,6 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
225 return 0; 174 return 0;
226} 175}
227 176
228void sec_enable_irq(struct nvgpu_pmu *pmu, bool enable)
229{
230 struct gk20a *g = gk20a_from_pmu(pmu);
231
232 gk20a_dbg_fn("");
233
234 gk20a_writel(g, psec_falcon_irqmclr_r(),
235 psec_falcon_irqmclr_gptmr_f(1) |
236 psec_falcon_irqmclr_wdtmr_f(1) |
237 psec_falcon_irqmclr_mthd_f(1) |
238 psec_falcon_irqmclr_ctxsw_f(1) |
239 psec_falcon_irqmclr_halt_f(1) |
240 psec_falcon_irqmclr_exterr_f(1) |
241 psec_falcon_irqmclr_swgen0_f(1) |
242 psec_falcon_irqmclr_swgen1_f(1) |
243 psec_falcon_irqmclr_ext_f(0xff));
244
245 if (enable) {
246 /* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
247 gk20a_writel(g, psec_falcon_irqdest_r(),
248 psec_falcon_irqdest_host_gptmr_f(0) |
249 psec_falcon_irqdest_host_wdtmr_f(1) |
250 psec_falcon_irqdest_host_mthd_f(0) |
251 psec_falcon_irqdest_host_ctxsw_f(0) |
252 psec_falcon_irqdest_host_halt_f(1) |
253 psec_falcon_irqdest_host_exterr_f(0) |
254 psec_falcon_irqdest_host_swgen0_f(1) |
255 psec_falcon_irqdest_host_swgen1_f(0) |
256 psec_falcon_irqdest_host_ext_f(0xff) |
257 psec_falcon_irqdest_target_gptmr_f(1) |
258 psec_falcon_irqdest_target_wdtmr_f(0) |
259 psec_falcon_irqdest_target_mthd_f(0) |
260 psec_falcon_irqdest_target_ctxsw_f(0) |
261 psec_falcon_irqdest_target_halt_f(0) |
262 psec_falcon_irqdest_target_exterr_f(0) |
263 psec_falcon_irqdest_target_swgen0_f(0) |
264 psec_falcon_irqdest_target_swgen1_f(1) |
265 psec_falcon_irqdest_target_ext_f(0xff));
266
267 /* 0=disable, 1=enable */
268 gk20a_writel(g, psec_falcon_irqmset_r(),
269 psec_falcon_irqmset_gptmr_f(1) |
270 psec_falcon_irqmset_wdtmr_f(1) |
271 psec_falcon_irqmset_mthd_f(0) |
272 psec_falcon_irqmset_ctxsw_f(0) |
273 psec_falcon_irqmset_halt_f(1) |
274 psec_falcon_irqmset_exterr_f(1) |
275 psec_falcon_irqmset_swgen0_f(1) |
276 psec_falcon_irqmset_swgen1_f(1));
277
278 }
279
280 gk20a_dbg_fn("done");
281}
282
283void init_pmu_setup_hw1(struct gk20a *g) 177void init_pmu_setup_hw1(struct gk20a *g)
284{ 178{
285 struct mm_gk20a *mm = &g->mm; 179 struct mm_gk20a *mm = &g->mm;
@@ -330,7 +224,7 @@ void init_pmu_setup_hw1(struct gk20a *g)
330 224
331} 225}
332 226
333static int gp106_sec2_reset(struct gk20a *g) 227int gp106_sec2_reset(struct gk20a *g)
334{ 228{
335 nvgpu_log_fn(g, " "); 229 nvgpu_log_fn(g, " ");
336 230
@@ -351,9 +245,9 @@ int init_sec2_setup_hw1(struct gk20a *g,
351 int err; 245 int err;
352 u32 data = 0; 246 u32 data = 0;
353 247
354 gk20a_dbg_fn(""); 248 nvgpu_log_fn(g, " ");
355 249
356 gp106_sec2_reset(g); 250 nvgpu_flcn_reset(&g->sec2_flcn);
357 251
358 data = gk20a_readl(g, psec_fbif_ctl_r()); 252 data = gk20a_readl(g, psec_fbif_ctl_r());
359 data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f(); 253 data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f();
@@ -380,8 +274,6 @@ int init_sec2_setup_hw1(struct gk20a *g,
380 psec_fbif_transcfg_mem_type_physical_f() | 274 psec_fbif_transcfg_mem_type_physical_f() |
381 psec_fbif_transcfg_target_noncoherent_sysmem_f()); 275 psec_fbif_transcfg_target_noncoherent_sysmem_f());
382 276
383 /*disable irqs for hs falcon booting as we will poll for halt*/
384 sec_enable_irq(pmu, false);
385 err = bl_bootstrap_sec2(pmu, desc, bl_sz); 277 err = bl_bootstrap_sec2(pmu, desc, bl_sz);
386 if (err) 278 if (err)
387 return err; 279 return err;