author		Leo Liu <leo.liu@amd.com>	2016-12-28 11:57:38 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2017-05-24 17:41:23 -0400
commit		88b5af70e29edda095f3d26edcd376cb1688cf70
tree		35f6489b729c3d76808ffcd734c431b792baf62b /drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
parent		2d531d81d0fb906ee74a5d5f8c04857849efa785
drm/amdgpu: add vcn ip block functions (v2)
Fill in the core VCN 1.0 setup functionality.
v2: squash in fixup (Alex)
Signed-off-by: Leo Liu <leo.liu@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c')
 -rw-r--r--	drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c	417
 1 file changed, 417 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
new file mode 100644
index 000000000000..744268f002d2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -0,0 +1,417 @@
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vega10/soc15ip.h"
#include "raven1/VCN/vcn_1_0_offset.h"
#include "raven1/VCN/vcn_1_0_sh_mask.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "raven1/MMHUB/mmhub_9_1_offset.h"
#include "raven1/MMHUB/mmhub_9_1_sh_mask.h"

static int vcn_v1_0_start(struct amdgpu_device *adev);
static int vcn_v1_0_stop(struct amdgpu_device *adev);

/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
        return 0;
}

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* VCN TRAP */
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
        if (r)
                return r;

        r = amdgpu_vcn_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_vcn_resume(adev);
        if (r)
                return r;

        return r;
}

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vcn_suspend(adev);
        if (r)
                return r;

        r = amdgpu_vcn_sw_fini(adev);

        return r;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
        int r;

        r = vcn_v1_0_start(adev);
        if (r)
                goto done;

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

done:
        if (!r)
                DRM_INFO("VCN decode initialized successfully.\n");

        return r;
}

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
        int r;

        r = vcn_v1_0_stop(adev);
        if (r)
                return r;

        ring->ready = false;

        return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vcn_v1_0_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_vcn_suspend(adev);

        return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vcn_resume(adev);
        if (r)
                return r;

        r = vcn_v1_0_hw_init(adev);

        return r;
}

/**
 * vcn_v1_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.gpu_addr));
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.gpu_addr));

        /* Current FW has no signed header, but will be added later on */
        /* offset = AMDGPU_VCN_FIRMWARE_OFFSET; */
        offset = 0;
        size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), offset >> 3);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);

        offset += size;
        size = AMDGPU_VCN_HEAP_SIZE;
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), offset >> 3);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), size);

        offset += size;
        size = AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), offset >> 3);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), size);

        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
                        adev->gfx.config.gb_addr_config);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
                        adev->gfx.config.gb_addr_config);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
                        adev->gfx.config.gb_addr_config);
}

/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
        uint32_t lmi_swap_cntl;
        int i, j, r;

        /* disable byte swapping */
        lmi_swap_cntl = 0;

        vcn_v1_0_mc_resume(adev);

        /* disable clock gating */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0,
                        ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

        /* disable interrupt */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
                        ~UVD_MASTINT_EN__VCPU_EN_MASK);

        /* stall UMC and register bus before resetting VCPU */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
                        UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
                        ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
                        UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                        UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
                        UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                        UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
                        UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                        UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
                        UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                        UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* initialize VCN memory controller */
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
                        (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
                        UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                        UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                        UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
                        UVD_LMI_CTRL__REQ_MODE_MASK |
                        0x00100000L);

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
#endif
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), lmi_swap_cntl);

        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
                        UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
                        UVD_VCPU_CNTL__CLK_EN_MASK);

        /* enable UMC */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
                        ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

        /* boot up the VCPU */
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS));
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
                WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
                                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                                ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
                                ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("VCN decode not responding, giving up!!!\n");
                return r;
        }
        /* enable master interrupt */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
                        (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
                        ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

        /* clear bit 4 of VCN_STATUS */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
                        ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

        return 0;
}

/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
                        UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
                        ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
        mdelay(1);

        /* put VCPU into reset */
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
                        UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
                        ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

        return 0;
}

static int vcn_v1_0_set_clockgating_state(void *handle,
                enum amd_clockgating_state state)
{
        /* needed for driver unload */
        return 0;
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
        .name = "vcn_v1_0",
        .early_init = vcn_v1_0_early_init,
        .late_init = NULL,
        .sw_init = vcn_v1_0_sw_init,
        .sw_fini = vcn_v1_0_sw_fini,
        .hw_init = vcn_v1_0_hw_init,
        .hw_fini = vcn_v1_0_hw_fini,
        .suspend = vcn_v1_0_suspend,
        .resume = vcn_v1_0_resume,
        .is_idle = NULL /* vcn_v1_0_is_idle */,
        .wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
        .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
        .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
        .soft_reset = NULL /* vcn_v1_0_soft_reset */,
        .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
        .set_clockgating_state = vcn_v1_0_set_clockgating_state,
        .set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
};
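
For orientation (not part of the patch above): the amd_ip_funcs table at the end of the file is the callback interface the amdgpu driver core invokes during init, suspend/resume and teardown. To actually be used, such a table is typically wrapped in a struct amdgpu_ip_block_version and added to the SoC's IP block list during SoC-level setup. The sketch below shows what that wiring usually looks like; the symbol name vcn_v1_0_ip_block and the AMD_IP_BLOCK_TYPE_VCN enumerator are assumptions for illustration and are not taken from this diff.

/* Hedged sketch, not part of this commit: how the ip_funcs table above
 * would typically be exposed to the rest of the driver so the SoC code
 * can register the VCN 1.0 block.  Names marked as assumed are not
 * confirmed by this patch.
 */
const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_VCN,  /* assumed IP block type for VCN */
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &vcn_v1_0_ip_funcs,
};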