diff options
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 810 |
1 files changed, 810 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c new file mode 100644 index 000000000000..f59942d5c50e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
@@ -0,0 +1,810 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König <christian.koenig@amd.com> | ||
23 | */ | ||
24 | |||
25 | #include <linux/firmware.h> | ||
26 | #include <drm/drmP.h> | ||
27 | #include "amdgpu.h" | ||
28 | #include "amdgpu_uvd.h" | ||
29 | #include "vid.h" | ||
30 | #include "uvd/uvd_6_0_d.h" | ||
31 | #include "uvd/uvd_6_0_sh_mask.h" | ||
32 | #include "oss/oss_2_0_d.h" | ||
33 | #include "oss/oss_2_0_sh_mask.h" | ||
34 | |||
/* Forward declarations for helpers that are referenced before their
 * definitions later in this file.
 */
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
39 | |||
/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	/* The local is not dead: the RREG32() macro implicitly uses 'adev'. */
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}
53 | |||
/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	/* The local is not dead: the RREG32() macro implicitly uses 'adev'. */
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}
67 | |||
/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	/* The local is not dead: the WREG32() macro implicitly uses 'adev'. */
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}
81 | |||
/**
 * uvd_v6_0_early_init - early IP block init
 *
 * @adev: amdgpu_device pointer
 *
 * Install the UVD 6.0 ring and interrupt function tables before
 * the sw/hw init callbacks run.  Always succeeds.
 */
static int uvd_v6_0_early_init(struct amdgpu_device *adev)
{
	/* The two setups are independent of each other. */
	uvd_v6_0_set_irq_funcs(adev);
	uvd_v6_0_set_ring_funcs(adev);

	return 0;
}
89 | |||
90 | static int uvd_v6_0_sw_init(struct amdgpu_device *adev) | ||
91 | { | ||
92 | struct amdgpu_ring *ring; | ||
93 | int r; | ||
94 | |||
95 | /* UVD TRAP */ | ||
96 | r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq); | ||
97 | if (r) | ||
98 | return r; | ||
99 | |||
100 | r = amdgpu_uvd_sw_init(adev); | ||
101 | if (r) | ||
102 | return r; | ||
103 | |||
104 | r = amdgpu_uvd_resume(adev); | ||
105 | if (r) | ||
106 | return r; | ||
107 | |||
108 | ring = &adev->uvd.ring; | ||
109 | sprintf(ring->name, "uvd"); | ||
110 | r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf, | ||
111 | &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); | ||
112 | |||
113 | return r; | ||
114 | } | ||
115 | |||
/**
 * uvd_v6_0_sw_fini - software teardown
 *
 * @adev: amdgpu_device pointer
 *
 * Suspend the UVD block and release the common UVD software state.
 * Returns 0 on success, negative error code on failure.
 */
static int uvd_v6_0_sw_fini(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	/* Last call in the function: return its result directly instead of
	 * the redundant "if (r) return r; return r;" pattern.
	 */
	return amdgpu_uvd_sw_fini(adev);
}
130 | |||
/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing.
 * After the ring test passes, a small command sequence is submitted to
 * program the semaphore timeout registers and enable the semaphore unit.
 * Returns 0 on success, negative error code on failure.
 */
static int uvd_v6_0_hw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	r = uvd_v6_0_start(adev);
	if (r)
		goto done;

	/* Mark the ring usable before the test; clear the flag again if
	 * the test fails so nothing gets submitted to a dead ring.
	 */
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	/* 10 dwords: five register-write packets of two dwords each */
	r = amdgpu_ring_lock(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_unlock_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}
188 | |||
189 | /** | ||
190 | * uvd_v6_0_hw_fini - stop the hardware block | ||
191 | * | ||
192 | * @adev: amdgpu_device pointer | ||
193 | * | ||
194 | * Stop the UVD block, mark ring as not ready any more | ||
195 | */ | ||
196 | static int uvd_v6_0_hw_fini(struct amdgpu_device *adev) | ||
197 | { | ||
198 | struct amdgpu_ring *ring = &adev->uvd.ring; | ||
199 | |||
200 | uvd_v6_0_stop(adev); | ||
201 | ring->ready = false; | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
/**
 * uvd_v6_0_suspend - suspend the UVD IP block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the hardware, then suspend the common UVD software state
 * (firmware/context save handled by amdgpu_uvd_suspend()).
 * Returns 0 on success, negative error code on failure.
 */
static int uvd_v6_0_suspend(struct amdgpu_device *adev)
{
	int r;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	/* Last call in the function: return its result directly instead of
	 * the redundant "if (r) return r; return r;" pattern.
	 */
	return amdgpu_uvd_suspend(adev);
}
220 | |||
/**
 * uvd_v6_0_resume - resume the UVD IP block
 *
 * @adev: amdgpu_device pointer
 *
 * Restore the common UVD software state, then re-initialize and test
 * the hardware.  Returns 0 on success, negative error code on failure.
 */
static int uvd_v6_0_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	/* Last call in the function: return its result directly instead of
	 * the redundant "if (r) return r; return r;" pattern.
	 */
	return uvd_v6_0_hw_init(adev);
}
235 | |||
/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets: the 64-bit base of
 * the UVD BO, followed by three consecutive regions (firmware, stack,
 * heap) laid out back to back.  Offsets are programmed in units of
 * 8 bytes (hence the ">> 3") — presumably a hardware requirement;
 * sizes are in bytes.
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	/* region 0: firmware image (page aligned, +4 for a trailing word) */
	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	/* region 1: VCPU stack, directly after the firmware */
	offset += size;
	size = AMDGPU_UVD_STACK_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	/* region 2: VCPU heap, directly after the stack */
	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
}
269 | |||
/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block: program the memory controller, reset
 * and boot the VCPU (with up to 10 reset retries), then configure and
 * enable the ring buffer controller (RBC).
 * Returns 0 on success, -1 if the VCPU never reports ready.
 *
 * NOTE: the register write ordering and mdelay()s below follow the
 * hardware bring-up sequence and must not be reordered.
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable clock gating */
	WREG32(mmUVD_CGC_GATE, 0);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
		(1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	/* MPC mux setup — magic values from the hardware programming guide;
	 * TODO confirm against register spec.
	 */
	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	/* Up to 10 boot attempts; each attempt polls UVD_STATUS bit 1
	 * (VCPU ready, per the "status & 2" check) for up to 100 * 10ms
	 * before pulsing the VCPU reset and trying again.
	 */
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	/* RBC setup: buffer size from the ring, fetch disabled while the
	 * pointers are being initialized (RB_NO_FETCH=1, cleared at the end).
	 */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	/* re-enable command fetching now that pointers are consistent */
	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}
415 | |||
/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block: idle the ring buffer controller, put the VCPU
 * into reset and gate its clock.  The register write ordering mirrors
 * the start sequence and must not be changed.
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}
442 | |||
/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: sequence number of the fence
 * @write64bit: must be false; 64-bit fences are not supported here
 *
 * Write a fence and a trap command to the ring.  The first packet
 * group writes @seq to @addr (command 0 = fence write); the second
 * issues command 2 (trap) so an interrupt fires on completion.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     bool write64bit)
{
	/* only 32-bit sequence writes are implemented */
	WARN_ON(write64bit);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	/* only 40-bit addresses: upper byte of the high dword */
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
472 | |||
/**
 * uvd_v6_0_ring_emit_semaphore - emit semaphore command
 *
 * @ring: amdgpu_ring pointer
 * @semaphore: semaphore to emit commands for
 * @emit_wait: true if we should emit a wait command
 *
 * Emit a semaphore command (either wait or signal) to the UVD ring.
 * The 64-bit semaphore address is split into two 20-bit register
 * fields in 8-byte units.  Always returns true (command emitted).
 */
static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring,
					 struct amdgpu_semaphore *semaphore,
					 bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);

	/* bit 7 = command valid, bit 0 selects wait (1) vs signal (0) */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
	amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));

	return true;
}
499 | |||
500 | /** | ||
501 | * uvd_v6_0_ring_test_ring - register write test | ||
502 | * | ||
503 | * @ring: amdgpu_ring pointer | ||
504 | * | ||
505 | * Test if we can successfully write to the context register | ||
506 | */ | ||
507 | static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) | ||
508 | { | ||
509 | struct amdgpu_device *adev = ring->adev; | ||
510 | uint32_t tmp = 0; | ||
511 | unsigned i; | ||
512 | int r; | ||
513 | |||
514 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); | ||
515 | r = amdgpu_ring_lock(ring, 3); | ||
516 | if (r) { | ||
517 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | ||
518 | ring->idx, r); | ||
519 | return r; | ||
520 | } | ||
521 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | ||
522 | amdgpu_ring_write(ring, 0xDEADBEEF); | ||
523 | amdgpu_ring_unlock_commit(ring); | ||
524 | for (i = 0; i < adev->usec_timeout; i++) { | ||
525 | tmp = RREG32(mmUVD_CONTEXT_ID); | ||
526 | if (tmp == 0xDEADBEEF) | ||
527 | break; | ||
528 | DRM_UDELAY(1); | ||
529 | } | ||
530 | |||
531 | if (i < adev->usec_timeout) { | ||
532 | DRM_INFO("ring test on %d succeeded in %d usecs\n", | ||
533 | ring->idx, i); | ||
534 | } else { | ||
535 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
536 | ring->idx, tmp); | ||
537 | r = -EINVAL; | ||
538 | } | ||
539 | return r; | ||
540 | } | ||
541 | |||
/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer: program the
 * 64-bit IB base address, then its size in dwords (writing the size
 * presumably triggers the fetch — TODO confirm against register spec).
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
560 | |||
561 | /** | ||
562 | * uvd_v6_0_ring_test_ib - test ib execution | ||
563 | * | ||
564 | * @ring: amdgpu_ring pointer | ||
565 | * | ||
566 | * Test if we can successfully execute an IB | ||
567 | */ | ||
568 | static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) | ||
569 | { | ||
570 | struct amdgpu_fence *fence = NULL; | ||
571 | int r; | ||
572 | |||
573 | r = amdgpu_uvd_get_create_msg(ring, 1, NULL); | ||
574 | if (r) { | ||
575 | DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); | ||
576 | goto error; | ||
577 | } | ||
578 | |||
579 | r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); | ||
580 | if (r) { | ||
581 | DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); | ||
582 | goto error; | ||
583 | } | ||
584 | |||
585 | r = amdgpu_fence_wait(fence, false); | ||
586 | if (r) { | ||
587 | DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); | ||
588 | goto error; | ||
589 | } | ||
590 | DRM_INFO("ib test on ring %d succeeded\n", ring->idx); | ||
591 | error: | ||
592 | amdgpu_fence_unref(&fence); | ||
593 | return r; | ||
594 | } | ||
595 | |||
596 | static bool uvd_v6_0_is_idle(struct amdgpu_device *adev) | ||
597 | { | ||
598 | return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); | ||
599 | } | ||
600 | |||
601 | static int uvd_v6_0_wait_for_idle(struct amdgpu_device *adev) | ||
602 | { | ||
603 | unsigned i; | ||
604 | |||
605 | for (i = 0; i < adev->usec_timeout; i++) { | ||
606 | if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) | ||
607 | return 0; | ||
608 | } | ||
609 | return -ETIMEDOUT; | ||
610 | } | ||
611 | |||
612 | static int uvd_v6_0_soft_reset(struct amdgpu_device *adev) | ||
613 | { | ||
614 | uvd_v6_0_stop(adev); | ||
615 | |||
616 | WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, | ||
617 | ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); | ||
618 | mdelay(5); | ||
619 | |||
620 | return uvd_v6_0_start(adev); | ||
621 | } | ||
622 | |||
/**
 * uvd_v6_0_print_status - dump the UVD 6.0 register state
 *
 * @adev: amdgpu_device pointer
 *
 * Debug helper: read and print the current values of all UVD 6.0
 * registers of interest.  Purely informational; no side effects on
 * the hardware beyond the register reads themselves.
 */
static void uvd_v6_0_print_status(struct amdgpu_device *adev)
{
	dev_info(adev->dev, "UVD 6.0 registers\n");
	dev_info(adev->dev, "  UVD_SEMA_ADDR_LOW=0x%08X\n",
		 RREG32(mmUVD_SEMA_ADDR_LOW));
	dev_info(adev->dev, "  UVD_SEMA_ADDR_HIGH=0x%08X\n",
		 RREG32(mmUVD_SEMA_ADDR_HIGH));
	dev_info(adev->dev, "  UVD_SEMA_CMD=0x%08X\n",
		 RREG32(mmUVD_SEMA_CMD));
	dev_info(adev->dev, "  UVD_GPCOM_VCPU_CMD=0x%08X\n",
		 RREG32(mmUVD_GPCOM_VCPU_CMD));
	dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA0=0x%08X\n",
		 RREG32(mmUVD_GPCOM_VCPU_DATA0));
	dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA1=0x%08X\n",
		 RREG32(mmUVD_GPCOM_VCPU_DATA1));
	dev_info(adev->dev, "  UVD_ENGINE_CNTL=0x%08X\n",
		 RREG32(mmUVD_ENGINE_CNTL));
	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
	dev_info(adev->dev, "  UVD_SEMA_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_CNTL));
	dev_info(adev->dev, "  UVD_LMI_EXT40_ADDR=0x%08X\n",
		 RREG32(mmUVD_LMI_EXT40_ADDR));
	dev_info(adev->dev, "  UVD_CTX_INDEX=0x%08X\n",
		 RREG32(mmUVD_CTX_INDEX));
	dev_info(adev->dev, "  UVD_CTX_DATA=0x%08X\n",
		 RREG32(mmUVD_CTX_DATA));
	dev_info(adev->dev, "  UVD_CGC_GATE=0x%08X\n",
		 RREG32(mmUVD_CGC_GATE));
	dev_info(adev->dev, "  UVD_CGC_CTRL=0x%08X\n",
		 RREG32(mmUVD_CGC_CTRL));
	dev_info(adev->dev, "  UVD_LMI_CTRL2=0x%08X\n",
		 RREG32(mmUVD_LMI_CTRL2));
	dev_info(adev->dev, "  UVD_MASTINT_EN=0x%08X\n",
		 RREG32(mmUVD_MASTINT_EN));
	dev_info(adev->dev, "  UVD_LMI_ADDR_EXT=0x%08X\n",
		 RREG32(mmUVD_LMI_ADDR_EXT));
	dev_info(adev->dev, "  UVD_LMI_CTRL=0x%08X\n",
		 RREG32(mmUVD_LMI_CTRL));
	dev_info(adev->dev, "  UVD_LMI_SWAP_CNTL=0x%08X\n",
		 RREG32(mmUVD_LMI_SWAP_CNTL));
	dev_info(adev->dev, "  UVD_MP_SWAP_CNTL=0x%08X\n",
		 RREG32(mmUVD_MP_SWAP_CNTL));
	dev_info(adev->dev, "  UVD_MPC_SET_MUXA0=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXA0));
	dev_info(adev->dev, "  UVD_MPC_SET_MUXA1=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXA1));
	dev_info(adev->dev, "  UVD_MPC_SET_MUXB0=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXB0));
	dev_info(adev->dev, "  UVD_MPC_SET_MUXB1=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXB1));
	dev_info(adev->dev, "  UVD_MPC_SET_MUX=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUX));
	dev_info(adev->dev, "  UVD_MPC_SET_ALU=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_ALU));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_OFFSET0));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE0=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_SIZE0));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_OFFSET1));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE1=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_SIZE1));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_OFFSET2));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE2=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_SIZE2));
	dev_info(adev->dev, "  UVD_VCPU_CNTL=0x%08X\n",
		 RREG32(mmUVD_VCPU_CNTL));
	dev_info(adev->dev, "  UVD_SOFT_RESET=0x%08X\n",
		 RREG32(mmUVD_SOFT_RESET));
	dev_info(adev->dev, "  UVD_RBC_IB_SIZE=0x%08X\n",
		 RREG32(mmUVD_RBC_IB_SIZE));
	dev_info(adev->dev, "  UVD_RBC_RB_RPTR=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_RPTR));
	dev_info(adev->dev, "  UVD_RBC_RB_WPTR=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_WPTR));
	dev_info(adev->dev, "  UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_WPTR_CNTL));
	dev_info(adev->dev, "  UVD_RBC_RB_CNTL=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_CNTL));
	dev_info(adev->dev, "  UVD_STATUS=0x%08X\n",
		 RREG32(mmUVD_STATUS));
	dev_info(adev->dev, "  UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
		 RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
	dev_info(adev->dev, "  UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
	dev_info(adev->dev, "  UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
	dev_info(adev->dev, "  UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
	dev_info(adev->dev, "  UVD_CONTEXT_ID=0x%08X\n",
		 RREG32(mmUVD_CONTEXT_ID));
}
721 | |||
/**
 * uvd_v6_0_set_interrupt_state - enable/disable the UVD interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Currently a stub: interrupt gating is not implemented for UVD 6.0
 * and the trap interrupt is left in its default state.
 */
static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO: honor the requested state */
	return 0;
}
730 | |||
731 | static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, | ||
732 | struct amdgpu_irq_src *source, | ||
733 | struct amdgpu_iv_entry *entry) | ||
734 | { | ||
735 | DRM_DEBUG("IH: UVD TRAP\n"); | ||
736 | amdgpu_fence_process(&adev->uvd.ring); | ||
737 | return 0; | ||
738 | } | ||
739 | |||
/**
 * uvd_v6_0_set_clockgating_state - set UVD clock gating
 *
 * @adev: amdgpu_device pointer
 * @state: requested clockgating state
 *
 * Currently a stub: clock gating is not implemented for UVD 6.0.
 */
static int uvd_v6_0_set_clockgating_state(struct amdgpu_device *adev,
					  enum amdgpu_clockgating_state state)
{
	/* TODO: implement clock gating */

	return 0;
}
747 | |||
748 | static int uvd_v6_0_set_powergating_state(struct amdgpu_device *adev, | ||
749 | enum amdgpu_powergating_state state) | ||
750 | { | ||
751 | /* This doesn't actually powergate the UVD block. | ||
752 | * That's done in the dpm code via the SMC. This | ||
753 | * just re-inits the block as necessary. The actual | ||
754 | * gating still happens in the dpm code. We should | ||
755 | * revisit this when there is a cleaner line between | ||
756 | * the smc and the hw blocks | ||
757 | */ | ||
758 | if (state == AMDGPU_PG_STATE_GATE) { | ||
759 | uvd_v6_0_stop(adev); | ||
760 | return 0; | ||
761 | } else { | ||
762 | return uvd_v6_0_start(adev); | ||
763 | } | ||
764 | } | ||
765 | |||
/* IP-block callback table for UVD 6.0, referenced by the per-ASIC
 * IP block lists elsewhere in the driver.
 */
const struct amdgpu_ip_funcs uvd_v6_0_ip_funcs = {
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.soft_reset = uvd_v6_0_soft_reset,
	.print_status = uvd_v6_0_print_status,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
};
782 | |||
/* Ring callback table for the UVD ring; command-stream parsing and
 * lockup detection reuse the common amdgpu UVD helpers.
 */
static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_semaphore = uvd_v6_0_ring_emit_semaphore,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = uvd_v6_0_ring_test_ib,
	.is_lockup = amdgpu_ring_test_lockup,
};
795 | |||
/* Install the ring callback table on the single UVD ring. */
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
}
800 | |||
/* Interrupt source callbacks for the UVD trap interrupt. */
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};
805 | |||
/* Install the interrupt callbacks; UVD exposes a single interrupt type. */
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}