Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c  830
1 file changed, 830 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
new file mode 100644
index 000000000000..f3b3026d5932
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -0,0 +1,830 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v5_0_early_init(struct amdgpu_device *adev)
{
	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

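	/*
	 * Editor's note (assumption, not in the original file): interrupt
	 * source id 124 is the UVD system-message/trap interrupt on this
	 * hardware generation; later headers give it a symbolic name, but
	 * the raw number is used here.
	 */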
	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
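	/*
	 * Editor's note (assumption, read from the call below): 4096 is the
	 * ring buffer size, CP_PACKET2 is the packet used to pad the ring,
	 * and 0xf is the write pointer alignment mask.
	 */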
	r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

	return r;
}

static int uvd_v5_0_sw_fini(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	r = amdgpu_uvd_sw_fini(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	/* raise clocks while booting up the VCPU */
	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
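	/*
	 * Editor's note (assumption): the values above are VCLK/DCLK
	 * requests, presumably in 10 kHz units (i.e. 533 MHz / 400 MHz);
	 * passing 0/0 at the "done" label lets the clocks drop again.
	 */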

	r = uvd_v5_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_lock(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

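	/*
	 * Editor's note: the writes below program the semaphore wait/signal
	 * timeouts through PACKET0 register writes on the ring itself, then
	 * clear the timeout status bits and enable the semaphore unit.
	 */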
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_unlock_commit(ring);

done:
	/* lower clocks again */
	amdgpu_asic_set_uvd_clocks(adev, 0, 0);

	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v5_0_stop(adev);
	ring->ready = false;

	return 0;
}

static int uvd_v5_0_suspend(struct amdgpu_device *adev)
{
	int r;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v5_0_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = uvd_v5_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

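	/*
	 * Editor's note (assumption, inferred from the >> 3 shifts below):
	 * the VCPU cache is split into three windows for firmware, stack and
	 * heap, with window offsets programmed in 8-byte units relative to
	 * the base address written to the 64-bit BAR registers.
	 */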
	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable clock gating */
	WREG32(mmUVD_CGC_GATE, 0);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));
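	/*
	 * Editor's note (assumption): the magic value above appears to pack
	 * the LMI write-clean timer (0x40) with enable bits for that timer
	 * and for coherency/urgent-request behavior; later register headers
	 * spell these out as named UVD_LMI_CTRL__* masks, but that mapping
	 * is a hedged guess here.
	 */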

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}
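	/*
	 * Editor's note: each outer iteration above waits up to ~1 s
	 * (100 x 10 ms) for the VCPU to report ready in UVD_STATUS (bit
	 * value 2), pulsing the VCPU soft reset between attempts; after 10
	 * failed attempts we give up.
	 */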

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

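	/*
	 * Editor's note: RB_BUFSZ takes log2 of the ring size, and
	 * RB_NO_FETCH keeps the RBC from fetching commands while the ring
	 * registers are being programmed; the fetch bit is cleared again at
	 * the end of this function to let the ring run.
	 */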
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @write64bit: true for 64-bit fences (not supported here)
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     bool write64bit)
{
	WARN_ON(write64bit);

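	/*
	 * Editor's note (assumption): the first command sequence below writes
	 * the sequence number through UVD_CONTEXT_ID and points DATA0/DATA1
	 * at the fence address with command 0 (fence write); the second
	 * sequence, with command 2, appears to raise the trap interrupt.
	 */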
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_emit_semaphore - emit semaphore command
 *
 * @ring: amdgpu_ring pointer
 * @semaphore: semaphore to emit commands for
 * @emit_wait: true if we should emit a wait command
 *
 * Emit a semaphore command (either wait or signal) to the UVD ring.
 */
static bool uvd_v5_0_ring_emit_semaphore(struct amdgpu_ring *ring,
					 struct amdgpu_semaphore *semaphore,
					 bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

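	/*
	 * Editor's note (assumption, inferred from the shifts): the semaphore
	 * address is passed in 8-byte units, split into a 20-bit low part
	 * (addr >> 3) and a 20-bit high part (addr >> 23); 0x80 in
	 * UVD_SEMA_CMD selects the semaphore operation, with bit 0 choosing
	 * wait over signal.
	 */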
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
	amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));

	return true;
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_lock(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_unlock_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v5_0_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully execute an IB
 */
static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence = NULL;
	int r;

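	/*
	 * Editor's note: UVD sessions are driven by create/destroy messages,
	 * so the IB test below submits a dummy create message followed by a
	 * destroy message and then waits for the destroy fence to signal.
	 */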
	r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
	if (r) {
		DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
		return r;
	}

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = amdgpu_fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto error;
	}
	DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
	amdgpu_fence_unref(&fence);
	amdgpu_asic_set_uvd_clocks(adev, 0, 0);
	return r;
}

static bool uvd_v5_0_is_idle(struct amdgpu_device *adev)
{
	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(struct amdgpu_device *adev)
{
	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static void uvd_v5_0_print_status(struct amdgpu_device *adev)
{
	dev_info(adev->dev, "UVD 5.0 registers\n");
	dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n",
		 RREG32(mmUVD_SEMA_ADDR_LOW));
	dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n",
		 RREG32(mmUVD_SEMA_ADDR_HIGH));
	dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n",
		 RREG32(mmUVD_SEMA_CMD));
	dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n",
		 RREG32(mmUVD_GPCOM_VCPU_CMD));
	dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n",
		 RREG32(mmUVD_GPCOM_VCPU_DATA0));
	dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n",
		 RREG32(mmUVD_GPCOM_VCPU_DATA1));
	dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n",
		 RREG32(mmUVD_ENGINE_CNTL));
	dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
	dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
	dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
	dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_CNTL));
	dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n",
		 RREG32(mmUVD_LMI_EXT40_ADDR));
	dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n",
		 RREG32(mmUVD_CTX_INDEX));
	dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n",
		 RREG32(mmUVD_CTX_DATA));
	dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n",
		 RREG32(mmUVD_CGC_GATE));
	dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n",
		 RREG32(mmUVD_CGC_CTRL));
	dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n",
		 RREG32(mmUVD_LMI_CTRL2));
	dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n",
		 RREG32(mmUVD_MASTINT_EN));
	dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n",
		 RREG32(mmUVD_LMI_ADDR_EXT));
	dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n",
		 RREG32(mmUVD_LMI_CTRL));
	dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n",
		 RREG32(mmUVD_LMI_SWAP_CNTL));
	dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n",
		 RREG32(mmUVD_MP_SWAP_CNTL));
	dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXA0));
	dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXA1));
	dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXB0));
	dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXB1));
	dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUX));
	dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_ALU));
	dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_OFFSET0));
	dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_SIZE0));
	dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_OFFSET1));
	dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_SIZE1));
	dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_OFFSET2));
	dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_SIZE2));
	dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n",
		 RREG32(mmUVD_VCPU_CNTL));
	dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n",
		 RREG32(mmUVD_SOFT_RESET));
	dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_LOW=0x%08X\n",
		 RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW));
	dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_HIGH=0x%08X\n",
		 RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH));
	dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n",
		 RREG32(mmUVD_RBC_IB_SIZE));
	dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_LOW=0x%08X\n",
		 RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW));
	dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_HIGH=0x%08X\n",
		 RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH));
	dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_RPTR));
	dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_WPTR));
	dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_WPTR_CNTL));
	dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_CNTL));
	dev_info(adev->dev, " UVD_STATUS=0x%08X\n",
		 RREG32(mmUVD_STATUS));
	dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
		 RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
	dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
	dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
	dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
	dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
		 RREG32(mmUVD_CONTEXT_ID));
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static int uvd_v5_0_set_clockgating_state(struct amdgpu_device *adev,
					  enum amdgpu_clockgating_state state)
{
	// TODO

	return 0;
}

static int uvd_v5_0_set_powergating_state(struct amdgpu_device *adev,
					  enum amdgpu_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	if (state == AMDGPU_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
		return 0;
	} else {
		return uvd_v5_0_start(adev);
	}
}

const struct amdgpu_ip_funcs uvd_v5_0_ip_funcs = {
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.print_status = uvd_v5_0_print_status,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.emit_semaphore = uvd_v5_0_ring_emit_semaphore,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = uvd_v5_0_ring_test_ib,
	.is_lockup = amdgpu_ring_test_lockup,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}