aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
diff options
context:
space:
mode:
authorAlex Deucher <alexander.deucher@amd.com>2015-04-20 17:31:14 -0400
committerAlex Deucher <alexander.deucher@amd.com>2015-06-03 21:03:17 -0400
commitaaa36a976bbb9b02a54c087ff390c0bad1d18e3e (patch)
tree105be3c06ef33c39e6934801d386847950d4ebf9 /drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
parenta2e73f56fa6282481927ec43aa9362c03c2e2104 (diff)
drm/amdgpu: Add initial VI support
This adds initial support for VI asics. This includes Iceland, Tonga, and Carrizo. Our inital focus as been Carrizo, so there are still gaps in support for Tonga and Iceland, notably power management. Acked-by: Christian König <christian.koenig@amd.com> Acked-by: Jammy Zhou <Jammy.Zhou@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vce_v3_0.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c521
1 files changed, 521 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
new file mode 100644
index 000000000000..384c45e74053
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -0,0 +1,521 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 * Authors: Christian König <christian.koenig@amd.com>
26 */
27
28#include <linux/firmware.h>
29#include <drm/drmP.h>
30#include "amdgpu.h"
31#include "amdgpu_vce.h"
32#include "vid.h"
33#include "vce/vce_3_0_d.h"
34#include "vce/vce_3_0_sh_mask.h"
35#include "oss/oss_2_0_d.h"
36#include "oss/oss_2_0_sh_mask.h"
37
38static void vce_v3_0_mc_resume(struct amdgpu_device *adev);
39static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
40static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
41
42/**
43 * vce_v3_0_ring_get_rptr - get read pointer
44 *
45 * @ring: amdgpu_ring pointer
46 *
47 * Returns the current hardware read pointer
48 */
49static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
50{
51 struct amdgpu_device *adev = ring->adev;
52
53 if (ring == &adev->vce.ring[0])
54 return RREG32(mmVCE_RB_RPTR);
55 else
56 return RREG32(mmVCE_RB_RPTR2);
57}
58
59/**
60 * vce_v3_0_ring_get_wptr - get write pointer
61 *
62 * @ring: amdgpu_ring pointer
63 *
64 * Returns the current hardware write pointer
65 */
66static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
67{
68 struct amdgpu_device *adev = ring->adev;
69
70 if (ring == &adev->vce.ring[0])
71 return RREG32(mmVCE_RB_WPTR);
72 else
73 return RREG32(mmVCE_RB_WPTR2);
74}
75
76/**
77 * vce_v3_0_ring_set_wptr - set write pointer
78 *
79 * @ring: amdgpu_ring pointer
80 *
81 * Commits the write pointer to the hardware
82 */
83static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
84{
85 struct amdgpu_device *adev = ring->adev;
86
87 if (ring == &adev->vce.ring[0])
88 WREG32(mmVCE_RB_WPTR, ring->wptr);
89 else
90 WREG32(mmVCE_RB_WPTR2, ring->wptr);
91}
92
/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block: restores the memory controller /
 * firmware mapping, programs both ring buffers, enables the VCPU
 * clock and releases the ECPU from soft reset, then polls VCE_STATUS
 * until the firmware reports ready.
 *
 * Returns 0 on success, negative error code if the firmware never
 * comes up.
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, j, r;

	vce_v3_0_mc_resume(adev);

	/* set BUSY flag */
	WREG32_P(mmVCE_STATUS, 1, ~1);

	/* program ring 0 registers; rptr starts equal to wptr so the
	 * ring comes up empty */
	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); /* size in dwords */

	/* same setup for ring 1 in the second register set */
	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	/* enable the VCPU clock */
	WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);

	/* pulse the ECPU soft reset: assert ... */
	WREG32_P(mmVCE_SOFT_RESET,
		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	mdelay(100);

	/* ... then release it so the firmware starts booting */
	WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	/* up to 10 attempts; each attempt polls VCE_STATUS bit 1
	 * (presumably the firmware-ready flag — matches the BUSY bit 0
	 * set above; confirm against the VCE 3.0 register spec) for up
	 * to ~1s before re-pulsing the ECPU reset */
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmVCE_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	/* clear BUSY flag */
	WREG32_P(mmVCE_STATUS, 0, ~1);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}
165
/* Register the VCE 3.0 ring and interrupt callbacks; the two hooks
 * are independent of each other. */
static int vce_v3_0_early_init(struct amdgpu_device *adev)
{
	vce_v3_0_set_irq_funcs(adev);
	vce_v3_0_set_ring_funcs(adev);

	return 0;
}
173
174static int vce_v3_0_sw_init(struct amdgpu_device *adev)
175{
176 struct amdgpu_ring *ring;
177 int r;
178
179 /* VCE */
180 r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
181 if (r)
182 return r;
183
184 r = amdgpu_vce_sw_init(adev);
185 if (r)
186 return r;
187
188 r = amdgpu_vce_resume(adev);
189 if (r)
190 return r;
191
192 ring = &adev->vce.ring[0];
193 sprintf(ring->name, "vce0");
194 r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
195 &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
196 if (r)
197 return r;
198
199 ring = &adev->vce.ring[1];
200 sprintf(ring->name, "vce1");
201 r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
202 &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
203 if (r)
204 return r;
205
206 return r;
207}
208
/* Software-side teardown: suspend the VCE firmware state, then free
 * the common VCE software structures. */
static int vce_v3_0_sw_fini(struct amdgpu_device *adev)
{
	int r = amdgpu_vce_suspend(adev);

	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}
223
224static int vce_v3_0_hw_init(struct amdgpu_device *adev)
225{
226 struct amdgpu_ring *ring;
227 int r;
228
229 r = vce_v3_0_start(adev);
230 if (r)
231 return r;
232
233 ring = &adev->vce.ring[0];
234 ring->ready = true;
235 r = amdgpu_ring_test_ring(ring);
236 if (r) {
237 ring->ready = false;
238 return r;
239 }
240
241 ring = &adev->vce.ring[1];
242 ring->ready = true;
243 r = amdgpu_ring_test_ring(ring);
244 if (r) {
245 ring->ready = false;
246 return r;
247 }
248
249 DRM_INFO("VCE initialized successfully.\n");
250
251 return 0;
252}
253
/* Hardware teardown for VCE 3.0.
 * TODO: currently a stub — no hardware state is torn down yet;
 * suspend() below relies on this returning 0. */
static int vce_v3_0_hw_fini(struct amdgpu_device *adev)
{
	return 0;
}
259
/* Suspend: stop the hardware first, then save the firmware/software
 * VCE state via the common helper. */
static int vce_v3_0_suspend(struct amdgpu_device *adev)
{
	int r = vce_v3_0_hw_fini(adev);

	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}
274
/* Resume: restore the firmware/software VCE state, then bring the
 * hardware back up (mirror image of suspend above). */
static int vce_v3_0_resume(struct amdgpu_device *adev)
{
	int r = amdgpu_vce_resume(adev);

	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}
289
/* vce_v3_0_mc_resume - restore the VCE memory controller state
 *
 * @adev: amdgpu_device pointer
 *
 * Programs clock gating, the LMI (local memory interface), and the
 * VCPU cache windows that map the firmware image, stack and heap out
 * of the VCE BO, then enables the VCE system interrupt. Register
 * order follows the hardware programming sequence — do not reorder.
 */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t offset, size;

	/* clock-gating setup; magic values come from the hardware
	 * programming guide — TODO confirm against VCE 3.0 docs */
	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	/* LMI setup; swap disabled, no VM translation */
	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	/* base address of the VCE BO, in 256-byte units */
	WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

	/* cache window 0: the firmware image */
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->vce.fw->size);
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	/* cache window 1: the firmware stack, directly after the image */
	offset += size;
	size = AMDGPU_VCE_STACK_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

	/* cache window 2: the firmware heap, after the stack */
	offset += size;
	size = AMDGPU_VCE_HEAP_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	/* unmask the VCE system interrupt */
	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
326
327static bool vce_v3_0_is_idle(struct amdgpu_device *adev)
328{
329 return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
330}
331
332static int vce_v3_0_wait_for_idle(struct amdgpu_device *adev)
333{
334 unsigned i;
335
336 for (i = 0; i < adev->usec_timeout; i++) {
337 if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
338 return 0;
339 }
340 return -ETIMEDOUT;
341}
342
/* Soft-reset the VCE block via the SRBM, wait briefly for the reset
 * to settle, then restart the block.
 *
 * Returns the result of vce_v3_0_start() (0 on success).
 */
static int vce_v3_0_soft_reset(struct amdgpu_device *adev)
{
	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
	mdelay(5);

	return vce_v3_0_start(adev);
}
351
352static void vce_v3_0_print_status(struct amdgpu_device *adev)
353{
354 dev_info(adev->dev, "VCE 3.0 registers\n");
355 dev_info(adev->dev, " VCE_STATUS=0x%08X\n",
356 RREG32(mmVCE_STATUS));
357 dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n",
358 RREG32(mmVCE_VCPU_CNTL));
359 dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
360 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
361 dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n",
362 RREG32(mmVCE_VCPU_CACHE_SIZE0));
363 dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
364 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
365 dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n",
366 RREG32(mmVCE_VCPU_CACHE_SIZE1));
367 dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
368 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
369 dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n",
370 RREG32(mmVCE_VCPU_CACHE_SIZE2));
371 dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n",
372 RREG32(mmVCE_SOFT_RESET));
373 dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n",
374 RREG32(mmVCE_RB_BASE_LO2));
375 dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n",
376 RREG32(mmVCE_RB_BASE_HI2));
377 dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n",
378 RREG32(mmVCE_RB_SIZE2));
379 dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n",
380 RREG32(mmVCE_RB_RPTR2));
381 dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n",
382 RREG32(mmVCE_RB_WPTR2));
383 dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n",
384 RREG32(mmVCE_RB_BASE_LO));
385 dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n",
386 RREG32(mmVCE_RB_BASE_HI));
387 dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n",
388 RREG32(mmVCE_RB_SIZE));
389 dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n",
390 RREG32(mmVCE_RB_RPTR));
391 dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n",
392 RREG32(mmVCE_RB_WPTR));
393 dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n",
394 RREG32(mmVCE_CLOCK_GATING_A));
395 dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n",
396 RREG32(mmVCE_CLOCK_GATING_B));
397 dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n",
398 RREG32(mmVCE_UENC_CLOCK_GATING));
399 dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
400 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
401 dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n",
402 RREG32(mmVCE_SYS_INT_EN));
403 dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n",
404 RREG32(mmVCE_LMI_CTRL2));
405 dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n",
406 RREG32(mmVCE_LMI_CTRL));
407 dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n",
408 RREG32(mmVCE_LMI_VM_CTRL));
409 dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n",
410 RREG32(mmVCE_LMI_SWAP_CNTL));
411 dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n",
412 RREG32(mmVCE_LMI_SWAP_CNTL1));
413 dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n",
414 RREG32(mmVCE_LMI_CACHE_CTRL));
415}
416
417static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
418 struct amdgpu_irq_src *source,
419 unsigned type,
420 enum amdgpu_interrupt_state state)
421{
422 uint32_t val = 0;
423
424 if (state == AMDGPU_IRQ_STATE_ENABLE)
425 val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
426
427 WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
428 return 0;
429}
430
431static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
432 struct amdgpu_irq_src *source,
433 struct amdgpu_iv_entry *entry)
434{
435 DRM_DEBUG("IH: VCE\n");
436 switch (entry->src_data) {
437 case 0:
438 amdgpu_fence_process(&adev->vce.ring[0]);
439 break;
440 case 1:
441 amdgpu_fence_process(&adev->vce.ring[1]);
442 break;
443 default:
444 DRM_ERROR("Unhandled interrupt: %d %d\n",
445 entry->src_id, entry->src_data);
446 break;
447 }
448
449 return 0;
450}
451
/* Clockgating control for VCE 3.0.
 * TODO: currently a stub — clock gating state changes are accepted
 * but not applied to the hardware. */
static int vce_v3_0_set_clockgating_state(struct amdgpu_device *adev,
					  enum amdgpu_clockgating_state state)
{
	return 0;
}
458
459static int vce_v3_0_set_powergating_state(struct amdgpu_device *adev,
460 enum amdgpu_powergating_state state)
461{
462 /* This doesn't actually powergate the VCE block.
463 * That's done in the dpm code via the SMC. This
464 * just re-inits the block as necessary. The actual
465 * gating still happens in the dpm code. We should
466 * revisit this when there is a cleaner line between
467 * the smc and the hw blocks
468 */
469 if (state == AMDGPU_PG_STATE_GATE)
470 /* XXX do we need a vce_v3_0_stop()? */
471 return 0;
472 else
473 return vce_v3_0_start(adev);
474}
475
/* IP-block callback table exported to the amdgpu core; wires the
 * VCE 3.0 lifecycle (init/fini/suspend/resume), idle handling and
 * gating hooks defined above. late_init is intentionally unused. */
const struct amdgpu_ip_funcs vce_v3_0_ip_funcs = {
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.soft_reset = vce_v3_0_soft_reset,
	.print_status = vce_v3_0_print_status,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};
492
/* Ring callback table shared by both VCE rings: pointer accessors
 * are the v3.0-specific functions above; command submission, fences,
 * semaphores and tests reuse the common amdgpu_vce helpers. */
static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.is_lockup = amdgpu_ring_test_lockup,
};
505
506static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
507{
508 adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
509 adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
510}
511
/* Interrupt source callbacks: state control and IH processing for
 * the VCE trap interrupt. */
static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};
516
517static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
518{
519 adev->vce.irq.num_types = 1;
520 adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
521};