Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile      |   3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  |   7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c    | 894
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.h    |  29
4 files changed, 932 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index e1be2e1a10fd..cab8eecf2ecf 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -84,7 +84,8 @@ amdgpu-y += \
 # add VCE block
 amdgpu-y += \
 	amdgpu_vce.o \
-	vce_v3_0.o
+	vce_v3_0.o \
+	vce_v4_0.o
 
 # add amdkfd interfaces
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index e2c06780ce49..199bc89a4d01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -54,6 +54,8 @@
 #define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
 #define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
 
+#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
+
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
 MODULE_FIRMWARE(FIRMWARE_KABINI);
@@ -69,6 +71,8 @@ MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
 MODULE_FIRMWARE(FIRMWARE_POLARIS12);
 
+MODULE_FIRMWARE(FIRMWARE_VEGA10);
+
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 
 /**
@@ -123,6 +127,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 	case CHIP_POLARIS11:
 		fw_name = FIRMWARE_POLARIS11;
 		break;
+	case CHIP_VEGA10:
+		fw_name = FIRMWARE_VEGA10;
+		break;
 	case CHIP_POLARIS12:
 		fw_name = FIRMWARE_POLARIS12;
 		break;
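
The firmware name selected above is handed to the kernel firmware loader later in amdgpu_vce_sw_init(). A rough sketch of that consumption path, trimmed down to the request_firmware() call (the helper name, error handling, and the simplified version read below are illustrative, not part of this patch):

	/* Illustrative sketch of how fw_name is consumed; not part of this patch. */
	#include <linux/firmware.h>

	static int vce_request_fw_sketch(struct amdgpu_device *adev, const char *fw_name)
	{
		const struct common_firmware_header *hdr;
		int r;

		/* e.g. fw_name == "amdgpu/vega10_vce.bin" on Vega10 */
		r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
		if (r) {
			dev_err(adev->dev, "amdgpu_vce: can't load firmware \"%s\"\n", fw_name);
			return r;
		}

		hdr = (const struct common_firmware_header *)adev->vce.fw->data;
		/* a real implementation validates the header before trusting it */
		adev->vce.fw_version = le32_to_cpu(hdr->ucode_version);
		return 0;
	}

MODULE_FIRMWARE(FIRMWARE_VEGA10) itself emits no code; it only records the file name in the module info so that initramfs tooling knows to bundle the blob.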
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
new file mode 100644
index 000000000000..74146bed5573
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -0,0 +1,894 @@
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vega10/soc15ip.h"
#include "vega10/VCE/vce_4_0_offset.h"
#include "vega10/VCE/vce_4_0_default.h"
#include "vega10/VCE/vce_4_0_sh_mask.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"

#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

#define VCE_V4_0_FW_SIZE	(384 * 1024)
#define VCE_V4_0_STACK_SIZE	(64 * 1024)
#define VCE_V4_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v4_0_mc_resume(struct amdgpu_device *adev);
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v4_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
	else if (ring == &adev->vce.ring[1])
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
	else
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
}

/**
 * vce_v4_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
	else if (ring == &adev->vce.ring[1])
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
	else
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
}

/**
 * vce_v4_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
			lower_32_bits(ring->wptr));
	else if (ring == &adev->vce.ring[1])
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
			lower_32_bits(ring->wptr));
	else
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3),
			lower_32_bits(ring->wptr));
}

static int vce_v4_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status =
				RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS));

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

/**
 * vce_v4_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v4_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->vce.ring[0];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);

	ring = &adev->vce.ring[1];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO2), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE2), ring->ring_size / 4);

	ring = &adev->vce.ring[2];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO3), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI3), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE3), ring->ring_size / 4);

	vce_v4_0_mc_resume(adev);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), VCE_STATUS__JOB_BUSY_MASK,
			~VCE_STATUS__JOB_BUSY_MASK);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 1, ~0x200001);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
	mdelay(100);

	r = vce_v4_0_firmware_loaded(adev);

	/* clear BUSY flag */
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}

static int vce_v4_0_stop(struct amdgpu_device *adev)
{
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);

	/* hold on ECPU */
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	/* clear BUSY flag */
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);

	/* Set Clock-Gating off */
	/* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
		vce_v4_0_set_vce_sw_clock_gating(adev, false);
	*/

	return 0;
}

static int vce_v4_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.num_rings = 3;

	vce_v4_0_set_ring_funcs(adev);
	vce_v4_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v4_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	unsigned size;
	int r, i;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
	if (r)
		return r;

	size = (VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE) * 2;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		size += VCE_V4_0_FW_SIZE;

	r = amdgpu_vce_sw_init(adev, size);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vce.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].ucode_id = AMDGPU_UCODE_ID_VCE;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].fw = adev->vce.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCE firmware\n");
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		r = amdgpu_vce_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
		if (r)
			return r;
	}

	return r;
}

static int vce_v4_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

static int vce_v4_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v4_0_start(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v4_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	/* vce_v4_0_wait_for_idle(handle); */
	vce_v4_0_stop(adev);
	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	return 0;
}

static int vce_v4_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v4_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

static int vce_v4_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v4_0_hw_init(adev);
}

static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t offset, size;

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16));
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, ~0xFF9FF000);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), 0x3F, ~0x3F);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), 0x1FF);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x00398000);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), 0x0, ~0x1);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
	} else {
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
			(adev->vce.gpu_addr >> 8));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
			(adev->vce.gpu_addr >> 40) & 0xff);
	}

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V4_0_FW_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), (adev->vce.gpu_addr >> 8));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR1), (adev->vce.gpu_addr >> 40) & 0xff);
	offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
	size = VCE_V4_0_STACK_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1), (offset & ~0x0f000000) | (1 << 24));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2), (adev->vce.gpu_addr >> 8));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR2), (adev->vce.gpu_addr >> 40) & 0xff);
	offset += size;
	size = VCE_V4_0_DATA_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2), (offset & ~0x0f000000) | (2 << 24));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), 0x0, ~0x100);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
			VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
			~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}

static int vce_v4_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

#if 0
static bool vce_v4_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v4_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v4_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

static bool vce_v4_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to the VCE team, we should use VCE_STATUS instead of
	 * SRBM_STATUS.VCE_BUSY for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for the 1st instance, 0x10 for the 2nd instance).
	 *
	 * VCE_STATUS
	 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 * |----+----+-----------+----+----+----+----------+---------+----|
	 * |bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * The VCE team suggests using bits 3 to 6 for the busy status check.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}

static int vce_v4_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int vce_v4_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v4_0_suspend(adev);
}

static int vce_v4_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v4_0_resume(adev);
}

static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	u32 tmp, data;

	tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
	if (override)
		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
	else
		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;

	if (tmp != data)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
}

static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v4_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	 * With the clocks in the gated state the core is still
	 * accessible but the firmware will throttle the clocks on the
	 * fly as necessary.
	 */
	if (gated) {
		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
		data |= 0x37f;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
	} else {
		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
		data |= 0xffc00000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
		data |= 0x10000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
		data &= ~0xffc00000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
	}
	vce_v4_0_override_vce_clock_gating(adev, false);
}

static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int vce_v4_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_TONGA) ||
	    (adev->asic_type == CHIP_FIJI))
		vce_v4_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A));
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
		}

		vce_v4_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v4_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
		return 0;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v4_0_stop()? */
		return 0;
	else
		return vce_v4_0_start(adev);
}
#endif

static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vce_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
}

static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCE_CMD_END);
}

static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
			unsigned int vm_id, uint64_t pd_addr)
{
	unsigned eng = ring->idx;
	unsigned i;

	pd_addr = pd_addr | 0x1; /* valid bit */
	/* for now, only the physical base address of the PDE and the
	 * valid bit are used */
	BUG_ON(pd_addr & 0xFFFF00000000003EULL);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
		uint32_t req = hub->get_invalidate_req(vm_id);

		amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
		amdgpu_ring_write(ring,
			(hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
		amdgpu_ring_write(ring, upper_32_bits(pd_addr));

		amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
		amdgpu_ring_write(ring,
			(hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
		amdgpu_ring_write(ring, lower_32_bits(pd_addr));

		amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
		amdgpu_ring_write(ring,
			(hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
		amdgpu_ring_write(ring, 0xffffffff);
		amdgpu_ring_write(ring, lower_32_bits(pd_addr));

		/* flush TLB */
		amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
		amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
		amdgpu_ring_write(ring, req);

		/* wait for flush */
		amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
		amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
		amdgpu_ring_write(ring, 1 << vm_id);
		amdgpu_ring_write(ring, 1 << vm_id);
	}
}

static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
			~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_STATUS),
			VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
			~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);

	switch (entry->src_data[0]) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

const struct amd_ip_funcs vce_v4_0_ip_funcs = {
	.name = "vce_v4_0",
	.early_init = vce_v4_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v4_0_sw_init,
	.sw_fini = vce_v4_0_sw_fini,
	.hw_init = vce_v4_0_hw_init,
	.hw_fini = vce_v4_0_hw_fini,
	.suspend = vce_v4_0_suspend,
	.resume = vce_v4_0_resume,
	.is_idle = NULL /* vce_v4_0_is_idle */,
	.wait_for_idle = NULL /* vce_v4_0_wait_for_idle */,
	.check_soft_reset = NULL /* vce_v4_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */,
	.soft_reset = NULL /* vce_v4_0_soft_reset */,
	.post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
	.set_clockgating_state = vce_v4_0_set_clockgating_state,
	.set_powergating_state = NULL /* vce_v4_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0x3f,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = vce_v4_0_ring_get_rptr,
	.get_wptr = vce_v4_0_ring_get_wptr,
	.set_wptr = vce_v4_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
	.emit_frame_size =
		17 * AMDGPU_MAX_VMHUBS + /* vce_v4_0_emit_vm_flush */
		5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
		1, /* vce_v4_0_ring_insert_end */
	.emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
	.emit_ib = vce_v4_0_ring_emit_ib,
	.emit_vm_flush = vce_v4_0_emit_vm_flush,
	.emit_fence = vce_v4_0_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vce_v4_0_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
	DRM_INFO("VCE enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vce_v4_0_irq_funcs = {
	.set = vce_v4_0_set_interrupt_state,
	.process = vce_v4_0_process_interrupt,
};

static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v4_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v4_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v4_0_ip_funcs,
};
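
One detail of vce_v4_0_mc_resume() above that is easy to misread: each VCPU cache base address is split across two registers, the 40BIT_BAR taking bits [39:8] of the GPU address and the 64BIT_BAR the remaining bits [47:40]. A standalone sketch of the arithmetic, using a made-up address (userspace C purely to show the shifts):

	/* Demonstrates the address split written to
	 * VCE_LMI_VCPU_CACHE_40BIT_BAR* and VCE_LMI_VCPU_CACHE_64BIT_BAR*
	 * by vce_v4_0_mc_resume(); the address below is hypothetical. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t gpu_addr = 0x0000123456789a00ULL; /* 48-bit, 256-byte aligned */
		uint32_t bar40 = (uint32_t)(gpu_addr >> 8);         /* keeps bits 39:8 */
		uint32_t bar64 = (uint32_t)(gpu_addr >> 40) & 0xff; /* keeps bits 47:40 */

		printf("40BIT_BAR = 0x%08x, 64BIT_BAR = 0x%02x\n", bar40, bar64);
		return 0;
	}

For this address the program prints 40BIT_BAR = 0x3456789a and 64BIT_BAR = 0x12; in the kernel the same truncation of the high bits happens implicitly because WREG32() takes a 32-bit value.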
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.h
new file mode 100644
index 000000000000..a32beda6a473
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.h
@@ -0,0 +1,29 @@
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __VCE_V4_0_H__
#define __VCE_V4_0_H__

extern const struct amdgpu_ip_block_version vce_v4_0_ip_block;

#endif
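
The header only exports the IP block descriptor; this patch does not yet hook it into a SoC. For orientation, a hedged sketch of how a SoC setup routine of this era would register the block (the actual soc15.c wiring landed separately, so the function below is illustrative):

	/* Illustrative only: registering the block from a SoC setup routine. */
	#include "amdgpu.h"
	#include "vce_v4_0.h"

	static int soc15_add_vce_block_sketch(struct amdgpu_device *adev)
	{
		/* amdgpu_ip_block_add() appends to adev->ip_blocks; VCE is
		 * typically added after the GFX/SDMA blocks it depends on. */
		return amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
	}

Once registered, the amd_ip_funcs callbacks above (early_init, sw_init, hw_init, ...) are driven by the common amdgpu IP dispatch code in that order during device bring-up.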