author | Alex Deucher <alexander.deucher@amd.com> | 2015-04-20 16:55:21 -0400
---|---|---
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-06-03 21:03:15 -0400
commit | d38ceaf99ed015f2a0b9af3499791bd3a3daae21 (patch) |
tree | c8e237ea218e8ed8a5f64c1654fc01fe5d2239cb /drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c |
parent | 97b2e202fba05b87d720318a6500a337100dab4d (diff) |
drm/amdgpu: add core driver (v4)
This adds the non-asic specific core driver code.
v2: remove extra kconfig option
v3: implement minor fixes from Fengguang Wu
v4: fix cast in amdgpu_ucode.c
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 727
1 file changed, 727 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
new file mode 100644
index 000000000000..c65d93cb540d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -0,0 +1,727 @@
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"radeon/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"radeon/carrizo_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 *
 * First step to get VCE online: allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev)
{
	unsigned long size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

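	/*
	 * The ucode version word packs three fields:
	 *   bits 31..20  major version
	 *   bits 19..8   minor version
	 *   bits  7..0   binary ID
	 * fw_version repacks them as (major << 24) | (minor << 16) | (id << 8).
	 */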
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %u.%u Binary ID: %u\n",
		 version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */

	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes)) +
	       AMDGPU_VCE_STACK_SIZE + AMDGPU_VCE_HEAP_SIZE;
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

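	/* re-upload the firmware image, skipping the common header, into the VCPU BO */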
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
	       (adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

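	/* only power down (or drop the clocks) once both rings have no fences left */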
	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
	bool streams_changed = false;
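	/*
	 * set_clocks ends up true only when the idle work was not already
	 * pending, i.e. VCE was either never used or the idle handler may
	 * have powered it down in the meantime.
	 */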
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);
		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct amdgpu_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib ib;
	uint64_t dummy;
	int i, r;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

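	/*
	 * Every packet below is [length in bytes][command][payload...]; the
	 * length must be a multiple of 4 and at least 8, matching the checks
	 * in amdgpu_vce_ring_parse_cs().
	 */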
	/* stitch together a VCE create msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
	ib.ptr[ib.length_dw++] = 0x00000000;
	ib.ptr[ib.length_dw++] = 0x00000042;
	ib.ptr[ib.length_dw++] = 0x0000000a;
	ib.ptr[ib.length_dw++] = 0x00000001;
	ib.ptr[ib.length_dw++] = 0x00000080;
	ib.ptr[ib.length_dw++] = 0x00000060;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x0000000c;
	ib.ptr[ib.length_dw++] = 0x00000000;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r) {
		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
	}

	if (fence)
		*fence = amdgpu_fence_ref(ib.fence);

	amdgpu_ib_free(ring->adev, &ib);

	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct amdgpu_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib ib;
	uint64_t dummy;
	int i, r;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r) {
		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
	}

	if (fence)
		*fence = amdgpu_fence_ref(ib.fence);

	amdgpu_ib_free(ring->adev, &ib);

	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 *
 * Patch relocation inside command stream with real buffer address
 */
int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	struct amdgpu_bo *bo;
	uint64_t addr;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

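	/* turn the VM address into an offset inside the mapping, then rebase it on the BO's GPU offset */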
	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);

	ib->ptr[lo] = addr & 0xFFFFFFFF;
	ib->ptr[hi] = addr >> 32;

	return 0;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	uint32_t handle = 0;
	bool destroy = false;
	int i, r, idx = 0;
	struct amdgpu_ib *ib = &p->ibs[ib_idx];

	amdgpu_vce_note_usage(p->adev);

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			return -EINVAL;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			break;

		case 0x00000002: /* task info */
		case 0x01000001: /* create */
		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9);
			if (r)
				return r;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11);
			if (r)
				return r;
			break;

		case 0x02000001: /* destroy */
			destroy = true;
			break;

		case 0x05000001: /* context buffer */
		case 0x05000004: /* video bitstream buffer */
		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2);
			if (r)
				return r;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			return -EINVAL;
		}

		idx += len / 4;
	}

	if (destroy) {
		/* IB contains a destroy msg, free the handle */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);

		return 0;
	}

	/* create or encode, validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle)
			return 0;
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");

	return -EINVAL;
}

/**
 * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
 *
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 *
 */
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
				    struct amdgpu_semaphore *semaphore,
				    bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

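	/* the 8-byte aligned semaphore address is sent as two 20-bit chunks (bits 22..3 and 42..23) */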
	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
	if (!emit_wait)
		amdgpu_ring_write(ring, VCE_CMD_END);

	return true;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: GPU address to write the fence value to
 * @seq: fence sequence number
 * @write64bits: emit a 64 bit fence (unsupported, must be false)
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				bool write64bits)
{
	WARN_ON(write64bits);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_lock(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_unlock_commit(ring);

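	/* a single VCE_CMD_END was committed; the rptr moving past it proves the ring is alive */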
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_fence *fence = NULL;
	int r;

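	/* round trip a create/destroy message pair and wait on the destroy fence */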
	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = amdgpu_fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	amdgpu_fence_unref(&fence);
	return r;
}