author    Alex Deucher <alexander.deucher@amd.com>    2015-04-20 16:55:21 -0400
committer Alex Deucher <alexander.deucher@amd.com>    2015-06-03 21:03:15 -0400
commit    d38ceaf99ed015f2a0b9af3499791bd3a3daae21 (patch)
tree      c8e237ea218e8ed8a5f64c1654fc01fe5d2239cb /drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
parent    97b2e202fba05b87d720318a6500a337100dab4d (diff)
drm/amdgpu: add core driver (v4)
This adds the non-asic specific core driver code.
v2: remove extra kconfig option
v3: implement minor fixes from Fengguang Wu
v4: fix cast in amdgpu_ucode.c
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_test.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 552
1 files changed, 552 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
new file mode 100644
index 000000000000..df202999fbfe
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -0,0 +1,552 @@
/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void amdgpu_do_test_moves(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_bo *vram_obj = NULL;
	struct amdgpu_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r;

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		if (adev->rings[i])
			n -= adev->rings[i]->ring_size;
	if (adev->wb.wb_obj)
		n -= AMDGPU_GPU_PAGE_SIZE;
	if (adev->irq.ih.ring_obj)
		n -= adev->irq.ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
			     NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
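	/*
	 * For each slot: fill a GTT BO with a known pattern, DMA it into the
	 * pinned VRAM BO and verify the copy, then write a new pattern into
	 * VRAM, copy it back to GTT and verify again, so the copy engine is
	 * exercised in both directions across the GTT aperture.
	 */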
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct amdgpu_fence *fence = NULL;

		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = amdgpu_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		amdgpu_bo_kunmap(gtt_obj[i]);

		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
				       size, NULL, &fence);

		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = amdgpu_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		amdgpu_fence_unref(&fence);

		r = amdgpu_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - adev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				amdgpu_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		amdgpu_bo_kunmap(vram_obj);

		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
				       size, NULL, &fence);

		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = amdgpu_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		amdgpu_fence_unref(&fence);

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - adev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				amdgpu_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		amdgpu_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - adev->mc.gtt_start);
		continue;

out_lclean_unpin:
		amdgpu_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		amdgpu_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		amdgpu_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			amdgpu_bo_unpin(gtt_obj[i]);
			amdgpu_bo_unreserve(gtt_obj[i]);
			amdgpu_bo_unref(&gtt_obj[i]);
		}
		if (fence)
			amdgpu_fence_unref(&fence);
		break;
	}

	amdgpu_bo_unpin(vram_obj);
out_unres:
	amdgpu_bo_unreserve(vram_obj);
out_unref:
	amdgpu_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}

void amdgpu_test_moves(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs)
		amdgpu_do_test_moves(adev);
}

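/*
 * Emit a fence on the given ring.  The UVD and VCE rings need a dummy
 * create/destroy message pair to have something to fence; every other ring
 * simply emits a fence on a locked ring.
 */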
static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
					     struct amdgpu_ring *ring,
					     struct amdgpu_fence **fence)
{
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring == &adev->uvd.ring) {
		r = amdgpu_uvd_get_create_msg(ring, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = amdgpu_uvd_get_destroy_msg(ring, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring == &adev->vce.ring[0] ||
		   ring == &adev->vce.ring[1]) {
		r = amdgpu_vce_get_create_msg(ring, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = amdgpu_vce_get_destroy_msg(ring, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = amdgpu_ring_lock(ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		amdgpu_ring_unlock_commit(ring);
	}
	return 0;
}

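/*
 * Two-ring sync test: ringA waits on a semaphore twice, emitting a fence
 * after each wait, while ringB signals the semaphore.  Each fence must only
 * signal after the corresponding semaphore signal from ringB.
 */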
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *ringA,
			   struct amdgpu_ring *ringB)
{
	struct amdgpu_fence *fence1 = NULL, *fence2 = NULL;
	struct amdgpu_semaphore *semaphore = NULL;
	int r;

	r = amdgpu_semaphore_create(adev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (amdgpu_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);

	r = amdgpu_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (amdgpu_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);

	r = amdgpu_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	amdgpu_semaphore_free(adev, &semaphore, NULL);

	if (fence1)
		amdgpu_fence_unref(&fence1);

	if (fence2)
		amdgpu_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

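/*
 * Three-ring sync test: ringA and ringB each wait on the same semaphore and
 * emit a fence, while ringC signals it twice.  After the first signal exactly
 * one of the two fences should retire; after the second, both must complete.
 */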
static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
				   struct amdgpu_ring *ringA,
				   struct amdgpu_ring *ringB,
				   struct amdgpu_ring *ringC)
{
	struct amdgpu_fence *fenceA = NULL, *fenceB = NULL;
	struct amdgpu_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = amdgpu_semaphore_create(adev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);
	r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (amdgpu_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (amdgpu_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringC, semaphore);
	amdgpu_ring_unlock_commit(ringC);

	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = amdgpu_fence_signaled(fenceA);
		sigB = amdgpu_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fences A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = amdgpu_ring_lock(ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringC, semaphore);
	amdgpu_ring_unlock_commit(ringC);

	mdelay(1000);

	r = amdgpu_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = amdgpu_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	amdgpu_semaphore_free(adev, &semaphore, NULL);

	if (fenceA)
		amdgpu_fence_unref(&fenceA);

	if (fenceB)
		amdgpu_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

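/* The two VCE rings cannot be synced against each other, so skip that pairing. */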
static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
				      struct amdgpu_ring *ringB)
{
	if (ringA == &ringA->adev->vce.ring[0] &&
	    ringB == &ringB->adev->vce.ring[1])
		return false;

	return true;
}

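/*
 * Run the sync tests on every ordering of every pair (and triple) of rings
 * that are ready and can be synced against each other.
 */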
void amdgpu_test_syncing(struct amdgpu_device *adev)
{
	int i, j, k;

	for (i = 1; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ringA = adev->rings[i];
		if (!ringA || !ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct amdgpu_ring *ringB = adev->rings[j];
			if (!ringB || !ringB->ready)
				continue;

			if (!amdgpu_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			amdgpu_test_ring_sync(adev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			amdgpu_test_ring_sync(adev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct amdgpu_ring *ringC = adev->rings[k];
				if (!ringC || !ringC->ready)
					continue;

				if (!amdgpu_test_sync_possible(ringA, ringC))
					continue;

				if (!amdgpu_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				amdgpu_test_ring_sync2(adev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				amdgpu_test_ring_sync2(adev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				amdgpu_test_ring_sync2(adev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				amdgpu_test_ring_sync2(adev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				amdgpu_test_ring_sync2(adev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				amdgpu_test_ring_sync2(adev, ringC, ringB, ringA);
			}
		}
	}
}