author    | Alex Deucher <alexander.deucher@amd.com> | 2015-04-20 16:55:21 -0400
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-06-03 21:03:15 -0400
commit    | d38ceaf99ed015f2a0b9af3499791bd3a3daae21 (patch)
tree      | c8e237ea218e8ed8a5f64c1654fc01fe5d2239cb /drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
parent    | 97b2e202fba05b87d720318a6500a337100dab4d (diff)
drm/amdgpu: add core driver (v4)
This adds the non-asic specific core driver code.
v2: remove extra kconfig option
v3: implement minor fixes from Fengguang Wu
v4: fix cast in amdgpu_ucode.c
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 561
1 file changed, 561 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
new file mode 100644
index 000000000000..855e2196657a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -0,0 +1,561 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
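
/*
 * Worked example (illustrative only): on a 4 KiB ring there are
 * 4096 / 4 = 1024 dword slots.  If rptr == wptr == 100 the ring is
 * idle.  When the host writes 16 dwords, wptr advances to 116 and the
 * GPU fetches slots 100..115 until rptr reaches 116 and the ring is
 * idle again.
 */
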
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);

/**
 * amdgpu_ring_free_size - update the free size
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the free dw slots in the ring buffer (all asics).
 */
void amdgpu_ring_free_size(struct amdgpu_ring *ring)
{
        uint32_t rptr = amdgpu_ring_get_rptr(ring);

        /* This works because ring_size is a power of 2 */
        ring->ring_free_dw = rptr + (ring->ring_size / 4);
        ring->ring_free_dw -= ring->wptr;
        ring->ring_free_dw &= ring->ptr_mask;
        if (!ring->ring_free_dw) {
                /* this is an empty ring */
                ring->ring_free_dw = ring->ring_size / 4;
                /* update lockup info to avoid false positive */
                amdgpu_ring_lockup_update(ring);
        }
}
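
/*
 * Worked example of the arithmetic above (illustrative only): with a
 * 4 KiB ring, ring_size / 4 = 1024 dwords and ptr_mask = 1023.  For
 * rptr = 100 and wptr = 200:
 *
 *	free = (100 + 1024 - 200) & 1023 = 924 dwords
 *
 * The mask folds the result back into the ring, which is why the ring
 * size must be a power of two.  A result of 0 is ambiguous (completely
 * empty and completely full look the same), so it is treated as empty;
 * the allocator below keeps one slot in reserve to preserve that
 * invariant.
 */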

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
        int r;

        /* make sure we aren't trying to allocate more space than there is on the ring */
        if (ndw > (ring->ring_size / 4))
                return -ENOMEM;
        /* Align requested size with padding so unlock_commit can
         * pad safely */
        amdgpu_ring_free_size(ring);
        ndw = (ndw + ring->align_mask) & ~ring->align_mask;
        while (ndw > (ring->ring_free_dw - 1)) {
                amdgpu_ring_free_size(ring);
                if (ndw < ring->ring_free_dw) {
                        break;
                }
                r = amdgpu_fence_wait_next(ring);
                if (r)
                        return r;
        }
        ring->count_dw = ndw;
        ring->wptr_old = ring->wptr;
        return 0;
}
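
/*
 * The round-up above is standard power-of-two alignment.  For example
 * (illustrative only), with align_mask = 15 a request for 10 dwords
 * becomes (10 + 15) & ~15 = 16 dwords, so the commit path can always
 * pad to a fetch-size boundary with NOPs without overrunning the
 * allocation.
 */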

/**
 * amdgpu_ring_lock - lock the ring and allocate space on it
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Lock the ring and allocate @ndw dwords in the ring buffer
 * (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
{
        int r;

        mutex_lock(ring->ring_lock);
        r = amdgpu_ring_alloc(ring, ndw);
        if (r) {
                mutex_unlock(ring->ring_lock);
                return r;
        }
        return 0;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
        /* We pad to match fetch size */
        while (ring->wptr & ring->align_mask) {
                amdgpu_ring_write(ring, ring->nop);
        }
        mb();
        amdgpu_ring_set_wptr(ring);
}

/**
 * amdgpu_ring_unlock_commit - tell the GPU to execute the new
 * commands on the ring buffer and unlock it
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Call amdgpu_ring_commit() then unlock the ring (all asics).
 */
void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring)
{
        amdgpu_ring_commit(ring);
        mutex_unlock(ring->ring_lock);
}
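
/*
 * Typical submission flow (a minimal sketch, assuming a caller that
 * already has a valid ring; error handling abbreviated):
 *
 *	r = amdgpu_ring_lock(ring, 16);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, packet);      (repeat per dword)
 *	amdgpu_ring_unlock_commit(ring);      (pads, barriers, bumps wptr)
 *
 * If building the commands fails midway, amdgpu_ring_unlock_undo()
 * rolls wptr back to wptr_old so the partial write is never made
 * visible to the GPU.
 */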

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
        ring->wptr = ring->wptr_old;
}

/**
 * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Call amdgpu_ring_undo() then unlock the ring (all asics).
 */
void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring)
{
        amdgpu_ring_undo(ring);
        mutex_unlock(ring->ring_lock);
}

/**
 * amdgpu_ring_lockup_update - update lockup variables
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the last rptr value and timestamp (all asics).
 */
void amdgpu_ring_lockup_update(struct amdgpu_ring *ring)
{
        atomic_set(&ring->last_rptr, amdgpu_ring_get_rptr(ring));
        atomic64_set(&ring->last_activity, jiffies_64);
}

/**
 * amdgpu_ring_test_lockup() - check if the ring is locked up
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Returns true if the ring appears stalled, false otherwise.
 */
bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring)
{
        uint32_t rptr = amdgpu_ring_get_rptr(ring);
        uint64_t last = atomic64_read(&ring->last_activity);
        uint64_t elapsed;

        if (rptr != atomic_read(&ring->last_rptr)) {
                /* ring is still working, no lockup */
                amdgpu_ring_lockup_update(ring);
                return false;
        }

        elapsed = jiffies_to_msecs(jiffies_64 - last);
        if (amdgpu_lockup_timeout && elapsed >= amdgpu_lockup_timeout) {
                dev_err(ring->adev->dev, "ring %d stalled for more than %llumsec\n",
                        ring->idx, elapsed);
                return true;
        }
        /* give a chance to the GPU ... */
        return false;
}
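
/*
 * Note: amdgpu_lockup_timeout is the driver's lockup-timeout module
 * parameter in milliseconds; because of the amdgpu_lockup_timeout &&
 * guard above, a value of 0 disables the check entirely.  For example,
 * with a 10000 ms timeout a ring whose rptr has not advanced for ten
 * seconds is reported as stalled.
 */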

/**
 * amdgpu_ring_backup - Back up the content of a ring
 *
 * @ring: the ring we want to back up
 * @data: pointer used to return the saved command buffer
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
                            uint32_t **data)
{
        unsigned size, ptr, i;

        /* just in case lock the ring */
        mutex_lock(ring->ring_lock);
        *data = NULL;

        if (ring->ring_obj == NULL) {
                mutex_unlock(ring->ring_lock);
                return 0;
        }

        /* it doesn't make sense to save anything if all fences are signaled */
        if (!amdgpu_fence_count_emitted(ring)) {
                mutex_unlock(ring->ring_lock);
                return 0;
        }

        ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);

        size = ring->wptr + (ring->ring_size / 4);
        size -= ptr;
        size &= ring->ptr_mask;
        if (size == 0) {
                mutex_unlock(ring->ring_lock);
                return 0;
        }

        /* and then save the content of the ring */
        *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
        if (!*data) {
                mutex_unlock(ring->ring_lock);
                return 0;
        }
        for (i = 0; i < size; ++i) {
                (*data)[i] = ring->ring[ptr++];
                ptr &= ring->ptr_mask;
        }

        mutex_unlock(ring->ring_lock);
        return size;
}

/**
 * amdgpu_ring_restore - append saved commands to the ring again
 *
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int amdgpu_ring_restore(struct amdgpu_ring *ring,
                        unsigned size, uint32_t *data)
{
        int i, r;

        if (!size || !data)
                return 0;

        /* restore the saved ring content */
        r = amdgpu_ring_lock(ring, size);
        if (r)
                return r;

        for (i = 0; i < size; ++i) {
                amdgpu_ring_write(ring, data[i]);
        }

        amdgpu_ring_unlock_commit(ring);
        kfree(data);
        return 0;
}
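
/*
 * amdgpu_ring_backup()/amdgpu_ring_restore() are meant to bracket a
 * GPU reset (a minimal sketch of the intended pairing; the reset step
 * itself lives elsewhere in the driver):
 *
 *	unsigned size;
 *	uint32_t *data;
 *
 *	size = amdgpu_ring_backup(ring, &data);
 *	... reset the ASIC and re-initialize the ring ...
 *	amdgpu_ring_restore(ring, size, data);
 *
 * Note that restore consumes the buffer with kfree(), so @data must
 * not be used again after it returns.
 */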

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @ring_size: size of the ring
 * @nop: nop packet for this ring
 * @align_mask: alignment mask used to pad submissions
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @ring_type: type of the ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned ring_size, u32 nop, u32 align_mask,
                     struct amdgpu_irq_src *irq_src, unsigned irq_type,
                     enum amdgpu_ring_type ring_type)
{
        u32 rb_bufsz;
        int r;

        if (ring->adev == NULL) {
                if (adev->num_rings >= AMDGPU_MAX_RINGS)
                        return -EINVAL;

                ring->adev = adev;
                ring->idx = adev->num_rings++;
                adev->rings[ring->idx] = ring;
                amdgpu_fence_driver_init_ring(ring);
        }

        r = amdgpu_wb_get(adev, &ring->rptr_offs);
        if (r) {
                dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
                return r;
        }

        r = amdgpu_wb_get(adev, &ring->wptr_offs);
        if (r) {
                dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
                return r;
        }

        r = amdgpu_wb_get(adev, &ring->fence_offs);
        if (r) {
                dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
                return r;
        }

        r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
        if (r) {
                dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
                return r;
        }
        ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
        ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];

        r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
        if (r) {
                dev_err(adev->dev, "failed initializing fences (%d).\n", r);
                return r;
        }

        ring->ring_lock = &adev->ring_lock;
        /* Align ring size */
        rb_bufsz = order_base_2(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
        ring->ring_size = ring_size;
        ring->align_mask = align_mask;
        ring->nop = nop;
        ring->type = ring_type;

        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
                                     AMDGPU_GEM_DOMAIN_GTT, 0,
                                     NULL, &ring->ring_obj);
                if (r) {
                        dev_err(adev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                r = amdgpu_bo_reserve(ring->ring_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
                                  &ring->gpu_addr);
                if (r) {
                        amdgpu_bo_unreserve(ring->ring_obj);
                        dev_err(adev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
                r = amdgpu_bo_kmap(ring->ring_obj,
                                   (void **)&ring->ring);
                amdgpu_bo_unreserve(ring->ring_obj);
                if (r) {
                        dev_err(adev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
        ring->ptr_mask = (ring->ring_size / 4) - 1;
        ring->ring_free_dw = ring->ring_size / 4;

        if (amdgpu_debugfs_ring_init(adev, ring)) {
                DRM_ERROR("Failed to register debugfs file for rings !\n");
        }
        amdgpu_ring_lockup_update(ring);
        return 0;
}
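
/*
 * Worked example of the size alignment above (illustrative only): for
 * a requested ring_size of 48 KiB, order_base_2(49152 / 8) = 13, so
 * ring_size becomes (1 << 14) * 4 = 64 KiB; sizes are rounded up to a
 * power of two.  That yields 16384 dword slots and ptr_mask = 16383,
 * which is what makes the wrap-around masking in
 * amdgpu_ring_free_size() and amdgpu_ring_backup() valid.
 */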

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
        int r;
        struct amdgpu_bo *ring_obj;

        if (ring->ring_lock == NULL)
                return;

        mutex_lock(ring->ring_lock);
        ring_obj = ring->ring_obj;
        ring->ready = false;
        ring->ring = NULL;
        ring->ring_obj = NULL;
        mutex_unlock(ring->ring_lock);

        amdgpu_wb_free(ring->adev, ring->fence_offs);
        amdgpu_wb_free(ring->adev, ring->rptr_offs);
        amdgpu_wb_free(ring->adev, ring->wptr_offs);
        amdgpu_wb_free(ring->adev, ring->next_rptr_offs);

        if (ring_obj) {
                r = amdgpu_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
                        amdgpu_bo_kunmap(ring_obj);
                        amdgpu_bo_unpin(ring_obj);
                        amdgpu_bo_unreserve(ring_obj);
                }
                amdgpu_bo_unref(&ring_obj);
        }
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int roffset = *(int*)node->info_ent->data;
        struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);

        uint32_t rptr, wptr, rptr_next;
        unsigned count, i, j;

        amdgpu_ring_free_size(ring);
        count = (ring->ring_size / 4) - ring->ring_free_dw;

        wptr = amdgpu_ring_get_wptr(ring);
        seq_printf(m, "wptr: 0x%08x [%5d]\n",
                   wptr, wptr);

        rptr = amdgpu_ring_get_rptr(ring);
        seq_printf(m, "rptr: 0x%08x [%5d]\n",
                   rptr, rptr);

        rptr_next = ~0;

        seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
                   ring->wptr, ring->wptr);
        seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
                   ring->last_semaphore_signal_addr);
        seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
                   ring->last_semaphore_wait_addr);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);

        if (!ring->ready)
                return 0;

        /* print 32 dw before current rptr as often it's the last executed
         * packet that is the root issue
         */
        i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
        for (j = 0; j <= (count + 32); j++) {
                seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
                if (rptr == i)
                        seq_puts(m, " *");
                if (rptr_next == i)
                        seq_puts(m, " #");
                seq_puts(m, "\n");
                i = (i + 1) & ring->ptr_mask;
        }
        return 0;
}

/* TODO: clean this up !*/
static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring);
static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring);
static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);

static struct drm_info_list amdgpu_debugfs_ring_info_list[] = {
        {"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index},
        {"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index},
        {"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index},
        {"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index},
        {"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index},
        {"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index},
        {"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index},
        {"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index},
};

#endif

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;
        for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
                struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i];
                int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data;
                struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset);
                unsigned r;

                if (other != ring)
                        continue;

                r = amdgpu_debugfs_add_files(adev, info, 1);
                if (r)
                        return r;
        }
#endif
        return 0;
}
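
/*
 * The debugfs wiring above keys each file to a ring by the ring's byte
 * offset inside struct amdgpu_device: the info list stores a pointer
 * to that offset, and both amdgpu_debugfs_ring_info() and
 * amdgpu_debugfs_ring_init() recover the ring as
 * (uint8_t *)adev + roffset.  For example, the gfx entry resolves via
 * amdgpu_gfx_index == offsetof(struct amdgpu_device, gfx.gfx_ring[0]).
 */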