path: root/drivers/gpu/drm/radeon/radeon_ring.c
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ring.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ring.c	242
1 file changed, 152 insertions(+), 90 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 747b4bffb84b..f6e1e8d4d986 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -26,6 +26,7 @@
  * Jerome Glisse
  */
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include "drmP.h"
 #include "radeon_drm.h"
 #include "radeon_reg.h"
@@ -34,6 +35,36 @@
 
 int radeon_debugfs_ib_init(struct radeon_device *rdev);
 
+void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
+{
+	struct radeon_ib *ib, *n;
+
+	list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
+		list_del(&ib->list);
+		vfree(ib->ptr);
+		kfree(ib);
+	}
+}
+
+void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ib *bib;
+
+	bib = kmalloc(sizeof(*bib), GFP_KERNEL);
+	if (bib == NULL)
+		return;
+	bib->ptr = vmalloc(ib->length_dw * 4);
+	if (bib->ptr == NULL) {
+		kfree(bib);
+		return;
+	}
+	memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
+	bib->length_dw = ib->length_dw;
+	mutex_lock(&rdev->ib_pool.mutex);
+	list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
+	mutex_unlock(&rdev->ib_pool.mutex);
+}
+
 /*
  * IB.
  */
@@ -41,68 +72,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
-	unsigned long i;
-	int r = 0;
+	int r = 0, i, c;
 
 	*ib = NULL;
 	r = radeon_fence_create(rdev, &fence);
 	if (r) {
-		DRM_ERROR("failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to create fence for new IB\n");
 		return r;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (i < RADEON_IB_POOL_SIZE) {
-		set_bit(i, rdev->ib_pool.alloc_bm);
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		*ib = &rdev->ib_pool.ibs[i];
-		mutex_unlock(&rdev->ib_pool.mutex);
-		goto out;
+	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+			break;
+		}
 	}
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
+	if (nib == NULL) {
+		/* This should never happen, it means we allocated all
+		 * IB and haven't scheduled one yet, return EBUSY to
+		 * userspace hoping that on ioctl recall we get better
+		 * luck
+		 */
+		dev_err(rdev->dev, "no free indirect buffer !\n");
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("all IB allocated none scheduled.\n");
-		r = -EINVAL;
-		goto out;
+		radeon_fence_unref(&fence);
+		return -EBUSY;
 	}
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	nib->free = false;
+	if (nib->fence) {
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-		r = -EINVAL;
-		goto out;
-	}
-	mutex_unlock(&rdev->ib_pool.mutex);
-
-	r = radeon_fence_wait(nib->fence, false);
-	if (r) {
-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-			  (unsigned long)nib->gpu_addr, nib->length_dw);
-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-		goto out;
+		r = radeon_fence_wait(nib->fence, false);
+		if (r) {
+			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+			mutex_lock(&rdev->ib_pool.mutex);
+			nib->free = true;
+			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_fence_unref(&fence);
+			return r;
+		}
+		mutex_lock(&rdev->ib_pool.mutex);
 	}
 	radeon_fence_unref(&nib->fence);
-
+	nib->fence = fence;
 	nib->length_dw = 0;
-
-	/* scheduled list is accessed here */
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_del(&nib->list);
-	INIT_LIST_HEAD(&nib->list);
 	mutex_unlock(&rdev->ib_pool.mutex);
-
 	*ib = nib;
-out:
-	if (r) {
-		radeon_fence_unref(&fence);
-	} else {
-		(*ib)->fence = fence;
-	}
-	return r;
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +131,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 	if (tmp == NULL) {
 		return;
 	}
-	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		return;
-	}
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence)
+	if (!tmp->fence->emited)
 		radeon_fence_unref(&tmp->fence);
-
-	tmp->length_dw = 0;
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	mutex_lock(&rdev->ib_pool.mutex);
+	tmp->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
@@ -135,7 +144,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
 
@@ -148,7 +157,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
 	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+	/* once scheduled IB is considered free and protected by the fence */
+	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ring_unlock_commit(rdev);
 	return 0;
@@ -163,21 +173,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 
 	if (rdev->ib_pool.robj)
 		return 0;
+	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
-	r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
-				 true, RADEON_GEM_DOMAIN_GTT,
-				 false, &rdev->ib_pool.robj);
+	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+			     true, RADEON_GEM_DOMAIN_GTT,
+			     &rdev->ib_pool.robj);
 	if (r) {
 		DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
 		return r;
 	}
-	r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
 	if (r) {
+		radeon_bo_unreserve(rdev->ib_pool.robj);
 		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
 		return r;
 	}
-	r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+	radeon_bo_unreserve(rdev->ib_pool.robj);
 	if (r) {
 		DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
 		return r;
@@ -190,9 +205,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
 	}
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {
@@ -203,14 +218,22 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	if (!rdev->ib_pool.ready) {
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	radeon_ib_bogus_cleanup(rdev);
+
 	if (rdev->ib_pool.robj) {
-		radeon_object_kunmap(rdev->ib_pool.robj);
-		radeon_object_unref(&rdev->ib_pool.robj);
+		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->ib_pool.robj);
+			radeon_bo_unpin(rdev->ib_pool.robj);
+			radeon_bo_unreserve(rdev->ib_pool.robj);
+		}
+		radeon_bo_unref(&rdev->ib_pool.robj);
 		rdev->ib_pool.robj = NULL;
 	}
 	mutex_unlock(&rdev->ib_pool.mutex);
@@ -288,29 +311,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	rdev->cp.ring_size = ring_size;
 	/* Allocate ring buffer */
 	if (rdev->cp.ring_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
-					 true,
-					 RADEON_GEM_DOMAIN_GTT,
-					 false,
-					 &rdev->cp.ring_obj);
+		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
+				     RADEON_GEM_DOMAIN_GTT,
+				     &rdev->cp.ring_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			dev_err(rdev->dev, "(%d) ring create failed\n", r);
 			return r;
 		}
-		r = radeon_object_pin(rdev->cp.ring_obj,
-				      RADEON_GEM_DOMAIN_GTT,
-				      &rdev->cp.gpu_addr);
+		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
+				  &rdev->cp.gpu_addr);
 		if (r) {
-			DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			radeon_bo_unreserve(rdev->cp.ring_obj);
+			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->cp.ring_obj,
-				       (void **)&rdev->cp.ring);
+		r = radeon_bo_kmap(rdev->cp.ring_obj,
+				   (void **)&rdev->cp.ring);
+		radeon_bo_unreserve(rdev->cp.ring_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			dev_err(rdev->dev, "(%d) ring map failed\n", r);
 			return r;
 		}
 	}
@@ -321,11 +343,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 
 void radeon_ring_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	mutex_lock(&rdev->cp.mutex);
 	if (rdev->cp.ring_obj) {
-		radeon_object_kunmap(rdev->cp.ring_obj);
-		radeon_object_unpin(rdev->cp.ring_obj);
-		radeon_object_unref(&rdev->cp.ring_obj);
+		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->cp.ring_obj);
+			radeon_bo_unpin(rdev->cp.ring_obj);
+			radeon_bo_unreserve(rdev->cp.ring_obj);
+		}
+		radeon_bo_unref(&rdev->cp.ring_obj);
 		rdev->cp.ring = NULL;
 		rdev->cp.ring_obj = NULL;
 	}
@@ -346,7 +374,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 	if (ib == NULL) {
 		return 0;
 	}
-	seq_printf(m, "IB %04lu\n", ib->idx);
+	seq_printf(m, "IB %04u\n", ib->idx);
 	seq_printf(m, "IB fence %p\n", ib->fence);
 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
 	for (i = 0; i < ib->length_dw; i++) {
@@ -355,15 +383,49 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct radeon_device *rdev = node->info_ent->data;
+	struct radeon_ib *ib;
+	unsigned i;
+
+	mutex_lock(&rdev->ib_pool.mutex);
+	if (list_empty(&rdev->ib_pool.bogus_ib)) {
+		mutex_unlock(&rdev->ib_pool.mutex);
+		seq_printf(m, "no bogus IB recorded\n");
+		return 0;
+	}
+	ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
+	list_del_init(&ib->list);
+	mutex_unlock(&rdev->ib_pool.mutex);
+	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+	for (i = 0; i < ib->length_dw; i++) {
+		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+	}
+	vfree(ib->ptr);
+	kfree(ib);
+	return 0;
+}
+
 static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
 static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+
+static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
+	{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
+};
 #endif
 
 int radeon_debugfs_ib_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
 	unsigned i;
+	int r;
 
+	radeon_debugfs_ib_bogus_info_list[0].data = rdev;
+	r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
+	if (r)
+		return r;
 	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
 		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
 		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];