author		Christian König <deathsimple@vodafone.de>	2011-09-23 09:11:23 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-12-20 14:49:46 -0500
commit		7b1f2485db253aaa0081e1c5213533e166130732 (patch)
tree		77f7e6517d67501108feedfa029f4ea8549a9642 /drivers/gpu/drm/radeon/radeon_ring.c
parent		15d3332f31afd571a6d23971dbc8d8db2856e661 (diff)
drm/radeon: make all functions work with multiple rings.
Give all asic and radeon_ring_* functions a radeon_cp parameter, so they
know the ring to work with.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
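For illustration, a minimal sketch of the new calling convention from a caller's point of view. The helper example_emit_dwords() is invented for this note; the radeon_ring_* signatures are the ones this patch introduces, and &rdev->cp is the single GFX ring that existing callers keep passing through:

	/* hypothetical caller, before vs. after this patch */
	static int example_emit_dwords(struct radeon_device *rdev, unsigned ndw)
	{
		struct radeon_cp *cp = &rdev->cp;	/* pick the ring explicitly */
		int r;

		r = radeon_ring_lock(rdev, cp, ndw);	/* was: radeon_ring_lock(rdev, ndw) */
		if (r)
			return r;
		while (ndw--)
			/* same filler value radeon_ring_commit() pads with */
			radeon_ring_write(cp, 2 << 30);	/* was: radeon_ring_write(rdev, v) */
		radeon_ring_unlock_commit(rdev, cp);	/* was: radeon_ring_unlock_commit(rdev) */
		return 0;
	}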
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ring.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ring.c	121
1 file changed, 61 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index c232317b1dd2..bc8a5807f1a4 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -60,17 +60,17 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 	return idx_value;
 }
 
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+void radeon_ring_write(struct radeon_cp *cp, uint32_t v)
 {
 #if DRM_DEBUG_CODE
-	if (rdev->cp.count_dw <= 0) {
+	if (cp->count_dw <= 0) {
 		DRM_ERROR("radeon: writting more dword to ring than expected !\n");
 	}
 #endif
-	rdev->cp.ring[rdev->cp.wptr++] = v;
-	rdev->cp.wptr &= rdev->cp.ptr_mask;
-	rdev->cp.count_dw--;
-	rdev->cp.ring_free_dw--;
+	cp->ring[cp->wptr++] = v;
+	cp->wptr &= cp->ptr_mask;
+	cp->count_dw--;
+	cp->ring_free_dw--;
 }
 
 void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
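A reader of this page cannot see struct radeon_cp itself (it is defined in radeon.h, outside this diff). As a rough aid, a sketch of the fields radeon_ring.c dereferences, reconstructed from usage in the hunks on this page alone — the real definition has more members, and the types and ordering here are assumptions:

	struct radeon_cp {
		struct radeon_bo	*ring_obj;	/* GTT buffer object backing the ring */
		volatile uint32_t	*ring;		/* CPU mapping of the ring contents */
		unsigned		rptr;		/* GPU read pointer, in dwords */
		unsigned		wptr;		/* CPU write pointer, in dwords */
		unsigned		wptr_old;	/* saved wptr, restored by unlock_undo */
		unsigned		ring_size;	/* in bytes, power of two */
		unsigned		ring_free_dw;	/* free dwords, per radeon_ring_free_size() */
		unsigned		count_dw;	/* dwords the lock holder may still write */
		unsigned		ptr_mask;	/* (ring_size / 4) - 1 */
		unsigned		align_mask;	/* fetch-size alignment, used for padding */
		uint64_t		gpu_addr;	/* GPU address of the pinned ring */
		struct mutex		mutex;		/* serializes ring writers */
		bool			ready;
	};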
@@ -106,14 +106,14 @@ void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
 /*
  * IB.
  */
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
+int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
 {
 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
 	int r = 0, i, c;
 
 	*ib = NULL;
-	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+	r = radeon_fence_create(rdev, &fence, ring);
 	if (r) {
 		dev_err(rdev->dev, "failed to create fence for new IB\n");
 		return r;
@@ -178,16 +178,17 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
+	struct radeon_cp *cp = &rdev->cp;
 	int r = 0;
 
-	if (!ib->length_dw || !rdev->cp.ready) {
+	if (!ib->length_dw || !cp->ready) {
 		/* TODO: Nothings in the ib we should report. */
 		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
 
 	/* 64 dwords should be enough for fence too */
-	r = radeon_ring_lock(rdev, 64);
+	r = radeon_ring_lock(rdev, cp, 64);
 	if (r) {
 		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
 		return r;
@@ -198,7 +199,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	/* once scheduled IB is considered free and protected by the fence */
 	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_unlock_commit(rdev, cp);
 	return 0;
 }
 
@@ -283,7 +284,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 /*
  * Ring.
  */
-void radeon_ring_free_size(struct radeon_device *rdev)
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp)
 {
 	if (rdev->wb.enabled)
 		rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
@@ -294,122 +295,123 @@ void radeon_ring_free_size(struct radeon_device *rdev)
 		rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
 	}
 	/* This works because ring_size is a power of 2 */
-	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
-	rdev->cp.ring_free_dw -= rdev->cp.wptr;
-	rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
-	if (!rdev->cp.ring_free_dw) {
-		rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+	cp->ring_free_dw = (cp->rptr + (cp->ring_size / 4));
+	cp->ring_free_dw -= cp->wptr;
+	cp->ring_free_dw &= cp->ptr_mask;
+	if (!cp->ring_free_dw) {
+		cp->ring_free_dw = cp->ring_size / 4;
 	}
 }
 
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
+
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
 {
 	int r;
 
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
-	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
-	while (ndw > (rdev->cp.ring_free_dw - 1)) {
-		radeon_ring_free_size(rdev);
-		if (ndw < rdev->cp.ring_free_dw) {
+	ndw = (ndw + cp->align_mask) & ~cp->align_mask;
+	while (ndw > (cp->ring_free_dw - 1)) {
+		radeon_ring_free_size(rdev, cp);
+		if (ndw < cp->ring_free_dw) {
 			break;
 		}
 		r = radeon_fence_wait_next(rdev, RADEON_RING_TYPE_GFX_INDEX);
 		if (r)
 			return r;
 	}
-	rdev->cp.count_dw = ndw;
-	rdev->cp.wptr_old = rdev->cp.wptr;
+	cp->count_dw = ndw;
+	cp->wptr_old = cp->wptr;
 	return 0;
 }
 
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
 {
 	int r;
 
-	mutex_lock(&rdev->cp.mutex);
-	r = radeon_ring_alloc(rdev, ndw);
+	mutex_lock(&cp->mutex);
+	r = radeon_ring_alloc(rdev, cp, ndw);
 	if (r) {
-		mutex_unlock(&rdev->cp.mutex);
+		mutex_unlock(&cp->mutex);
 		return r;
 	}
 	return 0;
 }
 
-void radeon_ring_commit(struct radeon_device *rdev)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_cp *cp)
 {
 	unsigned count_dw_pad;
 	unsigned i;
 
 	/* We pad to match fetch size */
-	count_dw_pad = (rdev->cp.align_mask + 1) -
-		       (rdev->cp.wptr & rdev->cp.align_mask);
+	count_dw_pad = (cp->align_mask + 1) -
+		       (cp->wptr & cp->align_mask);
 	for (i = 0; i < count_dw_pad; i++) {
-		radeon_ring_write(rdev, 2 << 30);
+		radeon_ring_write(cp, 2 << 30);
 	}
 	DRM_MEMORYBARRIER();
-	radeon_cp_commit(rdev);
+	radeon_cp_commit(rdev, cp);
 }
 
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp)
 {
-	radeon_ring_commit(rdev);
-	mutex_unlock(&rdev->cp.mutex);
+	radeon_ring_commit(rdev, cp);
+	mutex_unlock(&cp->mutex);
 }
 
-void radeon_ring_unlock_undo(struct radeon_device *rdev)
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_cp *cp)
 {
-	rdev->cp.wptr = rdev->cp.wptr_old;
-	mutex_unlock(&rdev->cp.mutex);
+	cp->wptr = cp->wptr_old;
+	mutex_unlock(&cp->mutex);
 }
 
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size)
 {
 	int r;
 
-	rdev->cp.ring_size = ring_size;
+	cp->ring_size = ring_size;
 	/* Allocate ring buffer */
-	if (rdev->cp.ring_obj == NULL) {
-		r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
+	if (cp->ring_obj == NULL) {
+		r = radeon_bo_create(rdev, cp->ring_size, PAGE_SIZE, true,
 				     RADEON_GEM_DOMAIN_GTT,
-				     &rdev->cp.ring_obj);
+				     &cp->ring_obj);
 		if (r) {
 			dev_err(rdev->dev, "(%d) ring create failed\n", r);
 			return r;
 		}
-		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+		r = radeon_bo_reserve(cp->ring_obj, false);
 		if (unlikely(r != 0))
 			return r;
-		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
-				  &rdev->cp.gpu_addr);
+		r = radeon_bo_pin(cp->ring_obj, RADEON_GEM_DOMAIN_GTT,
+				  &cp->gpu_addr);
 		if (r) {
-			radeon_bo_unreserve(rdev->cp.ring_obj);
+			radeon_bo_unreserve(cp->ring_obj);
 			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
 			return r;
 		}
-		r = radeon_bo_kmap(rdev->cp.ring_obj,
-				   (void **)&rdev->cp.ring);
-		radeon_bo_unreserve(rdev->cp.ring_obj);
+		r = radeon_bo_kmap(cp->ring_obj,
+				   (void **)&cp->ring);
+		radeon_bo_unreserve(cp->ring_obj);
 		if (r) {
 			dev_err(rdev->dev, "(%d) ring map failed\n", r);
 			return r;
 		}
 	}
-	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
-	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+	cp->ptr_mask = (cp->ring_size / 4) - 1;
+	cp->ring_free_dw = cp->ring_size / 4;
 	return 0;
 }
 
-void radeon_ring_fini(struct radeon_device *rdev)
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_cp *cp)
 {
 	int r;
 	struct radeon_bo *ring_obj;
 
-	mutex_lock(&rdev->cp.mutex);
-	ring_obj = rdev->cp.ring_obj;
-	rdev->cp.ring = NULL;
-	rdev->cp.ring_obj = NULL;
-	mutex_unlock(&rdev->cp.mutex);
+	mutex_lock(&cp->mutex);
+	ring_obj = cp->ring_obj;
+	cp->ring = NULL;
+	cp->ring_obj = NULL;
+	mutex_unlock(&cp->mutex);
 
 	if (ring_obj) {
 		r = radeon_bo_reserve(ring_obj, false);
@@ -422,7 +424,6 @@ void radeon_ring_fini(struct radeon_device *rdev)
 	}
 }
 
-
 /*
  * Debugfs info
  */
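Finally, a worked example of the arithmetic the hunks above depend on: the "works because ring_size is a power of 2" free-space mask in radeon_ring_free_size(), and the fetch-size padding in radeon_ring_commit(). A standalone toy, not driver code; the concrete sizes are made up:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned ring_size = 1024;		 /* bytes; must be a power of two */
		unsigned ptr_mask = (ring_size / 4) - 1; /* 255: an all-ones dword mask */
		unsigned align_mask = 16 - 1;		 /* pad to 16-dword fetches */
		unsigned rptr = 10, wptr = 200;

		/* radeon_ring_free_size(): distance from wptr forward to rptr, modulo
		 * the ring length, computed with a mask instead of '%'; the driver
		 * additionally treats a result of 0 as "completely free". */
		unsigned free_dw = (rptr + (ring_size / 4) - wptr) & ptr_mask;
		assert(free_dw == 66);

		/* radeon_ring_commit(): dwords of padding that bring wptr up to the
		 * next fetch boundary (a full fetch if already aligned). */
		unsigned count_dw_pad = (align_mask + 1) - (wptr & align_mask);
		assert((wptr + count_dw_pad) % (align_mask + 1) == 0);

		printf("free_dw=%u pad=%u\n", free_dw, count_dw_pad);
		return 0;
	}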