author	Christian König <deathsimple@vodafone.de>	2011-10-23 06:56:27 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-12-20 14:50:56 -0500
commit	e32eb50dbe43862606a51caa94368ec6bd019434 (patch)
tree	a064cf4e60c0d42694e5dcc3759794b4b24b8e77 /drivers/gpu/drm/radeon/radeon_ring.c
parent	d6d2730c71a5d41a121a7b567bf7ff9c5d4cd3ab (diff)
drm/radeon: rename struct radeon_cp to radeon_ring
That naming makes more sense, since we want to run more than just PM4 rings with it.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ring.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ring.c	136
1 file changed, 68 insertions(+), 68 deletions(-)
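For orientation before the diff: the members this file touches on the renamed structure are roughly the following. This is only a sketch reconstructed from the accesses in radeon_ring.c below; the authoritative definition lives in drivers/gpu/drm/radeon/radeon.h (struct radeon_cp before this patch, struct radeon_ring after it) and may have additional members or different types.

/* Sketch only: field set inferred from the accesses in this file. */
struct radeon_ring {
	struct radeon_bo	*ring_obj;	/* GTT BO backing the ring */
	volatile uint32_t	*ring;		/* kmapped CPU view of the BO */
	unsigned		rptr;		/* last read pointer seen */
	unsigned		rptr_offs;	/* writeback offset for rptr */
	unsigned		rptr_reg;	/* MMIO register holding rptr */
	unsigned		wptr;		/* next write position, in dwords */
	unsigned		wptr_old;	/* saved wptr for unlock_undo */
	unsigned		wptr_reg;	/* MMIO register holding wptr */
	unsigned		ring_size;	/* size in bytes, power of two */
	unsigned		ring_free_dw;	/* free dwords, from free_size() */
	int			count_dw;	/* dwords still allowed after alloc() */
	uint64_t		gpu_addr;	/* GPU address of the pinned BO */
	uint32_t		ptr_mask;	/* (ring_size / 4) - 1 */
	uint32_t		align_mask;	/* fetch-size padding mask */
	struct mutex		mutex;		/* serializes lock/commit */
	bool			ready;		/* checked before scheduling IBs */
};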
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index a69cb049d877..52dd22f2596e 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -60,17 +60,17 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 	return idx_value;
 }
 
-void radeon_ring_write(struct radeon_cp *cp, uint32_t v)
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 {
 #if DRM_DEBUG_CODE
-	if (cp->count_dw <= 0) {
+	if (ring->count_dw <= 0) {
 		DRM_ERROR("radeon: writting more dword to ring than expected !\n");
 	}
 #endif
-	cp->ring[cp->wptr++] = v;
-	cp->wptr &= cp->ptr_mask;
-	cp->count_dw--;
-	cp->ring_free_dw--;
+	ring->ring[ring->wptr++] = v;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw--;
+	ring->ring_free_dw--;
 }
 
 void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
@@ -178,17 +178,17 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
+	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
 	int r = 0;
 
-	if (!ib->length_dw || !cp->ready) {
+	if (!ib->length_dw || !ring->ready) {
 		/* TODO: Nothings in the ib we should report. */
 		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
 
 	/* 64 dwords should be enough for fence too */
-	r = radeon_ring_lock(rdev, cp, 64);
+	r = radeon_ring_lock(rdev, ring, 64);
 	if (r) {
 		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
 		return r;
@@ -199,7 +199,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	/* once scheduled IB is considered free and protected by the fence */
 	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_unlock_commit(rdev, ring);
 	return 0;
 }
 
@@ -284,150 +284,150 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 /*
  * Ring.
  */
-int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp)
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	/* r1xx-r5xx only has CP ring */
 	if (rdev->family < CHIP_R600)
 		return RADEON_RING_TYPE_GFX_INDEX;
 
 	if (rdev->family >= CHIP_CAYMAN) {
-		if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX])
+		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
 			return CAYMAN_RING_TYPE_CP1_INDEX;
-		else if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX])
+		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
 			return CAYMAN_RING_TYPE_CP2_INDEX;
 	}
 	return RADEON_RING_TYPE_GFX_INDEX;
 }
 
-void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp)
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	if (rdev->wb.enabled)
-		cp->rptr = le32_to_cpu(rdev->wb.wb[cp->rptr_offs/4]);
+		ring->rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
 	else
-		cp->rptr = RREG32(cp->rptr_reg);
+		ring->rptr = RREG32(ring->rptr_reg);
 	/* This works because ring_size is a power of 2 */
-	cp->ring_free_dw = (cp->rptr + (cp->ring_size / 4));
-	cp->ring_free_dw -= cp->wptr;
-	cp->ring_free_dw &= cp->ptr_mask;
-	if (!cp->ring_free_dw) {
-		cp->ring_free_dw = cp->ring_size / 4;
+	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
+	ring->ring_free_dw -= ring->wptr;
+	ring->ring_free_dw &= ring->ptr_mask;
+	if (!ring->ring_free_dw) {
+		ring->ring_free_dw = ring->ring_size / 4;
 	}
 }
 
 
-int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
 {
 	int r;
 
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
-	ndw = (ndw + cp->align_mask) & ~cp->align_mask;
-	while (ndw > (cp->ring_free_dw - 1)) {
-		radeon_ring_free_size(rdev, cp);
-		if (ndw < cp->ring_free_dw) {
+	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+	while (ndw > (ring->ring_free_dw - 1)) {
+		radeon_ring_free_size(rdev, ring);
+		if (ndw < ring->ring_free_dw) {
 			break;
 		}
-		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, cp));
+		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
 		if (r)
 			return r;
 	}
-	cp->count_dw = ndw;
-	cp->wptr_old = cp->wptr;
+	ring->count_dw = ndw;
+	ring->wptr_old = ring->wptr;
 	return 0;
 }
 
-int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
 {
 	int r;
 
-	mutex_lock(&cp->mutex);
-	r = radeon_ring_alloc(rdev, cp, ndw);
+	mutex_lock(&ring->mutex);
+	r = radeon_ring_alloc(rdev, ring, ndw);
 	if (r) {
-		mutex_unlock(&cp->mutex);
+		mutex_unlock(&ring->mutex);
 		return r;
 	}
 	return 0;
 }
 
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_cp *cp)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	unsigned count_dw_pad;
 	unsigned i;
 
 	/* We pad to match fetch size */
-	count_dw_pad = (cp->align_mask + 1) -
-		       (cp->wptr & cp->align_mask);
+	count_dw_pad = (ring->align_mask + 1) -
+		       (ring->wptr & ring->align_mask);
 	for (i = 0; i < count_dw_pad; i++) {
-		radeon_ring_write(cp, 2 << 30);
+		radeon_ring_write(ring, 2 << 30);
 	}
 	DRM_MEMORYBARRIER();
-	WREG32(cp->wptr_reg, cp->wptr);
-	(void)RREG32(cp->wptr_reg);
+	WREG32(ring->wptr_reg, ring->wptr);
+	(void)RREG32(ring->wptr_reg);
 }
 
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-	radeon_ring_commit(rdev, cp);
-	mutex_unlock(&cp->mutex);
+	radeon_ring_commit(rdev, ring);
+	mutex_unlock(&ring->mutex);
 }
 
-void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_cp *cp)
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-	cp->wptr = cp->wptr_old;
-	mutex_unlock(&cp->mutex);
+	ring->wptr = ring->wptr_old;
+	mutex_unlock(&ring->mutex);
 }
 
-int radeon_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size,
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
 		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg)
 {
 	int r;
 
-	cp->ring_size = ring_size;
-	cp->rptr_offs = rptr_offs;
-	cp->rptr_reg = rptr_reg;
-	cp->wptr_reg = wptr_reg;
+	ring->ring_size = ring_size;
+	ring->rptr_offs = rptr_offs;
+	ring->rptr_reg = rptr_reg;
+	ring->wptr_reg = wptr_reg;
 	/* Allocate ring buffer */
-	if (cp->ring_obj == NULL) {
-		r = radeon_bo_create(rdev, cp->ring_size, PAGE_SIZE, true,
+	if (ring->ring_obj == NULL) {
+		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
 				     RADEON_GEM_DOMAIN_GTT,
-				     &cp->ring_obj);
+				     &ring->ring_obj);
 		if (r) {
 			dev_err(rdev->dev, "(%d) ring create failed\n", r);
 			return r;
 		}
-		r = radeon_bo_reserve(cp->ring_obj, false);
+		r = radeon_bo_reserve(ring->ring_obj, false);
 		if (unlikely(r != 0))
 			return r;
-		r = radeon_bo_pin(cp->ring_obj, RADEON_GEM_DOMAIN_GTT,
-				  &cp->gpu_addr);
+		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
+				  &ring->gpu_addr);
 		if (r) {
-			radeon_bo_unreserve(cp->ring_obj);
+			radeon_bo_unreserve(ring->ring_obj);
 			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
 			return r;
 		}
-		r = radeon_bo_kmap(cp->ring_obj,
-				   (void **)&cp->ring);
-		radeon_bo_unreserve(cp->ring_obj);
+		r = radeon_bo_kmap(ring->ring_obj,
+				   (void **)&ring->ring);
+		radeon_bo_unreserve(ring->ring_obj);
 		if (r) {
 			dev_err(rdev->dev, "(%d) ring map failed\n", r);
 			return r;
 		}
 	}
-	cp->ptr_mask = (cp->ring_size / 4) - 1;
-	cp->ring_free_dw = cp->ring_size / 4;
+	ring->ptr_mask = (ring->ring_size / 4) - 1;
+	ring->ring_free_dw = ring->ring_size / 4;
 	return 0;
 }
 
-void radeon_ring_fini(struct radeon_device *rdev, struct radeon_cp *cp)
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	int r;
 	struct radeon_bo *ring_obj;
 
-	mutex_lock(&cp->mutex);
-	ring_obj = cp->ring_obj;
-	cp->ring = NULL;
-	cp->ring_obj = NULL;
-	mutex_unlock(&cp->mutex);
+	mutex_lock(&ring->mutex);
+	ring_obj = ring->ring_obj;
+	ring->ring = NULL;
+	ring->ring_obj = NULL;
+	mutex_unlock(&ring->mutex);
 
 	if (ring_obj) {
 		r = radeon_bo_reserve(ring_obj, false);
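As a usage illustration (not part of the commit): a hypothetical caller sketched only from the renamed entry points visible above. The dword written is the same 2 << 30 filler value the driver itself uses for padding in radeon_ring_commit().

/* Hypothetical example, assuming a valid rdev/ring pair from rdev->ring[]:
 * reserve space, emit a couple of filler dwords, then commit. */
static int example_emit_nops(struct radeon_device *rdev,
			     struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);	/* takes ring->mutex, waits for space */
	if (r)
		return r;

	radeon_ring_write(ring, 2 << 30);	/* same filler dword used for padding above */
	radeon_ring_write(ring, 2 << 30);

	radeon_ring_unlock_commit(rdev, ring);	/* pads, bumps wptr, drops the mutex */
	return 0;
}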