author     Linus Torvalds <torvalds@linux-foundation.org>    2009-09-21 11:10:09 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-09-21 11:10:09 -0400
commit     44040f107e64d689ccd3211ac62c6bc44f3f0775 (patch)
tree       f85059028aa570e758c7fb272fd8cf823ab4f119 /drivers/gpu/drm/radeon/radeon_ring.c
parent     388dba30471c236a290c4082bce5f2b5cd1a7a06 (diff)
parent     28d520433b6375740990ab99d69b0d0067fd656b (diff)
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (133 commits)
  drm/vgaarb: add VGA arbitration support to the drm and kms.
  drm/radeon: some r420s have a CP race with the DMA engine.
  drm/radeon/r600/kms: rv670 is not DCE3
  drm/radeon/kms: r420 idle after programming GA_ENHANCE
  drm/radeon/kms: more fixes to rv770 suspend/resume path.
  drm/radeon/kms: more alignment for rv770.c with r600.c
  drm/radeon/kms: rv770 blit init called too late.
  drm/radeon/kms: move around new init path code to avoid posting at init
  drm/radeon/r600: fix some issues with suspend/resume.
  drm/radeon/kms: disable VGA rendering engine before taking over VRAM
  drm/radeon/kms: Move radeon_get_clock_info() call out of radeon_clocks_init().
  drm/radeon/kms: add initial connector properties
  drm/radeon/kms: Use surfaces for scanout / cursor byte swapping on big endian.
  drm/radeon/kms: don't fail if we fail to init GPU acceleration
  drm/r600/kms: fixup number of loops per blit calculation.
  drm/radeon/kms: reprogram format in set base.
  drm/radeon: avivo chips have no separate int bit for display
  drm/radeon/r600: don't do interrupts
  drm: fix _DRM_GEM addmap error message
  drm: update crtc x/y when only fb changes
  ...

Fixed up trivial conflicts in firmware/Makefile due to network driver (cxgb3)
and drm (mga/r128/radeon) firmware being listed next to each other.
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ring.c')

-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 143
1 file changed, 25 insertions(+), 118 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 60d159308b88..747b4bffb84b 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -56,10 +56,12 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 		set_bit(i, rdev->ib_pool.alloc_bm);
 		rdev->ib_pool.ibs[i].length_dw = 0;
 		*ib = &rdev->ib_pool.ibs[i];
+		mutex_unlock(&rdev->ib_pool.mutex);
 		goto out;
 	}
 	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
 		/* we go do nothings here */
+		mutex_unlock(&rdev->ib_pool.mutex);
 		DRM_ERROR("all IB allocated none scheduled.\n");
 		r = -EINVAL;
 		goto out;
@@ -69,10 +71,13 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 			 struct radeon_ib, list);
 	if (nib->fence == NULL) {
 		/* we go do nothings here */
+		mutex_unlock(&rdev->ib_pool.mutex);
 		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
 		r = -EINVAL;
 		goto out;
 	}
+	mutex_unlock(&rdev->ib_pool.mutex);
+
 	r = radeon_fence_wait(nib->fence, false);
 	if (r) {
 		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
@@ -81,12 +86,17 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 		goto out;
 	}
 	radeon_fence_unref(&nib->fence);
+
 	nib->length_dw = 0;
+
+	/* scheduled list is accessed here */
+	mutex_lock(&rdev->ib_pool.mutex);
 	list_del(&nib->list);
 	INIT_LIST_HEAD(&nib->list);
+	mutex_unlock(&rdev->ib_pool.mutex);
+
 	*ib = nib;
 out:
-	mutex_unlock(&rdev->ib_pool.mutex);
 	if (r) {
 		radeon_fence_unref(&fence);
 	} else {
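
The three hunks above rework the locking in radeon_ib_get(): ib_pool.mutex used to be held across the whole function, including the potentially long radeon_fence_wait(), and was dropped once at the "out" label. It is now released on every early-exit path and before the wait, then retaken only around the scheduled-list update. A minimal standalone sketch of that pattern, using pthreads and illustrative names (struct pool, struct entry, a spin-wait stub) rather than the driver's real types:

#include <pthread.h>
#include <sched.h>
#include <stddef.h>

struct entry {
        struct entry *next;
        volatile int busy;               /* stands in for the IB's fence */
};

struct pool {
        pthread_mutex_t mutex;
        struct entry *scheduled;         /* list shared between threads */
};

static struct entry *pool_get(struct pool *p)
{
        struct entry *e;

        pthread_mutex_lock(&p->mutex);
        e = p->scheduled;                /* pick a candidate under the lock */
        if (!e) {
                pthread_mutex_unlock(&p->mutex); /* unlock on error paths too */
                return NULL;
        }
        pthread_mutex_unlock(&p->mutex); /* never hold it across the wait */

        while (e->busy)                  /* may block for a long time */
                sched_yield();

        pthread_mutex_lock(&p->mutex);   /* retake only for the list update */
        p->scheduled = e->next;          /* head removal keeps the sketch short;
                                            the driver uses list_del() */
        pthread_mutex_unlock(&p->mutex);
        return e;
}

Holding a mutex across a blocking wait stalls every other thread that needs the pool, and risks deadlock if completing the waited-on work itself requires the same mutex; narrowing the critical sections avoids both.
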
@@ -111,47 +121,36 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 	}
 	list_del(&tmp->list);
 	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence) {
+	if (tmp->fence)
 		radeon_fence_unref(&tmp->fence);
-	}
+
 	tmp->length_dw = 0;
 	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
-static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
-{
-	while ((ib->length_dw & rdev->cp.align_mask)) {
-		ib->ptr[ib->length_dw++] = PACKET2(0);
-	}
-}
-
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
 	int r = 0;
 
-	mutex_lock(&rdev->ib_pool.mutex);
-	radeon_ib_align(rdev, ib);
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		mutex_unlock(&rdev->ib_pool.mutex);
 		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
 		return -EINVAL;
 	}
+
 	/* 64 dwords should be enough for fence too */
 	r = radeon_ring_lock(rdev, 64);
 	if (r) {
 		DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
-		mutex_unlock(&rdev->ib_pool.mutex);
 		return r;
 	}
-	radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
-	radeon_ring_write(rdev, ib->gpu_addr);
-	radeon_ring_write(rdev, ib->length_dw);
+	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
-	radeon_ring_unlock_commit(rdev);
+	mutex_lock(&rdev->ib_pool.mutex);
 	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
 	mutex_unlock(&rdev->ib_pool.mutex);
+	radeon_ring_unlock_commit(rdev);
 	return 0;
 }
 
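
Two things change in radeon_ib_schedule() above. First, the pre-R600 ring programming (a PACKET0 write of RADEON_CP_IB_BASE followed by the IB address and length) is replaced by a single radeon_ring_ib_execute() call, which points at per-chip dispatch so this shared file no longer hard-codes one family's registers. Second, ib_pool.mutex now covers only the list_add_tail(), and the ring is committed after the mutex is dropped, keeping the two locks' scopes small and their nesting consistent. A hedged sketch of the dispatch idea; the struct, field, and function names below are illustrative, not the driver's radeon_asic definitions:

#include <stdint.h>
#include <stdio.h>

struct device_ctx;

struct chip_ops {
        /* each chip family supplies its own IB-execute routine */
        void (*ring_ib_execute)(struct device_ctx *ctx,
                                uint64_t gpu_addr, uint32_t length_dw);
};

struct device_ctx {
        const struct chip_ops *ops;
};

static void legacy_ib_execute(struct device_ctx *ctx,
                              uint64_t gpu_addr, uint32_t length_dw)
{
        (void)ctx;
        /* a pre-R600 part would emit PACKET0(CP_IB_BASE, 1) here */
        printf("legacy IB at %#llx, %u dwords\n",
               (unsigned long long)gpu_addr, length_dw);
}

static const struct chip_ops legacy_ops = {
        .ring_ib_execute = legacy_ib_execute,
};

/* generic code calls through the table, never a family register */
static void ring_ib_execute(struct device_ctx *ctx,
                            uint64_t gpu_addr, uint32_t length_dw)
{
        ctx->ops->ring_ib_execute(ctx, gpu_addr, length_dw);
}

A device wired up once with ctx.ops = &legacy_ops then reaches the right routine through ring_ib_execute() alone.
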
@@ -162,6 +161,8 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 	int i;
 	int r = 0;
 
+	if (rdev->ib_pool.robj)
+		return 0;
 	/* Allocate 1M object buffer */
 	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
 	r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
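
The new guard above makes radeon_ib_pool_init() safe to call more than once: if the pool's backing object already exists, the function returns success instead of allocating a second buffer, which matters when init is re-run (for example from a resume path). A minimal standalone sketch of the idiom, with illustrative names:

#include <stdlib.h>

struct ib_pool {
        void *robj;     /* NULL until the first successful init */
};

static int pool_init(struct ib_pool *pool, size_t size)
{
        if (pool->robj)         /* already initialized: nothing to do */
                return 0;
        pool->robj = malloc(size);
        return pool->robj ? 0 : -1;
}
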
@@ -215,69 +216,16 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
-int radeon_ib_test(struct radeon_device *rdev)
-{
-	struct radeon_ib *ib;
-	uint32_t scratch;
-	uint32_t tmp = 0;
-	unsigned i;
-	int r;
-
-	r = radeon_scratch_get(rdev, &scratch);
-	if (r) {
-		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
-		return r;
-	}
-	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, &ib);
-	if (r) {
-		return r;
-	}
-	ib->ptr[0] = PACKET0(scratch, 0);
-	ib->ptr[1] = 0xDEADBEEF;
-	ib->ptr[2] = PACKET2(0);
-	ib->ptr[3] = PACKET2(0);
-	ib->ptr[4] = PACKET2(0);
-	ib->ptr[5] = PACKET2(0);
-	ib->ptr[6] = PACKET2(0);
-	ib->ptr[7] = PACKET2(0);
-	ib->length_dw = 8;
-	r = radeon_ib_schedule(rdev, ib);
-	if (r) {
-		radeon_scratch_free(rdev, scratch);
-		radeon_ib_free(rdev, &ib);
-		return r;
-	}
-	r = radeon_fence_wait(ib->fence, false);
-	if (r) {
-		return r;
-	}
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(scratch);
-		if (tmp == 0xDEADBEEF) {
-			break;
-		}
-		DRM_UDELAY(1);
-	}
-	if (i < rdev->usec_timeout) {
-		DRM_INFO("ib test succeeded in %u usecs\n", i);
-	} else {
-		DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
-		r = -EINVAL;
-	}
-	radeon_scratch_free(rdev, scratch);
-	radeon_ib_free(rdev, &ib);
-	return r;
-}
-
 
 /*
  * Ring.
  */
 void radeon_ring_free_size(struct radeon_device *rdev)
 {
-	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+	if (rdev->family >= CHIP_R600)
+		rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+	else
+		rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
 	/* This works because ring_size is a power of 2 */
 	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
 	rdev->cp.ring_free_dw -= rdev->cp.wptr;
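
The radeon_ring_free_size() change above branches on chip family because R600-class parts expose the CP ring read pointer in a different register block than earlier chips. A hedged sketch of the selection pattern; the enum values, offsets, and accessor below are placeholders, not the driver's real definitions:

#include <stdint.h>

enum chip_family { FAMILY_R100, FAMILY_R300, FAMILY_R600, FAMILY_RV770 };

/* placeholder MMIO offsets; the real ones live in the driver's headers */
#define LEGACY_RPTR_OFFSET 0x0710u
#define R600_RPTR_OFFSET   0x8700u

static uint32_t mmio_read32(uint32_t offset)    /* stub register accessor */
{
        (void)offset;
        return 0;       /* real code would read the MMIO aperture */
}

/* one shared helper; the chip family decides which offset is live */
static uint32_t cp_read_rptr(enum chip_family family)
{
        if (family >= FAMILY_R600)
                return mmio_read32(R600_RPTR_OFFSET);
        return mmio_read32(LEGACY_RPTR_OFFSET);
}
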
@@ -320,11 +268,10 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
 	count_dw_pad = (rdev->cp.align_mask + 1) -
 		       (rdev->cp.wptr & rdev->cp.align_mask);
 	for (i = 0; i < count_dw_pad; i++) {
-		radeon_ring_write(rdev, PACKET2(0));
+		radeon_ring_write(rdev, 2 << 30);
 	}
 	DRM_MEMORYBARRIER();
-	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
-	(void)RREG32(RADEON_CP_RB_WPTR);
+	radeon_cp_commit(rdev);
 	mutex_unlock(&rdev->cp.mutex);
 }
 
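
Two related cleanups above. The pad value 2 << 30 is the raw header of a CP type-2 (filler) packet: the packet type lives in bits [31:30] of the header dword, and PACKET2(0) expanded to the same value on the pre-R600 path, but the literal avoids a macro whose definition varies across family headers. Likewise, the direct write-pointer write plus read-back flush is replaced by radeon_cp_commit(), which, like radeon_ring_ib_execute() earlier, is dispatched per chip family. A small self-checking sketch of the header encoding, with illustrative macro names:

#include <assert.h>
#include <stdint.h>

#define CP_PKT_TYPE_SHIFT 30    /* packet type field: header bits [31:30] */

/* type-2 packet: a one-dword filler/NOP used to pad the ring */
static inline uint32_t cp_packet2(void)
{
        return (uint32_t)2 << CP_PKT_TYPE_SHIFT;
}

int main(void)
{
        assert(cp_packet2() == 0x80000000u);    /* the value PACKET2(0) produced */
        return 0;
}
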
@@ -334,46 +281,6 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev)
 	mutex_unlock(&rdev->cp.mutex);
 }
 
-int radeon_ring_test(struct radeon_device *rdev)
-{
-	uint32_t scratch;
-	uint32_t tmp = 0;
-	unsigned i;
-	int r;
-
-	r = radeon_scratch_get(rdev, &scratch);
-	if (r) {
-		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
-		return r;
-	}
-	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ring_lock(rdev, 2);
-	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
-		radeon_scratch_free(rdev, scratch);
-		return r;
-	}
-	radeon_ring_write(rdev, PACKET0(scratch, 0));
-	radeon_ring_write(rdev, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev);
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(scratch);
-		if (tmp == 0xDEADBEEF) {
-			break;
-		}
-		DRM_UDELAY(1);
-	}
-	if (i < rdev->usec_timeout) {
-		DRM_INFO("ring test succeeded in %d usecs\n", i);
-	} else {
-		DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
-		r = -EINVAL;
-	}
-	radeon_scratch_free(rdev, scratch);
-	return r;
-}
-
 int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 {
 	int r;
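
The two deleted functions, radeon_ib_test() earlier and radeon_ring_test() here, share one smoke-test idiom: seed a scratch register with a sentinel (0xCAFEDEAD), submit a GPU command that writes a second sentinel (0xDEADBEEF), then poll the register with a microsecond delay until it changes or a timeout expires. Their removal from this generic file suggests the tests move to family-specific code elsewhere in the series. A standalone sketch of the poll-until-timeout idiom, with illustrative names and stubbed register access:

#include <stdint.h>
#include <stdio.h>

#define SENTINEL   0xDEADBEEFu
#define TIMEOUT_US 100000u

static uint32_t read_scratch(void)      /* stub; real code reads MMIO */
{
        static unsigned calls;
        return (++calls > 3) ? SENTINEL : 0xCAFEDEADu;
}

static void udelay(unsigned usecs) { (void)usecs; /* stub busy-wait */ }

static int ring_smoke_test(void)
{
        unsigned i;

        for (i = 0; i < TIMEOUT_US; i++) {
                if (read_scratch() == SENTINEL)
                        break;          /* the GPU executed our write */
                udelay(1);
        }
        if (i < TIMEOUT_US) {
                printf("test succeeded in %u usecs\n", i);
                return 0;
        }
        return -1;                      /* GPU never wrote the sentinel */
}
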