Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ring.c')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_ring.c | 119
1 files changed, 7 insertions, 112 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 60d159308b88..aa9837a6aa75 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -110,7 +110,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 		return;
 	}
 	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
 	if (tmp->fence) {
 		radeon_fence_unref(&tmp->fence);
 	}
@@ -119,19 +118,11 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
-static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
-{
-	while ((ib->length_dw & rdev->cp.align_mask)) {
-		ib->ptr[ib->length_dw++] = PACKET2(0);
-	}
-}
-
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
 	int r = 0;
 
 	mutex_lock(&rdev->ib_pool.mutex);
-	radeon_ib_align(rdev, ib);
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
 		mutex_unlock(&rdev->ib_pool.mutex);
@@ -145,9 +136,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 		mutex_unlock(&rdev->ib_pool.mutex);
 		return r;
 	}
-	radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
-	radeon_ring_write(rdev, ib->gpu_addr);
-	radeon_ring_write(rdev, ib->length_dw);
+	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
 	radeon_ring_unlock_commit(rdev);
 	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
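
Note: IB submission is no longer open-coded against the legacy CP_IB_BASE registers here; it is routed through a per-ASIC hook instead. A minimal sketch of how that hook is presumably wired up, with the removed writes living on in a legacy-only implementation (the names below are assumptions, not part of this patch):

/* Assumed dispatch macro: forward IB submission to the ASIC-specific hook. */
#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))

/* Assumed pre-R600 implementation: the three ring writes dropped above. */
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
	radeon_ring_write(rdev, ib->gpu_addr);
	radeon_ring_write(rdev, ib->length_dw);
}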
@@ -215,69 +204,16 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
-int radeon_ib_test(struct radeon_device *rdev)
-{
-	struct radeon_ib *ib;
-	uint32_t scratch;
-	uint32_t tmp = 0;
-	unsigned i;
-	int r;
-
-	r = radeon_scratch_get(rdev, &scratch);
-	if (r) {
-		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
-		return r;
-	}
-	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, &ib);
-	if (r) {
-		return r;
-	}
-	ib->ptr[0] = PACKET0(scratch, 0);
-	ib->ptr[1] = 0xDEADBEEF;
-	ib->ptr[2] = PACKET2(0);
-	ib->ptr[3] = PACKET2(0);
-	ib->ptr[4] = PACKET2(0);
-	ib->ptr[5] = PACKET2(0);
-	ib->ptr[6] = PACKET2(0);
-	ib->ptr[7] = PACKET2(0);
-	ib->length_dw = 8;
-	r = radeon_ib_schedule(rdev, ib);
-	if (r) {
-		radeon_scratch_free(rdev, scratch);
-		radeon_ib_free(rdev, &ib);
-		return r;
-	}
-	r = radeon_fence_wait(ib->fence, false);
-	if (r) {
-		return r;
-	}
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(scratch);
-		if (tmp == 0xDEADBEEF) {
-			break;
-		}
-		DRM_UDELAY(1);
-	}
-	if (i < rdev->usec_timeout) {
-		DRM_INFO("ib test succeeded in %u usecs\n", i);
-	} else {
-		DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
-		r = -EINVAL;
-	}
-	radeon_scratch_free(rdev, scratch);
-	radeon_ib_free(rdev, &ib);
-	return r;
-}
-
 
 /*
  * Ring.
  */
 void radeon_ring_free_size(struct radeon_device *rdev)
 {
-	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+	if (rdev->family >= CHIP_R600)
+		rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+	else
+		rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
 	/* This works because ring_size is a power of 2 */
 	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
 	rdev->cp.ring_free_dw -= rdev->cp.wptr;
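
The "power of 2" comment refers to the wrap-around arithmetic: because the ring's dword count is a power of two, free space can be computed as rptr + ring_dw - wptr and then masked, with no branch on whether the write pointer has wrapped past the read pointer. A small worked example under an assumed 1024-dword ring (the masking step sits outside the lines shown above):

	unsigned ring_dw = 1024, rptr = 900, wptr = 100;
	unsigned free_dw = (rptr + ring_dw - wptr) & (ring_dw - 1);	/* = 800 dwords free */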
@@ -320,11 +256,10 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
 	count_dw_pad = (rdev->cp.align_mask + 1) -
 		       (rdev->cp.wptr & rdev->cp.align_mask);
 	for (i = 0; i < count_dw_pad; i++) {
-		radeon_ring_write(rdev, PACKET2(0));
+		radeon_ring_write(rdev, 2 << 30);
 	}
 	DRM_MEMORYBARRIER();
-	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
-	(void)RREG32(RADEON_CP_RB_WPTR);
+	radeon_cp_commit(rdev);
 	mutex_unlock(&rdev->cp.mutex);
 }
 
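Two things change in this hunk. The padding value 2 << 30 is 0x80000000, the type-2 (NOP) CP packet header, i.e. what PACKET2(0) expands to on the legacy parts, so the ring is still padded with NOPs without relying on a pre-R600 macro. The write-pointer commit, meanwhile, now goes through a per-ASIC hook; a sketch of how that presumably looks on the legacy path, carrying the two register accesses removed above (the names are assumptions, not part of this patch):

/* Assumed dispatch macro: forward the wptr commit to the ASIC-specific hook. */
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))

/* Assumed pre-R600 implementation: write wptr, then read it back so the
 * write is flushed out of the PCI posting buffers before the lock is dropped. */
void r100_cp_commit(struct radeon_device *rdev)
{
	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(RADEON_CP_RB_WPTR);
}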
@@ -334,46 +269,6 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev)
 	mutex_unlock(&rdev->cp.mutex);
 }
 
-int radeon_ring_test(struct radeon_device *rdev)
-{
-	uint32_t scratch;
-	uint32_t tmp = 0;
-	unsigned i;
-	int r;
-
-	r = radeon_scratch_get(rdev, &scratch);
-	if (r) {
-		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
-		return r;
-	}
-	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ring_lock(rdev, 2);
-	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
-		radeon_scratch_free(rdev, scratch);
-		return r;
-	}
-	radeon_ring_write(rdev, PACKET0(scratch, 0));
-	radeon_ring_write(rdev, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev);
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(scratch);
-		if (tmp == 0xDEADBEEF) {
-			break;
-		}
-		DRM_UDELAY(1);
-	}
-	if (i < rdev->usec_timeout) {
-		DRM_INFO("ring test succeeded in %d usecs\n", i);
-	} else {
-		DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
-		r = -EINVAL;
-	}
-	radeon_scratch_free(rdev, scratch);
-	return r;
-}
-
 int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 {
 	int r;