path: root/drivers/gpu/drm/radeon/radeon_ring.c
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ring.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ring.c	66
1 file changed, 43 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f6e1e8d4d986..261e98a276db 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -219,24 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
 	int r;
+	struct radeon_bo *robj;
 
 	if (!rdev->ib_pool.ready) {
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
 	radeon_ib_bogus_cleanup(rdev);
+	robj = rdev->ib_pool.robj;
+	rdev->ib_pool.robj = NULL;
+	mutex_unlock(&rdev->ib_pool.mutex);
 
-	if (rdev->ib_pool.robj) {
-		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+	if (robj) {
+		r = radeon_bo_reserve(robj, false);
 		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->ib_pool.robj);
-			radeon_bo_unpin(rdev->ib_pool.robj);
-			radeon_bo_unreserve(rdev->ib_pool.robj);
+			radeon_bo_kunmap(robj);
+			radeon_bo_unpin(robj);
+			radeon_bo_unreserve(robj);
 		}
-		radeon_bo_unref(&rdev->ib_pool.robj);
-		rdev->ib_pool.robj = NULL;
+		radeon_bo_unref(&robj);
 	}
-	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
 
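This hunk fixes the teardown ordering in radeon_ib_pool_fini(): the pool's BO pointer is detached and cleared while ib_pool.mutex is held, and the blocking reserve/kunmap/unpin/unref sequence then runs on a local copy after the mutex is dropped, so other users of the pool can never observe a half-destroyed object. A minimal userspace sketch of the same detach-then-release pattern, using hypothetical pool/buffer types rather than the driver's:

	#include <pthread.h>
	#include <stdlib.h>

	struct buffer { char payload[64]; };

	struct pool {
		pthread_mutex_t mutex;
		struct buffer *obj;
	};

	static void pool_fini(struct pool *p)
	{
		struct buffer *obj;

		pthread_mutex_lock(&p->mutex);
		obj = p->obj;		/* detach the object under the lock... */
		p->obj = NULL;		/* ...so others see NULL, never a stale pointer */
		pthread_mutex_unlock(&p->mutex);

		free(obj);		/* blocking teardown runs unlocked; free(NULL) is a no-op */
	}

	int main(void)
	{
		struct pool p = { PTHREAD_MUTEX_INITIALIZER, malloc(sizeof(struct buffer)) };

		pool_fini(&p);
		return 0;
	}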
@@ -258,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev)
 	}
 }
 
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
 {
 	int r;
 
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
 	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
-	mutex_lock(&rdev->cp.mutex);
 	while (ndw > (rdev->cp.ring_free_dw - 1)) {
 		radeon_ring_free_size(rdev);
 		if (ndw < rdev->cp.ring_free_dw) {
 			break;
 		}
 		r = radeon_fence_wait_next(rdev);
-		if (r) {
-			mutex_unlock(&rdev->cp.mutex);
+		if (r)
 			return r;
-		}
 	}
 	rdev->cp.count_dw = ndw;
 	rdev->cp.wptr_old = rdev->cp.wptr;
 	return 0;
 }
 
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+	int r;
+
+	mutex_lock(&rdev->cp.mutex);
+	r = radeon_ring_alloc(rdev, ndw);
+	if (r) {
+		mutex_unlock(&rdev->cp.mutex);
+		return r;
+	}
+	return 0;
+}
+
+void radeon_ring_commit(struct radeon_device *rdev)
 {
 	unsigned count_dw_pad;
 	unsigned i;
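The hunk above splits the old radeon_ring_lock() into a lockless core, radeon_ring_alloc(), which no longer touches cp.mutex and so can be called by code that already holds it, plus a thin radeon_ring_lock() wrapper that takes the mutex and releases it again on failure so no error path leaks the lock. A standalone sketch of that wrapper pattern, assuming simplified names and omitting the fence wait:

	#include <errno.h>
	#include <pthread.h>

	static pthread_mutex_t ring_mutex = PTHREAD_MUTEX_INITIALIZER;
	static unsigned ring_free_dw = 1024;

	/* Lockless core: the caller must already hold ring_mutex. */
	static int ring_alloc(unsigned ndw)
	{
		if (ndw > ring_free_dw - 1)
			return -ENOMEM;	/* simplified: the driver waits on fences instead */
		ring_free_dw -= ndw;
		return 0;
	}

	/* Locking wrapper: drops the mutex on failure, keeps it on success
	 * until the matching unlock, mirroring radeon_ring_lock(). */
	static int ring_lock(unsigned ndw)
	{
		int r;

		pthread_mutex_lock(&ring_mutex);
		r = ring_alloc(ndw);
		if (r)
			pthread_mutex_unlock(&ring_mutex);
		return r;
	}

	int main(void)
	{
		return ring_lock(16);	/* 0 on success, with the mutex still held */
	}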
@@ -295,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
 	}
 	DRM_MEMORYBARRIER();
 	radeon_cp_commit(rdev);
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+	radeon_ring_commit(rdev);
 	mutex_unlock(&rdev->cp.mutex);
 }
 
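radeon_ring_commit() gets the same split on the release side: it pads the ring, issues the memory barrier, and kicks the CP without touching cp.mutex, while radeon_ring_unlock_commit() becomes the lock-dropping counterpart of radeon_ring_lock(). A sketch of the resulting caller contract, in the driver's own style; the helper and the idea that some caller already holds cp.mutex are illustrative assumptions, not code from this patch:

	/* Hypothetical helper: emit one NOP dword via either pairing. */
	static int emit_nop(struct radeon_device *rdev, bool mutex_held)
	{
		int r;

		r = mutex_held ? radeon_ring_alloc(rdev, 1)
			       : radeon_ring_lock(rdev, 1);
		if (r)
			return r;

		radeon_ring_write(rdev, 2 << 30);	/* type-2 NOP dword */

		if (mutex_held)
			radeon_ring_commit(rdev);	/* cp.mutex stays with the caller */
		else
			radeon_ring_unlock_commit(rdev);	/* commit, then drop cp.mutex */
		return 0;
	}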
@@ -344,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 void radeon_ring_fini(struct radeon_device *rdev)
 {
 	int r;
+	struct radeon_bo *ring_obj;
 
 	mutex_lock(&rdev->cp.mutex);
-	if (rdev->cp.ring_obj) {
-		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+	ring_obj = rdev->cp.ring_obj;
+	rdev->cp.ring = NULL;
+	rdev->cp.ring_obj = NULL;
+	mutex_unlock(&rdev->cp.mutex);
+
+	if (ring_obj) {
+		r = radeon_bo_reserve(ring_obj, false);
 		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->cp.ring_obj);
-			radeon_bo_unpin(rdev->cp.ring_obj);
-			radeon_bo_unreserve(rdev->cp.ring_obj);
+			radeon_bo_kunmap(ring_obj);
+			radeon_bo_unpin(ring_obj);
+			radeon_bo_unreserve(ring_obj);
 		}
-		radeon_bo_unref(&rdev->cp.ring_obj);
-		rdev->cp.ring = NULL;
-		rdev->cp.ring_obj = NULL;
+		radeon_bo_unref(&ring_obj);
 	}
-	mutex_unlock(&rdev->cp.mutex);
 }
 
 
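radeon_ring_fini() receives the same treatment as radeon_ib_pool_fini() in the first hunk: cp.ring and cp.ring_obj are detached and cleared under cp.mutex, and the blocking reserve/kunmap/unpin/unref sequence then operates on the local ring_obj copy with the mutex already released (compare the detach-then-release sketch after the first hunk).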