Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_object.c	| 44
1 files changed, 20 insertions, 24 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d9ffe1f56e8f..f1da370928eb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -220,9 +220,11 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
-        if (rdev->flags & RADEON_IS_IGP) {
-                /* Useless to evict on IGP chips */
-                return 0;
+        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
+        if (0 && (rdev->flags & RADEON_IS_IGP)) {
+                if (rdev->mc.igp_sideport_enabled == false)
+                        /* Useless to evict on IGP chips */
+                        return 0;
         }
         return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 }
@@ -304,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
         }
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
         struct radeon_bo_list *lobj;
         struct radeon_bo *bo;
-        struct radeon_fence *old_fence = NULL;
         int r;
 
         r = radeon_bo_list_reserve(head);
@@ -332,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
                 }
                 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                 lobj->tiling_flags = bo->tiling_flags;
-                if (fence) {
-                        old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-                        bo->tbo.sync_obj = radeon_fence_ref(fence);
-                        bo->tbo.sync_obj_arg = NULL;
-                }
-                if (old_fence) {
-                        radeon_fence_unref(&old_fence);
-                }
         }
         return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
         struct radeon_bo_list *lobj;
-        struct radeon_fence *old_fence;
+        struct radeon_bo *bo;
+        struct radeon_fence *old_fence = NULL;
 
-        if (fence)
-                list_for_each_entry(lobj, head, list) {
-                        old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-                        if (old_fence == fence) {
-                                lobj->bo->tbo.sync_obj = NULL;
-                                radeon_fence_unref(&old_fence);
-                        }
+        list_for_each_entry(lobj, head, list) {
+                bo = lobj->bo;
+                spin_lock(&bo->tbo.lock);
+                old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+                bo->tbo.sync_obj = radeon_fence_ref(fence);
+                bo->tbo.sync_obj_arg = NULL;
+                spin_unlock(&bo->tbo.lock);
+                if (old_fence) {
+                        radeon_fence_unref(&old_fence);
                 }
-        radeon_bo_list_unreserve(head);
+        }
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
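Not part of the patch: a minimal sketch of how a command-submission path might drive the reworked API, now that radeon_bo_list_validate() no longer takes a fence and fencing is done in a separate pass by radeon_bo_list_fence() once the work has actually been queued. The caller shape and names (example_submit, the validated list, ib->fence) are illustrative assumptions, not taken from this diff.

/* Illustrative sketch only -- names and error handling are assumptions.
 * Order of operations after the rework: reserve/validate the BO list,
 * schedule the indirect buffer, then fence the whole list in one pass
 * and drop the reservations.
 */
static int example_submit(struct radeon_device *rdev,
                          struct list_head *validated,
                          struct radeon_ib *ib)
{
        int r;

        /* Validation no longer attaches a fence, so nothing is fenced
         * if we bail out before the IB is actually scheduled. */
        r = radeon_bo_list_validate(validated);
        if (r)
                return r;

        r = radeon_ib_schedule(rdev, ib);
        if (r) {
                radeon_bo_list_unreserve(validated);
                return r;
        }

        /* radeon_bo_list_fence() swaps in the new fence for every BO,
         * taking each bo->tbo.lock and dropping the old fence reference. */
        radeon_bo_list_fence(validated, ib->fence);
        radeon_bo_list_unreserve(validated);
        return 0;
}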