Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c  39
1 file changed, 17 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 4e636de877b2..f1da370928eb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -220,7 +220,8 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
-	if (rdev->flags & RADEON_IS_IGP) {
+	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
+	if (0 && (rdev->flags & RADEON_IS_IGP)) {
 		if (rdev->mc.igp_sideport_enabled == false)
 			/* Useless to evict on IGP chips */
 			return 0;
@@ -305,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
 	}
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
 	int r;
 
 	r = radeon_bo_list_reserve(head);
@@ -333,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
-		if (fence) {
-			old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-			bo->tbo.sync_obj = radeon_fence_ref(fence);
-			bo->tbo.sync_obj_arg = NULL;
-		}
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
 	}
 	return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
 	struct radeon_bo_list *lobj;
-	struct radeon_fence *old_fence;
+	struct radeon_bo *bo;
+	struct radeon_fence *old_fence = NULL;
 
-	if (fence)
-		list_for_each_entry(lobj, head, list) {
-			old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-			if (old_fence == fence) {
-				lobj->bo->tbo.sync_obj = NULL;
-				radeon_fence_unref(&old_fence);
-			}
+	list_for_each_entry(lobj, head, list) {
+		bo = lobj->bo;
+		spin_lock(&bo->tbo.lock);
+		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+		bo->tbo.sync_obj = radeon_fence_ref(fence);
+		bo->tbo.sync_obj_arg = NULL;
+		spin_unlock(&bo->tbo.lock);
+		if (old_fence) {
+			radeon_fence_unref(&old_fence);
 		}
-	radeon_bo_list_unreserve(head);
+	}
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
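
For context, a minimal caller sketch (not part of this commit; the function name is hypothetical) of how the reworked helpers fit together: radeon_bo_list_validate() no longer takes a fence, so a command-submission path would validate the list, emit its commands, and only then attach the fence covering that submission to every buffer before unreserving.

/* Hypothetical caller, for illustration only: the list is fenced after the
 * command stream is emitted, instead of during validation as before. */
static int example_cs_submit(struct list_head *head, struct radeon_fence *fence)
{
	int r;

	r = radeon_bo_list_validate(head);	/* reserves and places every BO */
	if (r)
		return r;

	/* ... emit the command stream covered by 'fence' here ... */

	radeon_bo_list_fence(head, fence);	/* attach fence to each BO's sync_obj */
	radeon_bo_list_unreserve(head);
	return 0;
}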