Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ttm.c	48
1 file changed, 9 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c94a2257761f..5b71c716d83f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -222,15 +222,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 {
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
-	struct radeon_fence *fence, *old_fence;
-	struct radeon_semaphore *sem = NULL;
-	int r;
+	struct radeon_fence *fence;
+	int r, ridx;
 
 	rdev = radeon_get_rdev(bo->bdev);
-	r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
-	if (unlikely(r)) {
-		return r;
-	}
+	ridx = radeon_copy_ring_index(rdev);
 	old_start = old_mem->start << PAGE_SHIFT;
 	new_start = new_mem->start << PAGE_SHIFT;
 
@@ -243,7 +239,6 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		break;
 	default:
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
-		radeon_fence_unref(&fence);
 		return -EINVAL;
 	}
 	switch (new_mem->mem_type) {
@@ -255,46 +250,23 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		break;
 	default:
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
-		radeon_fence_unref(&fence);
 		return -EINVAL;
 	}
-	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
+	if (!rdev->ring[ridx].ready) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
-		radeon_fence_unref(&fence);
 		return -EINVAL;
 	}
 
 	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
 	/* sync other rings */
-	old_fence = bo->sync_obj;
-	if (old_fence && old_fence->ring != fence->ring
-	    && !radeon_fence_signaled(old_fence)) {
-		bool sync_to_ring[RADEON_NUM_RINGS] = { };
-		sync_to_ring[old_fence->ring] = true;
-
-		r = radeon_semaphore_create(rdev, &sem);
-		if (r) {
-			radeon_fence_unref(&fence);
-			return r;
-		}
-
-		r = radeon_semaphore_sync_rings(rdev, sem,
-						sync_to_ring, fence->ring);
-		if (r) {
-			radeon_semaphore_free(rdev, sem, NULL);
-			radeon_fence_unref(&fence);
-			return r;
-		}
-	}
-
+	fence = bo->sync_obj;
 	r = radeon_copy(rdev, old_start, new_start,
 			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
-			fence);
+			&fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
 				      evict, no_wait_reserve, no_wait_gpu, new_mem);
-	radeon_semaphore_free(rdev, sem, fence);
 	radeon_fence_unref(&fence);
 	return r;
 }
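
Taken together, the three hunks above change who owns the fence: radeon_copy() now takes a struct radeon_fence ** and emits the fence itself, seeded with the BO's current sync_obj so the copy path can synchronize against it internally. That is why every early-return radeon_fence_unref() and the hand-rolled semaphore sync could be deleted. A condensed sketch of the post-patch flow, with the placement and ring-ready checks elided (the function name and parameter order here are illustrative, not the literal driver code):

static int move_blit_condensed(struct ttm_buffer_object *bo,
			       struct ttm_mem_reg *old_mem,
			       struct ttm_mem_reg *new_mem,
			       bool evict, bool no_wait_reserve,
			       bool no_wait_gpu)
{
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	/* Seed with the fence the BO is currently guarded by (may be
	 * NULL); radeon_copy() is expected to sync against it and then
	 * replace it with the fence of the newly emitted copy. */
	struct radeon_fence *fence = bo->sync_obj;
	uint64_t old_start = old_mem->start << PAGE_SHIFT;
	uint64_t new_start = new_mem->start << PAGE_SHIFT;
	int r;

	r = radeon_copy(rdev, old_start, new_start,
			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE),
			&fence);
	/* The FIXME in the original still applies: a copy error is not
	 * checked before handing the fence to TTM. */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, evict,
				      no_wait_reserve, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);	/* single teardown path */
	return r;
}
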
@@ -762,9 +734,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	}
 	DRM_INFO("radeon: %uM of GTT memory ready.\n",
 		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
-	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
-		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
-	}
+	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
 
 	r = radeon_ttm_debugfs_init(rdev);
 	if (r) {
@@ -825,9 +795,9 @@ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_NOPAGE;
 	}
 	rdev = radeon_get_rdev(bo->bdev);
-	mutex_lock(&rdev->vram_mutex);
+	down_read(&rdev->pm.mclk_lock);
 	r = ttm_vm_ops->fault(vma, vmf);
-	mutex_unlock(&rdev->vram_mutex);
+	up_read(&rdev->pm.mclk_lock);
 	return r;
 }
 
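The final hunk replaces the vram_mutex in the fault handler with the pm.mclk_lock read-write semaphore: concurrent page faults can now proceed in parallel under the read side, and only a path that actually changes the memory clock needs exclusive access. A minimal sketch of that reader/writer split, assuming the write side lives in the power-management reclocking code (that side is not part of this diff, and the lock name below is a stand-in for rdev->pm.mclk_lock):

#include <linux/rwsem.h>

static DECLARE_RWSEM(mclk_lock_example);	/* stands in for rdev->pm.mclk_lock */

/* Fault path: many faults may run concurrently under the read lock. */
static void fault_path_sketch(void)
{
	down_read(&mclk_lock_example);
	/* ... resolve the fault while memory clocks are stable ... */
	up_read(&mclk_lock_example);
}

/* Reclocking path (assumed): must exclude all readers while the memory
 * clock changes, so it takes the semaphore for write. */
static void reclock_path_sketch(void)
{
	down_write(&mclk_lock_example);
	/* ... change the memory clock ... */
	up_write(&mclk_lock_example);
}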