Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c |  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c    | 39
2 files changed, 24 insertions(+), 21 deletions(-)
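
This is the radeon side of TTM's split of its single no_wait flag into no_wait_reserve and no_wait_gpu: every TTM call visible in this diff (ttm_bo_validate, ttm_bo_mem_space, ttm_bo_move_ttm, ttm_bo_move_memcpy, ttm_bo_move_accel_cleanup) gains one extra boolean, and radeon simply threads the pair through, or passes false for both where it used to pass a single false. A minimal sketch of the call-shape change, taken from the ttm_bo_validate() hunks below; the parameter names interruptible/no_wait_reserve/no_wait_gpu are inferred from the ttm_bo_mem_space() hunks, not spelled out for ttm_bo_validate() in this diff:

	/* before: one flag covered both waiting for the reservation
	 * and waiting for the GPU */
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);

	/* after: the two waits are controlled independently; radeon
	 * passes false for both, keeping the old blocking behaviour */
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
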
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index dc7e3f44913..4b441f87f47 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -191,7 +191,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 	}
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
@@ -215,7 +215,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 		return 0;
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
 	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 	return r;
@@ -330,7 +330,7 @@ int radeon_bo_list_validate(struct list_head *head)
 							lobj->rdomain);
 		}
 		r = ttm_bo_validate(&bo->tbo, &bo->placement,
-					true, false);
+					true, false, false);
 		if (unlikely(r))
 			return r;
 	}
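
In radeon_object.c the conversion is mechanical: the three ttm_bo_validate() callers (pin, unpin, and the CS list validation) pass false for both new flags, so they still block on the reservation and on the GPU exactly as the old single false did. A condensed sketch of the pin path as it reads after this patch, quoting only the lines touched by the hunk above, with comments naming the arguments under the assumption that the order matches ttm_bo_mem_space() below:

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	/* interruptible = false, no_wait_reserve = false, no_wait_gpu = false */
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0))
		bo->pin_count = 1;
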
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 43c5ab34b63..ba4724c38ac 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -243,9 +243,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-			bool evict, int no_wait,
+			bool evict, int no_wait_reserve, bool no_wait_gpu,
 			struct ttm_mem_reg *new_mem,
 			struct ttm_mem_reg *old_mem)
 {
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
@@ -289,13 +289,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
-			evict, no_wait, new_mem);
+			evict, no_wait_reserve, no_wait_gpu, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible, bool no_wait,
+				bool evict, bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -316,7 +317,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait);
+			     interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -330,11 +331,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out_cleanup:
 	if (tmp_mem.mm_node) {
 		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -348,7 +349,8 @@ out_cleanup:
 }
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible, bool no_wait,
+				bool evict, bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -368,15 +370,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -393,8 +395,9 @@ out_cleanup:
 }
 
 static int radeon_bo_move(struct ttm_buffer_object *bo,
-			bool evict, bool interruptible, bool no_wait,
-			struct ttm_mem_reg *new_mem)
+			bool evict, bool interruptible,
+			bool no_wait_reserve, bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -421,18 +424,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
 		r = radeon_move_vram_ram(bo, evict, interruptible,
-					no_wait, new_mem);
+					no_wait_reserve, no_wait_gpu, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
 		r = radeon_move_ram_vram(bo, evict, interruptible,
-					no_wait, new_mem);
+					no_wait_reserve, no_wait_gpu, new_mem);
 	} else {
-		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	}
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 	}
 
 	return r;
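
Taken together, the radeon_ttm.c hunks show both flags being threaded unchanged through every move path: VRAM <-> system moves bounce through a temporary GTT placement (TTM_PL_FLAG_TT) before blitting, everything else is blitted directly, and any failure falls back to ttm_bo_move_memcpy(). A condensed sketch of the resulting dispatch, paraphrased from the hunks above; the early capability checks and the radeon_move_null() fast path that precede it are elided:

	static int radeon_bo_move(struct ttm_buffer_object *bo,
				  bool evict, bool interruptible,
				  bool no_wait_reserve, bool no_wait_gpu,
				  struct ttm_mem_reg *new_mem)
	{
		struct ttm_mem_reg *old_mem = &bo->mem;
		int r;

		/* VRAM <-> system moves stage through GTT; everything
		 * else is blitted directly */
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    new_mem->mem_type == TTM_PL_SYSTEM)
			r = radeon_move_vram_ram(bo, evict, interruptible,
						 no_wait_reserve, no_wait_gpu,
						 new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM &&
			 new_mem->mem_type == TTM_PL_VRAM)
			r = radeon_move_ram_vram(bo, evict, interruptible,
						 no_wait_reserve, no_wait_gpu,
						 new_mem);
		else
			r = radeon_move_blit(bo, evict, no_wait_reserve,
					     no_wait_gpu, new_mem, old_mem);

		/* any failure above falls back to a CPU copy */
		if (r)
			r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
					       no_wait_gpu, new_mem);
		return r;
	}
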