author    Jerome Glisse <jglisse@redhat.com>  2010-04-07 06:21:19 -0400
committer Dave Airlie <airlied@redhat.com>    2010-04-07 20:21:19 -0400
commit    9d87fa2138d06ff400551800d67d522625033e35 (patch)
tree      284cd0f73ccb2f2fad1c71f974d4e9e4d0035e81 /drivers/gpu/drm/radeon/radeon_ttm.c
parent    3a89b4a9ca7ce11e3b7d5119aea917b9fc29a302 (diff)
drm/ttm: split no_wait argument in 2 GPU or reserve wait
There are cases where we want to wait only for the GPU while not waiting
for other buffers to be unreserved. This patch splits the no_wait argument
all the way down the whole TTM path so that the upper levels can decide
what to wait on.

[airlied: squashed these 4 for bisectability reasons.]

drm/radeon/kms: update to TTM no_wait splitted argument
drm/nouveau: update to TTM no_wait splitted argument
drm/vmwgfx: update to TTM no_wait splitted argument

[vmwgfx patch: Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>]

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Acked-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
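The point of the split is that a buffer move involves two independent waits:
taking the buffer's reservation lock, and waiting for the GPU to finish with
the buffer. Below is a minimal standalone sketch of that idea; everything in
it (try_move_buffer, reserve_buffer, wait_for_gpu, the buffer struct) is a
made-up stand-in for illustration and is not the real TTM API.

/*
 * Sketch only: illustrates why one no_wait flag is not enough.
 * None of these names exist in TTM; they model its behavior loosely.
 */
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

struct buffer {
	bool reserved;	/* reservation lock held by someone else? */
	bool gpu_busy;	/* GPU still has work queued on the buffer? */
};

/* Stand-in for taking the reservation lock on a buffer. */
static int reserve_buffer(struct buffer *bo, bool no_wait)
{
	if (bo->reserved && no_wait)
		return -EBUSY;	/* caller asked not to block on reservation */
	bo->reserved = true;	/* pretend the (possibly blocking) lock succeeded */
	return 0;
}

/* Stand-in for fencing: wait until the GPU is done with the buffer. */
static int wait_for_gpu(struct buffer *bo)
{
	bo->gpu_busy = false;
	return 0;
}

/*
 * With a single no_wait flag a caller had to block on both the
 * reservation and the GPU, or on neither.  Splitting it lets the two
 * waits be controlled independently, which is what the patch threads
 * through ttm_bo_mem_space(), ttm_bo_move_ttm() and friends below.
 */
static int try_move_buffer(struct buffer *bo,
			   bool no_wait_reserve, bool no_wait_gpu)
{
	int r = reserve_buffer(bo, no_wait_reserve);
	if (r)
		return r;
	if (bo->gpu_busy) {
		if (no_wait_gpu)
			return -EBUSY;	/* don't block on the GPU either */
		r = wait_for_gpu(bo);
		if (r)
			return r;
	}
	return 0;	/* buffer is idle and reserved; safe to move */
}

int main(void)
{
	struct buffer bo = { .reserved = false, .gpu_busy = true };

	/* Wait for the GPU, but bail out if the buffer is contended. */
	printf("move: %d\n", try_move_buffer(&bo, true, false));
	return 0;
}

In the patch itself the flag pair simply replaces the old no_wait parameter
in every signature the diff below touches.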
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c  39
1 file changed, 21 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 43c5ab34b63..ba4724c38ac 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -243,9 +243,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-			bool evict, int no_wait,
+			bool evict, int no_wait_reserve, bool no_wait_gpu,
 			struct ttm_mem_reg *new_mem,
 			struct ttm_mem_reg *old_mem)
 {
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
@@ -289,13 +289,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
-				      evict, no_wait, new_mem);
+				      evict, no_wait_reserve, no_wait_gpu, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
-			bool evict, bool interruptible, bool no_wait,
+			bool evict, bool interruptible,
+			bool no_wait_reserve, bool no_wait_gpu,
 			struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -316,7 +317,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait);
+			     interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -330,11 +331,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out_cleanup:
 	if (tmp_mem.mm_node) {
 		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -348,7 +349,8 @@ out_cleanup:
 }
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
-			bool evict, bool interruptible, bool no_wait,
+			bool evict, bool interruptible,
+			bool no_wait_reserve, bool no_wait_gpu,
 			struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -368,15 +370,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -393,8 +395,9 @@ out_cleanup:
 }
 
 static int radeon_bo_move(struct ttm_buffer_object *bo,
-			bool evict, bool interruptible, bool no_wait,
-			struct ttm_mem_reg *new_mem)
+			bool evict, bool interruptible,
+			bool no_wait_reserve, bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -421,18 +424,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
 		r = radeon_move_vram_ram(bo, evict, interruptible,
-					no_wait, new_mem);
+					no_wait_reserve, no_wait_gpu, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
 		r = radeon_move_ram_vram(bo, evict, interruptible,
-					no_wait, new_mem);
+					no_wait_reserve, no_wait_gpu, new_mem);
 	} else {
-		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	}
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 	}
 
 	return r;