author    Jerome Glisse <jglisse@redhat.com>    2010-04-07 06:21:19 -0400
committer Dave Airlie <airlied@redhat.com>     2010-04-07 20:21:19 -0400
commit    9d87fa2138d06ff400551800d67d522625033e35 (patch)
tree      284cd0f73ccb2f2fad1c71f974d4e9e4d0035e81
parent    3a89b4a9ca7ce11e3b7d5119aea917b9fc29a302 (diff)
drm/ttm: split no_wait argument in 2 GPU or reserve wait
There is a case where we want to be able to wait only for the GPU while not
waiting for other buffers to be unreserved. This patch splits the no_wait
argument all the way down the whole TTM path so that upper levels can decide
what to wait on and what not to.

[airlied: squashed these 4 for bisectability reasons.]

drm/radeon/kms: update to TTM no_wait splitted argument
drm/nouveau: update to TTM no_wait splitted argument
drm/vmwgfx: update to TTM no_wait splitted argument

[vmwgfx patch: Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>]

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Acked-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
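To make the new calling convention concrete, here is a minimal caller sketch
(hypothetical code, not part of this patch; the helper name and flag choices
are illustrative only):

/* Hypothetical caller -- illustration only, not part of this patch.
 * Before the split, a single flag covered both waits:
 *
 *	ret = ttm_bo_validate(bo, placement, interruptible, no_wait);
 *
 * After the split, the reservation wait and the GPU wait are independent,
 * so a caller can sleep on reservation but refuse to block on the GPU:
 */
static int try_validate_nonblocking_gpu(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	return ttm_bo_validate(bo, placement,
			       true,	/* interruptible */
			       false,	/* no_wait_reserve: sleep until reserved */
			       true);	/* no_wait_gpu: -EBUSY if the GPU is busy */
}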
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c      45
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c      2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c     6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c       39
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c              57
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c          9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c    4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c         4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c    2
-rw-r--r--  include/drm/ttm/ttm_bo_api.h               6
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h           29
11 files changed, 115 insertions, 88 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 026612471c9..5a167de895c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -219,7 +219,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 	for (i = 0; i < nvbo->placement.num_placement; i++)
 		nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -256,7 +256,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 	for (i = 0; i < nvbo->placement.num_placement; i++)
 		nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -456,7 +456,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
+			      struct nouveau_bo *nvbo, bool evict,
+			      bool no_wait_reserve, bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_fence *fence = NULL;
@@ -467,7 +468,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 		return ret;
 
 	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-					evict, no_wait, new_mem);
+					evict, no_wait_reserve, no_wait_gpu, new_mem);
 	if (nvbo->channel && nvbo->channel != chan)
 		ret = nouveau_fence_wait(fence, NULL, false, false);
 	nouveau_fence_unref((void *)&fence);
@@ -491,7 +492,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     int no_wait, struct ttm_mem_reg *new_mem)
+		     bool no_wait_reserve, bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -569,12 +571,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		dst_offset += (PAGE_SIZE * line_count);
 	}
 
-	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 }
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait, struct ttm_mem_reg *new_mem)
+		      bool no_wait_reserve, bool no_wait_gpu,
+		      struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -587,7 +590,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		return ret;
 
@@ -595,11 +598,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
 	if (tmp_mem.mm_node) {
 		spin_lock(&bo->bdev->glob->lru_lock);
@@ -612,7 +615,8 @@ out:
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait, struct ttm_mem_reg *new_mem)
+		      bool no_wait_reserve, bool no_wait_gpu,
+		      struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -625,15 +629,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	if (ret)
 		goto out;
 
@@ -700,7 +704,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-		bool no_wait, struct ttm_mem_reg *new_mem)
+		bool no_wait_reserve, bool no_wait_gpu,
+		struct ttm_mem_reg *new_mem)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -715,7 +720,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	/* Software copy if the card isn't up and running yet. */
 	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
 	    !dev_priv->channel) {
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 		goto out;
 	}
 
@@ -729,17 +734,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	/* Hardware assisted copy. */
 	if (new_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	else if (old_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	else
-		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 
 	if (!ret)
 		goto out;
 
 	/* Fallback to software copy. */
-	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 
 out:
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0d22f66f1c7..1f5040363b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -387,7 +387,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
 		nvbo->channel = chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-				      false, false);
+				      false, false, false);
 		nvbo->channel = NULL;
 		if (unlikely(ret)) {
 			NV_ERROR(dev, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index dc7e3f44913..4b441f87f47 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -191,7 +191,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 	}
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
@@ -215,7 +215,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 		return 0;
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
 	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 	return r;
@@ -330,7 +330,7 @@ int radeon_bo_list_validate(struct list_head *head)
 						lobj->rdomain);
 		}
 		r = ttm_bo_validate(&bo->tbo, &bo->placement,
-				    true, false);
+				    true, false, false);
 		if (unlikely(r))
 			return r;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 43c5ab34b63..ba4724c38ac 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -243,9 +243,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-			bool evict, int no_wait,
+			bool evict, int no_wait_reserve, bool no_wait_gpu,
 			struct ttm_mem_reg *new_mem,
 			struct ttm_mem_reg *old_mem)
 {
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
@@ -289,13 +289,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
-				      evict, no_wait, new_mem);
+				      evict, no_wait_reserve, no_wait_gpu, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible, bool no_wait,
+				bool evict, bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -316,7 +317,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait);
+			     interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -330,11 +331,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out_cleanup:
 	if (tmp_mem.mm_node) {
 		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -348,7 +349,8 @@ out_cleanup:
 }
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible, bool no_wait,
+				bool evict, bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -368,15 +370,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -393,8 +395,9 @@ out_cleanup:
 }
 
 static int radeon_bo_move(struct ttm_buffer_object *bo,
-			bool evict, bool interruptible, bool no_wait,
-			struct ttm_mem_reg *new_mem)
+			bool evict, bool interruptible,
+			bool no_wait_reserve, bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -421,18 +424,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
 		r = radeon_move_vram_ram(bo, evict, interruptible,
-					no_wait, new_mem);
+					no_wait_reserve, no_wait_gpu, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
 		r = radeon_move_ram_vram(bo, evict, interruptible,
-					no_wait, new_mem);
+					no_wait_reserve, no_wait_gpu, new_mem);
 	} else {
-		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	}
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 	}
 
 	return r;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index dd47b2a9a79..40631e2866f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -357,7 +357,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem,
-				  bool evict, bool interruptible, bool no_wait)
+				  bool evict, bool interruptible,
+				  bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +403,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, interruptible,
-					 no_wait, mem);
+					 no_wait_reserve, no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 
 	if (ret)
 		goto out_err;
@@ -606,7 +607,7 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 EXPORT_SYMBOL(ttm_bo_unref);
 
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-			bool no_wait)
+			bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +616,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	int ret = 0;
 
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 
 	if (unlikely(ret != 0)) {
@@ -638,7 +639,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-				no_wait);
+				no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
@@ -650,7 +651,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	}
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-				     no_wait);
+				     no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +671,8 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 				uint32_t mem_type,
-				bool interruptible, bool no_wait)
+				bool interruptible, bool no_wait_reserve,
+				bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +689,11 @@ retry:
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
 		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait))
+		if (likely(!no_wait_gpu))
 			ret = ttm_bo_wait_unreserved(bo, interruptible);
 
 		kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +715,7 @@ retry:
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);
 
 	kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +766,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 					uint32_t mem_type,
 					struct ttm_placement *placement,
 					struct ttm_mem_reg *mem,
-					bool interruptible, bool no_wait)
+					bool interruptible,
+					bool no_wait_reserve,
+					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +789,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		}
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-						no_wait);
+						no_wait_reserve, no_wait_gpu);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
@@ -855,7 +859,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			struct ttm_mem_reg *mem,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -952,7 +957,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	}
 
 	ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-					interruptible, no_wait);
+					interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret == 0 && mem->mm_node) {
 		mem->placement = cur_flags;
 		mem->mm_node->private = bo;
@@ -978,7 +983,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
@@ -992,7 +998,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * instead of doing it here.
 	 */
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 	if (ret)
 		return ret;
@@ -1002,10 +1008,10 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	/*
 	 * Determine where to move the buffer.
 	 */
-	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		goto out_unlock;
-	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
@@ -1039,7 +1045,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	int ret;
 
@@ -1054,7 +1061,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	 */
 	ret = ttm_bo_mem_compat(placement, &bo->mem);
 	if (ret < 0) {
-		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
 		if (ret)
 			return ret;
 	} else {
@@ -1175,7 +1182,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		goto out_err;
 	}
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
 	if (ret)
 		goto out_err;
 
@@ -1249,7 +1256,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	spin_lock(&glob->lru_lock);
 	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
 		if (ret) {
 			if (allow_errors) {
 				return ret;
@@ -1839,7 +1846,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 		evict_mem.mem_type = TTM_PL_SYSTEM;
 
 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-					     false, false);
+					     false, false, false);
 		if (unlikely(ret != 0))
 			goto out;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 5ca37a58a98..865b2a826e1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -49,7 +49,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		    bool evict, bool no_wait_reserve,
+		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -207,7 +208,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -525,7 +527,8 @@ int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      void *sync_obj,
 			      void *sync_obj_arg,
-			      bool evict, bool no_wait,
+			      bool evict, bool no_wait_reserve,
+			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0897359b3e4..dbd36b8910c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * Put BO in VRAM, only if there is space.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
 	if (unlikely(ret == -ERESTARTSYS))
 		return ret;
 
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * previous contents.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a93367041cd..80125ffc4e2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -628,7 +628,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
 	ttm_bo_unreserve(bo);
 
 	return ret;
@@ -652,7 +652,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
-	ret = ttm_bo_validate(bo, &ne_placement, false, false);
+	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
 	ttm_bo_unreserve(bo);
err_unlock:
 	ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 5b6eabeb7f5..ad566c85b07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
 	if (pin)
 		overlay_placement = &vmw_vram_ne_placement;
 
-	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
 
 	ttm_bo_unreserve(bo);
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 81eb9f45883..8c8005ec4ea 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -313,7 +313,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
  * @interruptible: Sleep interruptible if sleeping.
- * @no_wait: Return immediately if the buffer is busy.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Changes placement and caching policy of the buffer object
  * according proposed placement.
@@ -325,7 +326,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  */
 extern int ttm_bo_validate(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement,
-				bool interruptible, bool no_wait);
+				bool interruptible, bool no_wait_reserve,
+				bool no_wait_gpu);
 
 /**
  * ttm_bo_unref
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e929c27ede2..69f70e418c2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -311,7 +311,8 @@ struct ttm_bo_driver {
 	 */
 	int (*move) (struct ttm_buffer_object *bo,
 		     bool evict, bool interruptible,
-		     bool no_wait, struct ttm_mem_reg *new_mem);
+		     bool no_wait_reserve, bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem);
 
 	/**
 	 * struct ttm_bo_driver_member verify_access
@@ -633,7 +634,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
  * @proposed_placement: Proposed new placement for the buffer object.
  * @mem: A struct ttm_mem_reg.
  * @interruptible: Sleep interruptible when sliping.
- * @no_wait: Don't sleep waiting for space to become available.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Allocate memory space for the buffer object pointed to by @bo, using
  * the placement flags in @mem, potentially evicting other idle buffer objects.
@@ -647,7 +649,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
 extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement,
 				struct ttm_mem_reg *mem,
-				bool interruptible, bool no_wait);
+				bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu);
 /**
  * ttm_bo_wait_for_cpu
  *
@@ -826,7 +829,8 @@ extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Optimized move function for a buffer object with both old and
@@ -840,15 +844,16 @@ extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-			   bool evict, bool no_wait,
-			   struct ttm_mem_reg *new_mem);
+			   bool evict, bool no_wait_reserve,
+			   bool no_wait_gpu, struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
@@ -862,8 +867,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict,
-			      bool no_wait, struct ttm_mem_reg *new_mem);
+			      bool evict, bool no_wait_reserve,
+			      bool no_wait_gpu, struct ttm_mem_reg *new_mem);
 
 /**
 * ttm_bo_free_old_node
@@ -882,7 +887,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 * @sync_obj_arg: An argument to pass to the sync object idle / wait
 * functions.
 * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
@@ -896,7 +902,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
				     void *sync_obj_arg,
-				     bool evict, bool no_wait,
+				     bool evict, bool no_wait_reserve,
+				     bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
/**
 * ttm_io_prot