about summary refs log tree commit diff stats
path: root/drivers/gpu/drm
diff options
context:
space:
mode:
authorMaarten Lankhorst <m.b.lankhorst@gmail.com>2012-11-28 06:25:44 -0500
committerDave Airlie <airlied@redhat.com>2012-12-10 05:21:30 -0500
commit97a875cbdf89a4638eea57c2b456c7cc4e3e8b21 (patch)
tree286ff23a3f8e389ec4fa1a6f3b58cbee4fc8e9fe /drivers/gpu/drm
parente7ab20197be3ee5fd75441e1cff0c7cdfea5bf1a (diff)
drm/ttm: remove no_wait_reserve, v3
All items on the lru list are always reservable, so this is a stupid thing to keep. Not only that, it is used in a way which would guarantee deadlocks if it were ever to be set to block on reserve.

This is a lot of churn, but mostly because of the removal of the argument which can be nested arbitrarily deeply in many places.

No change of code in this patch except removal of the no_wait_reserve argument, the previous patch removed the use of no_wait_reserve.

v2:
 - Warn if -EBUSY is returned on reservation, all objects on the list should be reservable. Adjusted patch slightly due to conflicts.
v3:
 - Focus on no_wait_reserve removal only.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c10
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c10
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c31
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c46
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2
13 files changed, 99 insertions, 100 deletions
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 0a54f65a8ebb..3602731a6112 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
186 186
187static int ast_bo_move(struct ttm_buffer_object *bo, 187static int ast_bo_move(struct ttm_buffer_object *bo,
188 bool evict, bool interruptible, 188 bool evict, bool interruptible,
189 bool no_wait_reserve, bool no_wait_gpu, 189 bool no_wait_gpu,
190 struct ttm_mem_reg *new_mem) 190 struct ttm_mem_reg *new_mem)
191{ 191{
192 int r; 192 int r;
193 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
194 return r; 194 return r;
195} 195}
196 196
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
383 ast_ttm_placement(bo, pl_flag); 383 ast_ttm_placement(bo, pl_flag);
384 for (i = 0; i < bo->placement.num_placement; i++) 384 for (i = 0; i < bo->placement.num_placement; i++)
385 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 385 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
386 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 386 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
387 if (ret) 387 if (ret)
388 return ret; 388 return ret;
389 389
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
406 406
407 for (i = 0; i < bo->placement.num_placement ; i++) 407 for (i = 0; i < bo->placement.num_placement ; i++)
408 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 408 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
409 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 409 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
410 if (ret) 410 if (ret)
411 return ret; 411 return ret;
412 412
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
431 for (i = 0; i < bo->placement.num_placement ; i++) 431 for (i = 0; i < bo->placement.num_placement ; i++)
432 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 432 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
433 433
434 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 434 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
435 if (ret) { 435 if (ret) {
436 DRM_ERROR("pushing to VRAM failed\n"); 436 DRM_ERROR("pushing to VRAM failed\n");
437 return ret; 437 return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 90d770143cc2..1413a26e4905 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
186 186
187static int cirrus_bo_move(struct ttm_buffer_object *bo, 187static int cirrus_bo_move(struct ttm_buffer_object *bo,
188 bool evict, bool interruptible, 188 bool evict, bool interruptible,
189 bool no_wait_reserve, bool no_wait_gpu, 189 bool no_wait_gpu,
190 struct ttm_mem_reg *new_mem) 190 struct ttm_mem_reg *new_mem)
191{ 191{
192 int r; 192 int r;
193 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
194 return r; 194 return r;
195} 195}
196 196
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
388 cirrus_ttm_placement(bo, pl_flag); 388 cirrus_ttm_placement(bo, pl_flag);
389 for (i = 0; i < bo->placement.num_placement; i++) 389 for (i = 0; i < bo->placement.num_placement; i++)
390 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 390 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
391 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 391 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
392 if (ret) 392 if (ret)
393 return ret; 393 return ret;
394 394
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
411 411
412 for (i = 0; i < bo->placement.num_placement ; i++) 412 for (i = 0; i < bo->placement.num_placement ; i++)
413 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 413 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
414 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 414 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
415 if (ret) 415 if (ret)
416 return ret; 416 return ret;
417 417
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
436 for (i = 0; i < bo->placement.num_placement ; i++) 436 for (i = 0; i < bo->placement.num_placement ; i++)
437 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 437 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
438 438
439 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 439 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
440 if (ret) { 440 if (ret) {
441 DRM_ERROR("pushing to VRAM failed\n"); 441 DRM_ERROR("pushing to VRAM failed\n");
442 return ret; 442 return ret;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 49d60a620122..8fc9d9201945 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -186,11 +186,11 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
186 186
187static int mgag200_bo_move(struct ttm_buffer_object *bo, 187static int mgag200_bo_move(struct ttm_buffer_object *bo,
188 bool evict, bool interruptible, 188 bool evict, bool interruptible,
189 bool no_wait_reserve, bool no_wait_gpu, 189 bool no_wait_gpu,
190 struct ttm_mem_reg *new_mem) 190 struct ttm_mem_reg *new_mem)
191{ 191{
192 int r; 192 int r;
193 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
194 return r; 194 return r;
195} 195}
196 196
@@ -382,7 +382,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
382 mgag200_ttm_placement(bo, pl_flag); 382 mgag200_ttm_placement(bo, pl_flag);
383 for (i = 0; i < bo->placement.num_placement; i++) 383 for (i = 0; i < bo->placement.num_placement; i++)
384 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 384 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
385 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 385 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
386 if (ret) 386 if (ret)
387 return ret; 387 return ret;
388 388
@@ -405,7 +405,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
405 405
406 for (i = 0; i < bo->placement.num_placement ; i++) 406 for (i = 0; i < bo->placement.num_placement ; i++)
407 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 407 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
408 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 408 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
409 if (ret) 409 if (ret)
410 return ret; 410 return ret;
411 411
@@ -430,7 +430,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
430 for (i = 0; i < bo->placement.num_placement ; i++) 430 for (i = 0; i < bo->placement.num_placement ; i++)
431 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 431 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
432 432
433 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 433 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
434 if (ret) { 434 if (ret) {
435 DRM_ERROR("pushing to VRAM failed\n"); 435 DRM_ERROR("pushing to VRAM failed\n");
436 return ret; 436 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4c950b4cf416..5614c89148cb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -315,7 +315,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
315 315
316 nouveau_bo_placement_set(nvbo, memtype, 0); 316 nouveau_bo_placement_set(nvbo, memtype, 0);
317 317
318 ret = nouveau_bo_validate(nvbo, false, false, false); 318 ret = nouveau_bo_validate(nvbo, false, false);
319 if (ret == 0) { 319 if (ret == 0) {
320 switch (bo->mem.mem_type) { 320 switch (bo->mem.mem_type) {
321 case TTM_PL_VRAM: 321 case TTM_PL_VRAM:
@@ -351,7 +351,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
351 351
352 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); 352 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
353 353
354 ret = nouveau_bo_validate(nvbo, false, false, false); 354 ret = nouveau_bo_validate(nvbo, false, false);
355 if (ret == 0) { 355 if (ret == 0) {
356 switch (bo->mem.mem_type) { 356 switch (bo->mem.mem_type) {
357 case TTM_PL_VRAM: 357 case TTM_PL_VRAM:
@@ -392,12 +392,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
392 392
393int 393int
394nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, 394nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
395 bool no_wait_reserve, bool no_wait_gpu) 395 bool no_wait_gpu)
396{ 396{
397 int ret; 397 int ret;
398 398
399 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible, 399 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
400 no_wait_reserve, no_wait_gpu); 400 interruptible, no_wait_gpu);
401 if (ret) 401 if (ret)
402 return ret; 402 return ret;
403 403
@@ -556,8 +556,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
556static int 556static int
557nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, 557nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
558 struct nouveau_bo *nvbo, bool evict, 558 struct nouveau_bo *nvbo, bool evict,
559 bool no_wait_reserve, bool no_wait_gpu, 559 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
560 struct ttm_mem_reg *new_mem)
561{ 560{
562 struct nouveau_fence *fence = NULL; 561 struct nouveau_fence *fence = NULL;
563 int ret; 562 int ret;
@@ -567,7 +566,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
567 return ret; 566 return ret;
568 567
569 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict, 568 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
570 no_wait_reserve, no_wait_gpu, new_mem); 569 no_wait_gpu, new_mem);
571 nouveau_fence_unref(&fence); 570 nouveau_fence_unref(&fence);
572 return ret; 571 return ret;
573} 572}
@@ -965,8 +964,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
965 964
966static int 965static int
967nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, 966nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
968 bool no_wait_reserve, bool no_wait_gpu, 967 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
969 struct ttm_mem_reg *new_mem)
970{ 968{
971 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 969 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
972 struct nouveau_channel *chan = chan = drm->channel; 970 struct nouveau_channel *chan = chan = drm->channel;
@@ -995,7 +993,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
995 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); 993 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
996 if (ret == 0) { 994 if (ret == 0) {
997 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, 995 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
998 no_wait_reserve,
999 no_wait_gpu, new_mem); 996 no_wait_gpu, new_mem);
1000 } 997 }
1001 998
@@ -1064,8 +1061,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1064 1061
1065static int 1062static int
1066nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, 1063nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1067 bool no_wait_reserve, bool no_wait_gpu, 1064 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1068 struct ttm_mem_reg *new_mem)
1069{ 1065{
1070 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 1066 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1071 struct ttm_placement placement; 1067 struct ttm_placement placement;
@@ -1078,7 +1074,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1078 1074
1079 tmp_mem = *new_mem; 1075 tmp_mem = *new_mem;
1080 tmp_mem.mm_node = NULL; 1076 tmp_mem.mm_node = NULL;
1081 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); 1077 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1082 if (ret) 1078 if (ret)
1083 return ret; 1079 return ret;
1084 1080
@@ -1086,11 +1082,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1086 if (ret) 1082 if (ret)
1087 goto out; 1083 goto out;
1088 1084
1089 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem); 1085 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
1090 if (ret) 1086 if (ret)
1091 goto out; 1087 goto out;
1092 1088
1093 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); 1089 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
1094out: 1090out:
1095 ttm_bo_mem_put(bo, &tmp_mem); 1091 ttm_bo_mem_put(bo, &tmp_mem);
1096 return ret; 1092 return ret;
@@ -1098,8 +1094,7 @@ out:
1098 1094
1099static int 1095static int
1100nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, 1096nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1101 bool no_wait_reserve, bool no_wait_gpu, 1097 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1102 struct ttm_mem_reg *new_mem)
1103{ 1098{
1104 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 1099 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1105 struct ttm_placement placement; 1100 struct ttm_placement placement;
@@ -1112,15 +1107,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1112 1107
1113 tmp_mem = *new_mem; 1108 tmp_mem = *new_mem;
1114 tmp_mem.mm_node = NULL; 1109 tmp_mem.mm_node = NULL;
1115 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); 1110 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1116 if (ret) 1111 if (ret)
1117 return ret; 1112 return ret;
1118 1113
1119 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); 1114 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
1120 if (ret) 1115 if (ret)
1121 goto out; 1116 goto out;
1122 1117
1123 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem); 1118 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
1124 if (ret) 1119 if (ret)
1125 goto out; 1120 goto out;
1126 1121
@@ -1195,8 +1190,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1195 1190
1196static int 1191static int
1197nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, 1192nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1198 bool no_wait_reserve, bool no_wait_gpu, 1193 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1199 struct ttm_mem_reg *new_mem)
1200{ 1194{
1201 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1195 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1202 struct nouveau_bo *nvbo = nouveau_bo(bo); 1196 struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1220,23 +1214,26 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1220 1214
1221 /* CPU copy if we have no accelerated method available */ 1215 /* CPU copy if we have no accelerated method available */
1222 if (!drm->ttm.move) { 1216 if (!drm->ttm.move) {
1223 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1217 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
1224 goto out; 1218 goto out;
1225 } 1219 }
1226 1220
1227 /* Hardware assisted copy. */ 1221 /* Hardware assisted copy. */
1228 if (new_mem->mem_type == TTM_PL_SYSTEM) 1222 if (new_mem->mem_type == TTM_PL_SYSTEM)
1229 ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1223 ret = nouveau_bo_move_flipd(bo, evict, intr,
1224 no_wait_gpu, new_mem);
1230 else if (old_mem->mem_type == TTM_PL_SYSTEM) 1225 else if (old_mem->mem_type == TTM_PL_SYSTEM)
1231 ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1226 ret = nouveau_bo_move_flips(bo, evict, intr,
1227 no_wait_gpu, new_mem);
1232 else 1228 else
1233 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1229 ret = nouveau_bo_move_m2mf(bo, evict, intr,
1230 no_wait_gpu, new_mem);
1234 1231
1235 if (!ret) 1232 if (!ret)
1236 goto out; 1233 goto out;
1237 1234
1238 /* Fallback to software copy. */ 1235 /* Fallback to software copy. */
1239 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1236 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
1240 1237
1241out: 1238out:
1242 if (nv_device(drm->device)->card_type < NV_50) { 1239 if (nv_device(drm->device)->card_type < NV_50) {
@@ -1343,7 +1340,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1343 nvbo->placement.fpfn = 0; 1340 nvbo->placement.fpfn = 0;
1344 nvbo->placement.lpfn = mappable; 1341 nvbo->placement.lpfn = mappable;
1345 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); 1342 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1346 return nouveau_bo_validate(nvbo, false, true, false); 1343 return nouveau_bo_validate(nvbo, false, false);
1347} 1344}
1348 1345
1349static int 1346static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index dec51b1098fe..25ca37989d2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -76,7 +76,7 @@ u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
76void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val); 76void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
77void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); 77void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
78int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, 78int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
79 bool no_wait_reserve, bool no_wait_gpu); 79 bool no_wait_gpu);
80 80
81struct nouveau_vma * 81struct nouveau_vma *
82nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *); 82nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5e2f52158f19..8bf695c52f95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -433,7 +433,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
433 return ret; 433 return ret;
434 } 434 }
435 435
436 ret = nouveau_bo_validate(nvbo, true, false, false); 436 ret = nouveau_bo_validate(nvbo, true, false);
437 if (unlikely(ret)) { 437 if (unlikely(ret)) {
438 if (ret != -ERESTARTSYS) 438 if (ret != -ERESTARTSYS)
439 NV_ERROR(drm, "fail ttm_validate\n"); 439 NV_ERROR(drm, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index e6ee65cdfb5c..bfb332e616dc 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -250,7 +250,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
250 } 250 }
251 for (i = 0; i < bo->placement.num_placement; i++) 251 for (i = 0; i < bo->placement.num_placement; i++)
252 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 252 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
253 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); 253 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
254 if (likely(r == 0)) { 254 if (likely(r == 0)) {
255 bo->pin_count = 1; 255 bo->pin_count = 1;
256 if (gpu_addr != NULL) 256 if (gpu_addr != NULL)
@@ -279,7 +279,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
279 return 0; 279 return 0;
280 for (i = 0; i < bo->placement.num_placement; i++) 280 for (i = 0; i < bo->placement.num_placement; i++)
281 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 281 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
282 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); 282 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
283 if (unlikely(r != 0)) 283 if (unlikely(r != 0))
284 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); 284 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
285 return r; 285 return r;
@@ -365,7 +365,7 @@ int radeon_bo_list_validate(struct list_head *head)
365 retry: 365 retry:
366 radeon_ttm_placement_from_domain(bo, domain); 366 radeon_ttm_placement_from_domain(bo, domain);
367 r = ttm_bo_validate(&bo->tbo, &bo->placement, 367 r = ttm_bo_validate(&bo->tbo, &bo->placement,
368 true, false, false); 368 true, false);
369 if (unlikely(r)) { 369 if (unlikely(r)) {
370 if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { 370 if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
371 domain |= RADEON_GEM_DOMAIN_GTT; 371 domain |= RADEON_GEM_DOMAIN_GTT;
@@ -585,7 +585,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
585 /* hurrah the memory is not visible ! */ 585 /* hurrah the memory is not visible ! */
586 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 586 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
587 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; 587 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
588 r = ttm_bo_validate(bo, &rbo->placement, false, true, false); 588 r = ttm_bo_validate(bo, &rbo->placement, false, false);
589 if (unlikely(r != 0)) 589 if (unlikely(r != 0))
590 return r; 590 return r;
591 offset = bo->mem.start << PAGE_SHIFT; 591 offset = bo->mem.start << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 563c8edcb03b..1d8ff2f850ba 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -216,7 +216,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
216} 216}
217 217
218static int radeon_move_blit(struct ttm_buffer_object *bo, 218static int radeon_move_blit(struct ttm_buffer_object *bo,
219 bool evict, int no_wait_reserve, bool no_wait_gpu, 219 bool evict, bool no_wait_gpu,
220 struct ttm_mem_reg *new_mem, 220 struct ttm_mem_reg *new_mem,
221 struct ttm_mem_reg *old_mem) 221 struct ttm_mem_reg *old_mem)
222{ 222{
@@ -266,14 +266,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
266 &fence); 266 &fence);
267 /* FIXME: handle copy error */ 267 /* FIXME: handle copy error */
268 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, 268 r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
269 evict, no_wait_reserve, no_wait_gpu, new_mem); 269 evict, no_wait_gpu, new_mem);
270 radeon_fence_unref(&fence); 270 radeon_fence_unref(&fence);
271 return r; 271 return r;
272} 272}
273 273
274static int radeon_move_vram_ram(struct ttm_buffer_object *bo, 274static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
275 bool evict, bool interruptible, 275 bool evict, bool interruptible,
276 bool no_wait_reserve, bool no_wait_gpu, 276 bool no_wait_gpu,
277 struct ttm_mem_reg *new_mem) 277 struct ttm_mem_reg *new_mem)
278{ 278{
279 struct radeon_device *rdev; 279 struct radeon_device *rdev;
@@ -294,7 +294,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
294 placement.busy_placement = &placements; 294 placement.busy_placement = &placements;
295 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 295 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
296 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, 296 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
297 interruptible, no_wait_reserve, no_wait_gpu); 297 interruptible, no_wait_gpu);
298 if (unlikely(r)) { 298 if (unlikely(r)) {
299 return r; 299 return r;
300 } 300 }
@@ -308,11 +308,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
308 if (unlikely(r)) { 308 if (unlikely(r)) {
309 goto out_cleanup; 309 goto out_cleanup;
310 } 310 }
311 r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); 311 r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
312 if (unlikely(r)) { 312 if (unlikely(r)) {
313 goto out_cleanup; 313 goto out_cleanup;
314 } 314 }
315 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); 315 r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
316out_cleanup: 316out_cleanup:
317 ttm_bo_mem_put(bo, &tmp_mem); 317 ttm_bo_mem_put(bo, &tmp_mem);
318 return r; 318 return r;
@@ -320,7 +320,7 @@ out_cleanup:
320 320
321static int radeon_move_ram_vram(struct ttm_buffer_object *bo, 321static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
322 bool evict, bool interruptible, 322 bool evict, bool interruptible,
323 bool no_wait_reserve, bool no_wait_gpu, 323 bool no_wait_gpu,
324 struct ttm_mem_reg *new_mem) 324 struct ttm_mem_reg *new_mem)
325{ 325{
326 struct radeon_device *rdev; 326 struct radeon_device *rdev;
@@ -340,15 +340,16 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
340 placement.num_busy_placement = 1; 340 placement.num_busy_placement = 1;
341 placement.busy_placement = &placements; 341 placement.busy_placement = &placements;
342 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 342 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
343 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); 343 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
344 interruptible, no_wait_gpu);
344 if (unlikely(r)) { 345 if (unlikely(r)) {
345 return r; 346 return r;
346 } 347 }
347 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); 348 r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
348 if (unlikely(r)) { 349 if (unlikely(r)) {
349 goto out_cleanup; 350 goto out_cleanup;
350 } 351 }
351 r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem); 352 r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
352 if (unlikely(r)) { 353 if (unlikely(r)) {
353 goto out_cleanup; 354 goto out_cleanup;
354 } 355 }
@@ -359,7 +360,7 @@ out_cleanup:
359 360
360static int radeon_bo_move(struct ttm_buffer_object *bo, 361static int radeon_bo_move(struct ttm_buffer_object *bo,
361 bool evict, bool interruptible, 362 bool evict, bool interruptible,
362 bool no_wait_reserve, bool no_wait_gpu, 363 bool no_wait_gpu,
363 struct ttm_mem_reg *new_mem) 364 struct ttm_mem_reg *new_mem)
364{ 365{
365 struct radeon_device *rdev; 366 struct radeon_device *rdev;
@@ -388,18 +389,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
388 if (old_mem->mem_type == TTM_PL_VRAM && 389 if (old_mem->mem_type == TTM_PL_VRAM &&
389 new_mem->mem_type == TTM_PL_SYSTEM) { 390 new_mem->mem_type == TTM_PL_SYSTEM) {
390 r = radeon_move_vram_ram(bo, evict, interruptible, 391 r = radeon_move_vram_ram(bo, evict, interruptible,
391 no_wait_reserve, no_wait_gpu, new_mem); 392 no_wait_gpu, new_mem);
392 } else if (old_mem->mem_type == TTM_PL_SYSTEM && 393 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
393 new_mem->mem_type == TTM_PL_VRAM) { 394 new_mem->mem_type == TTM_PL_VRAM) {
394 r = radeon_move_ram_vram(bo, evict, interruptible, 395 r = radeon_move_ram_vram(bo, evict, interruptible,
395 no_wait_reserve, no_wait_gpu, new_mem); 396 no_wait_gpu, new_mem);
396 } else { 397 } else {
397 r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); 398 r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
398 } 399 }
399 400
400 if (r) { 401 if (r) {
401memcpy: 402memcpy:
402 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 403 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
403 } 404 }
404 return r; 405 return r;
405} 406}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6059771d506e..a9151337d5b9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -366,7 +366,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
366static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, 366static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
367 struct ttm_mem_reg *mem, 367 struct ttm_mem_reg *mem,
368 bool evict, bool interruptible, 368 bool evict, bool interruptible,
369 bool no_wait_reserve, bool no_wait_gpu) 369 bool no_wait_gpu)
370{ 370{
371 struct ttm_bo_device *bdev = bo->bdev; 371 struct ttm_bo_device *bdev = bo->bdev;
372 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); 372 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -420,12 +420,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
420 420
421 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && 421 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
422 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) 422 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
423 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem); 423 ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
424 else if (bdev->driver->move) 424 else if (bdev->driver->move)
425 ret = bdev->driver->move(bo, evict, interruptible, 425 ret = bdev->driver->move(bo, evict, interruptible,
426 no_wait_reserve, no_wait_gpu, mem); 426 no_wait_gpu, mem);
427 else 427 else
428 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem); 428 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
429 429
430 if (ret) { 430 if (ret) {
431 if (bdev->driver->move_notify) { 431 if (bdev->driver->move_notify) {
@@ -749,7 +749,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
749EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); 749EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
750 750
751static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, 751static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
752 bool no_wait_reserve, bool no_wait_gpu) 752 bool no_wait_gpu)
753{ 753{
754 struct ttm_bo_device *bdev = bo->bdev; 754 struct ttm_bo_device *bdev = bo->bdev;
755 struct ttm_mem_reg evict_mem; 755 struct ttm_mem_reg evict_mem;
@@ -780,7 +780,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
780 placement.num_busy_placement = 0; 780 placement.num_busy_placement = 0;
781 bdev->driver->evict_flags(bo, &placement); 781 bdev->driver->evict_flags(bo, &placement);
782 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, 782 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
783 no_wait_reserve, no_wait_gpu); 783 no_wait_gpu);
784 if (ret) { 784 if (ret) {
785 if (ret != -ERESTARTSYS) { 785 if (ret != -ERESTARTSYS) {
786 pr_err("Failed to find memory space for buffer 0x%p eviction\n", 786 pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -791,7 +791,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
791 } 791 }
792 792
793 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 793 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
794 no_wait_reserve, no_wait_gpu); 794 no_wait_gpu);
795 if (ret) { 795 if (ret) {
796 if (ret != -ERESTARTSYS) 796 if (ret != -ERESTARTSYS)
797 pr_err("Buffer eviction failed\n"); 797 pr_err("Buffer eviction failed\n");
@@ -805,7 +805,7 @@ out:
805 805
806static int ttm_mem_evict_first(struct ttm_bo_device *bdev, 806static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
807 uint32_t mem_type, 807 uint32_t mem_type,
808 bool interruptible, bool no_wait_reserve, 808 bool interruptible,
809 bool no_wait_gpu) 809 bool no_wait_gpu)
810{ 810{
811 struct ttm_bo_global *glob = bdev->glob; 811 struct ttm_bo_global *glob = bdev->glob;
@@ -841,7 +841,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
841 841
842 ttm_bo_list_ref_sub(bo, put_count, true); 842 ttm_bo_list_ref_sub(bo, put_count, true);
843 843
844 ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); 844 ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
845 ttm_bo_unreserve(bo); 845 ttm_bo_unreserve(bo);
846 846
847 kref_put(&bo->list_kref, ttm_bo_release_list); 847 kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -866,7 +866,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
866 struct ttm_placement *placement, 866 struct ttm_placement *placement,
867 struct ttm_mem_reg *mem, 867 struct ttm_mem_reg *mem,
868 bool interruptible, 868 bool interruptible,
869 bool no_wait_reserve,
870 bool no_wait_gpu) 869 bool no_wait_gpu)
871{ 870{
872 struct ttm_bo_device *bdev = bo->bdev; 871 struct ttm_bo_device *bdev = bo->bdev;
@@ -879,8 +878,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
879 return ret; 878 return ret;
880 if (mem->mm_node) 879 if (mem->mm_node)
881 break; 880 break;
882 ret = ttm_mem_evict_first(bdev, mem_type, interruptible, 881 ret = ttm_mem_evict_first(bdev, mem_type,
883 no_wait_reserve, no_wait_gpu); 882 interruptible, no_wait_gpu);
884 if (unlikely(ret != 0)) 883 if (unlikely(ret != 0))
885 return ret; 884 return ret;
886 } while (1); 885 } while (1);
@@ -945,7 +944,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
945int ttm_bo_mem_space(struct ttm_buffer_object *bo, 944int ttm_bo_mem_space(struct ttm_buffer_object *bo,
946 struct ttm_placement *placement, 945 struct ttm_placement *placement,
947 struct ttm_mem_reg *mem, 946 struct ttm_mem_reg *mem,
948 bool interruptible, bool no_wait_reserve, 947 bool interruptible,
949 bool no_wait_gpu) 948 bool no_wait_gpu)
950{ 949{
951 struct ttm_bo_device *bdev = bo->bdev; 950 struct ttm_bo_device *bdev = bo->bdev;
@@ -1036,7 +1035,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1036 } 1035 }
1037 1036
1038 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 1037 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
1039 interruptible, no_wait_reserve, no_wait_gpu); 1038 interruptible, no_wait_gpu);
1040 if (ret == 0 && mem->mm_node) { 1039 if (ret == 0 && mem->mm_node) {
1041 mem->placement = cur_flags; 1040 mem->placement = cur_flags;
1042 return 0; 1041 return 0;
@@ -1051,7 +1050,7 @@ EXPORT_SYMBOL(ttm_bo_mem_space);
1051 1050
1052int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 1051int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1053 struct ttm_placement *placement, 1052 struct ttm_placement *placement,
1054 bool interruptible, bool no_wait_reserve, 1053 bool interruptible,
1055 bool no_wait_gpu) 1054 bool no_wait_gpu)
1056{ 1055{
1057 int ret = 0; 1056 int ret = 0;
@@ -1078,10 +1077,12 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1078 /* 1077 /*
1079 * Determine where to move the buffer. 1078 * Determine where to move the buffer.
1080 */ 1079 */
1081 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu); 1080 ret = ttm_bo_mem_space(bo, placement, &mem,
1081 interruptible, no_wait_gpu);
1082 if (ret) 1082 if (ret)
1083 goto out_unlock; 1083 goto out_unlock;
1084 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); 1084 ret = ttm_bo_handle_move_mem(bo, &mem, false,
1085 interruptible, no_wait_gpu);
1085out_unlock: 1086out_unlock:
1086 if (ret && mem.mm_node) 1087 if (ret && mem.mm_node)
1087 ttm_bo_mem_put(bo, &mem); 1088 ttm_bo_mem_put(bo, &mem);
@@ -1110,7 +1111,7 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1110 1111
1111int ttm_bo_validate(struct ttm_buffer_object *bo, 1112int ttm_bo_validate(struct ttm_buffer_object *bo,
1112 struct ttm_placement *placement, 1113 struct ttm_placement *placement,
1113 bool interruptible, bool no_wait_reserve, 1114 bool interruptible,
1114 bool no_wait_gpu) 1115 bool no_wait_gpu)
1115{ 1116{
1116 int ret; 1117 int ret;
@@ -1126,7 +1127,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1126 */ 1127 */
1127 ret = ttm_bo_mem_compat(placement, &bo->mem); 1128 ret = ttm_bo_mem_compat(placement, &bo->mem);
1128 if (ret < 0) { 1129 if (ret < 0) {
1129 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu); 1130 ret = ttm_bo_move_buffer(bo, placement, interruptible,
1131 no_wait_gpu);
1130 if (ret) 1132 if (ret)
1131 return ret; 1133 return ret;
1132 } else { 1134 } else {
@@ -1239,7 +1241,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1239 goto out_err; 1241 goto out_err;
1240 } 1242 }
1241 1243
1242 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 1244 ret = ttm_bo_validate(bo, placement, interruptible, false);
1243 if (ret) 1245 if (ret)
1244 goto out_err; 1246 goto out_err;
1245 1247
@@ -1325,7 +1327,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1325 spin_lock(&glob->lru_lock); 1327 spin_lock(&glob->lru_lock);
1326 while (!list_empty(&man->lru)) { 1328 while (!list_empty(&man->lru)) {
1327 spin_unlock(&glob->lru_lock); 1329 spin_unlock(&glob->lru_lock);
1328 ret = ttm_mem_evict_first(bdev, mem_type, false, false, false); 1330 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1329 if (ret) { 1331 if (ret) {
1330 if (allow_errors) { 1332 if (allow_errors) {
1331 return ret; 1333 return ret;
@@ -1837,7 +1839,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1837 evict_mem.mem_type = TTM_PL_SYSTEM; 1839 evict_mem.mem_type = TTM_PL_SYSTEM;
1838 1840
1839 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, 1841 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1840 false, false, false); 1842 false, false);
1841 if (unlikely(ret != 0)) 1843 if (unlikely(ret != 0))
1842 goto out; 1844 goto out;
1843 } 1845 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index b9c4e515b1d8..9e9c5d2a5c74 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -43,7 +43,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
43} 43}
44 44
45int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 45int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
46 bool evict, bool no_wait_reserve, 46 bool evict,
47 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 47 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
48{ 48{
49 struct ttm_tt *ttm = bo->ttm; 49 struct ttm_tt *ttm = bo->ttm;
@@ -314,7 +314,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
314} 314}
315 315
316int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 316int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
317 bool evict, bool no_wait_reserve, bool no_wait_gpu, 317 bool evict, bool no_wait_gpu,
318 struct ttm_mem_reg *new_mem) 318 struct ttm_mem_reg *new_mem)
319{ 319{
320 struct ttm_bo_device *bdev = bo->bdev; 320 struct ttm_bo_device *bdev = bo->bdev;
@@ -611,7 +611,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
611 611
612int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 612int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
613 void *sync_obj, 613 void *sync_obj,
614 bool evict, bool no_wait_reserve, 614 bool evict,
615 bool no_wait_gpu, 615 bool no_wait_gpu,
616 struct ttm_mem_reg *new_mem) 616 struct ttm_mem_reg *new_mem)
617{ 617{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index e88b0eb1a179..5fae06ad7e25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -66,7 +66,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
66 if (unlikely(ret != 0)) 66 if (unlikely(ret != 0))
67 goto err; 67 goto err;
68 68
69 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 69 ret = ttm_bo_validate(bo, placement, interruptible, false);
70 70
71 ttm_bo_unreserve(bo); 71 ttm_bo_unreserve(bo);
72 72
@@ -123,7 +123,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
123 else 123 else
124 placement = &vmw_vram_gmr_placement; 124 placement = &vmw_vram_gmr_placement;
125 125
126 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 126 ret = ttm_bo_validate(bo, placement, interruptible, false);
127 if (likely(ret == 0) || ret == -ERESTARTSYS) 127 if (likely(ret == 0) || ret == -ERESTARTSYS)
128 goto err_unreserve; 128 goto err_unreserve;
129 129
@@ -138,7 +138,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
138 else 138 else
139 placement = &vmw_vram_placement; 139 placement = &vmw_vram_placement;
140 140
141 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 141 ret = ttm_bo_validate(bo, placement, interruptible, false);
142 142
143err_unreserve: 143err_unreserve:
144 ttm_bo_unreserve(bo); 144 ttm_bo_unreserve(bo);
@@ -223,10 +223,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
223 if (bo->mem.mem_type == TTM_PL_VRAM && 223 if (bo->mem.mem_type == TTM_PL_VRAM &&
224 bo->mem.start < bo->num_pages && 224 bo->mem.start < bo->num_pages &&
225 bo->mem.start > 0) 225 bo->mem.start > 0)
226 (void) ttm_bo_validate(bo, &vmw_sys_placement, false, 226 (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
227 false, false);
228 227
229 ret = ttm_bo_validate(bo, &placement, interruptible, false, false); 228 ret = ttm_bo_validate(bo, &placement, interruptible, false);
230 229
231 /* For some reason we didn't up at the start of vram */ 230 /* For some reason we didn't up at the start of vram */
232 WARN_ON(ret == 0 && bo->offset != 0); 231 WARN_ON(ret == 0 && bo->offset != 0);
@@ -315,7 +314,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
315 placement.num_placement = 1; 314 placement.num_placement = 1;
316 placement.placement = &pl_flags; 315 placement.placement = &pl_flags;
317 316
318 ret = ttm_bo_validate(bo, &placement, false, true, true); 317 ret = ttm_bo_validate(bo, &placement, false, true);
319 318
320 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); 319 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
321} 320}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 534c96703c3f..394e6476105b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1245,7 +1245,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
1245 * used as a GMR, this will return -ENOMEM. 1245 * used as a GMR, this will return -ENOMEM.
1246 */ 1246 */
1247 1247
1248 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false); 1248 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
1249 if (likely(ret == 0 || ret == -ERESTARTSYS)) 1249 if (likely(ret == 0 || ret == -ERESTARTSYS))
1250 return ret; 1250 return ret;
1251 1251
@@ -1255,7 +1255,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
1255 */ 1255 */
1256 1256
1257 DRM_INFO("Falling through to VRAM.\n"); 1257 DRM_INFO("Falling through to VRAM.\n");
1258 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); 1258 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
1259 return ret; 1259 return ret;
1260} 1260}
1261 1261
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 0def4ff5b621..e01a17b407b2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1018,7 +1018,7 @@ int vmw_resource_check_buffer(struct vmw_resource *res,
1018 backup_dirty = res->backup_dirty; 1018 backup_dirty = res->backup_dirty;
1019 ret = ttm_bo_validate(&res->backup->base, 1019 ret = ttm_bo_validate(&res->backup->base,
1020 res->func->backup_placement, 1020 res->func->backup_placement,
1021 true, false, false); 1021 true, false);
1022 1022
1023 if (unlikely(ret != 0)) 1023 if (unlikely(ret != 0))
1024 goto out_no_validate; 1024 goto out_no_validate;