Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu'):
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 66 ++++++++++++++++----------------
 1 file changed, 31 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2f80da016d57..c63efd7972d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -342,48 +342,44 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_list_entry *lobj;
-	struct amdgpu_bo *bo;
 	u64 initial_bytes_moved;
 	int r;
 
 	list_for_each_entry(lobj, validated, tv.head) {
-		bo = lobj->robj;
-		if (!bo->pin_count) {
-			u32 domain = lobj->prefered_domains;
-			u32 current_domain =
-				amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
-			/* Check if this buffer will be moved and don't move it
-			 * if we have moved too many buffers for this IB already.
-			 *
-			 * Note that this allows moving at least one buffer of
-			 * any size, because it doesn't take the current "bo"
-			 * into account. We don't want to disallow buffer moves
-			 * completely.
-			 */
-			if ((lobj->allowed_domains & current_domain) != 0 &&
-			    (domain & current_domain) == 0 && /* will be moved */
-			    p->bytes_moved > p->bytes_moved_threshold) {
-				/* don't move it */
-				domain = current_domain;
-			}
+		struct amdgpu_bo *bo = lobj->robj;
+		uint32_t domain;
 
-		retry:
-			amdgpu_ttm_placement_from_domain(bo, domain);
-			initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-			p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
-					  initial_bytes_moved;
-
-			if (unlikely(r)) {
-				if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
-					domain = lobj->allowed_domains;
-					goto retry;
-				}
-				return r;
+		lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
+		if (bo->pin_count)
+			continue;
+
+		/* Avoid moving this one if we have moved too many buffers
+		 * for this IB already.
+		 *
+		 * Note that this allows moving at least one buffer of
+		 * any size, because it doesn't take the current "bo"
+		 * into account. We don't want to disallow buffer moves
+		 * completely.
+		 */
+		if (p->bytes_moved <= p->bytes_moved_threshold)
+			domain = lobj->prefered_domains;
+		else
+			domain = lobj->allowed_domains;
+
+	retry:
+		amdgpu_ttm_placement_from_domain(bo, domain);
+		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+				  initial_bytes_moved;
+
+		if (unlikely(r)) {
+			if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
+				domain = lobj->allowed_domains;
+				goto retry;
 			}
+			return r;
 		}
-		lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
 	}
 	return 0;
 }
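
For readers skimming the hunk: the rewritten loop drops the old "would this buffer actually be moved?" test and keeps only the budget check. While the submission (IB) is still under its per-IB migration budget, the buffer is validated into its preferred domains; once the budget is exceeded, any allowed domain is accepted so TTM is free to leave the buffer where it already is. Below is a minimal standalone sketch of just that decision, using hypothetical stand-in structs (cs_state, bo_entry, pick_validation_domain) rather than the driver's real amdgpu_cs_parser / amdgpu_bo_list_entry types.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver state used by the hunk above; the
 * real structs (amdgpu_cs_parser, amdgpu_bo_list_entry) carry far more. */
struct cs_state {
	uint64_t bytes_moved;           /* bytes migrated so far for this IB */
	uint64_t bytes_moved_threshold; /* per-IB migration budget */
};

struct bo_entry {
	uint32_t prefered_domains;      /* spelling follows the driver field */
	uint32_t allowed_domains;
};

/* Mirrors the simplified decision in the new loop: keep asking for the
 * preferred placement while the IB is under its migration budget, otherwise
 * accept any allowed placement so TTM can leave the buffer where it is. */
static uint32_t pick_validation_domain(const struct cs_state *p,
				       const struct bo_entry *e)
{
	if (p->bytes_moved <= p->bytes_moved_threshold)
		return e->prefered_domains;
	return e->allowed_domains;
}

int main(void)
{
	struct cs_state p = { .bytes_moved = 0, .bytes_moved_threshold = 4 << 20 };
	struct bo_entry e = { .prefered_domains = 0x4,   /* e.g. VRAM */
			      .allowed_domains  = 0x6 }; /* e.g. VRAM | GTT */

	printf("under budget -> 0x%x\n", (unsigned)pick_validation_domain(&p, &e));
	p.bytes_moved = 8 << 20;        /* pretend the budget is already spent */
	printf("over budget  -> 0x%x\n", (unsigned)pick_validation_domain(&p, &e));
	return 0;
}

The retry: path in the patch is unchanged in spirit: if ttm_bo_validate() fails with the preferred placement, the loop falls back to allowed_domains and validates once more.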