author		Chunming Zhou <David1.Zhou@amd.com>	2016-08-04 01:05:46 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2016-08-16 10:42:33 -0400
commit		14fd833efa3f13619623501de5e2221dfdab7f7f (patch)
tree		8603aa6c79ebb18f976b1764f1995f487a873414 /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
parent		e7893c4bd34b9d2f942d77666656efaa084a3f87 (diff)
drm/amdgpu: validate shadow as well when validating bo
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c	76
1 file changed, 46 insertions, 30 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 8eb93dff69d6..396a412d70d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -287,18 +287,56 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 	return max(bytes_moved_threshold, 1024*1024ull);
 }
 
+static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+				 struct amdgpu_bo *bo)
+{
+	u64 initial_bytes_moved;
+	uint32_t domain;
+	int r;
+
+	if (bo->pin_count)
+		return 0;
+
+	/* Avoid moving this one if we have moved too many buffers
+	 * for this IB already.
+	 *
+	 * Note that this allows moving at least one buffer of
+	 * any size, because it doesn't take the current "bo"
+	 * into account. We don't want to disallow buffer moves
+	 * completely.
+	 */
+	if (p->bytes_moved <= p->bytes_moved_threshold)
+		domain = bo->prefered_domains;
+	else
+		domain = bo->allowed_domains;
+
+retry:
+	amdgpu_ttm_placement_from_domain(bo, domain);
+	initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+		initial_bytes_moved;
+
+	if (unlikely(r)) {
+		if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
+			domain = bo->allowed_domains;
+			goto retry;
+		}
+	}
+
+	return r;
+}
+
 int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 			    struct list_head *validated)
 {
 	struct amdgpu_bo_list_entry *lobj;
-	u64 initial_bytes_moved;
 	int r;
 
 	list_for_each_entry(lobj, validated, tv.head) {
 		struct amdgpu_bo *bo = lobj->robj;
 		bool binding_userptr = false;
 		struct mm_struct *usermm;
-		uint32_t domain;
 
 		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
 		if (usermm && usermm != current->mm)
@@ -313,35 +351,13 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 			binding_userptr = true;
 		}
 
-		if (bo->pin_count)
-			continue;
-
-		/* Avoid moving this one if we have moved too many buffers
-		 * for this IB already.
-		 *
-		 * Note that this allows moving at least one buffer of
-		 * any size, because it doesn't take the current "bo"
-		 * into account. We don't want to disallow buffer moves
-		 * completely.
-		 */
-		if (p->bytes_moved <= p->bytes_moved_threshold)
-			domain = bo->prefered_domains;
-		else
-			domain = bo->allowed_domains;
-
-	retry:
-		amdgpu_ttm_placement_from_domain(bo, domain);
-		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
-			initial_bytes_moved;
-
-		if (unlikely(r)) {
-			if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
-				domain = bo->allowed_domains;
-				goto retry;
-			}
+		r = amdgpu_cs_bo_validate(p, bo);
+		if (r)
 			return r;
+		if (bo->shadow) {
+			r = amdgpu_cs_bo_validate(p, bo);
+			if (r)
+				return r;
 		}
 
 		if (binding_userptr) {
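
For quick reference, the per-buffer flow inside amdgpu_cs_list_validate() after this patch is condensed below. This is a sketch reassembled from the hunks above (loop setup, userptr handling, and the surrounding code are elided), not a verbatim excerpt of the file:

	/* Each BO on the validated list now goes through the new helper,
	 * which contains the pin check, the bytes-moved throttling and the
	 * allowed-domains retry; when the BO has a shadow copy attached,
	 * the helper is invoked a second time so the shadow is validated
	 * together with its parent, exactly as in the hunk above.
	 */
	r = amdgpu_cs_bo_validate(p, bo);
	if (r)
		return r;

	if (bo->shadow) {
		r = amdgpu_cs_bo_validate(p, bo);
		if (r)
			return r;
	}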