 drivers/gpu/drm/radeon/radeon.h        |  3 ++-
 drivers/gpu/drm/radeon/radeon_object.c |  5 +++--
 drivers/gpu/drm/radeon/radeon_uvd.c    | 20 ++++++++++++++++++--
 3 files changed, 23 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a5ac95bf05ea..83a24614138a 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1642,7 +1642,8 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 			      uint32_t handle, struct radeon_fence **fence);
 int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 			       uint32_t handle, struct radeon_fence **fence);
-void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+				       uint32_t allowed_domains);
 void radeon_uvd_free_handles(struct radeon_device *rdev,
 			     struct drm_file *filp);
 int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0129c7efae3b..c97a42432e2b 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -491,6 +491,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 		bo = lobj->robj;
 		if (!bo->pin_count) {
 			u32 domain = lobj->prefered_domains;
+			u32 allowed = lobj->allowed_domains;
 			u32 current_domain =
 				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
 
@@ -502,7 +503,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 			 * into account. We don't want to disallow buffer moves
 			 * completely.
 			 */
-			if ((lobj->allowed_domains & current_domain) != 0 &&
+			if ((allowed & current_domain) != 0 &&
 			    (domain & current_domain) == 0 && /* will be moved */
 			    bytes_moved > bytes_moved_threshold) {
 				/* don't move it */
@@ -512,7 +513,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 		retry:
 			radeon_ttm_placement_from_domain(bo, domain);
 			if (ring == R600_RING_TYPE_UVD_INDEX)
-				radeon_uvd_force_into_uvd_segment(bo);
+				radeon_uvd_force_into_uvd_segment(bo, allowed);
 
 			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
 			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
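For orientation, here is a minimal, self-contained sketch of what forwarding the cached allowed mask buys: a buffer whose allowed_domains is wider than VRAM-only may be given a second placement, while a VRAM-only buffer must not be. The domain flag values follow the radeon UAPI (CPU 0x1, GTT 0x2, VRAM 0x4); the predicate below is an illustrative stand-in for the early-return check the patched helper performs, not kernel code.

/*
 * Illustrative stand-in only -- not kernel code.  It mirrors the check the
 * patched radeon_uvd_force_into_uvd_segment() applies to allowed_domains:
 * a VRAM-only buffer must stay in the first 256MB segment, anything with a
 * wider allowed mask may receive a second placement.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define RADEON_GEM_DOMAIN_CPU  0x1u	/* values as in the radeon UAPI */
#define RADEON_GEM_DOMAIN_GTT  0x2u
#define RADEON_GEM_DOMAIN_VRAM 0x4u

static bool may_use_second_segment(uint32_t allowed_domains)
{
	/* same condition as the early return added in radeon_uvd.c */
	return allowed_domains != RADEON_GEM_DOMAIN_VRAM;
}

int main(void)
{
	/* the kernel's own UVD message buffer is validated VRAM-only */
	assert(!may_use_second_segment(RADEON_GEM_DOMAIN_VRAM));
	/* a BO whose allowed mask also includes GTT may use segment 2 */
	assert(may_use_second_segment(RADEON_GEM_DOMAIN_VRAM |
				      RADEON_GEM_DOMAIN_GTT));
	return 0;
}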
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 464d80145dfe..1dedadd8f5df 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -254,7 +254,8 @@ int radeon_uvd_resume(struct radeon_device *rdev)
 	return 0;
 }
 
-void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+				       uint32_t allowed_domains)
 {
 	int i;
 
@@ -262,6 +263,21 @@ void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
 		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
 		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
 	}
+
+	/* If it must be in VRAM it must be in the first segment as well */
+	if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
+		return;
+
+	/* abort if we already have more than one placement */
+	if (rbo->placement.num_placement > 1)
+		return;
+
+	/* add another 256MB segment */
+	rbo->placements[1] = rbo->placements[0];
+	rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+	rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+	rbo->placement.num_placement++;
+	rbo->placement.num_busy_placement++;
 }
 
 void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
@@ -652,7 +668,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
 		return r;
 
 	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
-	radeon_uvd_force_into_uvd_segment(bo);
+	radeon_uvd_force_into_uvd_segment(bo, RADEON_GEM_DOMAIN_VRAM);
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 	if (r)
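And a small standalone sketch of the placement arithmetic added above: with 4KB pages the first segment spans page frames 0 to 65536 (the low 256MB), and the copied-and-shifted second placement spans 65536 to 131072. PAGE_SHIFT = 12 and struct fake_place are assumptions for illustration; only the fpfn/lpfn math is taken from the hunk in radeon_uvd.c.

/*
 * Standalone illustration of the segment bounds set up in the patch.
 * PAGE_SHIFT is assumed to be 12 (4KB pages) and struct fake_place is a
 * stripped-down stand-in for the placement entries, not the real TTM type.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define UVD_SEGMENT_SIZE (256u * 1024 * 1024)	/* one 256MB UVD segment */

struct fake_place {
	uint32_t fpfn;	/* first page frame number of the allowed range */
	uint32_t lpfn;	/* last page frame number (upper bound) */
};

int main(void)
{
	/* first segment, as the for loop in the patch sets it up */
	struct fake_place seg0 = {
		.fpfn = 0 >> PAGE_SHIFT,
		.lpfn = UVD_SEGMENT_SIZE >> PAGE_SHIFT,
	};

	/* second segment: copy the first and shift both bounds by 256MB,
	 * mirroring the += lines added to radeon_uvd.c */
	struct fake_place seg1 = seg0;
	seg1.fpfn += UVD_SEGMENT_SIZE >> PAGE_SHIFT;
	seg1.lpfn += UVD_SEGMENT_SIZE >> PAGE_SHIFT;

	printf("segment 0: pfn %u - %u\n", (unsigned)seg0.fpfn, (unsigned)seg0.lpfn);
	printf("segment 1: pfn %u - %u\n", (unsigned)seg1.fpfn, (unsigned)seg1.lpfn);
	return 0;
}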