author     Christian König <christian.koenig@amd.com>    2014-09-04 14:01:53 -0400
committer  Alex Deucher <alexander.deucher@amd.com>      2014-09-11 10:46:01 -0400
commit     57d20a43c9b30663bdbacde8294a902edef35a84
tree       1a2ab2ad48988611c498f8e8c202a6a90b1598da /drivers/gpu/drm/radeon/radeon_ttm.c
parent     ae9c0af2c0ea92e57013ab2dd7271ba7d6b2a833
drm/radeon: add the infrastructure for concurrent buffer access
This allows us to specify if we want to sync to
the shared fences of a reservation object or not.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c')
 drivers/gpu/drm/radeon/radeon_ttm.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 62d1f4d730a2..eca2ce60d440 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -233,6 +233,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
 	struct radeon_fence *fence;
+	unsigned num_pages;
 	int r, ridx;
 
 	rdev = radeon_get_rdev(bo->bdev);
@@ -269,12 +270,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 
 	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
-	/* sync other rings */
-	fence = (struct radeon_fence *)reservation_object_get_excl(bo->resv);
-	r = radeon_copy(rdev, old_start, new_start,
-			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
-			&fence);
-	/* FIXME: handle copy error */
+	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
+	if (IS_ERR(fence))
+		return PTR_ERR(fence);
+
 	r = ttm_bo_move_accel_cleanup(bo, &fence->base,
 			evict, no_wait_gpu, new_mem);
 	radeon_fence_unref(&fence);
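
For readers following the interface change, the reworked calling convention can be read off the call site above: the size is computed in GPU pages, the buffer's reservation object is passed in so the copy can decide which of its fences to sync to, and the copy fence comes back as the return value (or an ERR_PTR) rather than through an output parameter, so copy errors finally propagate instead of being left to the old FIXME. The sketch below illustrates that pattern; the radeon_copy() prototype and the copy_and_release() helper are assumptions inferred from this hunk, not code quoted from the radeon headers.

	#include <linux/err.h>

	/*
	 * Assumed prototype, inferred from the call site in the hunk above
	 * (the real declaration lives elsewhere in the radeon driver): the
	 * reservation object is handed to the copy so it can sync to the
	 * object's fences as needed, and the resulting fence (or an ERR_PTR)
	 * is returned directly.
	 */
	struct radeon_fence *radeon_copy(struct radeon_device *rdev,
					 uint64_t src_offset, uint64_t dst_offset,
					 unsigned num_gpu_pages,
					 struct reservation_object *resv);

	/* Hypothetical caller following the same pattern as radeon_move_blit(): */
	static int copy_and_release(struct radeon_device *rdev,
				    struct reservation_object *resv,
				    uint64_t src, uint64_t dst,
				    unsigned num_gpu_pages)
	{
		struct radeon_fence *fence;

		fence = radeon_copy(rdev, src, dst, num_gpu_pages, resv);
		if (IS_ERR(fence))
			return PTR_ERR(fence);	/* copy errors now reach the caller */

		/* ... pass &fence->base on to TTM or wait on it, then drop the reference ... */
		radeon_fence_unref(&fence);
		return 0;
	}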