author    Christian König <christian.koenig@amd.com>    2014-09-04 14:01:53 -0400
committer Alex Deucher <alexander.deucher@amd.com>      2014-09-11 10:46:01 -0400
commit    57d20a43c9b30663bdbacde8294a902edef35a84
tree      1a2ab2ad48988611c498f8e8c202a6a90b1598da /drivers/gpu/drm/radeon/cik.c
parent    ae9c0af2c0ea92e57013ab2dd7271ba7d6b2a833
drm/radeon: add the infrastructure for concurrent buffer access
This allows us to specify whether we want to sync to the shared
fences of a reservation object or not.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/cik.c')

 drivers/gpu/drm/radeon/cik.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 1f598ab3b9a7..0b5a230d8b96 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3959,18 +3959,19 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
  * @src_offset: src GPU address
  * @dst_offset: dst GPU address
  * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
  *
  * Copy GPU paging using the CP DMA engine (CIK+).
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int cik_copy_cpdma(struct radeon_device *rdev,
-		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_gpu_pages,
-		   struct radeon_fence **fence)
+struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
+				    uint64_t src_offset, uint64_t dst_offset,
+				    unsigned num_gpu_pages,
+				    struct reservation_object *resv)
 {
 	struct radeon_semaphore *sem = NULL;
+	struct radeon_fence *fence;
 	int ring_index = rdev->asic->copy.blit_ring_index;
 	struct radeon_ring *ring = &rdev->ring[ring_index];
 	u32 size_in_bytes, cur_size_in_bytes, control;
@@ -3980,7 +3981,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
 	r = radeon_semaphore_create(rdev, &sem);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
+		return ERR_PTR(r);
 	}
 
 	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -3989,10 +3990,10 @@ int cik_copy_cpdma(struct radeon_device *rdev,
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d).\n", r);
 		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
 	}
 
-	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_resv(sem, resv, false);
 	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
@@ -4014,17 +4015,17 @@ int cik_copy_cpdma(struct radeon_device *rdev,
 		dst_offset += cur_size_in_bytes;
 	}
 
-	r = radeon_fence_emit(rdev, fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
 	}
 
 	radeon_ring_unlock_commit(rdev, ring, false);
-	radeon_semaphore_free(rdev, &sem, *fence);
+	radeon_semaphore_free(rdev, &sem, fence);
 
-	return r;
+	return fence;
 }
 
 /*
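
For context, a hedged sketch of how a caller might consume the new signature: the copy routine now takes the reservation object to sync against and returns the resulting fence (or an ERR_PTR-encoded error) instead of filling a radeon_fence out-parameter. The helper name and the blocking wait at the end are illustrative only, not part of this patch.

static struct radeon_fence *
example_copy_and_wait(struct radeon_device *rdev,
		      uint64_t src, uint64_t dst,
		      unsigned num_gpu_pages,
		      struct reservation_object *resv)
{
	struct radeon_fence *fence;
	int r;

	/* Sync against the fences already attached to resv, then copy. */
	fence = cik_copy_cpdma(rdev, src, dst, num_gpu_pages, resv);
	if (IS_ERR(fence))
		return fence;	/* ring or semaphore setup failed */

	/*
	 * Illustrative blocking wait; a real caller would typically hand
	 * the fence to TTM (or attach it to the reservation object)
	 * rather than waiting here.
	 */
	r = radeon_fence_wait(fence, false);
	if (r) {
		radeon_fence_unref(&fence);
		return ERR_PTR(r);
	}

	return fence;
}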