author     Christian König <christian.koenig@amd.com>    2014-09-04 14:01:53 -0400
committer  Alex Deucher <alexander.deucher@amd.com>      2014-09-11 10:46:01 -0400
commit     57d20a43c9b30663bdbacde8294a902edef35a84 (patch)
tree       1a2ab2ad48988611c498f8e8c202a6a90b1598da /drivers/gpu/drm/radeon/r100.c
parent     ae9c0af2c0ea92e57013ab2dd7271ba7d6b2a833 (diff)
drm/radeon: add the infrastructure for concurrent buffer access
This allows us to specify whether we want to sync to
the shared fences of a reservation object or not.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
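
As a hedged sketch of what the new interface enables (the caller below is
illustrative and not part of this patch; the helper name and the radeon_bo
argument are made up), a buffer mover can now hand the copy function a
reservation object to sync against and receives the resulting fence back
through the return value:

	/* Illustrative caller only, assuming a radeon_bo whose TTM
	 * reservation object is reachable at bo->tbo.resv. */
	static int example_move_blit(struct radeon_device *rdev,
				     struct radeon_bo *bo,
				     uint64_t src_offset, uint64_t dst_offset,
				     unsigned num_gpu_pages)
	{
		struct radeon_fence *fence;
		int r;

		/* Passing bo->tbo.resv asks the copy to sync to the object's
		 * shared fences; passing NULL opts out of that. */
		fence = r100_copy_blit(rdev, src_offset, dst_offset,
				       num_gpu_pages, bo->tbo.resv);
		if (IS_ERR(fence))
			return PTR_ERR(fence);	/* errno travels in the pointer */

		r = radeon_fence_wait(fence, false);
		radeon_fence_unref(&fence);
		return r;
	}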
Diffstat (limited to 'drivers/gpu/drm/radeon/r100.c')
-rw-r--r--   drivers/gpu/drm/radeon/r100.c   21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 4c5ec44ff328..c6b486f888d5 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -855,13 +855,14 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev,
 	return false;
 }
 
-int r100_copy_blit(struct radeon_device *rdev,
-		   uint64_t src_offset,
-		   uint64_t dst_offset,
-		   unsigned num_gpu_pages,
-		   struct radeon_fence **fence)
+struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
+				    uint64_t src_offset,
+				    uint64_t dst_offset,
+				    unsigned num_gpu_pages,
+				    struct reservation_object *resv)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_fence *fence;
 	uint32_t cur_pages;
 	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
 	uint32_t pitch;
@@ -882,7 +883,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 	r = radeon_ring_lock(rdev, ring, ndw);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 	}
 	while (num_gpu_pages > 0) {
 		cur_pages = num_gpu_pages;
@@ -922,11 +923,13 @@ int r100_copy_blit(struct radeon_device *rdev,
 		  RADEON_WAIT_2D_IDLECLEAN |
 		  RADEON_WAIT_HOST_IDLECLEAN |
 		  RADEON_WAIT_DMA_GUI_IDLE);
-	if (fence) {
-		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return ERR_PTR(r);
 	}
 	radeon_ring_unlock_commit(rdev, ring, false);
-	return r;
+	return fence;
 }
 
 static int r100_cp_wait_for_idle(struct radeon_device *rdev)
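
One detail worth noting in the last hunk: radeon_fence_emit() can fail after
the ring has been locked, and since the function no longer returns a bare
errno it must unwind the ring reservation itself before handing the error
back as a pointer. The pattern, with explanatory comments added here for
illustration (the names are as in the patch):

	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		/* give back the reserved ring space instead of committing it */
		radeon_ring_unlock_undo(rdev, ring);
		return ERR_PTR(r);	/* encode the errno in the returned pointer */
	}
	/* success: commit the queued packets and return the new fence */
	radeon_ring_unlock_commit(rdev, ring, false);
	return fence;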