Diffstat (limited to 'drivers/gpu/drm/radeon/rv770.c')
 drivers/gpu/drm/radeon/rv770.c | 74 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 74 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 87c979c4f721..1b2444f4d8f4 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -887,6 +887,80 @@ static int rv770_mc_init(struct radeon_device *rdev)
 	return 0;
 }
 
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFF)
+			cur_size_in_dw = 0xFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
 static int rv770_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;
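
The kernel-doc above notes that rv770_copy_dma() only gets used once it is
"registered as the asic copy callback", and the function itself finds its ring
through rdev->asic->copy.dma_ring_index. As a rough illustrative sketch (not
part of this patch), the hookup in the rv770 entry of the per-ASIC callback
table in radeon_asic.c would look roughly like the following; the
R600_RING_TYPE_DMA_INDEX constant and the exact field layout are assumptions
made here for illustration only:

	/* sketch only: wiring rv770_copy_dma into the asic copy callbacks */
	static struct radeon_asic rv770_asic = {
		/* ... other callback groups elided ... */
		.copy = {
			.dma = &rv770_copy_dma,				/* DMA engine copy */
			.dma_ring_index = R600_RING_TYPE_DMA_INDEX,	/* assumed ring index */
			.copy = &rv770_copy_dma,			/* preferred copy path */
			.copy_ring_index = R600_RING_TYPE_DMA_INDEX,	/* assumed ring index */
		},
		/* ... */
	};

With something like that in place, the radeon TTM buffer-move paths (for
example during eviction) reach rv770_copy_dma() through the copy callback.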
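
For a feel of the ring-space arithmetic in the patch: the transfer is split
into packets of at most 0xFFFF dwords each, and the ring lock reserves five
dwords per copy packet (the five radeon_ring_write() calls per loop iteration)
plus eight more, presumably to cover the semaphore sync and fence packets.
Below is a small stand-alone sketch of that math in user-space C, assuming
4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT == 12); it is an illustration, not
driver code:

	#include <stdio.h>
	#include <stdint.h>

	#define RADEON_GPU_PAGE_SHIFT	12	/* assumed: 4 KiB GPU pages */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned num_gpu_pages = 1024;	/* e.g. a 4 MiB buffer move */

		/* total transfer size in 32-bit dwords, as in the patch */
		uint32_t size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;

		/* each DMA_PACKET_COPY moves at most 0xFFFF dwords */
		unsigned num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);

		/* 5 ring dwords per copy packet + 8 for sync/fence */
		unsigned ring_dw = num_loops * 5 + 8;

		printf("%u pages -> %u dwords -> %u packets, %u ring dwords\n",
		       num_gpu_pages, (unsigned)size_in_dw, num_loops, ring_dw);
		return 0;
	}

For 1024 pages this works out to 1048576 dwords, 17 copy packets and a
93-dword ring reservation.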