author		Alex Deucher <alexander.deucher@amd.com>	2012-01-05 22:11:07 -0500
committer	Dave Airlie <airlied@redhat.com>	2012-01-06 04:16:38 -0500
commit		3000bf393302a8c786e9ebfc778050cb0d6226c4 (patch)
tree		2d2be47822cec764e1a83d6294b0f94b6591f5d6
parent		93504fce28b1a387ec01f81b26637d237dca2b36 (diff)
drm/radeon/kms: sync across multiple rings when doing bo moves v3
We need to synchronize across rings when doing a bo move to make
sure the buffer is idle if it's in use by a ring other than the
ring doing the move.
v2: fix fence setup for bo moves
v3: add missing ring lock/unlock
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
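
As a stand-alone illustration of the pattern this patch introduces: every
other ready ring emits a semaphore signal, and the copy ring emits a matching
wait, so the blit cannot start while the buffer may still be in flight on
another ring. The sketch below uses simplified stand-in types and emit_*
helpers for illustration only; it is not the radeon API.

/* Minimal sketch of the cross-ring signal/wait pattern (illustration only;
 * types and helpers are simplified stand-ins, not the radeon API). */
#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 3

struct ring {
	bool ready;
};

/* Hypothetical emit helpers: in the real driver these queue semaphore
 * packets on the hardware rings. */
static void emit_signal(int ring_idx, int sem)
{
	printf("ring %d: signal semaphore %d\n", ring_idx, sem);
}

static void emit_wait(int ring_idx, int sem)
{
	printf("ring %d: wait on semaphore %d\n", ring_idx, sem);
}

/* Make the copy ring wait until every other active ring has reached its
 * signal point, so a buffer still in use elsewhere is idle before the
 * blit is submitted. */
static void sync_rings_for_copy(struct ring *rings, int copy_ring, int sem)
{
	int i;

	for (i = 0; i < NUM_RINGS; ++i) {
		if (i == copy_ring || !rings[i].ready)
			continue;		/* skip self and unused rings */
		emit_signal(i, sem);		/* other ring marks its progress */
		emit_wait(copy_ring, sem);	/* copy ring stalls until then */
	}
}

int main(void)
{
	struct ring rings[NUM_RINGS] = { { true }, { true }, { false } };

	sync_rings_for_copy(rings, 0, 42);
	return 0;
}

In the patch itself this is done with radeon_semaphore_emit_signal() and
radeon_semaphore_emit_wait() inside ring lock/unlock_commit pairs, and only
on R600 and newer, since older parts have a single ring.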
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h      |  2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_asic.c |  3
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ttm.c  | 38
3 files changed, 39 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3483ed9b38e9..7cb63cd2e738 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1489,6 +1489,8 @@ struct radeon_device {
 	unsigned debugfs_count;
 	/* virtual memory */
 	struct radeon_vm_manager vm_manager;
+	/* ring used for bo copies */
+	u32 copy_ring;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 123a1969d284..36a6192ce862 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1036,6 +1036,9 @@ int radeon_asic_init(struct radeon_device *rdev)
 	else
 		rdev->num_crtc = 2;
 
+	/* set the ring used for bo copies */
+	rdev->copy_ring = RADEON_RING_TYPE_GFX_INDEX;
+
 	switch (rdev->family) {
 	case CHIP_R100:
 	case CHIP_RV100:
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index b0ebaf893aca..1882025a9dc8 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -223,10 +223,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
 	struct radeon_fence *fence;
-	int r;
+	int r, i;
 
 	rdev = radeon_get_rdev(bo->bdev);
-	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+	r = radeon_fence_create(rdev, &fence, rdev->copy_ring);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -255,13 +255,43 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
 		return -EINVAL;
 	}
-	if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready) {
-		DRM_ERROR("Trying to move memory with CP turned off.\n");
+	if (!rdev->ring[rdev->copy_ring].ready) {
+		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
 
 	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
+	/* sync other rings */
+	if (rdev->family >= CHIP_R600) {
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			/* no need to sync to our own or unused rings */
+			if (i == rdev->copy_ring || !rdev->ring[i].ready)
+				continue;
+
+			if (!fence->semaphore) {
+				r = radeon_semaphore_create(rdev, &fence->semaphore);
+				/* FIXME: handle semaphore error */
+				if (r)
+					continue;
+			}
+
+			r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
+			/* FIXME: handle ring lock error */
+			if (r)
+				continue;
+			radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
+			radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
+
+			r = radeon_ring_lock(rdev, &rdev->ring[rdev->copy_ring], 3);
+			/* FIXME: handle ring lock error */
+			if (r)
+				continue;
+			radeon_semaphore_emit_wait(rdev, rdev->copy_ring, fence->semaphore);
+			radeon_ring_unlock_commit(rdev, &rdev->ring[rdev->copy_ring]);
+		}
+	}
+
 	r = radeon_copy(rdev, old_start, new_start,
 			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
 			fence);