 drivers/gpu/drm/radeon/radeon.h      |  3 +++
 drivers/gpu/drm/radeon/radeon_cs.c   |  4 ----
 drivers/gpu/drm/radeon/radeon_ring.c | 16 +++++++---------
 drivers/gpu/drm/radeon/radeon_vm.c   | 31 ++++++++++++++++++++++++++-----
 4 files changed, 38 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 40ab8a28c998..644d922990ef 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2804,6 +2804,9 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring);
+void radeon_vm_flush(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     int ring);
 void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5abae403ea4f..f92df2e8ebdd 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -511,10 +511,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	}

-	if (!r) {
-		radeon_vm_fence(rdev, vm, parser->ib.fence);
-	}
-
 out:
	radeon_vm_add_to_lru(rdev, vm);
	mutex_unlock(&vm->mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index fa140119cdb6..665591a7faad 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -153,11 +153,9 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		return r;
	}

-	/* if we can't remember our last VM flush then flush now! */
-	/* XXX figure out why we have to flush for every IB */
-	if (ib->vm /*&& !ib->vm->last_flush*/) {
-		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
-	}
+	if (ib->vm)
+		radeon_vm_flush(rdev, ib->vm, ib->ring);
+
	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
@@ -172,10 +170,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}
-	/* we just flushed the VM, remember that */
-	if (ib->vm && !ib->vm->last_flush) {
-		ib->vm->last_flush = radeon_fence_ref(ib->fence);
-	}
+
+	if (ib->vm)
+		radeon_vm_fence(rdev, ib->vm, ib->fence);
+
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 433b1ebd07ea..516017689793 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -379,6 +379,27 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 }

 /**
+ * radeon_vm_flush - hardware flush the vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to flush
+ * @ring: ring to use for flush
+ *
+ * Flush the vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_flush(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     int ring)
+{
+	/* if we can't remember our last VM flush then flush now! */
+	/* XXX figure out why we have to flush all the time */
+	if (!vm->last_flush || true)
+		radeon_ring_vm_flush(rdev, ring, vm);
+}
+
+/**
  * radeon_vm_fence - remember fence for vm
  *
  * @rdev: radeon_device pointer
@@ -394,14 +415,18 @@ void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
 {
-	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
-	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
-
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(fence);

+	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+
	radeon_fence_unref(&vm->last_id_use);
	vm->last_id_use = radeon_fence_ref(fence);
+
+	/* we just flushed the VM, remember that */
+	if (!vm->last_flush)
+		vm->last_flush = radeon_fence_ref(fence);
 }

 /**