author     Christian König <christian.koenig@amd.com>	2015-06-19 11:00:19 -0400
committer  Alex Deucher <alexander.deucher@amd.com>	2015-06-29 15:52:49 -0400
commit     03507c4f2f63d8d98c2455cf4d192589fac553c7 (patch)
tree       866a2e509f2de7bf285e8a74be0b323028cb4f53
parent     7cebc728174424d67df91dfb14f8b6dc13bed993 (diff)
drm/amdgpu: recreate fence from user seq
And use common fence infrastructure for the wait.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	5
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c	11
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c	37
3 files changed, 44 insertions, 9 deletions
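
In short, amdgpu_cs_wait_ioctl() stops passing a per-ring sequence array into the driver-private amdgpu_fence_wait_seq_timeout() and instead rebuilds an amdgpu_fence object from the user-supplied handle, then waits on it through the common fence infrastructure. A condensed sketch of that new path, drawn from the amdgpu_cs.c hunk below (the ctx lookup and the ioctl tail are trimmed, so this is not a drop-in excerpt):

	struct amdgpu_fence *fence = NULL;
	long r;

	/* Rebuild a fence object around the user-visible sequence number;
	 * this fails with -EINVAL for a seq that was never emitted on this
	 * ring, or -ENOMEM if the allocation fails. */
	r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
	if (r)
		return r;

	/* The common fence code now performs the (interruptible) wait. */
	r = fence_wait_timeout(&fence->base, true, timeout);
	amdgpu_fence_unref(&fence);
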
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 963c4ba5dcba..01657830b470 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -425,6 +425,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   unsigned irq_type);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 		      struct amdgpu_fence **fence);
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+			  uint64_t seq, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@@ -435,9 +437,6 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
 int amdgpu_fence_wait_any(struct amdgpu_device *adev,
 			  struct amdgpu_fence **fences,
 			  bool intr);
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
-				   u64 *target_seq, bool intr,
-				   long timeout);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 86b78c799176..84ba1d1bf327 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -739,9 +739,9 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 {
 	union drm_amdgpu_wait_cs *wait = data;
 	struct amdgpu_device *adev = dev->dev_private;
-	uint64_t seq[AMDGPU_MAX_RINGS] = {0};
-	struct amdgpu_ring *ring = NULL;
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
+	struct amdgpu_fence *fence = NULL;
+	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
 	long r;
 
@@ -754,9 +754,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	if (r)
 		return r;
 
-	seq[ring->idx] = wait->in.handle;
+	r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
+	if (r)
+		return r;
 
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout);
+	r = fence_wait_timeout(&fence->base, true, timeout);
+	amdgpu_fence_unref(&fence);
 	amdgpu_ctx_put(ctx);
 	if (r < 0)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index f2d885c1da8f..a7189a1fa6a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -136,6 +136,38 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 }
 
 /**
+ * amdgpu_fence_recreate - recreate a fence from an user fence
+ *
+ * @ring: ring the fence is associated with
+ * @owner: creator of the fence
+ * @seq: user fence sequence number
+ * @fence: resulting amdgpu fence object
+ *
+ * Recreates a fence command from the user fence sequence number (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+			  uint64_t seq, struct amdgpu_fence **fence)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (seq > ring->fence_drv.sync_seq[ring->idx])
+		return -EINVAL;
+
+	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+	if ((*fence) == NULL)
+		return -ENOMEM;
+
+	(*fence)->seq = seq;
+	(*fence)->ring = ring;
+	(*fence)->owner = owner;
+	fence_init(&(*fence)->base, &amdgpu_fence_ops,
+		   &adev->fence_queue.lock, adev->fence_context + ring->idx,
+		   (*fence)->seq);
+	return 0;
+}
+
+/**
  * amdgpu_fence_check_signaled - callback from fence_queue
  *
  * this function is called with fence_queue lock held, which is also used
@@ -517,8 +549,9 @@ static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
  * the wait timeout, or an error for all other cases.
  * -EDEADLK is returned when a GPU lockup has been detected.
  */
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
-				   bool intr, long timeout)
+static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
+					  u64 *target_seq, bool intr,
+					  long timeout)
 {
 	uint64_t last_seq[AMDGPU_MAX_RINGS];
 	bool signaled;
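
As a side note on the wait call this patch switches to: fence_wait_timeout() (the common fence API of this kernel generation, later renamed dma_fence_wait_timeout()) returns the remaining timeout in jiffies once the fence signals, 0 if the wait timed out, and a negative error such as -ERESTARTSYS when an interruptible wait is interrupted. The tail of the amdgpu_cs_wait_ioctl() hunk is not shown above, so the following is only an illustration of how a caller typically distinguishes those cases, not code taken from this patch:

	/* Illustration of the fence_wait_timeout() return convention. */
	long r = fence_wait_timeout(&fence->base, true, timeout);
	if (r < 0)
		return r;	/* interrupted or other error */
	/* r == 0: timed out before the fence signaled;
	 * r  > 0: fence signaled with r jiffies of the timeout left */
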