author     Christian König <christian.koenig@amd.com>    2015-11-03 06:21:57 -0500
committer  Alex Deucher <alexander.deucher@amd.com>       2015-11-04 12:29:23 -0500
commit     a95e264254dca5b6bfb331d5902930d0787bd7e1 (patch)
tree       89159e316b1dd8b6f52ee6a151919241375e22e3 /drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
parent     7a91d6cb3c8f93288865f98ffa03485aff6dbab4 (diff)
drm/amdgpu: group together common fence implementation
And also add some missing function documentation. No functional change.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
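
For readers new to the shared fence framework this patch consolidates against: a driver implements its callbacks, collects them in a struct fence_ops table, and binds that table to each fence with fence_init(). Below is a minimal sketch of that pattern, assuming the pre-4.10 <linux/fence.h> API in use at this point in the tree; demo_fence, demo_fence_ops, demo_fence_lock and demo_fence_create are hypothetical names for illustration, not amdgpu code.

#include <linux/fence.h>	/* shared fence framework, later renamed dma-fence */
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical driver fence: embeds struct fence, much like amdgpu_fence does. */
struct demo_fence {
	struct fence base;
	u32 seq;
};

static DEFINE_SPINLOCK(demo_fence_lock);

static const char *demo_get_driver_name(struct fence *fence)
{
	return "demo";
}

static const char *demo_get_timeline_name(struct fence *fence)
{
	return "demo-timeline";
}

static bool demo_enable_signaling(struct fence *fence)
{
	/*
	 * Real drivers arm an interrupt or wait-queue callback here (amdgpu
	 * adds itself to its fence_queue); returning true means the fence is
	 * still pending and the callback has been armed.
	 */
	return true;
}

/* The per-driver vtable; this is the kind of table the patch groups together. */
static const struct fence_ops demo_fence_ops = {
	.get_driver_name   = demo_get_driver_name,
	.get_timeline_name = demo_get_timeline_name,
	.enable_signaling  = demo_enable_signaling,
	.wait              = fence_default_wait,
};

static struct demo_fence *demo_fence_create(unsigned int context, u32 seq)
{
	struct demo_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	f->seq = seq;
	/* Bind the ops table; the core will call .enable_signaling/.signaled. */
	fence_init(&f->base, &demo_fence_ops, &demo_fence_lock, context, seq);
	return f;
}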
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 206
1 file changed, 109 insertions(+), 97 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 2c485a22e14b..257d72205bb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -137,42 +137,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 }
 
 /**
- * amdgpu_fence_check_signaled - callback from fence_queue
- *
- * this function is called with fence_queue lock held, which is also used
- * for the fence locking itself, so unlocked variants are used for
- * fence_signal, and remove_wait_queue.
- */
-static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
-{
-	struct amdgpu_fence *fence;
-	struct amdgpu_device *adev;
-	u64 seq;
-	int ret;
-
-	fence = container_of(wait, struct amdgpu_fence, fence_wake);
-	adev = fence->ring->adev;
-
-	/*
-	 * We cannot use amdgpu_fence_process here because we're already
-	 * in the waitqueue, in a call from wake_up_all.
-	 */
-	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
-	if (seq >= fence->seq) {
-		ret = fence_signal_locked(&fence->base);
-		if (!ret)
-			FENCE_TRACE(&fence->base, "signaled from irq context\n");
-		else
-			FENCE_TRACE(&fence->base, "was already signaled\n");
-
-		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
-		fence_put(&fence->base);
-	} else
-		FENCE_TRACE(&fence->base, "pending\n");
-	return 0;
-}
-
-/**
  * amdgpu_fence_activity - check for fence activity
  *
  * @ring: pointer to struct amdgpu_ring
@@ -305,48 +269,6 @@ static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
 	return false;
 }
 
-static bool amdgpu_fence_is_signaled(struct fence *f)
-{
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	struct amdgpu_ring *ring = fence->ring;
-
-	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-		return true;
-
-	amdgpu_fence_process(ring);
-
-	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-		return true;
-
-	return false;
-}
-
-/**
- * amdgpu_fence_enable_signaling - enable signalling on fence
- * @fence: fence
- *
- * This function is called with fence_queue lock held, and adds a callback
- * to fence_queue that checks if this fence is signaled, and if so it
- * signals the fence and removes itself.
- */
-static bool amdgpu_fence_enable_signaling(struct fence *f)
-{
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	struct amdgpu_ring *ring = fence->ring;
-
-	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-		return false;
-
-	fence->fence_wake.flags = 0;
-	fence->fence_wake.private = NULL;
-	fence->fence_wake.func = amdgpu_fence_check_signaled;
-	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
-	fence_get(f);
-	amdgpu_fence_schedule_check(ring);
-	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
-	return true;
-}
-
 /*
  * amdgpu_ring_wait_seq_timeout - wait for seq of the specific ring to signal
  * @ring: ring to wait on for the seq number
@@ -733,6 +655,115 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
 	}
 }
 
+/*
+ * Common fence implementation
+ */
+
+static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+{
+	return "amdgpu";
+}
+
+static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	return (const char *)fence->ring->name;
+}
+
+/**
+ * amdgpu_fence_is_signaled - test if fence is signaled
+ *
+ * @f: fence to test
+ *
+ * Test the fence sequence number if it is already signaled. If it isn't
+ * signaled start fence processing. Returns True if the fence is signaled.
+ */
+static bool amdgpu_fence_is_signaled(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	struct amdgpu_ring *ring = fence->ring;
+
+	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+		return true;
+
+	amdgpu_fence_process(ring);
+
+	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+		return true;
+
+	return false;
+}
+
+/**
+ * amdgpu_fence_check_signaled - callback from fence_queue
+ *
+ * this function is called with fence_queue lock held, which is also used
+ * for the fence locking itself, so unlocked variants are used for
+ * fence_signal, and remove_wait_queue.
+ */
+static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
+{
+	struct amdgpu_fence *fence;
+	struct amdgpu_device *adev;
+	u64 seq;
+	int ret;
+
+	fence = container_of(wait, struct amdgpu_fence, fence_wake);
+	adev = fence->ring->adev;
+
+	/*
+	 * We cannot use amdgpu_fence_process here because we're already
+	 * in the waitqueue, in a call from wake_up_all.
+	 */
+	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
+	if (seq >= fence->seq) {
+		ret = fence_signal_locked(&fence->base);
+		if (!ret)
+			FENCE_TRACE(&fence->base, "signaled from irq context\n");
+		else
+			FENCE_TRACE(&fence->base, "was already signaled\n");
+
+		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
+		fence_put(&fence->base);
+	} else
+		FENCE_TRACE(&fence->base, "pending\n");
+	return 0;
+}
+
+/**
+ * amdgpu_fence_enable_signaling - enable signalling on fence
+ * @fence: fence
+ *
+ * This function is called with fence_queue lock held, and adds a callback
+ * to fence_queue that checks if this fence is signaled, and if so it
+ * signals the fence and removes itself.
+ */
+static bool amdgpu_fence_enable_signaling(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	struct amdgpu_ring *ring = fence->ring;
+
+	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+		return false;
+
+	fence->fence_wake.flags = 0;
+	fence->fence_wake.private = NULL;
+	fence->fence_wake.func = amdgpu_fence_check_signaled;
+	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
+	fence_get(f);
+	amdgpu_fence_schedule_check(ring);
+	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+	return true;
+}
+
+const struct fence_ops amdgpu_fence_ops = {
+	.get_driver_name = amdgpu_fence_get_driver_name,
+	.get_timeline_name = amdgpu_fence_get_timeline_name,
+	.enable_signaling = amdgpu_fence_enable_signaling,
+	.signaled = amdgpu_fence_is_signaled,
+	.wait = fence_default_wait,
+	.release = NULL,
+};
 
 /*
  * Fence debugfs
@@ -783,22 +814,3 @@ int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
 #endif
 }
 
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
-{
-	return "amdgpu";
-}
-
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
-{
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	return (const char *)fence->ring->name;
-}
-
-const struct fence_ops amdgpu_fence_ops = {
-	.get_driver_name = amdgpu_fence_get_driver_name,
-	.get_timeline_name = amdgpu_fence_get_timeline_name,
-	.enable_signaling = amdgpu_fence_enable_signaling,
-	.signaled = amdgpu_fence_is_signaled,
-	.wait = fence_default_wait,
-	.release = NULL,
-};
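
For completeness, a note on how the callbacks grouped above are exercised: the core fence code invokes .signaled (amdgpu_fence_is_signaled) from fence_is_signaled(), while the default wait path (fence_default_wait, used for .wait here) arms .enable_signaling before sleeping. A hypothetical consumer is sketched below under the assumption of the usual <linux/fence.h> helpers of this era; demo_sync_to_fence is an illustrative name, not an existing function.

#include <linux/fence.h>

/*
 * Hypothetical consumer of a struct fence (from amdgpu or any other
 * driver).  fence_is_signaled() polls the .signaled callback;
 * fence_wait() goes through .wait, which falls back to
 * fence_default_wait and arms .enable_signaling before sleeping.
 */
static int demo_sync_to_fence(struct fence *f)
{
	long r;

	if (fence_is_signaled(f))
		return 0;

	/* Interruptible wait, no timeout; returns 0 or a negative error. */
	r = fence_wait(f, true);
	return r;
}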