aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
diff options
context:
space:
mode:
authorChunming Zhou <david1.zhou@amd.com>2015-08-20 06:33:59 -0400
committerAlex Deucher <alexander.deucher@amd.com>2015-08-25 10:53:05 -0400
commit3c62338c26bf2677c8285b406cd769b92ee0dc10 (patch)
treeb0e1e6592c3298fd0fed935740c1827307bcf48e /drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
parentf38fdfddfaab070e3ff2333a79e45169ee33dc28 (diff)
drm/amdgpu: fix last_vm_update fence is not effective for sched fence
Signed-off-by: Chunming Zhou <david1.zhou@amd.com> Reviewed-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c45
1 file changed, 37 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index febbf37b1412..4fffb2539331 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -58,6 +58,29 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
58 sync->last_vm_update = NULL; 58 sync->last_vm_update = NULL;
59} 59}
60 60
61static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
62{
63 struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
64 struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
65
66 if (a_fence)
67 return a_fence->ring->adev == adev;
68 if (s_fence)
69 return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
70 return false;
71}
72
73static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
74{
75 struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
76 struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
77 if (s_fence)
78 return s_fence->owner == owner;
79 if (a_fence)
80 return a_fence->owner == owner;
81 return false;
82}
83
61/** 84/**
62 * amdgpu_sync_fence - remember to sync to this fence 85 * amdgpu_sync_fence - remember to sync to this fence
63 * 86 *
@@ -71,10 +94,23 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
71 struct amdgpu_sync_entry *e; 94 struct amdgpu_sync_entry *e;
72 struct amdgpu_fence *fence; 95 struct amdgpu_fence *fence;
73 struct amdgpu_fence *other; 96 struct amdgpu_fence *other;
97 struct fence *tmp, *later;
74 98
75 if (!f) 99 if (!f)
76 return 0; 100 return 0;
77 101
102 if (amdgpu_sync_same_dev(adev, f) &&
103 amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) {
104 if (sync->last_vm_update) {
105 tmp = sync->last_vm_update;
106 BUG_ON(f->context != tmp->context);
107 later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp;
108 sync->last_vm_update = fence_get(later);
109 fence_put(tmp);
110 } else
111 sync->last_vm_update = fence_get(f);
112 }
113
78 fence = to_amdgpu_fence(f); 114 fence = to_amdgpu_fence(f);
79 if (!fence || fence->ring->adev != adev) { 115 if (!fence || fence->ring->adev != adev) {
80 hash_for_each_possible(sync->fences, e, node, f->context) { 116 hash_for_each_possible(sync->fences, e, node, f->context) {
@@ -103,13 +139,6 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
103 amdgpu_fence_later(fence, other)); 139 amdgpu_fence_later(fence, other));
104 amdgpu_fence_unref(&other); 140 amdgpu_fence_unref(&other);
105 141
106 if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
107 other = sync->last_vm_update;
108 sync->last_vm_update = amdgpu_fence_ref(
109 amdgpu_fence_later(fence, other));
110 amdgpu_fence_unref(&other);
111 }
112
113 return 0; 142 return 0;
114} 143}
115 144
@@ -296,5 +325,5 @@ void amdgpu_sync_free(struct amdgpu_device *adev,
296 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 325 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
297 amdgpu_fence_unref(&sync->sync_to[i]); 326 amdgpu_fence_unref(&sync->sync_to[i]);
298 327
299 amdgpu_fence_unref(&sync->last_vm_update); 328 fence_put(sync->last_vm_update);
300} 329}