commit 94dd0a4ae0b1af997b1f45793e5fd5b47f4ffc18
tree   4752f0bba41887463fb60b08b5cd7912626a9dc3
parent 8d0a7cea824a2784150ef7f25a1e88f18a2a8f69
author    Christian König <christian.koenig@amd.com>  2016-01-18 11:01:42 -0500
committer Alex Deucher <alexander.deucher@amd.com>    2016-02-10 14:16:57 -0500

    drm/amdgpu: merge vm_grab_id and vm_fence v2

    No need for an extra function any more.

    v2: comment cleanups

    Signed-off-by: Christian König <christian.koenig@amd.com>
    Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
    Acked-by: Alex Deucher <alexander.deucher@amd.com>
    Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
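In short, what was a two-step dance under an externally held lock becomes a
single self-contained call. A minimal before/after sketch of the calling
convention, condensed from the amdgpu_sched.c hunk below (error handling
trimmed):

	/* before: caller locks, grabs an ID, then fences it separately */
	mutex_lock(&adev->vm_manager.lock);
	r = amdgpu_vm_grab_id(vm, ring, sync);
	if (!r)
		amdgpu_vm_fence(adev, vm, fence);
	mutex_unlock(&adev->vm_manager.lock);

	/* after: one call; locking and fence bookkeeping happen inside */
	r = amdgpu_vm_grab_id(vm, ring, sync, fence);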
Diffstat (limited to 'drivers/gpu/drm/amd'):

 drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  5 +----
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 13 ++++---------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    | 57 ++++++++++++----------------
 3 files changed, 30 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index edfaae439b76..43b48eb6cf6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -956,13 +956,10 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync);
+		      struct amdgpu_sync *sync, struct fence *fence);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct fence *fence);
 uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index b22a95f0571c..76a1f823d983 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -38,19 +38,14 @@ static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 
	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
		struct amdgpu_ring *ring = job->ibs->ring;
-		struct amdgpu_device *adev = ring->adev;
		int r;
 
-		mutex_lock(&adev->vm_manager.lock);
-		r = amdgpu_vm_grab_id(vm, ring, sync);
-		if (r) {
+		r = amdgpu_vm_grab_id(vm, ring, sync,
+				      &job->base.s_fence->base);
+		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);
-		} else {
-			fence = &job->base.s_fence->base;
-			amdgpu_vm_fence(ring->adev, vm, fence);
+		else
			job->ibs->grabbed_vmid = true;
-		}
-		mutex_unlock(&adev->vm_manager.lock);
 
		fence = amdgpu_sync_get_fence(sync);
	}
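The new fence parameter takes over the bookkeeping that amdgpu_vm_fence() used
to do: with the manager lock held, the reference to the VMID's previous
"active" fence is dropped and a reference to the submitted job's fence is
taken, so the ID cannot be handed out to another VM until that fence signals.
A standalone model of the reference swap (a hypothetical helper for
illustration only; the patch open-codes these two lines at each site):

	/*
	 * Record @fence as the fence protecting VMID @id from reuse.
	 * Caller must hold adev->vm_manager.lock.
	 */
	static void amdgpu_vm_id_set_active(struct amdgpu_device *adev,
					    unsigned id, struct fence *fence)
	{
		fence_put(adev->vm_manager.ids[id].active);	/* drop old reference */
		adev->vm_manager.ids[id].active = fence_get(fence); /* take new one */
	}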
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index edbb3ff4e731..d4718e1cd050 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -152,13 +152,14 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
  * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  *
  * Global mutex must be locked!
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync)
+		      struct amdgpu_sync *sync, struct fence *fence)
 {
	struct fence *best[AMDGPU_MAX_RINGS] = {};
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
@@ -167,6 +168,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
	unsigned choices[2] = {};
	unsigned i;
 
+	mutex_lock(&adev->vm_manager.lock);
+
	/* check if the id is still valid */
	if (vm_id->id) {
		unsigned id = vm_id->id;
@@ -175,6 +178,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
		if (owner == (long)vm) {
			trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+			fence_put(adev->vm_manager.ids[id].active);
+			adev->vm_manager.ids[id].active = fence_get(fence);
+			mutex_unlock(&adev->vm_manager.lock);
			return 0;
		}
	}
@@ -191,6 +197,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		/* found a free one */
		vm_id->id = i;
		trace_amdgpu_vm_grab_id(vm, i, ring->idx);
+		mutex_unlock(&adev->vm_manager.lock);
		return 0;
	}
 
@@ -203,19 +210,29 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
	}
 
	for (i = 0; i < 2; ++i) {
-		if (choices[i]) {
-			struct fence *fence;
+		struct fence *active;
+		int r;
 
-			fence = adev->vm_manager.ids[choices[i]].active;
-			vm_id->id = choices[i];
+		if (!choices[i])
+			continue;
 
-			trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx);
-			return amdgpu_sync_fence(ring->adev, sync, fence);
-		}
+		vm_id->id = choices[i];
+		active = adev->vm_manager.ids[vm_id->id].active;
+		r = amdgpu_sync_fence(ring->adev, sync, active);
+
+		trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx);
+		atomic_long_set(&adev->vm_manager.ids[vm_id->id].owner, (long)vm);
+
+		fence_put(adev->vm_manager.ids[vm_id->id].active);
+		adev->vm_manager.ids[vm_id->id].active = fence_get(fence);
+
+		mutex_unlock(&adev->vm_manager.lock);
+		return r;
	}
 
	/* should never happen */
	BUG();
+	mutex_unlock(&adev->vm_manager.lock);
	return -EINVAL;
 }
 
@@ -258,30 +275,6 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
 }
 
 /**
- * amdgpu_vm_fence - remember fence for vm
- *
- * @adev: amdgpu_device pointer
- * @vm: vm we want to fence
- * @fence: fence to remember
- *
- * Fence the vm (cayman+).
- * Set the fence used to protect page table and id.
- *
- * Global and local mutex must be locked!
- */
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct fence *fence)
-{
-	struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
-	unsigned vm_id = vm->ids[ring->idx].id;
-
-	fence_put(adev->vm_manager.ids[vm_id].active);
-	adev->vm_manager.ids[vm_id].active = fence_get(fence);
-	atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
-}
-
-/**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
  * @vm: requested vm
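One thing to watch now that the lock is taken inside amdgpu_vm_grab_id():
every exit path must release it: the early return when the VM still owns a
valid ID, the return after a free ID is found, the return from the recycling
loop, and even the should-never-happen fallthrough after BUG(). A compressed
schematic of the control flow after this patch (not compilable as-is; the
condition names are invented for illustration and do not exist in the source):

	mutex_lock(&adev->vm_manager.lock);

	if (vm_still_owns_valid_id) {		/* first amdgpu_vm.c hunk */
		/* refresh the active fence */
		mutex_unlock(&adev->vm_manager.lock);
		return 0;
	}
	if (free_id_available) {		/* second amdgpu_vm.c hunk */
		mutex_unlock(&adev->vm_manager.lock);
		return 0;
	}
	if (oldest_id_recyclable) {		/* third amdgpu_vm.c hunk */
		r = amdgpu_sync_fence(ring->adev, sync, active);
		/* record new active fence, publish owner */
		mutex_unlock(&adev->vm_manager.lock);
		return r;
	}

	BUG();					/* unreachable by design */
	mutex_unlock(&adev->vm_manager.lock);
	return -EINVAL;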