author		Christian König <christian.koenig@amd.com>	2016-03-09 16:11:53 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2016-05-04 20:19:30 -0400
commit		794f50b95d40bbde905c3c4c514a33fdda54f497 (patch)
tree		c5fb9bb31789ad756a65b6eb9fe4fc499fe753c7
parent		41d9eb2c5a2a21c9120e906d077e77562883510e (diff)
drm/amdgpu: reuse VMIDs already assigned to a process
If we don't need to flush, we can easily use another VMID already
assigned to the process.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 77
 1 file changed, 46 insertions(+), 31 deletions(-)
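For readers skimming the change, the core of the patch is the eligibility test at the top of the new do/while loop: a VMID already owned by this VM can be handed out again without a VM flush only if it still points at the same page directory, its previous flush has completed (unless it is the ID this ring used last), and no page-directory updates newer than its last flush are pending. The following is a minimal, self-contained userspace sketch of that predicate; the stub types and helpers (fence_stub, vm_id_stub, fence_signaled, fence_later, vmid_reusable) only stand in for the real amdgpu/fence structures and are not part of the kernel code.

/*
 * Simplified model of the VMID-reuse check introduced by this patch.
 * All types and helpers are stand-ins; only the decision logic mirrors
 * the checks in amdgpu_vm_grab_id().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fence_stub {
	bool signaled;          /* has this fence completed? */
	uint64_t seq;           /* monotonic sequence number */
};

struct vm_id_stub {
	long owner;                         /* tag of the VM that owns this ID */
	uint64_t pd_gpu_addr;               /* page directory the ID was flushed for */
	struct fence_stub *last_flush;      /* fence of the last flush on this ID */
	struct fence_stub *flushed_updates; /* PD updates covered by that flush */
};

/* Stand-ins for fence_is_signaled() / fence_is_later() */
static bool fence_signaled(const struct fence_stub *f)
{
	return f && f->signaled;
}

static bool fence_later(const struct fence_stub *a, const struct fence_stub *b)
{
	return a->seq > b->seq;
}

/*
 * True when "id" can be reused by the VM tagged "vm_tag" without a flush:
 * same owner, same page directory, previous flush signaled (unless this is
 * the ID the ring used last), and no pending updates newer than the ones
 * already flushed.
 */
static bool vmid_reusable(const struct vm_id_stub *id, long vm_tag,
			  uint64_t pd_addr, const struct fence_stub *updates,
			  bool is_current_ring_id)
{
	if (!id || id->owner != vm_tag || id->pd_gpu_addr != pd_addr)
		return false;

	if (!is_current_ring_id &&
	    (!id->last_flush || !fence_signaled(id->last_flush)))
		return false;

	if (updates && (!id->flushed_updates ||
			fence_later(updates, id->flushed_updates)))
		return false;

	return true;
}

int main(void)
{
	struct fence_stub flush = { .signaled = true, .seq = 12 };
	struct fence_stub updates = { .signaled = false, .seq = 10 };
	struct vm_id_stub id = {
		.owner = 0x1000,
		.pd_gpu_addr = 0xd000,
		.last_flush = &flush,
		.flushed_updates = &flush,
	};

	/* Pending updates are older than what was already flushed, so the
	 * ID can be reused and the flush skipped (prints "reusable: 1"). */
	printf("reusable: %d\n",
	       vmid_reusable(&id, 0x1000, 0xd000, &updates, false));
	return 0;
}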
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 252445f578f6..1425aab31233 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -166,48 +166,63 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 {
 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vm_id *id = vm->ids[ring->idx];
 	struct fence *updates = sync->last_vm_update;
+	struct amdgpu_vm_id *id;
+	unsigned i = ring->idx;
 	int r;
 
 	mutex_lock(&adev->vm_manager.lock);
 
-	/* check if the id is still valid */
-	if (id) {
-		struct fence *flushed = id->flushed_updates;
-		long owner = atomic_long_read(&id->owner);
-		bool usable = pd_addr == id->pd_gpu_addr;
-
-		if (owner != (long)&vm->ids[ring->idx])
-			usable = false;
-		else if (!flushed)
-			usable = false;
-		else if (!updates)
-			usable = true;
-		else
-			usable = !fence_is_later(updates, flushed);
+	/* Check if we can use a VMID already assigned to this VM */
+	do {
+		struct fence *flushed;
 
-		if (usable) {
+		id = vm->ids[i++];
+		if (i == AMDGPU_MAX_RINGS)
+			i = 0;
 
-			r = amdgpu_sync_fence(ring->adev, sync, id->first);
-			if (r)
-				goto error;
+		/* Check all the prerequisites to using this VMID */
+		if (!id)
+			continue;
+
+		if (atomic_long_read(&id->owner) != (long)vm)
+			continue;
+
+		if (pd_addr != id->pd_gpu_addr)
+			continue;
+
+		if (id != vm->ids[ring->idx] &&
+		    (!id->last_flush || !fence_is_signaled(id->last_flush)))
+			continue;
+
+		flushed = id->flushed_updates;
+		if (updates && (!flushed || fence_is_later(updates, flushed)))
+			continue;
 
-			r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+		/* Good we can use this VMID */
+		if (id == vm->ids[ring->idx]) {
+			r = amdgpu_sync_fence(ring->adev, sync,
+					      id->first);
 			if (r)
 				goto error;
+		}
+
+		/* And remember this submission as user of the VMID */
+		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+		if (r)
+			goto error;
 
-			list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+		vm->ids[ring->idx] = id;
 
-			*vm_id = id - adev->vm_manager.ids;
-			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
-			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
-						*vm_pd_addr);
+		*vm_id = id - adev->vm_manager.ids;
+		*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
 
-			mutex_unlock(&adev->vm_manager.lock);
-			return 0;
-		}
-	}
+		mutex_unlock(&adev->vm_manager.lock);
+		return 0;
+
+	} while (i != ring->idx);
 
 	id = list_first_entry(&adev->vm_manager.ids_lru,
 			      struct amdgpu_vm_id,
@@ -245,7 +260,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	id->pd_gpu_addr = pd_addr;
 
 	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-	atomic_long_set(&id->owner, (long)id);
+	atomic_long_set(&id->owner, (long)vm);
 	vm->ids[ring->idx] = id;
 
 	*vm_id = id - adev->vm_manager.ids;
@@ -1464,7 +1479,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		if (!id)
 			continue;
 
-		atomic_long_cmpxchg(&id->owner, (long)&vm->ids[i], 0);
+		atomic_long_cmpxchg(&id->owner, (long)vm, 0);
 	}
 }
 
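A second, smaller change is how a VMID records its owner: instead of tagging the ID with the address of the per-ring slot (&vm->ids[ring->idx]), it is now tagged with the VM itself, which is what lets any ring of the same process match the ID, and amdgpu_vm_fini() clears that tag with a compare-and-exchange so a stale ID never matches a later VM. Below is a rough userspace illustration of that tag/match/clear lifecycle using C11 atomics in place of the kernel's atomic_long_* helpers; the names (vmid_grab, vmid_owned_by, vmid_release) are made up for the sketch and do not exist in the driver.

/*
 * Illustration of the owner-tagging scheme the patch switches to.
 * Hypothetical names; not driver code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct vmid_stub {
	atomic_long owner;	/* 0 = unowned, otherwise the owning VM's address */
};

/* Like atomic_long_set(&id->owner, (long)vm) when the ID is grabbed */
static void vmid_grab(struct vmid_stub *id, void *vm)
{
	atomic_store(&id->owner, (long)vm);
}

/* Like atomic_long_read(&id->owner) == (long)vm in the reuse loop */
static bool vmid_owned_by(struct vmid_stub *id, void *vm)
{
	return atomic_load(&id->owner) == (long)vm;
}

/* Like atomic_long_cmpxchg(&id->owner, (long)vm, 0) in amdgpu_vm_fini() */
static void vmid_release(struct vmid_stub *id, void *vm)
{
	long expected = (long)vm;

	atomic_compare_exchange_strong(&id->owner, &expected, 0);
}

int main(void)
{
	struct vmid_stub id = { .owner = 0 };
	int vm;	/* stands in for a struct amdgpu_vm */

	vmid_grab(&id, &vm);
	printf("owned: %d\n", vmid_owned_by(&id, &vm));	/* 1 */

	vmid_release(&id, &vm);
	printf("owned: %d\n", vmid_owned_by(&id, &vm));	/* 0 */
	return 0;
}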