diff options
author | Christian König <christian.koenig@amd.com> | 2016-01-21 04:19:11 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2016-02-10 14:16:58 -0500 |
commit | a9a78b329a3e31a977f8d8ef64b2f3a574899992 (patch) | |
tree | f37eadb3fd535501d79f793cf568c99e4932f652 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |
parent | 94dd0a4ae0b1af997b1f45793e5fd5b47f4ffc18 (diff) |
drm/amdgpu: use a global LRU list for VMIDs
With the scheduler enabled, managing per-ring LRUs doesn't
make much sense any more.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 88 |
1 file changed, 40 insertions, 48 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d4718e1cd050..2dd73ca57221 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -161,79 +161,52 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, | |||
161 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | 161 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, |
162 | struct amdgpu_sync *sync, struct fence *fence) | 162 | struct amdgpu_sync *sync, struct fence *fence) |
163 | { | 163 | { |
164 | struct fence *best[AMDGPU_MAX_RINGS] = {}; | ||
165 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; | 164 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; |
166 | struct amdgpu_device *adev = ring->adev; | 165 | struct amdgpu_device *adev = ring->adev; |
167 | 166 | struct amdgpu_vm_manager_id *id; | |
168 | unsigned choices[2] = {}; | 167 | int r; |
169 | unsigned i; | ||
170 | 168 | ||
171 | mutex_lock(&adev->vm_manager.lock); | 169 | mutex_lock(&adev->vm_manager.lock); |
172 | 170 | ||
173 | /* check if the id is still valid */ | 171 | /* check if the id is still valid */ |
174 | if (vm_id->id) { | 172 | if (vm_id->id) { |
175 | unsigned id = vm_id->id; | ||
176 | long owner; | 173 | long owner; |
177 | 174 | ||
178 | owner = atomic_long_read(&adev->vm_manager.ids[id].owner); | 175 | id = &adev->vm_manager.ids[vm_id->id]; |
176 | owner = atomic_long_read(&id->owner); | ||
179 | if (owner == (long)vm) { | 177 | if (owner == (long)vm) { |
178 | list_move_tail(&id->list, &adev->vm_manager.ids_lru); | ||
180 | trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx); | 179 | trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx); |
181 | fence_put(adev->vm_manager.ids[id].active); | ||
182 | adev->vm_manager.ids[id].active = fence_get(fence); | ||
183 | mutex_unlock(&adev->vm_manager.lock); | ||
184 | return 0; | ||
185 | } | ||
186 | } | ||
187 | 180 | ||
188 | /* we definately need to flush */ | 181 | fence_put(id->active); |
189 | vm_id->pd_gpu_addr = ~0ll; | 182 | id->active = fence_get(fence); |
190 | 183 | ||
191 | /* skip over VMID 0, since it is the system VM */ | ||
192 | for (i = 1; i < adev->vm_manager.nvm; ++i) { | ||
193 | struct fence *fence = adev->vm_manager.ids[i].active; | ||
194 | struct amdgpu_ring *fring; | ||
195 | |||
196 | if (fence == NULL) { | ||
197 | /* found a free one */ | ||
198 | vm_id->id = i; | ||
199 | trace_amdgpu_vm_grab_id(vm, i, ring->idx); | ||
200 | mutex_unlock(&adev->vm_manager.lock); | 184 | mutex_unlock(&adev->vm_manager.lock); |
201 | return 0; | 185 | return 0; |
202 | } | 186 | } |
203 | |||
204 | fring = amdgpu_ring_from_fence(fence); | ||
205 | if (best[fring->idx] == NULL || | ||
206 | fence_is_later(best[fring->idx], fence)) { | ||
207 | best[fring->idx] = fence; | ||
208 | choices[fring == ring ? 0 : 1] = i; | ||
209 | } | ||
210 | } | 187 | } |
211 | 188 | ||
212 | for (i = 0; i < 2; ++i) { | 189 | /* we definately need to flush */ |
213 | struct fence *active; | 190 | vm_id->pd_gpu_addr = ~0ll; |
214 | int r; | ||
215 | |||
216 | if (!choices[i]) | ||
217 | continue; | ||
218 | 191 | ||
219 | vm_id->id = choices[i]; | 192 | id = list_first_entry(&adev->vm_manager.ids_lru, |
220 | active = adev->vm_manager.ids[vm_id->id].active; | 193 | struct amdgpu_vm_manager_id, |
221 | r = amdgpu_sync_fence(ring->adev, sync, active); | 194 | list); |
195 | list_move_tail(&id->list, &adev->vm_manager.ids_lru); | ||
196 | atomic_long_set(&id->owner, (long)vm); | ||
222 | 197 | ||
223 | trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx); | 198 | vm_id->id = id - adev->vm_manager.ids; |
224 | atomic_long_set(&adev->vm_manager.ids[vm_id->id].owner, (long)vm); | 199 | trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx); |
225 | 200 | ||
226 | fence_put(adev->vm_manager.ids[vm_id->id].active); | 201 | r = amdgpu_sync_fence(ring->adev, sync, id->active); |
227 | adev->vm_manager.ids[vm_id->id].active = fence_get(fence); | ||
228 | 202 | ||
229 | mutex_unlock(&adev->vm_manager.lock); | 203 | if (!r) { |
230 | return r; | 204 | fence_put(id->active); |
205 | id->active = fence_get(fence); | ||
231 | } | 206 | } |
232 | 207 | ||
233 | /* should never happen */ | ||
234 | BUG(); | ||
235 | mutex_unlock(&adev->vm_manager.lock); | 208 | mutex_unlock(&adev->vm_manager.lock); |
236 | return -EINVAL; | 209 | return r; |
237 | } | 210 | } |
238 | 211 | ||
239 | /** | 212 | /** |
@@ -1359,6 +1332,25 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1359 | } | 1332 | } |
1360 | 1333 | ||
1361 | /** | 1334 | /** |
1335 | * amdgpu_vm_manager_init - init the VM manager | ||
1336 | * | ||
1337 | * @adev: amdgpu_device pointer | ||
1338 | * | ||
1339 | * Initialize the VM manager structures | ||
1340 | */ | ||
1341 | void amdgpu_vm_manager_init(struct amdgpu_device *adev) | ||
1342 | { | ||
1343 | unsigned i; | ||
1344 | |||
1345 | INIT_LIST_HEAD(&adev->vm_manager.ids_lru); | ||
1346 | |||
1347 | /* skip over VMID 0, since it is the system VM */ | ||
1348 | for (i = 1; i < adev->vm_manager.num_ids; ++i) | ||
1349 | list_add_tail(&adev->vm_manager.ids[i].list, | ||
1350 | &adev->vm_manager.ids_lru); | ||
1351 | } | ||
1352 | |||
1353 | /** | ||
1362 | * amdgpu_vm_manager_fini - cleanup VM manager | 1354 | * amdgpu_vm_manager_fini - cleanup VM manager |
1363 | * | 1355 | * |
1364 | * @adev: amdgpu_device pointer | 1356 | * @adev: amdgpu_device pointer |