 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 8 ++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 2 --
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  | 4 ----
 3 files changed, 4 insertions(+), 10 deletions(-)
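Note (not part of the patch itself, so treat the rationale as an inference from the diff): the change replaces the per-VM client_id, previously allocated from vm_manager.client_counter, with the fence context of the VM's page-table scheduler entity as the VMID owner tag. Because the DRM scheduler hands every entity its own fence context, that value is already unique per VM and the separate counter becomes redundant. Below is a minimal, self-contained userspace sketch of that uniqueness argument; the struct and function names are illustrative stand-ins, not kernel API.

/* Hypothetical sketch: a monotonically increasing counter stands in for
 * dma_fence_context_alloc().  Each "entity" gets its own context value, so
 * a VM that owns exactly one entity can use it as its owner id. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fence_context_counter;		/* stand-in for the kernel allocator */

struct sched_entity { uint64_t fence_context; };
struct vm           { struct sched_entity entity; };

static void entity_init(struct sched_entity *e)
{
	e->fence_context = fence_context_counter++;	/* unique per entity */
}

int main(void)
{
	struct vm a, b;

	entity_init(&a.entity);
	entity_init(&b.entity);

	/* The owner check in amdgpu_vmid_grab() compares against this value. */
	printf("vm a owner %llu, vm b owner %llu\n",
	       (unsigned long long)a.entity.fence_context,
	       (unsigned long long)b.entity.fence_context);
	return 0;
}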
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index d24884b419cb..16884a0b677b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -115,7 +115,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 
 	flushed  = id->flushed_updates;
 	if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
-	    (atomic64_read(&id->owner) != vm->client_id) ||
+	    (atomic64_read(&id->owner) != vm->entity.fence_context) ||
 	    (job->vm_pd_addr != id->pd_gpu_addr) ||
 	    (updates && (!flushed || updates->context != flushed->context ||
 			 dma_fence_is_later(updates, flushed))) ||
@@ -144,7 +144,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 		id->flushed_updates = dma_fence_get(updates);
 	}
 	id->pd_gpu_addr = job->vm_pd_addr;
-	atomic64_set(&id->owner, vm->client_id);
+	atomic64_set(&id->owner, vm->entity.fence_context);
 	job->vm_needs_flush = needs_flush;
 	if (needs_flush) {
 		dma_fence_put(id->last_flush);
@@ -242,7 +242,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (amdgpu_vmid_had_gpu_reset(adev, id))
 			continue;
 
-		if (atomic64_read(&id->owner) != vm->client_id)
+		if (atomic64_read(&id->owner) != vm->entity.fence_context)
 			continue;
 
 		if (job->vm_pd_addr != id->pd_gpu_addr)
@@ -291,7 +291,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		id->pd_gpu_addr = job->vm_pd_addr;
 		dma_fence_put(id->flushed_updates);
 		id->flushed_updates = dma_fence_get(updates);
-		atomic64_set(&id->owner, vm->client_id);
+		atomic64_set(&id->owner, vm->entity.fence_context);
 
 needs_flush:
 	job->vm_needs_flush = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 946bc21c6d7d..af7dceb7131e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2256,7 +2256,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	uint64_t init_pde_value = 0;
 
 	vm->va = RB_ROOT_CACHED;
-	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
 		vm->reserved_vmid[i] = NULL;
 	spin_lock_init(&vm->status_lock);
@@ -2502,7 +2501,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 		adev->vm_manager.seqno[i] = 0;
 
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
-	atomic64_set(&adev->vm_manager.client_counter, 0);
 	spin_lock_init(&adev->vm_manager.prt_lock);
 	atomic_set(&adev->vm_manager.num_prt_users, 0);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 78296d1a5b2f..21a80f1bb2b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -191,8 +191,6 @@ struct amdgpu_vm {
 	/* Scheduler entity for page table updates */
 	struct drm_sched_entity	entity;
 
-	/* client id and PASID (TODO: replace client_id with PASID) */
-	u64			client_id;
 	unsigned int		pasid;
 	/* dedicated to vm */
 	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];
@@ -230,8 +228,6 @@ struct amdgpu_vm_manager {
 	struct amdgpu_ring		*vm_pte_rings[AMDGPU_MAX_RINGS];
 	unsigned			vm_pte_num_rings;
 	atomic_t			vm_pte_next_ring;
-	/* client id counter */
-	atomic64_t			client_counter;
 
 	/* partial resident texture handling */
 	spinlock_t			prt_lock;