aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2015-11-14 15:31:40 -0500
committerAlex Deucher <alexander.deucher@amd.com>2015-11-18 11:40:37 -0500
commit1c16c0a7b26c6c905dc79c4194135ca2f360f0f5 (patch)
treeb3358c65300f32ef791fda5fbb91df22b7942467
parentea89f8c9e8ba8a7b75446eef36917da50d2186d9 (diff)
drm/amdgpu: keep the owner for VMIDs
We don't need the last VM use any more, keep the owner directly. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c33
2 files changed, 24 insertions, 17 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index beb74854a8a3..a5692624070a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -925,8 +925,6 @@ struct amdgpu_vm_id {
925 uint64_t pd_gpu_addr; 925 uint64_t pd_gpu_addr;
926 /* last flushed PD/PT update */ 926 /* last flushed PD/PT update */
927 struct fence *flushed_updates; 927 struct fence *flushed_updates;
928 /* last use of vmid */
929 struct fence *last_id_use;
930}; 928};
931 929
932struct amdgpu_vm { 930struct amdgpu_vm {
@@ -959,7 +957,11 @@ struct amdgpu_vm {
959}; 957};
960 958
961struct amdgpu_vm_manager { 959struct amdgpu_vm_manager {
962 struct fence *active[AMDGPU_NUM_VM]; 960 struct {
961 struct fence *active;
962 atomic_long_t owner;
963 } ids[AMDGPU_NUM_VM];
964
963 uint32_t max_pfn; 965 uint32_t max_pfn;
964 /* number of VMIDs */ 966 /* number of VMIDs */
965 unsigned nvm; 967 unsigned nvm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6bb209bc0d36..0bdbb2480f9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
143 unsigned i; 143 unsigned i;
144 144
145 /* check if the id is still valid */ 145 /* check if the id is still valid */
146 if (vm_id->id && vm_id->last_id_use && 146 if (vm_id->id) {
147 vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) { 147 unsigned id = vm_id->id;
148 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); 148 long owner;
149 return 0; 149
150 owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
151 if (owner == (long)vm) {
152 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
153 return 0;
154 }
150 } 155 }
151 156
152 /* we definitely need to flush */ 157 /* we definitely need to flush */
@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
154 159
155 /* skip over VMID 0, since it is the system VM */ 160 /* skip over VMID 0, since it is the system VM */
156 for (i = 1; i < adev->vm_manager.nvm; ++i) { 161 for (i = 1; i < adev->vm_manager.nvm; ++i) {
157 struct fence *fence = adev->vm_manager.active[i]; 162 struct fence *fence = adev->vm_manager.ids[i].active;
158 struct amdgpu_ring *fring; 163 struct amdgpu_ring *fring;
159 164
160 if (fence == NULL) { 165 if (fence == NULL) {
@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
176 if (choices[i]) { 181 if (choices[i]) {
177 struct fence *fence; 182 struct fence *fence;
178 183
179 fence = adev->vm_manager.active[choices[i]]; 184 fence = adev->vm_manager.ids[choices[i]].active;
180 vm_id->id = choices[i]; 185 vm_id->id = choices[i];
181 186
182 trace_amdgpu_vm_grab_id(choices[i], ring->idx); 187 trace_amdgpu_vm_grab_id(choices[i], ring->idx);
@@ -246,11 +251,9 @@ void amdgpu_vm_fence(struct amdgpu_device *adev,
246 struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence); 251 struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
247 unsigned vm_id = vm->ids[ring->idx].id; 252 unsigned vm_id = vm->ids[ring->idx].id;
248 253
249 fence_put(adev->vm_manager.active[vm_id]); 254 fence_put(adev->vm_manager.ids[vm_id].active);
250 adev->vm_manager.active[vm_id] = fence_get(fence); 255 adev->vm_manager.ids[vm_id].active = fence_get(fence);
251 256 atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
252 fence_put(vm->ids[ring->idx].last_id_use);
253 vm->ids[ring->idx].last_id_use = fence_get(fence);
254} 257}
255 258
256/** 259/**
@@ -1238,7 +1241,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1238 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1241 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1239 vm->ids[i].id = 0; 1242 vm->ids[i].id = 0;
1240 vm->ids[i].flushed_updates = NULL; 1243 vm->ids[i].flushed_updates = NULL;
1241 vm->ids[i].last_id_use = NULL;
1242 } 1244 }
1243 mutex_init(&vm->mutex); 1245 mutex_init(&vm->mutex);
1244 vm->va = RB_ROOT; 1246 vm->va = RB_ROOT;
@@ -1312,8 +1314,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1312 fence_put(vm->page_directory_fence); 1314 fence_put(vm->page_directory_fence);
1313 1315
1314 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1316 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1317 unsigned id = vm->ids[i].id;
1318
1319 atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
1320 (long)vm, 0);
1315 fence_put(vm->ids[i].flushed_updates); 1321 fence_put(vm->ids[i].flushed_updates);
1316 fence_put(vm->ids[i].last_id_use);
1317 } 1322 }
1318 1323
1319 mutex_destroy(&vm->mutex); 1324 mutex_destroy(&vm->mutex);
@@ -1331,5 +1336,5 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1331 unsigned i; 1336 unsigned i;
1332 1337
1333 for (i = 0; i < AMDGPU_NUM_VM; ++i) 1338 for (i = 0; i < AMDGPU_NUM_VM; ++i)
1334 fence_put(adev->vm_manager.active[i]); 1339 fence_put(adev->vm_manager.ids[i].active);
1335} 1340}