Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  |   7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c |  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 115
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c   |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c   |   6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   |   6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c  |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  |   2
9 files changed, 91 insertions, 80 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f5bac97a438b..0c42a85ca5a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -769,8 +769,9 @@ struct amdgpu_ib {
 	uint32_t *ptr;
 	struct amdgpu_fence *fence;
 	struct amdgpu_user_fence *user;
-	bool grabbed_vmid;
 	struct amdgpu_vm *vm;
+	unsigned vm_id;
+	uint64_t vm_pd_addr;
 	struct amdgpu_ctx *ctx;
 	uint32_t gds_base, gds_size;
 	uint32_t gws_base, gws_size;
@@ -877,10 +878,10 @@ struct amdgpu_vm_pt {
 };
 
 struct amdgpu_vm_id {
-	unsigned id;
+	struct amdgpu_vm_manager_id *mgr_id;
 	uint64_t pd_gpu_addr;
 	/* last flushed PD/PT update */
 	struct fence *flushed_updates;
 };
 
 struct amdgpu_vm {
@@ -954,10 +955,11 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence);
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      unsigned *vm_id, uint64_t *vm_pd_addr);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates);
+		     unsigned vmid,
+		     uint64_t pd_addr);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index b5bdd5d59b58..db14a7bbb8f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -75,6 +75,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	}
 
 	ib->vm = vm;
+	ib->vm_id = 0;
 
 	return 0;
 }
@@ -139,7 +140,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return -EINVAL;
 	}
 
-	if (vm && !ibs->grabbed_vmid) {
+	if (vm && !ibs->vm_id) {
 		dev_err(adev->dev, "VM IB without ID\n");
 		return -EINVAL;
 	}
@@ -152,10 +153,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 
 	if (vm) {
 		/* do context switch */
-		amdgpu_vm_flush(ring, vm, last_vm_update);
+		amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr);
 
 		if (ring->funcs->emit_gds_switch)
-			amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+			amdgpu_ring_emit_gds_switch(ring, ib->vm_id,
 						    ib->gds_base, ib->gds_size,
 						    ib->gws_base, ib->gws_size,
 						    ib->oa_base, ib->oa_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index f29bbb96a881..90e52f7e17a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -105,16 +105,23 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 
 	struct fence *fence = amdgpu_sync_get_fence(&job->sync);
 
-	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
+	if (fence == NULL && vm && !job->ibs->vm_id) {
 		struct amdgpu_ring *ring = job->ring;
+		unsigned i, vm_id;
+		uint64_t vm_pd_addr;
 		int r;
 
 		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-				      &job->base.s_fence->base);
+				      &job->base.s_fence->base,
+				      &vm_id, &vm_pd_addr);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
-		else
-			job->ibs->grabbed_vmid = true;
+		else {
+			for (i = 0; i < job->num_ibs; ++i) {
+				job->ibs[i].vm_id = vm_id;
+				job->ibs[i].vm_pd_addr = vm_pd_addr;
+			}
+		}
 
 		fence = amdgpu_sync_get_fence(&job->sync);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 264c5968a1d3..ba909245fef5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -50,6 +50,9 @@
  * SI supports 16.
  */
 
+/* Special value that no flush is necessary */
+#define AMDGPU_VM_NO_FLUSH (~0ll)
+
 /**
  * amdgpu_vm_num_pde - return the number of page directory entries
  *
@@ -157,50 +160,69 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence)
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      unsigned *vm_id, uint64_t *vm_pd_addr)
 {
-	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vm_manager_id *id;
+	struct amdgpu_vm_id *id = &vm->ids[ring->idx];
+	struct fence *updates = sync->last_vm_update;
 	int r;
 
 	mutex_lock(&adev->vm_manager.lock);
 
 	/* check if the id is still valid */
-	if (vm_id->id) {
+	if (id->mgr_id) {
+		struct fence *flushed = id->flushed_updates;
+		bool is_later;
 		long owner;
 
-		id = &adev->vm_manager.ids[vm_id->id];
-		owner = atomic_long_read(&id->owner);
-		if (owner == (long)vm) {
-			list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-			trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+		if (!flushed)
+			is_later = true;
+		else if (!updates)
+			is_later = false;
+		else
+			is_later = fence_is_later(updates, flushed);
+
+		owner = atomic_long_read(&id->mgr_id->owner);
+		if (!is_later && owner == (long)id &&
+		    pd_addr == id->pd_gpu_addr) {
+
+			fence_put(id->mgr_id->active);
+			id->mgr_id->active = fence_get(fence);
+
+			list_move_tail(&id->mgr_id->list,
+				       &adev->vm_manager.ids_lru);
 
-			fence_put(id->active);
-			id->active = fence_get(fence);
+			*vm_id = id->mgr_id - adev->vm_manager.ids;
+			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+			trace_amdgpu_vm_grab_id(vm, *vm_id, ring->idx);
 
 			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
 		}
 	}
 
-	/* we definately need to flush */
-	vm_id->pd_gpu_addr = ~0ll;
+	id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
+				      struct amdgpu_vm_manager_id,
+				      list);
 
-	id = list_first_entry(&adev->vm_manager.ids_lru,
-			      struct amdgpu_vm_manager_id,
-			      list);
-	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-	atomic_long_set(&id->owner, (long)vm);
+	r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
+	if (!r) {
+		fence_put(id->mgr_id->active);
+		id->mgr_id->active = fence_get(fence);
 
-	vm_id->id = id - adev->vm_manager.ids;
-	trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+		fence_put(id->flushed_updates);
+		id->flushed_updates = fence_get(updates);
 
-	r = amdgpu_sync_fence(ring->adev, sync, id->active);
+		id->pd_gpu_addr = pd_addr;
 
-	if (!r) {
-		fence_put(id->active);
-		id->active = fence_get(fence);
+		list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
+		atomic_long_set(&id->mgr_id->owner, (long)id);
+
+		*vm_id = id->mgr_id - adev->vm_manager.ids;
+		*vm_pd_addr = pd_addr;
+		trace_amdgpu_vm_grab_id(vm, *vm_id, ring->idx);
 	}
 
 	mutex_unlock(&adev->vm_manager.lock);
@@ -211,35 +233,18 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
  * amdgpu_vm_flush - hardware flush the vm
  *
  * @ring: ring to use for flush
- * @vm: vm we want to flush
- * @updates: last vm update that we waited for
+ * @vmid: vmid number to use
+ * @pd_addr: address of the page directory
  *
- * Flush the vm.
+ * Emit a VM flush when it is necessary.
  */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates)
+		     unsigned vmid,
+		     uint64_t pd_addr)
 {
-	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
-	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
-	struct fence *flushed_updates = vm_id->flushed_updates;
-	bool is_later;
-
-	if (!flushed_updates)
-		is_later = true;
-	else if (!updates)
-		is_later = false;
-	else
-		is_later = fence_is_later(updates, flushed_updates);
-
-	if (pd_addr != vm_id->pd_gpu_addr || is_later) {
-		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-		if (is_later) {
-			vm_id->flushed_updates = fence_get(updates);
-			fence_put(flushed_updates);
-		}
-		vm_id->pd_gpu_addr = pd_addr;
-		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
+	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+		trace_amdgpu_vm_flush(pd_addr, ring->idx, vmid);
+		amdgpu_ring_emit_vm_flush(ring, vmid, pd_addr);
 	}
 }
 
@@ -1284,7 +1289,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	int i, r;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		vm->ids[i].id = 0;
+		vm->ids[i].mgr_id = NULL;
 		vm->ids[i].flushed_updates = NULL;
 	}
 	vm->va = RB_ROOT;
@@ -1381,13 +1386,13 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		unsigned id = vm->ids[i].id;
+		struct amdgpu_vm_id *id = &vm->ids[i];
 
-		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
-				    (long)vm, 0);
-		fence_put(vm->ids[i].flushed_updates);
+		if (id->mgr_id)
+			atomic_long_cmpxchg(&id->mgr_id->owner,
+					    (long)id, 0);
+		fence_put(id->flushed_updates);
 	}
-
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 675f34916aab..e4e4b2ac77b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -212,7 +212,7 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_ib *ib)
 {
-	u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 extra_bits = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 4)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index bc5bdaf3d2bb..9cdf59518533 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2043,8 +2043,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -2072,8 +2071,7 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 
 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 71d536e595a2..5f67a189bce9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4619,8 +4619,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -4649,8 +4648,7 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 
 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 29ec986dd6fc..dddb8d6a81f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -244,7 +244,7 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 				   struct amdgpu_ib *ib)
 {
-	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 vmid = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 2)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 6f064d7076e6..19e02f7a06f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -355,7 +355,7 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
 				   struct amdgpu_ib *ib)
 {
-	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 vmid = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 2)