Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 7a0b1e50f293..34e35423b78e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -122,7 +122,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	bool skip_preamble, need_ctx_switch;
 	unsigned patch_offset = ~0;
 	struct amdgpu_vm *vm;
-	int vmid = 0, old_vmid = ring->vmid;
 	struct fence *hwf;
 	uint64_t ctx;
 
@@ -136,11 +135,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (job) {
 		vm = job->vm;
 		ctx = job->ctx;
-		vmid = job->vm_id;
 	} else {
 		vm = NULL;
 		ctx = 0;
-		vmid = 0;
 	}
 
 	if (!ring->ready) {
@@ -166,8 +163,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
 				    job->gds_base, job->gds_size,
 				    job->gws_base, job->gws_size,
-				    job->oa_base, job->oa_size,
-				    (ring->current_ctx == ctx) && (old_vmid != vmid));
+				    job->oa_base, job->oa_size);
 		if (r) {
 			amdgpu_ring_undo(ring);
 			return r;
@@ -184,6 +180,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	need_ctx_switch = ring->current_ctx != ctx;
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
+
 		/* drop preamble IBs if we don't have a context switch */
 		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
 			continue;
@@ -191,7 +188,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
 				    need_ctx_switch);
 		need_ctx_switch = false;
-		ring->vmid = vmid;
 	}
 
 	if (ring->funcs->emit_hdp_invalidate)
@@ -202,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		if (job && job->vm_id)
 			amdgpu_vm_reset_id(adev, job->vm_id);
-		ring->vmid = old_vmid;
 		amdgpu_ring_undo(ring);
 		return r;
 	}