 drivers/gpu/drm/i915/gvt/scheduler.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 907e6bc794f6..39a83eb7aecc 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -175,6 +175,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	int ring_id = workload->ring_id;
 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
 	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	int ret;
@@ -188,6 +189,21 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
+	/* Pin the shadow context by GVT even though it will also be
+	 * pinned when i915 allocates the request. GVT updates the guest
+	 * context from the shadow context once the workload completes,
+	 * and by that time i915 may already have unpinned the shadow
+	 * context, leaving the shadow_ctx pages invalid. So GVT needs a
+	 * pin of its own; after updating the guest context, it can
+	 * unpin the shadow_ctx safely.
+	 */
+	ret = engine->context_pin(engine, shadow_ctx);
+	if (ret) {
+		gvt_vgpu_err("fail to pin shadow context\n");
+		workload->status = ret;
+		mutex_unlock(&dev_priv->drm.struct_mutex);
+		return ret;
+	}
+
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
 		gvt_vgpu_err("fail to allocate gem request\n");
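
The rationale in the new comment is easiest to see as a refcount model. Below is a standalone userspace sketch of that reasoning, with made-up names (toy_pin/toy_unpin are not the i915 API): the pin taken for the request is dropped when the request retires, so without the extra pin held by GVT the later shadow-to-guest copy would read freed pages.

#include <stdio.h>
#include <stdlib.h>

/* Toy model of a pinnable context: pages are valid only while pinned. */
struct toy_ctx {
	int pin_count;
	char *pages;
};

static void toy_pin(struct toy_ctx *c)
{
	if (c->pin_count++ == 0)
		c->pages = malloc(4096);	/* "map" on first pin */
}

static void toy_unpin(struct toy_ctx *c)
{
	if (--c->pin_count == 0) {
		free(c->pages);			/* "unmap" on last unpin */
		c->pages = NULL;
	}
}

int main(void)
{
	struct toy_ctx shadow = { 0, NULL };

	toy_pin(&shadow);	/* GVT's own pin (the hunk above) */
	toy_pin(&shadow);	/* the pin taken for the request */
	toy_unpin(&shadow);	/* request retires: that pin is dropped */

	/* the shadow->guest copy-back is still safe at this point */
	printf("pages are %s\n", shadow.pages ? "valid" : "gone");

	toy_unpin(&shadow);	/* GVT unpins after updating the guest */
	return 0;
}
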
@@ -231,6 +247,9 @@ out:
 
 	if (!IS_ERR_OR_NULL(rq))
 		i915_add_request_no_flush(rq);
+	else
+		engine->context_unpin(engine, shadow_ctx);
+
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return ret;
 }
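
The new else branch encodes an ownership rule: on any path where no request is created, dispatch must drop the pin itself, since the completion handler will never run for that workload; on success, the completion side owns the unpin. A minimal sketch of that rule under the same hypothetical names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int pin_count;

static void toy_pin(void)   { pin_count++; }
static void toy_unpin(void) { pin_count--; }

/* stand-in for i915_gem_request_alloc(); 'fail' forces the error path */
static int toy_request_alloc(bool fail)
{
	return fail ? -ENOMEM : 0;
}

static int toy_dispatch(bool fail_alloc)
{
	int ret;

	toy_pin();
	ret = toy_request_alloc(fail_alloc);
	if (ret)
		toy_unpin();	/* no request, so no completion: unpin now */
	return ret;		/* on success, completion unpins later */
}

static void toy_complete(void)
{
	/* ... guest context updated here ... */
	toy_unpin();
}

int main(void)
{
	toy_dispatch(true);			/* failure path */
	printf("after failure: pin_count=%d\n", pin_count);

	if (toy_dispatch(false) == 0)		/* success path */
		toy_complete();
	printf("after success: pin_count=%d\n", pin_count);
	return 0;	/* both paths leave pin_count balanced at 0 */
}
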
@@ -380,6 +399,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	 * For the workload w/o request, directly complete the workload.
 	 */
 	if (workload->req) {
+		struct drm_i915_private *dev_priv =
+			workload->vgpu->gvt->dev_priv;
+		struct intel_engine_cs *engine =
+			dev_priv->engine[workload->ring_id];
 		wait_event(workload->shadow_ctx_status_wq,
 			!atomic_read(&workload->shadow_ctx_active));
 
@@ -392,6 +415,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 					INTEL_GVT_EVENT_MAX)
 				intel_vgpu_trigger_virtual_event(vgpu, event);
 		}
+		mutex_lock(&dev_priv->drm.struct_mutex);
+		/* unpin shadow ctx as the shadow_ctx update is done */
+		engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
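
The ordering in this last hunk is the point: the unpin has to come after the wait_event() (the shadow context has been switched out by the hardware) and after the guest context update (which reads the pinned pages), and it is taken under struct_mutex to mirror the pin site in dispatch_workload(). A compressed sketch of that ordering, again with hypothetical names:

#include <assert.h>
#include <stdio.h>
#include <string.h>

static int pin_count = 1;	/* pin taken at dispatch time */
static int shadow_active;	/* models workload->shadow_ctx_active */
static char shadow_pages[] = "shadow state";
static char guest_pages[sizeof(shadow_pages)];

static void toy_complete(void)
{
	/* models wait_event(): only proceed once the context is idle */
	assert(shadow_active == 0);

	/* the guest update reads the pinned shadow pages, so it must
	 * happen before the unpin */
	memcpy(guest_pages, shadow_pages, sizeof(guest_pages));

	/* pages no longer needed; drop GVT's pin (under struct_mutex
	 * in the real code) */
	pin_count--;
}

int main(void)
{
	toy_complete();
	printf("guest=\"%s\" pin_count=%d\n", guest_pages, pin_count);
	return 0;
}
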