Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 75
1 file changed, 48 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1ad8c5e1455d..55bb7885e228 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+
+	wa_ctx->indirect_ctx.obj = NULL;
+	wa_ctx->indirect_ctx.shadow_va = NULL;
 }
 
 static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
@@ -356,6 +359,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 	return 0;
 }
 
+static int
+intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+	struct i915_request *rq;
+	int ret = 0;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	if (workload->req)
+		goto out;
+
+	rq = i915_request_alloc(engine, shadow_ctx);
+	if (IS_ERR(rq)) {
+		gvt_vgpu_err("fail to allocate gem request\n");
+		ret = PTR_ERR(rq);
+		goto out;
+	}
+	workload->req = i915_request_get(rq);
+out:
+	return ret;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -372,12 +402,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
 	struct intel_context *ce;
-	struct i915_request *rq;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	if (workload->req)
+	if (workload->shadow)
 		return 0;
 
 	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
@@ -417,22 +446,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 			goto err_shadow;
 	}
 
-	rq = i915_request_alloc(engine, shadow_ctx);
-	if (IS_ERR(rq)) {
-		gvt_vgpu_err("fail to allocate gem request\n");
-		ret = PTR_ERR(rq);
-		goto err_shadow;
-	}
-	workload->req = i915_request_get(rq);
-
-	ret = populate_shadow_context(workload);
-	if (ret)
-		goto err_req;
-
+	workload->shadow = true;
 	return 0;
-err_req:
-	rq = fetch_and_zero(&workload->req);
-	i915_request_put(rq);
 err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
 err_unpin:
@@ -671,23 +686,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	mutex_lock(&vgpu->vgpu_lock);
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
+	ret = intel_gvt_workload_req_alloc(workload);
+	if (ret)
+		goto err_req;
+
 	ret = intel_gvt_scan_and_shadow_workload(workload);
 	if (ret)
 		goto out;
 
-	ret = prepare_workload(workload);
+	ret = populate_shadow_context(workload);
+	if (ret) {
+		release_shadow_wa_ctx(&workload->wa_ctx);
+		goto out;
+	}
 
+	ret = prepare_workload(workload);
 out:
-	if (ret)
-		workload->status = ret;
-
 	if (!IS_ERR_OR_NULL(workload->req)) {
 		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
 			      ring_id, workload->req);
 		i915_request_add(workload->req);
 		workload->dispatched = true;
 	}
-
+err_req:
+	if (ret)
+		workload->status = ret;
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	mutex_unlock(&vgpu->vgpu_lock);
 	return ret;
@@ -891,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	list_del_init(&workload->list);
 
-	if (!workload->status) {
-		release_shadow_batch_buffer(workload);
-		release_shadow_wa_ctx(&workload->wa_ctx);
-	}
-
 	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 		/* if workload->status is not successful means HW GPU
 		 * has occurred GPU hang or something wrong with i915/GVT,
@@ -1263,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
+	release_shadow_batch_buffer(workload);
+	release_shadow_wa_ctx(&workload->wa_ctx);
+
 	if (workload->shadow_mm)
 		intel_vgpu_mm_put(workload->shadow_mm);
 
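
For readers who want the resulting control flow at a glance, below is a minimal, self-contained C sketch of the dispatch_workload() ordering this patch converges on. The struct, the stub helpers, and their names are invented stand-ins for illustration, not the i915/GVT API; only the call ordering and the out/err_req error paths are meant to mirror the hunks above (request allocation first, shadowing guarded by workload->shadow, populate_shadow_context() and prepare_workload() driven from dispatch, status recorded after submission).

/* build: cc -Wall -o dispatch_model dispatch_model.c && ./dispatch_model */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct intel_vgpu_workload; only the fields the flow needs. */
struct workload {
	bool req;		/* models workload->req being allocated  */
	bool shadow;		/* models the new workload->shadow flag  */
	bool dispatched;
	int status;
};

/* Stubs standing in for the kernel helpers; 0 means success. */
static int workload_req_alloc(struct workload *w) { w->req = true; return 0; }
static int scan_and_shadow(struct workload *w)
{
	if (w->shadow)		/* shadowing now happens at most once */
		return 0;
	w->shadow = true;
	return 0;
}
static int populate_shadow_context(struct workload *w) { (void)w; return 0; }
static int prepare_workload(struct workload *w) { (void)w; return 0; }
static void release_shadow_wa_ctx(struct workload *w) { (void)w; }
static void request_add(struct workload *w) { w->dispatched = true; }

static int dispatch_workload(struct workload *w)
{
	int ret;

	ret = workload_req_alloc(w);	/* request allocated first now        */
	if (ret)
		goto err_req;		/* no shadow state touched on failure */

	ret = scan_and_shadow(w);
	if (ret)
		goto out;

	ret = populate_shadow_context(w);	/* moved out of scan_and_shadow */
	if (ret) {
		release_shadow_wa_ctx(w);
		goto out;
	}

	ret = prepare_workload(w);
out:
	if (w->req)			/* submit whatever request exists */
		request_add(w);
err_req:
	if (ret)			/* status recorded after submission, */
		w->status = ret;	/* as in the reworked error path     */
	return ret;
}

int main(void)
{
	struct workload w = {0};
	int ret = dispatch_workload(&w);

	printf("ret=%d dispatched=%d status=%d\n", ret, w.dispatched, w.status);
	return 0;
}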