author	Chuanxiao Dong <chuanxiao.dong@intel.com>	2017-03-06 00:05:24 -0500
committer	Zhenyu Wang <zhenyuw@linux.intel.com>	2017-03-06 04:28:52 -0500
commit	8f1117abb408808af9cc4c948925c726bec4755a (patch)
tree	f2d51be82697165db1560bbb43288a60a83b8470
parent	4ec3dd89052a437304e1451733c989b8cec681af (diff)
drm/i915/gvt: handle workload lifecycle properly
Currently i915 has a request replay mechanism which makes sure a
request can be replayed after a GPU reset. With this mechanism, gvt
should wait until the GVT request's seqno has passed before completing
the current workload, so that the context switch interrupt arrives
before gvt frees the workload. In this way the workload lifecycle
matches the i915 request lifecycle: the workload can only be freed
after its request is completed.

v2: use gvt_dbg_sched instead of gvt_err to print when waiting again

Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
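To make the new flow concrete, here is a condensed sketch of the
wait-and-complete loop this patch establishes. The wrapper function
gvt_wait_workload_done() is hypothetical (invented for illustration);
the calls inside it are the ones used in the hunks below.

/* Condensed sketch only; gvt_wait_workload_done() is a made-up
 * wrapper, the calls are those used by the patch below.
 */
static void gvt_wait_workload_done(struct intel_vgpu_workload *workload)
{
retry:
	/* Block until the request signals or is replayed after a reset */
	i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

	/* After a reset the request may be replayed; its seqno only
	 * passes once the replayed request really completes, so just
	 * wait again until it does.
	 */
	if (!i915_gem_request_completed(workload->req))
		goto retry;

	/* Only now is it safe to drop the request reference and free
	 * the workload (done in complete_current_workload()).
	 */
}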
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c	49
1 file changed, 33 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e355a82ccabd..d3a56c949025 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -151,6 +151,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	case INTEL_CONTEXT_SCHEDULE_OUT:
 		intel_gvt_restore_render_mmio(workload->vgpu,
 					      workload->ring_id);
+		/* If the status is -EINPROGRESS, this workload hit no
+		 * issue during dispatching, so when SCHEDULE_OUT
+		 * arrives the status can be set to zero for good.
+		 * If the status is NOT -EINPROGRESS, something went
+		 * wrong during dispatching and the status must not
+		 * be overwritten with zero.
+		 */
+		if (workload->status == -EINPROGRESS)
+			workload->status = 0;
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	default:
@@ -362,15 +371,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	workload = scheduler->current_workload[ring_id];
 	vgpu = workload->vgpu;
 
-	if (!workload->status && !vgpu->resetting) {
+	/* For a workload with a request, wait for the context
+	 * switch to make sure the request is completed.
+	 * For a workload without a request, complete it directly.
+	 */
+	if (workload->req) {
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
-		update_guest_context(workload);
+		i915_gem_request_put(fetch_and_zero(&workload->req));
 
-		for_each_set_bit(event, workload->pending_events,
-				 INTEL_GVT_EVENT_MAX)
-			intel_vgpu_trigger_virtual_event(vgpu, event);
+		if (!workload->status && !vgpu->resetting) {
+			update_guest_context(workload);
+
+			for_each_set_bit(event, workload->pending_events,
+					 INTEL_GVT_EVENT_MAX)
+				intel_vgpu_trigger_virtual_event(vgpu, event);
+		}
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -400,7 +417,6 @@ static int workload_thread(void *priv)
 	int ring_id = p->ring_id;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload = NULL;
-	long lret;
 	int ret;
 	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -449,23 +465,24 @@ static int workload_thread(void *priv)
 
 		gvt_dbg_sched("ring id %d wait workload %p\n",
 			      workload->ring_id, workload);
-
-		lret = i915_wait_request(workload->req,
+retry:
+		i915_wait_request(workload->req,
 					 0, MAX_SCHEDULE_TIMEOUT);
-		if (lret < 0) {
-			workload->status = lret;
-			gvt_err("fail to wait workload, skip\n");
-		} else {
-			workload->status = 0;
+		/* i915 has a replay mechanism, so a request will be
+		 * replayed after a GPU reset and the seqno updated
+		 * anyway. If the seqno is not updated after waiting,
+		 * the replay may still be in progress; wait again.
+		 */
+		if (!i915_gem_request_completed(workload->req)) {
+			gvt_dbg_sched("workload %p not completed, wait again\n",
+				      workload);
+			goto retry;
 		}
 
 complete:
 		gvt_dbg_sched("will complete workload %p, status: %d\n",
 			      workload, workload->status);
 
-		if (workload->req)
-			i915_gem_request_put(fetch_and_zero(&workload->req));
-
 		complete_current_workload(gvt, ring_id);
 
 		if (need_force_wake)
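
Taken together, the hunks move the i915_gem_request_put() from
workload_thread() into complete_current_workload(), so the request
reference is dropped only after the shadow context has actually been
switched out; the workload's lifetime is thereby contained within its
request's lifetime. A workload that never obtained a request
(presumably one that failed before dispatch) skips the context-switch
wait and is completed directly.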