Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r--   drivers/gpu/drm/i915/gvt/scheduler.c   52
1 file changed, 36 insertions, 16 deletions
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d6b6d0efdd1a..d3a56c949025 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -139,6 +139,9 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	struct intel_vgpu_workload *workload =
 		scheduler->current_workload[req->engine->id];
 
+	if (unlikely(!workload))
+		return NOTIFY_OK;
+
 	switch (action) {
 	case INTEL_CONTEXT_SCHEDULE_IN:
 		intel_gvt_load_render_mmio(workload->vgpu,
@@ -148,6 +151,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	case INTEL_CONTEXT_SCHEDULE_OUT:
 		intel_gvt_restore_render_mmio(workload->vgpu,
 					      workload->ring_id);
+		/* A status of -EINPROGRESS means this workload hit no
+		 * issue during dispatch, so when SCHEDULE_OUT arrives
+		 * the status can be set to zero for good. A status
+		 * other than -EINPROGRESS means something went wrong
+		 * during dispatch and the status must not be cleared
+		 * to zero.
+		 */
+		if (workload->status == -EINPROGRESS)
+			workload->status = 0;
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	default:
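
The two hunks above add an early-out when no workload is current on the engine and normalize the workload status on SCHEDULE_OUT. The following standalone userspace sketch (not kernel code; NOTIFY_OK is redefined locally and the mock_* names are made up for illustration) shows the same status-handling logic in isolation:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define NOTIFY_OK	1	/* local stand-in for the kernel's NOTIFY_OK */

struct mock_workload {
	int status;
};

/* Called on a simulated SCHEDULE_OUT event. */
static int mock_schedule_out(struct mock_workload *workload)
{
	if (!workload)		/* no current workload on this engine: ignore */
		return NOTIFY_OK;

	/* -EINPROGRESS means dispatch hit no error, so the workload can
	 * now be marked as completed successfully; any error status set
	 * during dispatch is preserved instead. */
	if (workload->status == -EINPROGRESS)
		workload->status = 0;

	return NOTIFY_OK;
}

int main(void)
{
	struct mock_workload good = { .status = -EINPROGRESS };
	struct mock_workload bad = { .status = -EINVAL };

	mock_schedule_out(&good);
	mock_schedule_out(&bad);
	mock_schedule_out(NULL);

	printf("good: %d, bad: %d\n", good.status, bad.status);	/* good: 0, bad stays -EINVAL */
	return 0;
}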
@@ -359,15 +371,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	workload = scheduler->current_workload[ring_id];
 	vgpu = workload->vgpu;
 
-	if (!workload->status && !vgpu->resetting) {
+	/* For a workload with a request, wait for the context switch
+	 * to make sure the request has completed. For a workload
+	 * without a request, complete the workload directly.
+	 */
+	if (workload->req) {
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
-		update_guest_context(workload);
+		i915_gem_request_put(fetch_and_zero(&workload->req));
+
+		if (!workload->status && !vgpu->resetting) {
+			update_guest_context(workload);
 
-		for_each_set_bit(event, workload->pending_events,
-				 INTEL_GVT_EVENT_MAX)
-			intel_vgpu_trigger_virtual_event(vgpu, event);
+			for_each_set_bit(event, workload->pending_events,
+					 INTEL_GVT_EVENT_MAX)
+				intel_vgpu_trigger_virtual_event(vgpu, event);
+		}
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
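
The complete_current_workload() hunk reorders the completion path: wait for the shadow context only when a request exists, always drop the request reference, and only copy results back to the guest when the workload succeeded and the vGPU is not resetting. A simplified userspace sketch of that ordering (the mock_* names are hypothetical, not the driver's API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_workload {
	void *req;		/* non-NULL if a request was submitted */
	int status;
	bool vgpu_resetting;
};

static void mock_complete(struct mock_workload *w)
{
	if (w->req) {
		/* 1. Wait here until the shadow context has been switched
		 *    out (stands in for the wait_event() in the patch). */

		/* 2. Drop the request reference unconditionally so it is
		 *    released even when the workload failed. */
		w->req = NULL;

		/* 3. Copy results back to the guest and raise events only
		 *    when the workload succeeded and the vGPU is stable. */
		if (!w->status && !w->vgpu_resetting)
			printf("update guest context, trigger events\n");
	}

	/* With or without a request, the workload is finally completed. */
	printf("complete workload, status %d\n", w->status);
}

int main(void)
{
	struct mock_workload good = { .req = &good, .status = 0 };
	struct mock_workload failed = { .req = &failed, .status = -EIO };

	mock_complete(&good);	/* updates guest context, then completes */
	mock_complete(&failed);	/* only drops the request, then completes */
	return 0;
}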
@@ -397,7 +417,6 @@ static int workload_thread(void *priv)
 	int ring_id = p->ring_id;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload = NULL;
-	long lret;
 	int ret;
 	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -446,23 +465,24 @@ static int workload_thread(void *priv)
 
 	gvt_dbg_sched("ring id %d wait workload %p\n",
 			workload->ring_id, workload);
-
-	lret = i915_wait_request(workload->req,
+retry:
+	i915_wait_request(workload->req,
 					0, MAX_SCHEDULE_TIMEOUT);
-	if (lret < 0) {
-		workload->status = lret;
-		gvt_err("fail to wait workload, skip\n");
-	} else {
-		workload->status = 0;
+	/* i915 has a replay mechanism and a request will be replayed
+	 * if there is an i915 reset, so the seqno will be updated
+	 * anyway. If the seqno has not been updated after waiting,
+	 * the replay may still be in progress, so wait again.
+	 */
+	if (!i915_gem_request_completed(workload->req)) {
+		gvt_dbg_sched("workload %p not completed, wait again\n",
+				workload);
+		goto retry;
 	}
 
 complete:
 	gvt_dbg_sched("will complete workload %p, status: %d\n",
 			workload, workload->status);
 
-	if (workload->req)
-		i915_gem_request_put(fetch_and_zero(&workload->req));
-
 	complete_current_workload(gvt, ring_id);
 
 	if (need_force_wake)
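
The last hunk replaces error handling on the wait with a wait-and-recheck loop, relying on i915 replaying the request after a reset. A minimal userspace sketch of that loop (mock_wait_request() and mock_request_completed() are made-up stand-ins, not i915 functions):

#include <stdbool.h>
#include <stdio.h>

static int pending = 2;		/* pretend the request gets replayed once */

/* Stand-in for i915_wait_request(): each wait makes some progress. */
static void mock_wait_request(void)
{
	if (pending > 0)
		pending--;
}

/* Stand-in for i915_gem_request_completed(). */
static bool mock_request_completed(void)
{
	return pending == 0;
}

int main(void)
{
retry:
	mock_wait_request();

	/* A reset may cause the request to be replayed, in which case the
	 * wait can return before the seqno has advanced. Re-check and
	 * wait again until the request has really completed. */
	if (!mock_request_completed()) {
		printf("not completed yet, waiting again\n");
		goto retry;
	}

	printf("request completed\n");
	return 0;
}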