author		Chuanxiao Dong <chuanxiao.dong@intel.com>	2017-06-23 01:01:11 -0400
committer	Zhenyu Wang <zhenyuw@linux.intel.com>		2017-07-11 01:47:09 -0400
commit		0cf5ec41839d82ee7f8fbb47f137b7afc562b9f1
tree		2ca74ff82683c071d561c54105c55e11e55573da
parent		4cc74389a551dc95fce72d58c11e55a93b6ecd19
drm/i915/gvt: Use fence error from GVT request for workload status
The req->fence.error will be set if this request caused a GPU hang, so
we can propagate this value to workload->status to indicate whether the
GVT request caused any problem. If it did cause a GPU hang, we should
not trigger any context switch back to the guest.
v2:
- only take -EIO from fence->error. (Zhenyu)
Fixes: 8f1117abb408 ("drm/i915/gvt: handle workload lifecycle properly")
Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
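
For readers skimming the patch, the rule it adds boils down to: once the
request completes, a workload still marked -EINPROGRESS takes -EIO from
req->fence.error (GPU hang) and is otherwise cleared to 0. Below is a
minimal standalone C sketch of that rule; the struct definitions are
simplified stand-ins for the real i915/GVT types, not the kernel code
itself.

/* Standalone sketch (not kernel code): models the status propagation
 * this patch adds to complete_current_workload().  The structs are
 * simplified stand-ins; only the fields the rule touches are modeled.
 */
#include <errno.h>
#include <stdio.h>

struct fence_stub    { int error; };               /* stands in for dma_fence */
struct request_stub  { struct fence_stub fence; }; /* ...for the GEM request  */
struct workload_stub {
	struct request_stub *req;
	int status;            /* -EINPROGRESS until the request completes */
};

static void propagate_status(struct workload_stub *workload)
{
	/* Only touch the status if dispatching reported no error so far. */
	if (workload->status == -EINPROGRESS) {
		/* A GPU hang marks the request's fence with -EIO; keep it so
		 * no context-switch interrupt is injected into the guest. */
		if (workload->req->fence.error == -EIO)
			workload->status = -EIO;
		else
			workload->status = 0;
	}
}

int main(void)
{
	struct request_stub req = { .fence = { .error = -EIO } };
	struct workload_stub wl = { .req = &req, .status = -EINPROGRESS };

	propagate_status(&wl);
	printf("workload status after completion: %d\n", wl.status); /* -EIO */
	return 0;
}

As the diff below shows, the old unconditional clearing of the status in
the SCHEDULE_OUT notifier is removed and replaced by this check in
complete_current_workload().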
 drivers/gpu/drm/i915/gvt/scheduler.c | 21
 1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 5aeba13a5de4..4f7057d62d88 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -174,15 +174,6 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
-		/* If the status is -EINPROGRESS means this workload
-		 * doesn't meet any issue during dispatching so when
-		 * get the SCHEDULE_OUT set the status to be zero for
-		 * good. If the status is NOT -EINPROGRESS means there
-		 * is something wrong happened during dispatching and
-		 * the status should not be set to zero
-		 */
-		if (workload->status == -EINPROGRESS)
-			workload->status = 0;
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	default:
@@ -427,6 +418,18 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	wait_event(workload->shadow_ctx_status_wq,
 		   !atomic_read(&workload->shadow_ctx_active));
 
+	/* If this request caused GPU hang, req->fence.error will
+	 * be set to -EIO. Use -EIO to set workload status so
+	 * that when this request caused GPU hang, didn't trigger
+	 * context switch interrupt to guest.
+	 */
+	if (likely(workload->status == -EINPROGRESS)) {
+		if (workload->req->fence.error == -EIO)
+			workload->status = -EIO;
+		else
+			workload->status = 0;
+	}
+
 	i915_gem_request_put(fetch_and_zero(&workload->req));
 
 	if (!workload->status && !vgpu->resetting) {