author		Changbin Du <changbin.du@intel.com>	2017-01-05 03:49:03 -0500
committer	Zhenyu Wang <zhenyuw@linux.intel.com>	2017-01-08 22:10:43 -0500
commit		440a9b9fae37dfd7e4c7d76db34fada57f9afd92 (patch)
tree		7679b1dd789545fe8ab5c33e68bc0014bc43c3fc
parent		2e51ef32b0d66fcd5fe45c437cf7c6aef8350746 (diff)
drm/i915/gvt: dec vgpu->running_workload_num after the workload is really done
The vgpu->running_workload_num is used to determine whether a vgpu has
any workload running. So we should make sure the workload is really
done before we decrement running_workload_num. The middle of
complete_current_workload is not the right place to do it, since the
function is still processing the workload at that point. This patch
moves the decrement to after the workload handling is finished.
v2: move dec op before wake_up(&scheduler->workload_complete_wq) (Min He)
Signed-off-by: Changbin Du <changbin.du@intel.com>
Reviewed-by: Min He <min.he@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
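For context, the waiter side of this handshake treats running_workload_num == 0, signalled through workload_complete_wq, as "this vgpu has no workload in flight". The minimal userspace sketch below is not gvt code: the pthread machinery and everything beyond the running_workload_num / workload_complete_wq names is illustrative. It shows why the decrement has to be the very last step of completion, just before the wake-up: if the worker decremented earlier, the waiter could observe zero and tear state down while the workload was still being processed.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int running_workload_num = 1;  /* one workload in flight */
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t workload_complete_wq = PTHREAD_COND_INITIALIZER;

/* Worker side: finish *all* processing of the workload first, then
 * decrement the counter and wake any waiter, in that order. */
static void *complete_workload(void *arg)
{
	/* ... update guest context, trigger events, run the completion
	 * callback: the workload is only now "really done" ... */

	atomic_fetch_sub(&running_workload_num, 1);

	pthread_mutex_lock(&wq_lock);
	pthread_cond_broadcast(&workload_complete_wq); /* wake_up() analogue */
	pthread_mutex_unlock(&wq_lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;
	pthread_create(&worker, NULL, complete_workload, NULL);

	/* Waiter side (e.g. a teardown/reset path): it reads
	 * running_workload_num == 0 as "no workload still in flight",
	 * so the decrement above must be the worker's last act. */
	pthread_mutex_lock(&wq_lock);
	while (atomic_load(&running_workload_num) != 0)
		pthread_cond_wait(&workload_complete_wq, &wq_lock);
	pthread_mutex_unlock(&wq_lock);

	printf("all workloads complete, safe to tear down\n");
	pthread_join(worker, NULL);
	return 0;
}
```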
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c	10
1 file changed, 5 insertions, 5 deletions
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index c694dd039f3b..e91885dffeff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload;
+	struct intel_vgpu *vgpu;
 	int event;
 
 	mutex_lock(&gvt->lock);
 
 	workload = scheduler->current_workload[ring_id];
+	vgpu = workload->vgpu;
 
-	if (!workload->status && !workload->vgpu->resetting) {
+	if (!workload->status && !vgpu->resetting) {
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 		for_each_set_bit(event, workload->pending_events,
 				 INTEL_GVT_EVENT_MAX)
-			intel_vgpu_trigger_virtual_event(workload->vgpu,
-					event);
+			intel_vgpu_trigger_virtual_event(vgpu, event);
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	scheduler->current_workload[ring_id] = NULL;
 
-	atomic_dec(&workload->vgpu->running_workload_num);
-
 	list_del_init(&workload->list);
 	workload->complete(workload);
 
+	atomic_dec(&vgpu->running_workload_num);
 	wake_up(&scheduler->workload_complete_wq);
 	mutex_unlock(&gvt->lock);
 }
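A note on the new vgpu local: since the decrement now runs after workload->complete(workload), and that callback may release or recycle the workload object, the moved atomic_dec() cannot safely dereference workload->vgpu anymore. Caching the pointer up front, while the workload is known to be valid, is what makes the late decrement safe.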