author	Weinan Li <weinan.z.li@intel.com>	2017-06-18 20:49:17 -0400
committer	Zhenyu Wang <zhenyuw@linux.intel.com>	2017-07-11 01:46:58 -0400
commit	4cc74389a551dc95fce72d58c11e55a93b6ecd19 (patch)
tree	398a7de58b2142958b99a6b368d0876870101253
parent	08673c3e27aa4407899e4fbb4738dac25370f706 (diff)
drm/i915/gvt: remove scheduler_mutex in per-engine workload_thread
For vGPU workloads, GVT-g now uses a per-vGPU scheduler, so each per-ring workload_thread only picks workloads belonging to the currently scheduled vGPU. With the time-slice based scheduler, all engines must become idle before a vGPU switch is performed, so each per-ring workload_thread can dispatch freely: different rings running workloads of different vGPUs cannot happen. As for workloads from a vGPU and from the host, scheduler_mutex never prevented the host from dispatching workloads to other ring engines anyway.

Remove the mutex, since it hurts performance when an application uses more than one ring engine within a single vGPU.

ring0 running in vGPU1, ring1 running in host: can happen.
ring0 running in vGPU1, ring1 running in vGPU2: cannot happen.

Signed-off-by: Weinan Li <weinan.z.li@intel.com>
Signed-off-by: Ping Gao <ping.a.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c	7
1 file changed, 0 insertions, 7 deletions
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 488fdea348a9..5aeba13a5de4 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -464,8 +464,6 @@ struct workload_thread_param {
 	int ring_id;
 };
 
-static DEFINE_MUTEX(scheduler_mutex);
-
 static int workload_thread(void *priv)
 {
 	struct workload_thread_param *p = (struct workload_thread_param *)priv;
@@ -497,8 +495,6 @@ static int workload_thread(void *priv)
 		if (!workload)
 			break;
 
-		mutex_lock(&scheduler_mutex);
-
 		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
 				workload->ring_id, workload,
 				workload->vgpu->id);
@@ -537,9 +533,6 @@ complete:
 				FORCEWAKE_ALL);
 
 		intel_runtime_pm_put(gvt->dev_priv);
-
-		mutex_unlock(&scheduler_mutex);
-
 	}
 	return 0;
 }
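
For context, a minimal userspace sketch (not the driver code) of the lock-free per-ring dispatch loop this patch leaves behind: one worker thread per ring, each dispatching only the workloads of the currently scheduled vGPU, with no global scheduler_mutex around dispatch. The struct workload fields, ring_worker() and the fixed three-workload batch are illustrative stand-ins, not GVT-g APIs.

/* sketch.c - illustrative model of per-ring, lock-free dispatch.
 * Build with: cc -pthread sketch.c -o sketch
 */
#include <pthread.h>
#include <stdio.h>

#define NUM_RINGS 2

/* Hypothetical, simplified stand-in for a GVT-g workload. */
struct workload {
	int ring_id;
	int vgpu_id;
};

/* One worker per ring, mirroring the per-engine workload_thread.
 * Because the time-slice scheduler only queues workloads of the
 * currently scheduled vGPU, dispatch needs no cross-ring mutex. */
static void *ring_worker(void *arg)
{
	int ring_id = (int)(long)arg;

	/* The real thread blocks in pick_next_workload(); here we just
	 * dispatch a fixed batch to show the lock-free loop shape. */
	for (int i = 0; i < 3; i++) {
		struct workload w = { .ring_id = ring_id, .vgpu_id = 1 };

		/* dispatch_workload(&w) in the driver; no mutex taken. */
		printf("ring %d: dispatch workload %d of vgpu %d\n",
		       w.ring_id, i, w.vgpu_id);
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[NUM_RINGS];

	for (long r = 0; r < NUM_RINGS; r++)
		pthread_create(&threads[r], NULL, ring_worker, (void *)r);
	for (int r = 0; r < NUM_RINGS; r++)
		pthread_join(threads[r], NULL);
	return 0;
}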