author     Zhipeng Gong <zhipeng.gong@intel.com>    2018-04-03 20:43:53 -0400
committer  Zhi Wang <zhi.a.wang@intel.com>          2018-04-23 01:09:32 -0400
commit     89babe7cf18e4f93c6ba1e6abfe2e5aa5e4fc66c
tree       347a03cf59e6bb8ae4cc522876b8160288f9b807
parent     292bb0d38a5714440b59ef910404408d5e9a8017
drm/i915/gvt: Update time slice more frequently
When there is only one vGPU in GVT-g and it submits workloads
continuously, it is never scheduled out, so vgpu_update_timeslice
is not called and its sched_in_time is not updated for a long
time, which can be several seconds or longer.

Once GVT-g pauses workload submission for this vGPU due to heavy
host CPU load, the vGPU gets scheduled out and
vgpu_update_timeslice is called; its left_ts is then decremented
by a large value (sched_out_time - sched_in_time).

When GVT-g is ready to submit workloads for this vGPU again, it
will not be scheduled in until gvt_balance_timeslice reaches
stage 0 and resets its left_ts, which introduces several hundred
milliseconds of latency.

This patch updates the time slice every millisecond so that
sched_in_time stays up to date.
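
The effect of the change is easiest to see with a small user-space
sketch. This is not the driver code: the struct, the 50 ms budget, the
5 s run and the 1 ms tick are illustrative assumptions, but charge()
follows the same arithmetic as the reworked vgpu_update_timeslice()
in the diff below.

#include <stdio.h>

#define NSEC_PER_MSEC 1000000LL

struct sched_acct {
        long long sched_in_time;   /* when this vGPU was last charged */
        long long sched_time;      /* total time it has run */
        long long left_ts;         /* remaining budget in the balance period */
};

/* Mirrors the reworked vgpu_update_timeslice(): charge everything that
 * elapsed since sched_in_time and restart the measurement window. */
static long long charge(struct sched_acct *d, long long cur_time)
{
        long long delta_ts = cur_time - d->sched_in_time;

        d->sched_time += delta_ts;
        d->left_ts -= delta_ts;
        d->sched_in_time = cur_time;
        return delta_ts;
}

int main(void)
{
        struct sched_acct before = { .left_ts = 50 * NSEC_PER_MSEC };
        struct sched_acct after  = { .left_ts = 50 * NSEC_PER_MSEC };
        long long run = 5000 * NSEC_PER_MSEC;
        long long worst_before, worst_after = 0, t, d;

        /* Old flow: nothing is charged until the vGPU is finally scheduled
         * out after 5 s, so one single ~5 s delta hits left_ts at once. */
        worst_before = charge(&before, run);

        /* New flow: the running vGPU is charged on every ~1 ms tick, so each
         * delta stays tiny and sched_in_time never goes stale; the 100 ms
         * balancer sees the debt build up gradually instead of all at once. */
        for (t = NSEC_PER_MSEC; t <= run; t += NSEC_PER_MSEC) {
                d = charge(&after, t);
                if (d > worst_after)
                        worst_after = d;
        }

        printf("old flow: worst single charge = %lld ms\n",
               worst_before / NSEC_PER_MSEC);
        printf("new flow: worst single charge = %lld ms\n",
               worst_after / NSEC_PER_MSEC);
        return 0;
}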
v2: revise commit message
v3: use more concise expr. (Zhenyu)
Signed-off-by: Zhipeng Gong <zhipeng.gong@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Min He <min.he@intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
 drivers/gpu/drm/i915/gvt/sched_policy.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 8876a57f407c..d053cbe1dc94 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -53,7 +53,6 @@ struct vgpu_sched_data {
 	bool active;
 
 	ktime_t sched_in_time;
-	ktime_t sched_out_time;
 	ktime_t sched_time;
 	ktime_t left_ts;
 	ktime_t allocated_ts;
@@ -69,15 +68,19 @@ struct gvt_sched_data {
 	ktime_t expire_time;
 };
 
-static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
+static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
 {
 	ktime_t delta_ts;
-	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
+	struct vgpu_sched_data *vgpu_data;
 
-	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+	if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
+		return;
 
-	vgpu_data->sched_time += delta_ts;
-	vgpu_data->left_ts -= delta_ts;
+	vgpu_data = vgpu->sched_data;
+	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
+	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
+	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
+	vgpu_data->sched_in_time = cur_time;
 }
 
 #define GVT_TS_BALANCE_PERIOD_MS 100
@@ -151,11 +154,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	}
 
 	cur_time = ktime_get();
-	if (scheduler->current_vgpu) {
-		vgpu_data = scheduler->current_vgpu->sched_data;
-		vgpu_data->sched_out_time = cur_time;
-		vgpu_update_timeslice(scheduler->current_vgpu);
-	}
+	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
 	vgpu_data = scheduler->next_vgpu->sched_data;
 	vgpu_data->sched_in_time = cur_time;
 
@@ -227,13 +226,13 @@ out:
 void intel_gvt_schedule(struct intel_gvt *gvt)
 {
 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+	ktime_t cur_time;
 
 	mutex_lock(&gvt->lock);
+	cur_time = ktime_get();
 
 	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
 				(void *)&gvt->service_request)) {
-		ktime_t cur_time = ktime_get();
-
 		if (cur_time >= sched_data->expire_time) {
 			gvt_balance_timeslice(sched_data);
 			sched_data->expire_time = ktime_add_ms(
@@ -242,6 +241,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 	}
 	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
 
+	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
 	tbs_sched_func(sched_data);
 
 	mutex_unlock(&gvt->lock);
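
For reference, the reworked intel_gvt_schedule() can be summarized as
below. This is a condensed view assembled from the hunks above, not a
verbatim copy; the balance logic that is not visible in the hunks is
only described in a comment.

void intel_gvt_schedule(struct intel_gvt *gvt)
{
        struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
        ktime_t cur_time;

        mutex_lock(&gvt->lock);
        cur_time = ktime_get();   /* sampled once per tick, under gvt->lock */

        if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
                        (void *)&gvt->service_request)) {
                /* every GVT_TS_BALANCE_PERIOD_MS (100 ms): run
                 * gvt_balance_timeslice() and push expire_time forward */
        }
        clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

        /* charge the vGPU that has been running since the last tick, so
         * sched_in_time and left_ts stay fresh even if it never switches out */
        vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);

        /* only then let the policy decide whether to switch vGPUs */
        tbs_sched_func(sched_data);

        mutex_unlock(&gvt->lock);
}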