author		Ping Gao <ping.a.gao@intel.com>		2017-03-29 12:36:34 -0400
committer	Zhenyu Wang <zhenyuw@linux.intel.com>	2017-03-30 01:32:24 -0400
commit		91d0101ad30bd1bd7f7f805f4fa314c6b70bb602
tree		1309d69ef2f1262f7d9c55eafe2db960a424e446
parent		865f03d42ed0c90c9faf3301775176834ba13eba
drm/i915/gvt: use hrtimer replace delayed_work in scheduler
Currently the scheduler is triggered by delayed_work, which does not
provide precision at the microsecond level. Move to an hrtimer instead
for more accurate control.
Signed-off-by: Ping Gao <ping.a.gao@intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
-rw-r--r--	drivers/gpu/drm/i915/gvt/gvt.c		 5
-rw-r--r--	drivers/gpu/drm/i915/gvt/gvt.h		 1
-rw-r--r--	drivers/gpu/drm/i915/gvt/sched_policy.c	49
-rw-r--r--	drivers/gpu/drm/i915/gvt/sched_policy.h	 2
4 files changed, 41 insertions(+), 16 deletions(-)
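
Before the diff itself, a minimal sketch of the pattern the patch adopts: a self-rearming hrtimer whose callback only signals that work is pending and defers the real job to process context. In the patch the deferral goes through GVT's service thread via intel_gvt_request_service(); the sketch below uses a plain workqueue instead, and every name prefixed with demo_ is made up for illustration, not taken from the commit. The indirection is needed because an hrtimer callback runs in hard-interrupt context, where taking gvt->lock (a mutex) would not be allowed; re-arming with hrtimer_add_expires_ns() against the previous expiry also keeps the period from drifting when handling of a tick is delayed.

/*
 * Illustrative sketch only, not part of the commit.  All demo_* names
 * are hypothetical; the GVT code defers to its service thread instead
 * of a workqueue.
 */
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct hrtimer demo_timer;
static struct work_struct demo_work;
static const u64 demo_period_ns = 1000000;	/* 1 ms, like GVT_DEFAULT_TIME_SLICE */

static void demo_work_fn(struct work_struct *work)
{
	/* Process context: safe to take mutexes or sleep here. */
	pr_info("periodic tick handled\n");
}

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	/* Hard-IRQ context: only signal the work, never block here. */
	schedule_work(&demo_work);

	/* Advance from the previous expiry so the period does not drift. */
	hrtimer_add_expires_ns(timer, demo_period_ns);
	return HRTIMER_RESTART;
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_work_fn);
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ktime_add_ns(ktime_get(), demo_period_ns),
		      HRTIMER_MODE_ABS);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_timer);
	cancel_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");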
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index b84b7ca3f66f..894735c77f63 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -144,6 +144,11 @@ static int gvt_service_thread(void *data)
 			intel_gvt_emulate_vblank(gvt);
 			mutex_unlock(&gvt->lock);
 		}
+
+		if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
+				(void *)&gvt->service_request)) {
+			intel_gvt_schedule(gvt);
+		}
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 6dfc48b63b71..7455214b242c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -249,6 +249,7 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
 
 enum {
 	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
+	INTEL_GVT_REQUEST_SCHED = 1,
 };
 
 static inline void intel_gvt_request_service(struct intel_gvt *gvt,
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 34b9acdf3479..c8ade8fc511d 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -96,17 +96,16 @@ struct tbs_vgpu_data {
 
 struct tbs_sched_data {
 	struct intel_gvt *gvt;
-	struct delayed_work work;
+	struct hrtimer timer;
 	unsigned long period;
 	struct list_head runq_head;
 };
 
-#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
+/* in nanosecond */
+#define GVT_DEFAULT_TIME_SLICE 1000000
 
-static void tbs_sched_func(struct work_struct *work)
+static void tbs_sched_func(struct tbs_sched_data *sched_data)
 {
-	struct tbs_sched_data *sched_data = container_of(work,
-			struct tbs_sched_data, work.work);
 	struct tbs_vgpu_data *vgpu_data;
 
 	struct intel_gvt *gvt = sched_data->gvt;
@@ -115,8 +114,6 @@ static void tbs_sched_func(struct work_struct *work)
 	struct intel_vgpu *vgpu = NULL;
 	struct list_head *pos, *head;
 
-	mutex_lock(&gvt->lock);
-
 	/* no vgpu or has already had a target */
 	if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
 		goto out;
@@ -151,17 +148,30 @@ out:
 				scheduler->next_vgpu->id);
 		try_to_schedule_next_vgpu(gvt);
 	}
+}
 
-	/*
-	 * still have vgpu on runq
-	 * or last schedule haven't finished due to running workload
-	 */
-	if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
-		schedule_delayed_work(&sched_data->work, sched_data->period);
+void intel_gvt_schedule(struct intel_gvt *gvt)
+{
+	struct tbs_sched_data *sched_data = gvt->scheduler.sched_data;
 
+	mutex_lock(&gvt->lock);
+	tbs_sched_func(sched_data);
 	mutex_unlock(&gvt->lock);
 }
 
+static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
+{
+	struct tbs_sched_data *data;
+
+	data = container_of(timer_data, struct tbs_sched_data, timer);
+
+	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
+
+	hrtimer_add_expires_ns(&data->timer, data->period);
+
+	return HRTIMER_RESTART;
+}
+
 static int tbs_sched_init(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
@@ -174,11 +184,13 @@ static int tbs_sched_init(struct intel_gvt *gvt)
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&data->runq_head);
-	INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	data->timer.function = tbs_timer_fn;
 	data->period = GVT_DEFAULT_TIME_SLICE;
 	data->gvt = gvt;
 
 	scheduler->sched_data = data;
+
 	return 0;
 }
 
@@ -188,7 +200,8 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
 		&gvt->scheduler;
 	struct tbs_sched_data *data = scheduler->sched_data;
 
-	cancel_delayed_work(&data->work);
+	hrtimer_cancel(&data->timer);
+
 	kfree(data);
 	scheduler->sched_data = NULL;
 }
@@ -205,6 +218,7 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 	INIT_LIST_HEAD(&data->list);
 
 	vgpu->sched_data = data;
+
 	return 0;
 }
 
@@ -223,7 +237,10 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 		return;
 
 	list_add_tail(&vgpu_data->list, &sched_data->runq_head);
-	schedule_delayed_work(&sched_data->work, 0);
+
+	if (!hrtimer_active(&sched_data->timer))
+		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
+			sched_data->period), HRTIMER_MODE_ABS);
 }
 
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index bb8b9097e41a..ba00a5f7455f 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -43,6 +43,8 @@ struct intel_gvt_sched_policy_ops {
 	void (*stop_schedule)(struct intel_vgpu *vgpu);
 };
 
+void intel_gvt_schedule(struct intel_gvt *gvt);
+
 int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
 
 void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);