Diffstat (limited to 'litmus/reservations/core.c')
 -rw-r--r--  litmus/reservations/core.c  392
 1 file changed, 392 insertions(+), 0 deletions(-)
diff --git a/litmus/reservations/core.c b/litmus/reservations/core.c
new file mode 100644
index 000000000000..cf55ca8ffd3c
--- /dev/null
+++ b/litmus/reservations/core.c
@@ -0,0 +1,392 @@
#include <linux/sched.h>

#include <litmus/litmus.h>
#include <litmus/reservations/reservation.h>

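/* Initialize a reservation to a known, inactive state: empty client and
 * replenishment lists and no registered budget notifiers. */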
void reservation_init(struct reservation *res)
{
	memset(res, 0, sizeof(*res));
	res->state = RESERVATION_INACTIVE;
	INIT_LIST_HEAD(&res->clients);
	INIT_LIST_HEAD(&res->replenish_list);
	budget_notifier_list_init(&res->budget_notifiers);
}

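/* Default client-selection policy: return the task of the first client
 * whose dispatch callback yields one, and rotate that client to the back
 * of the list for coarse round-robin alternation. */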
struct task_struct* default_dispatch_client(
	struct reservation *res,
	lt_t *for_at_most)
{
	struct reservation_client *client, *next;
	struct task_struct* tsk;

	BUG_ON(res->state != RESERVATION_ACTIVE);
	*for_at_most = 0;

	list_for_each_entry_safe(client, next, &res->clients, list) {
		tsk = client->dispatch(client);
		if (likely(tsk)) {
			/* Primitive form of round-robin scheduling:
			 * make sure we alternate between multiple clients
			 * with at least the granularity of the replenishment
			 * period. Reservations that need more fine-grained
			 * or more predictable alternation between threads
			 * within a reservation should provide a custom
			 * dispatch function. */
			list_del(&client->list);
			/* move to back of list */
			list_add_tail(&client->list, &res->clients);
			return tsk;
		}
	}
	return NULL;
}

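/* Generic budget accounting: consume up to 'how_much' of the current
 * budget and switch the reservation to RESERVATION_DEPLETED once the
 * budget reaches zero. Must not be called on inactive or already
 * depleted reservations. */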
void common_drain_budget(
	struct reservation *res,
	lt_t how_much)
{
	if (how_much >= res->cur_budget)
		res->cur_budget = 0;
	else
		res->cur_budget -= how_much;

	res->budget_consumed += how_much;
	res->budget_consumed_total += how_much;

	switch (res->state) {
		case RESERVATION_DEPLETED:
		case RESERVATION_INACTIVE:
			BUG();
			break;

		case RESERVATION_ACTIVE_IDLE:
		case RESERVATION_ACTIVE:
			if (!res->cur_budget) {
				res->env->change_state(res->env, res,
					RESERVATION_DEPLETED);
			} /* else: stay in current state */
			break;
	}
}

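/* A task_client binds a single task to a reservation; dispatching the
 * client simply returns the wrapped task. */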
static struct task_struct * task_client_dispatch(struct reservation_client *client)
{
	struct task_client *tc = container_of(client, struct task_client, client);
	return tc->task;
}

void task_client_init(struct task_client *tc, struct task_struct *tsk,
	struct reservation *res)
{
	memset(&tc->client, 0, sizeof(tc->client));
	tc->client.dispatch = task_client_dispatch;
	tc->client.reservation = res;
	tc->task = tsk;
}

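/* Helpers to pull the next scheduler invocation forward: record the
 * earliest absolute time (or relative timeout) at which this
 * uniprocessor environment needs to be re-examined. */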
static void sup_scheduler_update_at(
	struct sup_reservation_environment* sup_env,
	lt_t when)
{
	if (sup_env->next_scheduler_update > when)
		sup_env->next_scheduler_update = when;
}

static void sup_scheduler_update_after(
	struct sup_reservation_environment* sup_env,
	lt_t timeout)
{
	sup_scheduler_update_at(sup_env, sup_env->env.current_time + timeout);
}

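/* Insert a depleted reservation into the replenishment queue, which is
 * kept sorted by increasing next_replenishment time. Returns non-zero
 * if an earlier replenishment was already queued ahead of it. */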
static int _sup_queue_depleted(
	struct sup_reservation_environment* sup_env,
	struct reservation *res)
{
	struct list_head *pos;
	struct reservation *queued;
	int passed_earlier = 0;

	BUG_ON(in_list(&res->replenish_list));

	list_for_each(pos, &sup_env->depleted_reservations) {
		queued = list_entry(pos, struct reservation, replenish_list);
		if (queued->next_replenishment > res->next_replenishment) {
			list_add(&res->replenish_list, pos->prev);
			return passed_earlier;
		} else
			passed_earlier = 1;
	}

	list_add_tail(&res->replenish_list, &sup_env->depleted_reservations);

	return passed_earlier;
}

static void sup_queue_depleted(
	struct sup_reservation_environment* sup_env,
	struct reservation *res)
{
	int passed_earlier = _sup_queue_depleted(sup_env, res);

	/* check for updated replenishment time */
	if (!passed_earlier)
		sup_scheduler_update_at(sup_env, res->next_replenishment);
}

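/* Insert a reservation into the active queue in priority order;
 * background-priority reservations always go to the tail. Returns
 * non-zero if the reservation was queued behind an already ACTIVE
 * reservation, i.e., if it does not cause a preemption. */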
static int _sup_queue_active(
	struct sup_reservation_environment* sup_env,
	struct reservation *res)
{
	struct list_head *pos;
	struct reservation *queued;
	int passed_active = 0;

	if (likely(res->priority != RESERVATION_BACKGROUND_PRIORITY)) {
		/* enqueue in order of priority */
		list_for_each(pos, &sup_env->active_reservations) {
			queued = list_entry(pos, struct reservation, list);
			if (queued->priority > res->priority) {
				list_add(&res->list, pos->prev);
				return passed_active;
			} else if (queued->state == RESERVATION_ACTIVE)
				passed_active = 1;
		}
	} else {
		/* don't preempt unless the list happens to be empty */
		passed_active = !list_empty(&sup_env->active_reservations);
	}
	/* Either a background reservation, or we fell off the end of the list.
	 * In both cases, just add the reservation to the end of the list of
	 * active reservations. */
	list_add_tail(&res->list, &sup_env->active_reservations);
	return passed_active;
}

static void sup_queue_active(
	struct sup_reservation_environment* sup_env,
	struct reservation *res)
{
	int passed_active = _sup_queue_active(sup_env, res);

	/* check for possible preemption */
	if (res->state == RESERVATION_ACTIVE && !passed_active)
		sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
	else if (res == list_first_entry(&sup_env->active_reservations,
			struct reservation, list)) {
		/* First reservation is draining budget => make sure
		 * the scheduler is called to notice when the reservation
		 * budget has been drained completely. */
		sup_scheduler_update_after(sup_env, res->cur_budget);
	}
}

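/* Route a reservation to the queue matching its current state:
 * inactive, depleted (awaiting replenishment), or active. */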
static void sup_queue_reservation(
	struct sup_reservation_environment* sup_env,
	struct reservation *res)
{
	switch (res->state) {
		case RESERVATION_INACTIVE:
			list_add(&res->list, &sup_env->inactive_reservations);
			break;

		case RESERVATION_DEPLETED:
			sup_queue_depleted(sup_env, res);
			break;

		case RESERVATION_ACTIVE_IDLE:
		case RESERVATION_ACTIVE:
			sup_queue_active(sup_env, res);
			break;
	}
}

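/* Register a newly created reservation with the environment and queue
 * it according to its initial state. */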
void sup_add_new_reservation(
	struct sup_reservation_environment* sup_env,
	struct reservation* new_res)
{
	new_res->env = &sup_env->env;
	list_add(&new_res->all_list, &sup_env->all_reservations);
	sup_queue_reservation(sup_env, new_res);
}

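/* Look up a reservation by its ID; returns NULL if no reservation with
 * that ID exists in this environment. */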
struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
	unsigned int id)
{
	struct reservation *res;

	list_for_each_entry(res, &sup_env->all_reservations, all_list) {
		if (res->id == id)
			return res;
	}

	return NULL;
}

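/* Account for 'delta' nanoseconds of elapsed execution: drain the budget
 * of the highest-priority queued reservation and request a scheduler
 * update for when the (possibly new) queue head runs out of budget. */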
static void sup_charge_budget(
	struct sup_reservation_environment* sup_env,
	lt_t delta)
{
	struct reservation *res;

	/* charge the highest-priority ACTIVE or ACTIVE_IDLE reservation */

	res = list_first_entry_or_null(
		&sup_env->active_reservations, struct reservation, list);

	if (res) {
		TRACE("R%d: charging at %llu for %llu execution, budget before: %llu\n",
			res->id, res->env->current_time, delta, res->cur_budget);
		res->ops->drain_budget(res, delta);
		TRACE("R%d: budget now: %llu, priority: %llu\n",
			res->id, res->cur_budget, res->priority);
	}

	/* check when the next budget expires */

	res = list_first_entry_or_null(
		&sup_env->active_reservations, struct reservation, list);

	if (res) {
		/* make sure the scheduler is invoked when this reservation
		 * exhausts its remaining budget */
		TRACE("requesting scheduler update for reservation %u "
			"in %llu nanoseconds\n",
			res->id, res->cur_budget);
		sup_scheduler_update_after(sup_env, res->cur_budget);
	}
}

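/* Replenish all depleted reservations whose replenishment time has
 * arrived, and request a scheduler update at the next pending
 * replenishment instant, if any. */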
static void sup_replenish_budgets(struct sup_reservation_environment* sup_env)
{
	struct list_head *pos, *next;
	struct reservation *res;

	list_for_each_safe(pos, next, &sup_env->depleted_reservations) {
		res = list_entry(pos, struct reservation, replenish_list);
		if (res->next_replenishment <= sup_env->env.current_time) {
			TRACE("R%d: replenishing budget at %llu, "
				"priority: %llu\n",
				res->id, res->env->current_time, res->priority);
			res->ops->replenish(res);
		} else {
			/* list is ordered by increasing replenishment times */
			break;
		}
	}

	/* request a scheduler update at the next replenishment instant */
	res = list_first_entry_or_null(&sup_env->depleted_reservations,
		struct reservation, replenish_list);
	if (res)
		sup_scheduler_update_at(sup_env, res->next_replenishment);
}

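/* Advance the environment's notion of time to 'now': charge the elapsed
 * interval to the currently running reservation and carry out any
 * replenishments that have become due. */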
void sup_update_time(
	struct sup_reservation_environment* sup_env,
	lt_t now)
{
	lt_t delta;

	/* If the time didn't advance, there is nothing to do.
	 * This check makes it safe to call sup_update_time() potentially
	 * multiple times (e.g., via different code paths). */
	if (!list_empty(&sup_env->active_reservations))
		TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now,
			sup_env->env.current_time);
	if (unlikely(now <= sup_env->env.current_time))
		return;

	delta = now - sup_env->env.current_time;
	sup_env->env.current_time = now;

	/* check if future updates are required */
	if (sup_env->next_scheduler_update <= sup_env->env.current_time)
		sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;

	/* deplete budgets by passage of time */
	sup_charge_budget(sup_env, delta);

	/* check if any budgets need to be replenished */
	sup_replenish_budgets(sup_env);
}

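/* Select the next task to run: ask each ACTIVE reservation, in priority
 * order, for a client to dispatch, and make sure the scheduler runs
 * again when either the granted time slice or the reservation's
 * remaining budget expires. */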
struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env)
{
	struct reservation *res, *next;
	struct task_struct *tsk = NULL;
	lt_t time_slice;

	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
		if (res->state == RESERVATION_ACTIVE) {
			tsk = res->ops->dispatch_client(res, &time_slice);
			if (likely(tsk)) {
				if (time_slice)
					sup_scheduler_update_after(sup_env, time_slice);
				sup_scheduler_update_after(sup_env, res->cur_budget);
				return tsk;
			}
		}
	}

	return NULL;
}

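/* State-change callback of the uniprocessor environment: fire budget
 * notifiers on depletion and replenishment transitions, move the
 * reservation to the queue matching its new state, and trigger a
 * reschedule if an active reservation was removed. */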
static void sup_res_change_state(
	struct reservation_environment* env,
	struct reservation *res,
	reservation_state_t new_state)
{
	struct sup_reservation_environment* sup_env;

	sup_env = container_of(env, struct sup_reservation_environment, env);

	TRACE("reservation R%d state %d->%d at %llu\n",
		res->id, res->state, new_state, env->current_time);

	if (new_state == RESERVATION_DEPLETED
	    && (res->state == RESERVATION_ACTIVE ||
	        res->state == RESERVATION_ACTIVE_IDLE)) {
		budget_notifiers_fire(&res->budget_notifiers, false);
	} else if (res->state == RESERVATION_DEPLETED
	           && new_state == RESERVATION_ACTIVE) {
		budget_notifiers_fire(&res->budget_notifiers, true);
	}

	/* dequeue prior to re-queuing */
	if (res->state == RESERVATION_DEPLETED)
		list_del(&res->replenish_list);
	else
		list_del(&res->list);

	/* check if we need to reschedule because we lost an active reservation */
	if (res->state == RESERVATION_ACTIVE && !sup_env->will_schedule)
		sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
	res->state = new_state;
	sup_queue_reservation(sup_env, res);
}

static void sup_request_replenishment(
	struct reservation_environment* env,
	struct reservation *res)
{
	struct sup_reservation_environment* sup_env;

	sup_env = container_of(env, struct sup_reservation_environment, env);
	sup_queue_depleted(sup_env, res);
}

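/* Initialize a uniprocessor reservation environment: empty all queues,
 * install the state-change and replenishment-request callbacks, and
 * leave no scheduler update pending until a reservation is added. */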
void sup_init(struct sup_reservation_environment* sup_env)
{
	memset(sup_env, 0, sizeof(*sup_env));

	INIT_LIST_HEAD(&sup_env->all_reservations);
	INIT_LIST_HEAD(&sup_env->active_reservations);
	INIT_LIST_HEAD(&sup_env->depleted_reservations);
	INIT_LIST_HEAD(&sup_env->inactive_reservations);

	sup_env->env.change_state = sup_res_change_state;
	sup_env->env.request_replenishment = sup_request_replenishment;

	sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
}