aboutsummaryrefslogtreecommitdiffstats
path: root/litmus
diff options
context:
space:
mode:
authorBjoern Brandenburg <bbb@mpi-sws.org>2016-03-16 07:59:07 -0400
committerBjoern Brandenburg <bbb@mpi-sws.org>2016-03-20 14:30:35 -0400
commit2a38056cc098c56a04bbe18f4e752f4fa782599f (patch)
treec38619d281c9b7ae23333573b322bab72d8801b3 /litmus
parent095f515b2fd903a0140afcc42db9a9f76d688b65 (diff)
Add basic generic reservation-based scheduling infrastructure
Diffstat (limited to 'litmus')
-rw-r--r--litmus/Makefile2
-rw-r--r--litmus/reservations/Makefile3
-rw-r--r--litmus/reservations/alloc.c143
-rw-r--r--litmus/reservations/budget-notifier.c26
-rw-r--r--litmus/reservations/core.c392
-rw-r--r--litmus/reservations/polling.c256
-rw-r--r--litmus/reservations/table-driven.c269
7 files changed, 1091 insertions, 0 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index 7970cd55e7fd..c969ce59db67 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -30,3 +30,5 @@ obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
30obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o 30obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
31obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o 31obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
32obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o 32obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
33
34obj-y += reservations/
diff --git a/litmus/reservations/Makefile b/litmus/reservations/Makefile
new file mode 100644
index 000000000000..517fc2ff8a76
--- /dev/null
+++ b/litmus/reservations/Makefile
@@ -0,0 +1,3 @@
1obj-y += core.o budget-notifier.o alloc.o
2obj-y += polling.o
3obj-y += table-driven.o
diff --git a/litmus/reservations/alloc.c b/litmus/reservations/alloc.c
new file mode 100644
index 000000000000..1f93f223f504
--- /dev/null
+++ b/litmus/reservations/alloc.c
@@ -0,0 +1,143 @@
1#include <linux/slab.h>
2#include <asm/uaccess.h>
3
4#include <litmus/rt_param.h>
5
6#include <litmus/reservations/alloc.h>
7#include <litmus/reservations/polling.h>
8#include <litmus/reservations/table-driven.h>
9
10
11long alloc_polling_reservation(
12 int res_type,
13 struct reservation_config *config,
14 struct reservation **_res)
15{
16 struct polling_reservation *pres;
17 int use_edf = config->priority == LITMUS_NO_PRIORITY;
18 int periodic = res_type == PERIODIC_POLLING;
19
20 if (config->polling_params.budget >
21 config->polling_params.period) {
22 printk(KERN_ERR "invalid polling reservation (%u): "
23 "budget > period\n", config->id);
24 return -EINVAL;
25 }
26 if (config->polling_params.budget >
27 config->polling_params.relative_deadline
28 && config->polling_params.relative_deadline) {
29 printk(KERN_ERR "invalid polling reservation (%u): "
30 "budget > deadline\n", config->id);
31 return -EINVAL;
32 }
33 if (config->polling_params.offset >
34 config->polling_params.period) {
35 printk(KERN_ERR "invalid polling reservation (%u): "
36 "offset > period\n", config->id);
37 return -EINVAL;
38 }
39
40 /* XXX: would be nice to use a core-local allocation. */
41 pres = kzalloc(sizeof(*pres), GFP_KERNEL);
42 if (!pres)
43 return -ENOMEM;
44
45 polling_reservation_init(pres, use_edf, periodic,
46 config->polling_params.budget,
47 config->polling_params.period,
48 config->polling_params.relative_deadline,
49 config->polling_params.offset);
50 pres->res.id = config->id;
51 if (!use_edf)
52 pres->res.priority = config->priority;
53
54 *_res = &pres->res;
55 return 0;
56}
57
58
59#define MAX_INTERVALS 1024
60
61long alloc_table_driven_reservation(
62 struct reservation_config *config,
63 struct reservation **_res)
64{
65 struct table_driven_reservation *td_res = NULL;
66 struct lt_interval *slots = NULL;
67 size_t slots_size;
68 unsigned int i, num_slots;
69 long err = -EINVAL;
70 void *mem;
71
72 if (!config->table_driven_params.num_intervals) {
73 printk(KERN_ERR "invalid table-driven reservation (%u): "
74 "no intervals\n", config->id);
75 return -EINVAL;
76 }
77
78 if (config->table_driven_params.num_intervals > MAX_INTERVALS) {
79 printk(KERN_ERR "invalid table-driven reservation (%u): "
80 "too many intervals (max: %d)\n", config->id, MAX_INTERVALS);
81 return -EINVAL;
82 }
83
84 num_slots = config->table_driven_params.num_intervals;
85 slots_size = sizeof(slots[0]) * num_slots;
86
87 mem = kzalloc(sizeof(*td_res) + slots_size, GFP_KERNEL);
88 if (!mem) {
89 return -ENOMEM;
90 } else {
91 slots = mem + sizeof(*td_res);
92 td_res = mem;
93 err = copy_from_user(slots,
94 config->table_driven_params.intervals, slots_size);
95 }
96
97 if (!err) {
98 /* sanity checks */
99 for (i = 0; !err && i < num_slots; i++)
100 if (slots[i].end <= slots[i].start) {
101 printk(KERN_ERR
102 "invalid table-driven reservation (%u): "
103 "invalid interval %u => [%llu, %llu]\n",
104 config->id, i,
105 slots[i].start, slots[i].end);
106 err = -EINVAL;
107 }
108
109 for (i = 0; !err && i + 1 < num_slots; i++)
110 if (slots[i + 1].start <= slots[i].end) {
111 printk(KERN_ERR
112 "invalid table-driven reservation (%u): "
113 "overlapping intervals %u, %u\n",
114 config->id, i, i + 1);
115 err = -EINVAL;
116 }
117
118 if (slots[num_slots - 1].end >
119 config->table_driven_params.major_cycle_length) {
120 printk(KERN_ERR
121 "invalid table-driven reservation (%u): last "
122 "interval ends past major cycle %llu > %llu\n",
123 config->id,
124 slots[num_slots - 1].end,
125 config->table_driven_params.major_cycle_length);
126 err = -EINVAL;
127 }
128 }
129
130 if (err) {
131 kfree(td_res);
132 } else {
133 table_driven_reservation_init(td_res,
134 config->table_driven_params.major_cycle_length,
135 slots, num_slots);
136 td_res->res.id = config->id;
137 td_res->res.priority = config->priority;
138 *_res = &td_res->res;
139 }
140
141 return err;
142}
143
diff --git a/litmus/reservations/budget-notifier.c b/litmus/reservations/budget-notifier.c
new file mode 100644
index 000000000000..0b0f42687882
--- /dev/null
+++ b/litmus/reservations/budget-notifier.c
@@ -0,0 +1,26 @@
1#include <litmus/reservations/budget-notifier.h>
2
3void budget_notifier_list_init(struct budget_notifier_list* bnl)
4{
5 INIT_LIST_HEAD(&bnl->list);
6 raw_spin_lock_init(&bnl->lock);
7}
8
9void budget_notifiers_fire(struct budget_notifier_list *bnl, bool replenished)
10{
11 struct budget_notifier *bn, *next;
12
13 unsigned long flags;
14
15 raw_spin_lock_irqsave(&bnl->lock, flags);
16
17 list_for_each_entry_safe(bn, next, &bnl->list, list) {
18 if (replenished)
19 bn->budget_replenished(bn);
20 else
21 bn->budget_exhausted(bn);
22 }
23
24 raw_spin_unlock_irqrestore(&bnl->lock, flags);
25}
26
diff --git a/litmus/reservations/core.c b/litmus/reservations/core.c
new file mode 100644
index 000000000000..cf55ca8ffd3c
--- /dev/null
+++ b/litmus/reservations/core.c
@@ -0,0 +1,392 @@
1#include <linux/sched.h>
2
3#include <litmus/litmus.h>
4#include <litmus/reservations/reservation.h>
5
6void reservation_init(struct reservation *res)
7{
8 memset(res, 0, sizeof(*res));
9 res->state = RESERVATION_INACTIVE;
10 INIT_LIST_HEAD(&res->clients);
11 INIT_LIST_HEAD(&res->replenish_list);
12 budget_notifier_list_init(&res->budget_notifiers);
13}
14
15struct task_struct* default_dispatch_client(
16 struct reservation *res,
17 lt_t *for_at_most)
18{
19 struct reservation_client *client, *next;
20 struct task_struct* tsk;
21
22 BUG_ON(res->state != RESERVATION_ACTIVE);
23 *for_at_most = 0;
24
25 list_for_each_entry_safe(client, next, &res->clients, list) {
26 tsk = client->dispatch(client);
27 if (likely(tsk)) {
28 /* Primitive form of round-robin scheduling:
29 * make sure we alternate between multiple clients
30 * with at least the granularity of the replenishment
31 * period. Reservations that need more fine-grained
32 * or more predictable alternation between threads
33 * within a reservation should provide a custom
34 * dispatch function. */
35 list_del(&client->list);
36 /* move to back of list */
37 list_add_tail(&client->list, &res->clients);
38 return tsk;
39 }
40 }
41 return NULL;
42}
43
/* Account for 'how_much' nanoseconds of consumed execution time.  The
 * remaining budget saturates at zero; a reservation whose budget hits
 * zero transitions to RESERVATION_DEPLETED via the environment's
 * change_state() callback. */
void common_drain_budget(
	struct reservation *res,
	lt_t how_much)
{
	/* saturating subtraction: never underflow the remaining budget */
	if (how_much >= res->cur_budget)
		res->cur_budget = 0;
	else
		res->cur_budget -= how_much;

	/* per-replenishment and lifetime accounting */
	res->budget_consumed += how_much;
	res->budget_consumed_total += how_much;

	switch (res->state) {
	case RESERVATION_DEPLETED:
	case RESERVATION_INACTIVE:
		/* nothing can be draining in these states => caller bug */
		BUG();
		break;

	case RESERVATION_ACTIVE_IDLE:
	case RESERVATION_ACTIVE:
		if (!res->cur_budget) {
			res->env->change_state(res->env, res,
				RESERVATION_DEPLETED);
		} /* else: stay in current state */
		break;
	}
}
71
72static struct task_struct * task_client_dispatch(struct reservation_client *client)
73{
74 struct task_client *tc = container_of(client, struct task_client, client);
75 return tc->task;
76}
77
78void task_client_init(struct task_client *tc, struct task_struct *tsk,
79 struct reservation *res)
80{
81 memset(&tc->client, 0, sizeof(tc->client));
82 tc->client.dispatch = task_client_dispatch;
83 tc->client.reservation = res;
84 tc->task = tsk;
85}
86
87static void sup_scheduler_update_at(
88 struct sup_reservation_environment* sup_env,
89 lt_t when)
90{
91 if (sup_env->next_scheduler_update > when)
92 sup_env->next_scheduler_update = when;
93}
94
95static void sup_scheduler_update_after(
96 struct sup_reservation_environment* sup_env,
97 lt_t timeout)
98{
99 sup_scheduler_update_at(sup_env, sup_env->env.current_time + timeout);
100}
101
/* Insert 'res' into the depleted queue, which is kept sorted by
 * increasing next_replenishment time.  Returns non-zero iff some
 * already-queued reservation replenishes no later than 'res', i.e.,
 * iff the caller need not advance the scheduler-update time. */
static int _sup_queue_depleted(
	struct sup_reservation_environment* sup_env,
	struct reservation *res)
{
	struct list_head *pos;
	struct reservation *queued;
	int passed_earlier = 0;

	/* must not already be queued for replenishment */
	BUG_ON(in_list(&res->replenish_list));

	list_for_each(pos, &sup_env->depleted_reservations) {
		queued = list_entry(pos, struct reservation, replenish_list);
		if (queued->next_replenishment > res->next_replenishment) {
			/* insert before 'queued' to keep the list sorted */
			list_add(&res->replenish_list, pos->prev);
			return passed_earlier;
		} else
			passed_earlier = 1;
	}

	/* everything queued replenishes no later than 'res' => append */
	list_add_tail(&res->replenish_list, &sup_env->depleted_reservations);

	return passed_earlier;
}
125
126static void sup_queue_depleted(
127 struct sup_reservation_environment* sup_env,
128 struct reservation *res)
129{
130 int passed_earlier = _sup_queue_depleted(sup_env, res);
131
132 /* check for updated replenishment time */
133 if (!passed_earlier)
134 sup_scheduler_update_at(sup_env, res->next_replenishment);
135}
136
/* Insert 'res' into the active queue, which is kept sorted by numeric
 * priority.  Background-priority reservations always go to the tail.
 * Returns non-zero iff an ACTIVE reservation was passed on the way in,
 * i.e., iff enqueuing 'res' does not warrant an immediate preemption. */
static int _sup_queue_active(
	struct sup_reservation_environment* sup_env,
	struct reservation *res)
{
	struct list_head *pos;
	struct reservation *queued;
	int passed_active = 0;

	if (likely(res->priority != RESERVATION_BACKGROUND_PRIORITY)) {
		/* enqueue in order of priority */
		list_for_each(pos, &sup_env->active_reservations) {
			queued = list_entry(pos, struct reservation, list);
			if (queued->priority > res->priority) {
				/* found first lower-priority entry => insert before it */
				list_add(&res->list, pos->prev);
				return passed_active;
			} else if (queued->state == RESERVATION_ACTIVE)
				passed_active = 1;
		}
	} else {
		/* don't preempt unless the list happens to be empty */
		passed_active = !list_empty(&sup_env->active_reservations);
	}
	/* Either a background reservation, or we fell off the end of the list.
	 * In both cases, just add the reservation to the end of the list of
	 * active reservations. */
	list_add_tail(&res->list, &sup_env->active_reservations);
	return passed_active;
}
165
/* Queue 'res' among the active reservations and trigger the appropriate
 * scheduler update: reschedule immediately if it preempts everything
 * queued ahead of it, or — if it became the queue head — make sure the
 * scheduler notices when its remaining budget runs out. */
static void sup_queue_active(
	struct sup_reservation_environment* sup_env,
	struct reservation *res)
{
	int passed_active = _sup_queue_active(sup_env, res);

	/* check for possible preemption */
	if (res->state == RESERVATION_ACTIVE && !passed_active)
		sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
	else if (res == list_first_entry(&sup_env->active_reservations,
		struct reservation, list)) {
		/* First reservation is draining budget => make sure
		 * the scheduler is called to notice when the reservation
		 * budget has been drained completely. */
		sup_scheduler_update_after(sup_env, res->cur_budget);
	}
}
183
/* Route a reservation to the queue corresponding to its current state. */
static void sup_queue_reservation(
	struct sup_reservation_environment* sup_env,
	struct reservation *res)
{
	switch (res->state) {
	case RESERVATION_INACTIVE:
		/* inactive reservations are merely parked, order irrelevant */
		list_add(&res->list, &sup_env->inactive_reservations);
		break;

	case RESERVATION_DEPLETED:
		sup_queue_depleted(sup_env, res);
		break;

	case RESERVATION_ACTIVE_IDLE:
	case RESERVATION_ACTIVE:
		sup_queue_active(sup_env, res);
		break;
	}
}
203
/* Register a freshly allocated reservation with this environment:
 * link it to the environment, track it in the all-reservations list,
 * and queue it according to its (initially INACTIVE) state. */
void sup_add_new_reservation(
	struct sup_reservation_environment* sup_env,
	struct reservation* new_res)
{
	new_res->env = &sup_env->env;
	list_add(&new_res->all_list, &sup_env->all_reservations);
	sup_queue_reservation(sup_env, new_res);
}
212
213struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
214 unsigned int id)
215{
216 struct reservation *res;
217
218 list_for_each_entry(res, &sup_env->all_reservations, all_list) {
219 if (res->id == id)
220 return res;
221 }
222
223 return NULL;
224}
225
/* Charge 'delta' ns of elapsed time to the highest-priority queued
 * reservation and re-arm the scheduler timer for whichever reservation
 * heads the active queue afterwards (draining may have changed it). */
static void sup_charge_budget(
	struct sup_reservation_environment* sup_env,
	lt_t delta)
{
	struct reservation *res;

	/* charge the highest-priority ACTIVE or ACTIVE_IDLE reservation */

	res = list_first_entry_or_null(
		&sup_env->active_reservations, struct reservation, list);

	if (res) {
		TRACE("R%d: charging at %llu for %llu execution, budget before: %llu\n",
			res->id, res->env->current_time, delta, res->cur_budget);
		/* may transition the reservation to DEPLETED and requeue it */
		res->ops->drain_budget(res, delta);
		TRACE("R%d: budget now: %llu, priority: %llu\n",
			res->id, res->cur_budget, res->priority);
	}

	/* check when the next budget expires */

	/* re-read: drain_budget() may have removed the previous head */
	res = list_first_entry_or_null(
		&sup_env->active_reservations, struct reservation, list);

	if (res) {
		/* make sure scheduler is invoked when this reservation expires
		 * its remaining budget */
		TRACE("requesting scheduler update for reservation %u "
			"in %llu nanoseconds\n",
			res->id, res->cur_budget);
		sup_scheduler_update_after(sup_env, res->cur_budget);
	}
}
259
260static void sup_replenish_budgets(struct sup_reservation_environment* sup_env)
261{
262 struct list_head *pos, *next;
263 struct reservation *res;
264
265 list_for_each_safe(pos, next, &sup_env->depleted_reservations) {
266 res = list_entry(pos, struct reservation, replenish_list);
267 if (res->next_replenishment <= sup_env->env.current_time) {
268 TRACE("R%d: replenishing budget at %llu, "
269 "priority: %llu\n",
270 res->id, res->env->current_time, res->priority);
271 res->ops->replenish(res);
272 } else {
273 /* list is ordered by increasing depletion times */
274 break;
275 }
276 }
277
278 /* request a scheduler update at the next replenishment instant */
279 res = list_first_entry_or_null(&sup_env->depleted_reservations,
280 struct reservation, replenish_list);
281 if (res)
282 sup_scheduler_update_at(sup_env, res->next_replenishment);
283}
284
/* Advance the environment's notion of time to 'now' and process the
 * consequences: drain the budget of the head active reservation and
 * replenish any reservations whose replenishment time has passed. */
void sup_update_time(
	struct sup_reservation_environment* sup_env,
	lt_t now)
{
	lt_t delta;

	/* If the time didn't advance, there is nothing to do.
	 * This check makes it safe to call sup_advance_time() potentially
	 * multiple times (e.g., via different code paths). */
	if (!list_empty(&sup_env->active_reservations))
		TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now,
			sup_env->env.current_time);
	if (unlikely(now <= sup_env->env.current_time))
		return;

	delta = now - sup_env->env.current_time;
	sup_env->env.current_time = now;

	/* check if future updates are required */
	if (sup_env->next_scheduler_update <= sup_env->env.current_time)
		sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;

	/* deplete budgets by passage of time */
	sup_charge_budget(sup_env, delta);

	/* check if any budgets were replenished */
	sup_replenish_budgets(sup_env);
}
313
/* Select the next task to run: scan the active reservations in queue
 * (priority) order and return the first task offered by an ACTIVE
 * reservation.  Also programs a scheduler update so that budget
 * exhaustion — and any dispatcher-requested time slice — is noticed.
 * Returns NULL if no reservation has a runnable client. */
struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env)
{
	struct reservation *res, *next;
	struct task_struct *tsk = NULL;
	lt_t time_slice;

	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
		if (res->state == RESERVATION_ACTIVE) {
			tsk = res->ops->dispatch_client(res, &time_slice);
			if (likely(tsk)) {
				/* wake the scheduler at the earlier of the
				 * requested slice end and budget exhaustion */
				if (time_slice)
					sup_scheduler_update_after(sup_env, time_slice);
				sup_scheduler_update_after(sup_env, res->cur_budget);
				return tsk;
			}
		}
	}

	return NULL;
}
334
/* Environment callback: move a reservation to 'new_state'.  Fires
 * budget notifiers on ACTIVE->DEPLETED and DEPLETED->ACTIVE edges,
 * dequeues the reservation from its state-specific queue, triggers a
 * reschedule if an ACTIVE reservation was lost, and requeues it under
 * the new state. */
static void sup_res_change_state(
	struct reservation_environment* env,
	struct reservation *res,
	reservation_state_t new_state)
{
	struct sup_reservation_environment* sup_env;

	sup_env = container_of(env, struct sup_reservation_environment, env);

	TRACE("reservation R%d state %d->%d at %llu\n",
		res->id, res->state, new_state, env->current_time);

	/* notify interested parties of budget exhaustion/replenishment */
	if (new_state == RESERVATION_DEPLETED
	    && (res->state == RESERVATION_ACTIVE ||
		res->state == RESERVATION_ACTIVE_IDLE)) {
		budget_notifiers_fire(&res->budget_notifiers, false);
	} else if (res->state == RESERVATION_DEPLETED
		   && new_state == RESERVATION_ACTIVE) {
		budget_notifiers_fire(&res->budget_notifiers, true);
	}

	/* dequeue prior to re-queuing; DEPLETED reservations live on the
	 * replenish list, all others on the state queues via res->list */
	if (res->state == RESERVATION_DEPLETED)
		list_del(&res->replenish_list);
	else
		list_del(&res->list);

	/* check if we need to reschedule because we lost an active reservation */
	if (res->state == RESERVATION_ACTIVE && !sup_env->will_schedule)
		sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
	res->state = new_state;
	sup_queue_reservation(sup_env, res);
}
368
369static void sup_request_replenishment(
370 struct reservation_environment* env,
371 struct reservation *res)
372{
373 struct sup_reservation_environment* sup_env;
374
375 sup_env = container_of(env, struct sup_reservation_environment, env);
376 sup_queue_depleted(sup_env, res);
377}
378
379void sup_init(struct sup_reservation_environment* sup_env)
380{
381 memset(sup_env, 0, sizeof(*sup_env));
382
383 INIT_LIST_HEAD(&sup_env->all_reservations);
384 INIT_LIST_HEAD(&sup_env->active_reservations);
385 INIT_LIST_HEAD(&sup_env->depleted_reservations);
386 INIT_LIST_HEAD(&sup_env->inactive_reservations);
387
388 sup_env->env.change_state = sup_res_change_state;
389 sup_env->env.request_replenishment = sup_request_replenishment;
390
391 sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
392}
diff --git a/litmus/reservations/polling.c b/litmus/reservations/polling.c
new file mode 100644
index 000000000000..63e0bed566e8
--- /dev/null
+++ b/litmus/reservations/polling.c
@@ -0,0 +1,256 @@
1#include <linux/sched.h>
2
3#include <litmus/litmus.h>
4#include <litmus/reservations/reservation.h>
5#include <litmus/reservations/polling.h>
6
7
/* A client joins a periodic polling reservation.  If the reservation
 * was INACTIVE, compute the next period boundary (plus offset) as the
 * first replenishment time and park it as DEPLETED until then; an
 * ACTIVE_IDLE reservation becomes ACTIVE again. */
static void periodic_polling_client_arrives(
	struct reservation* res,
	struct reservation_client *client
)
{
	struct polling_reservation *pres =
		container_of(res, struct polling_reservation, res);
	lt_t instances, tmp;

	list_add_tail(&client->list, &res->clients);

	switch (res->state) {
	case RESERVATION_INACTIVE:
		/* Figure out next replenishment time. */
		tmp = res->env->current_time - res->env->time_zero;
		/* number of complete periods elapsed since time_zero */
		instances = div64_u64(tmp, pres->period);
		res->next_replenishment =
			(instances + 1) * pres->period + pres->offset;

		TRACE("pol-res: activate tmp=%llu instances=%llu period=%llu nextrp=%llu cur=%llu\n",
			tmp, instances, pres->period, res->next_replenishment,
			res->env->current_time);

		/* no budget until the replenishment instant arrives */
		res->env->change_state(res->env, res,
			RESERVATION_DEPLETED);
		break;

	case RESERVATION_ACTIVE:
	case RESERVATION_DEPLETED:
		/* do nothing */
		break;

	case RESERVATION_ACTIVE_IDLE:
		res->env->change_state(res->env, res,
			RESERVATION_ACTIVE);
		break;
	}
}
46
47
48static void periodic_polling_client_departs(
49 struct reservation *res,
50 struct reservation_client *client,
51 int did_signal_job_completion
52)
53{
54 list_del(&client->list);
55
56 switch (res->state) {
57 case RESERVATION_INACTIVE:
58 case RESERVATION_ACTIVE_IDLE:
59 BUG(); /* INACTIVE or IDLE <=> no client */
60 break;
61
62 case RESERVATION_ACTIVE:
63 if (list_empty(&res->clients)) {
64 res->env->change_state(res->env, res,
65 RESERVATION_ACTIVE_IDLE);
66 } /* else: nothing to do, more clients ready */
67 break;
68
69 case RESERVATION_DEPLETED:
70 /* do nothing */
71 break;
72 }
73}
74
/* Refill the budget of a periodic polling reservation at a period
 * boundary and move it to the state implied by whether it currently
 * has clients. */
static void periodic_polling_on_replenishment(
	struct reservation *res
)
{
	struct polling_reservation *pres =
		container_of(res, struct polling_reservation, res);

	/* replenish budget */
	res->cur_budget = pres->max_budget;
	res->next_replenishment += pres->period;
	res->budget_consumed = 0;

	switch (res->state) {
	case RESERVATION_DEPLETED:
	case RESERVATION_INACTIVE:
	case RESERVATION_ACTIVE_IDLE:
		if (list_empty(&res->clients))
			/* no clients => poll again later */
			res->env->change_state(res->env, res,
				RESERVATION_INACTIVE);
		else
			/* we have clients & budget => ACTIVE */
			res->env->change_state(res->env, res,
				RESERVATION_ACTIVE);
		break;

	case RESERVATION_ACTIVE:
		/* Replenished while active => tardy? In any case,
		 * go ahead and stay active. */
		break;
	}
}
107
108static void periodic_polling_on_replenishment_edf(
109 struct reservation *res
110)
111{
112 struct polling_reservation *pres =
113 container_of(res, struct polling_reservation, res);
114
115 /* update current priority */
116 res->priority = res->next_replenishment + pres->deadline;
117
118 /* do common updates */
119 periodic_polling_on_replenishment(res);
120}
121
/* Operations for periodic polling reservations, fixed priority. */
static struct reservation_ops periodic_polling_ops_fp = {
	.dispatch_client = default_dispatch_client,
	.client_arrives = periodic_polling_client_arrives,
	.client_departs = periodic_polling_client_departs,
	.replenish = periodic_polling_on_replenishment,
	.drain_budget = common_drain_budget,
};

/* Operations for periodic polling reservations, EDF priority. */
static struct reservation_ops periodic_polling_ops_edf = {
	.dispatch_client = default_dispatch_client,
	.client_arrives = periodic_polling_client_arrives,
	.client_departs = periodic_polling_client_departs,
	.replenish = periodic_polling_on_replenishment_edf,
	.drain_budget = common_drain_budget,
};
137
138
139
140
/* A client joins a sporadic polling reservation (fixed priority).
 * Unlike the periodic variant, an INACTIVE reservation is replenished
 * immediately and becomes ACTIVE right away. */
static void sporadic_polling_client_arrives_fp(
	struct reservation* res,
	struct reservation_client *client
)
{
	struct polling_reservation *pres =
		container_of(res, struct polling_reservation, res);

	list_add_tail(&client->list, &res->clients);

	switch (res->state) {
	case RESERVATION_INACTIVE:
		/* Replenish now. */
		res->cur_budget = pres->max_budget;
		/* next refill one full period from the arrival instant */
		res->next_replenishment =
			res->env->current_time + pres->period;

		res->env->change_state(res->env, res,
			RESERVATION_ACTIVE);
		break;

	case RESERVATION_ACTIVE:
	case RESERVATION_DEPLETED:
		/* do nothing */
		break;

	case RESERVATION_ACTIVE_IDLE:
		res->env->change_state(res->env, res,
			RESERVATION_ACTIVE);
		break;
	}
}
173
/* A client joins a sporadic polling reservation (EDF priority).
 * Same as the fixed-priority variant, but the priority is also set to
 * the new absolute deadline on activation. */
static void sporadic_polling_client_arrives_edf(
	struct reservation* res,
	struct reservation_client *client
)
{
	struct polling_reservation *pres =
		container_of(res, struct polling_reservation, res);

	list_add_tail(&client->list, &res->clients);

	switch (res->state) {
	case RESERVATION_INACTIVE:
		/* Replenish now. */
		res->cur_budget = pres->max_budget;
		res->next_replenishment =
			res->env->current_time + pres->period;
		/* priority == absolute deadline of this activation */
		res->priority =
			res->env->current_time + pres->deadline;

		res->env->change_state(res->env, res,
			RESERVATION_ACTIVE);
		break;

	case RESERVATION_ACTIVE:
	case RESERVATION_DEPLETED:
		/* do nothing */
		break;

	case RESERVATION_ACTIVE_IDLE:
		res->env->change_state(res->env, res,
			RESERVATION_ACTIVE);
		break;
	}
}
208
/* Operations for sporadic polling reservations, fixed priority. */
static struct reservation_ops sporadic_polling_ops_fp = {
	.dispatch_client = default_dispatch_client,
	.client_arrives = sporadic_polling_client_arrives_fp,
	.client_departs = periodic_polling_client_departs,
	.replenish = periodic_polling_on_replenishment,
	.drain_budget = common_drain_budget,
};

/* Operations for sporadic polling reservations, EDF priority. */
static struct reservation_ops sporadic_polling_ops_edf = {
	.dispatch_client = default_dispatch_client,
	.client_arrives = sporadic_polling_client_arrives_edf,
	.client_departs = periodic_polling_client_departs,
	.replenish = periodic_polling_on_replenishment_edf,
	.drain_budget = common_drain_budget,
};
224
225void polling_reservation_init(
226 struct polling_reservation *pres,
227 int use_edf_prio,
228 int use_periodic_polling,
229 lt_t budget, lt_t period, lt_t deadline, lt_t offset
230)
231{
232 if (!deadline)
233 deadline = period;
234 BUG_ON(budget > period);
235 BUG_ON(budget > deadline);
236 BUG_ON(offset >= period);
237
238 reservation_init(&pres->res);
239 pres->max_budget = budget;
240 pres->period = period;
241 pres->deadline = deadline;
242 pres->offset = offset;
243 if (use_periodic_polling) {
244 pres->res.kind = PERIODIC_POLLING;
245 if (use_edf_prio)
246 pres->res.ops = &periodic_polling_ops_edf;
247 else
248 pres->res.ops = &periodic_polling_ops_fp;
249 } else {
250 pres->res.kind = SPORADIC_POLLING;
251 if (use_edf_prio)
252 pres->res.ops = &sporadic_polling_ops_edf;
253 else
254 pres->res.ops = &sporadic_polling_ops_fp;
255 }
256}
diff --git a/litmus/reservations/table-driven.c b/litmus/reservations/table-driven.c
new file mode 100644
index 000000000000..e4debcb5d4d2
--- /dev/null
+++ b/litmus/reservations/table-driven.c
@@ -0,0 +1,269 @@
1#include <linux/sched.h>
2
3#include <litmus/litmus.h>
4#include <litmus/reservations/reservation.h>
5#include <litmus/reservations/table-driven.h>
6
7static lt_t td_cur_major_cycle_start(struct table_driven_reservation *tdres)
8{
9 lt_t x, tmp;
10
11 tmp = tdres->res.env->current_time - tdres->res.env->time_zero;
12 x = div64_u64(tmp, tdres->major_cycle);
13 x *= tdres->major_cycle;
14 return x;
15}
16
17
18static lt_t td_next_major_cycle_start(struct table_driven_reservation *tdres)
19{
20 lt_t x, tmp;
21
22 tmp = tdres->res.env->current_time - tdres->res.env->time_zero;
23 x = div64_u64(tmp, tdres->major_cycle) + 1;
24 x *= tdres->major_cycle;
25 return x;
26}
27
/* A client joins a table-driven reservation.  An INACTIVE reservation
 * is aligned to the start of the NEXT major cycle (it does not join a
 * cycle already in progress) and parked as DEPLETED until its first
 * table slot; an ACTIVE_IDLE reservation simply becomes ACTIVE. */
static void td_client_arrives(
	struct reservation* res,
	struct reservation_client *client
)
{
	struct table_driven_reservation *tdres =
		container_of(res, struct table_driven_reservation, res);

	list_add_tail(&client->list, &res->clients);

	switch (res->state) {
	case RESERVATION_INACTIVE:
		/* Figure out first replenishment time. */
		tdres->major_cycle_start = td_next_major_cycle_start(tdres);
		res->next_replenishment = tdres->major_cycle_start;
		/* budget becomes available at the first table slot */
		res->next_replenishment += tdres->intervals[0].start;
		tdres->next_interval = 0;

		res->env->change_state(res->env, res,
			RESERVATION_DEPLETED);
		break;

	case RESERVATION_ACTIVE:
	case RESERVATION_DEPLETED:
		/* do nothing */
		break;

	case RESERVATION_ACTIVE_IDLE:
		res->env->change_state(res->env, res,
			RESERVATION_ACTIVE);
		break;
	}
}
61
62static void td_client_departs(
63 struct reservation *res,
64 struct reservation_client *client,
65 int did_signal_job_completion
66)
67{
68 list_del(&client->list);
69
70 switch (res->state) {
71 case RESERVATION_INACTIVE:
72 case RESERVATION_ACTIVE_IDLE:
73 BUG(); /* INACTIVE or IDLE <=> no client */
74 break;
75
76 case RESERVATION_ACTIVE:
77 if (list_empty(&res->clients)) {
78 res->env->change_state(res->env, res,
79 RESERVATION_ACTIVE_IDLE);
80 } /* else: nothing to do, more clients ready */
81 break;
82
83 case RESERVATION_DEPLETED:
84 /* do nothing */
85 break;
86 }
87}
88
89static lt_t td_time_remaining_until_end(struct table_driven_reservation *tdres)
90{
91 lt_t now = tdres->res.env->current_time;
92 lt_t end = tdres->cur_interval.end;
93 TRACE("td_remaining(%u): start=%llu now=%llu end=%llu state=%d\n",
94 tdres->res.id,
95 tdres->cur_interval.start,
96 now, end,
97 tdres->res.state);
98 if (now >= end)
99 return 0;
100 else
101 return end - now;
102}
103
/* Entered a new table slot: materialize the slot's absolute interval,
 * set the budget to the time remaining in it, advance to the next slot
 * (wrapping into the next major cycle if needed), and move the
 * reservation to ACTIVE or ACTIVE_IDLE depending on client presence. */
static void td_replenish(
	struct reservation *res)
{
	struct table_driven_reservation *tdres =
		container_of(res, struct table_driven_reservation, res);

	TRACE("td_replenish(%u): expected_replenishment=%llu\n", res->id,
		res->next_replenishment);

	/* figure out current interval */
	tdres->cur_interval.start = tdres->major_cycle_start +
		tdres->intervals[tdres->next_interval].start;
	tdres->cur_interval.end = tdres->major_cycle_start +
		tdres->intervals[tdres->next_interval].end;
	TRACE("major_cycle_start=%llu => [%llu, %llu]\n",
		tdres->major_cycle_start,
		tdres->cur_interval.start,
		tdres->cur_interval.end);

	/* reset budget */
	res->cur_budget = td_time_remaining_until_end(tdres);
	res->budget_consumed = 0;
	/* zero budget here means we were called after the slot ended */
	TRACE("td_replenish(%u): %s budget=%llu\n", res->id,
		res->cur_budget ? "" : "WARNING", res->cur_budget);

	/* prepare next slot */
	tdres->next_interval = (tdres->next_interval + 1) % tdres->num_intervals;
	if (!tdres->next_interval)
		/* wrap to next major cycle */
		tdres->major_cycle_start += tdres->major_cycle;

	/* determine next time this reservation becomes eligible to execute */
	res->next_replenishment = tdres->major_cycle_start;
	res->next_replenishment += tdres->intervals[tdres->next_interval].start;
	TRACE("td_replenish(%u): next_replenishment=%llu\n", res->id,
		res->next_replenishment);


	switch (res->state) {
	case RESERVATION_DEPLETED:
	case RESERVATION_ACTIVE:
	case RESERVATION_ACTIVE_IDLE:
		if (list_empty(&res->clients))
			res->env->change_state(res->env, res,
				RESERVATION_ACTIVE_IDLE);
		else
			/* we have clients & budget => ACTIVE */
			res->env->change_state(res->env, res,
				RESERVATION_ACTIVE);
		break;

	case RESERVATION_INACTIVE:
		/* an INACTIVE reservation is never queued for replenishment */
		BUG();
		break;
	}
}
160
/* Account for consumed time.  Table-driven budgets are not decremented
 * incrementally: the remaining budget is recomputed as the time left in
 * the current table slot, and the reservation is depleted when the slot
 * ends. */
static void td_drain_budget(
	struct reservation *res,
	lt_t how_much)
{
	struct table_driven_reservation *tdres =
		container_of(res, struct table_driven_reservation, res);

	/* lifetime and per-slot accounting */
	res->budget_consumed += how_much;
	res->budget_consumed_total += how_much;

	/* Table-driven scheduling: instead of tracking the budget, we compute
	 * how much time is left in this allocation interval. */

	/* sanity check: we should never try to drain from future slots */
	BUG_ON(tdres->cur_interval.start > res->env->current_time);

	switch (res->state) {
	case RESERVATION_DEPLETED:
	case RESERVATION_INACTIVE:
		/* nothing can be draining in these states => caller bug */
		BUG();
		break;

	case RESERVATION_ACTIVE_IDLE:
	case RESERVATION_ACTIVE:
		res->cur_budget = td_time_remaining_until_end(tdres);
		TRACE("td_drain_budget(%u): drained to budget=%llu\n",
			res->id, res->cur_budget);
		if (!res->cur_budget) {
			res->env->change_state(res->env, res,
				RESERVATION_DEPLETED);
		} else {
			/* sanity check budget calculation */
			BUG_ON(res->env->current_time >= tdres->cur_interval.end);
			BUG_ON(res->env->current_time < tdres->cur_interval.start);
		}

		break;
	}
}
200
/* Select a client task via the default round-robin policy, then refresh
 * the budget from the current table slot.  If the slot turns out to be
 * already over (broken/overlapping scheduling table), deplete the
 * reservation instead of dispatching. */
static struct task_struct* td_dispatch_client(
	struct reservation *res,
	lt_t *for_at_most)
{
	struct task_struct *t;
	struct table_driven_reservation *tdres =
		container_of(res, struct table_driven_reservation, res);

	/* usual logic for selecting a client */
	t = default_dispatch_client(res, for_at_most);

	TRACE_TASK(t, "td_dispatch_client(%u): selected, budget=%llu\n",
		res->id, res->cur_budget);

	/* check how much budget we have left in this time slot */
	res->cur_budget = td_time_remaining_until_end(tdres);

	TRACE_TASK(t, "td_dispatch_client(%u): updated to budget=%llu next=%d\n",
		res->id, res->cur_budget, tdres->next_interval);

	if (unlikely(!res->cur_budget)) {
		/* Unlikely case: if we ran out of budget, the user configured
		 * a broken scheduling table (overlapping table slots).
		 * Not much we can do about this, but we can't dispatch a job
		 * now without causing overload. So let's register this reservation
		 * as depleted and wait for the next allocation. */
		TRACE("td_dispatch_client(%u): budget unexpectedly depleted "
			"(check scheduling table for unintended overlap)\n",
			res->id);
		res->env->change_state(res->env, res,
			RESERVATION_DEPLETED);
		return NULL;
	} else
		return t;
}
236
/* Operations for table-driven reservations. */
static struct reservation_ops td_ops = {
	.dispatch_client = td_dispatch_client,
	.client_arrives = td_client_arrives,
	.client_departs = td_client_departs,
	.replenish = td_replenish,
	.drain_budget = td_drain_budget,
};
244
245void table_driven_reservation_init(
246 struct table_driven_reservation *tdres,
247 lt_t major_cycle,
248 struct lt_interval *intervals,
249 unsigned int num_intervals)
250{
251 unsigned int i;
252
253 /* sanity checking */
254 BUG_ON(!num_intervals);
255 for (i = 0; i < num_intervals; i++)
256 BUG_ON(intervals[i].end <= intervals[i].start);
257 for (i = 0; i + 1 < num_intervals; i++)
258 BUG_ON(intervals[i + 1].start <= intervals[i].end);
259 BUG_ON(intervals[num_intervals - 1].end > major_cycle);
260
261 reservation_init(&tdres->res);
262 tdres->res.kind = TABLE_DRIVEN;
263 tdres->major_cycle = major_cycle;
264 tdres->intervals = intervals;
265 tdres->cur_interval.start = 0;
266 tdres->cur_interval.end = 0;
267 tdres->num_intervals = num_intervals;
268 tdres->res.ops = &td_ops;
269}