authorZelin Tong <ztong@ludwig.cs.unc.edu>2020-09-16 15:08:10 -0400
committerZelin Tong <ztong@ludwig.cs.unc.edu>2020-09-16 15:08:10 -0400
commitb665fac6de1744ce6aeb4cfdd81a2cff528484dc (patch)
tree65cc441d5620d3872864410b1fc827779af72354
parente21750e5fcfcd24ae828eab337acc04284aff887 (diff)
WIP commit backup
-rw-r--r--  include/litmus/reservations/ext_reservation.h   135
-rw-r--r--  include/litmus/reservations/gedf_reservation.h    58
-rw-r--r--  include/litmus/reservations/task_reservation.h    16
-rw-r--r--  litmus/ext_reservation.c                          19
-rw-r--r--  litmus/litmus.c                                    2
-rw-r--r--  litmus/reservations/ext_reservation.c            406
-rw-r--r--  litmus/reservations/gedf_reservation.c           459
-rw-r--r--  litmus/reservations/task_reservation.c            73
-rw-r--r--  litmus/rt_domain.c                                 7
9 files changed, 1171 insertions, 4 deletions
diff --git a/include/litmus/reservations/ext_reservation.h b/include/litmus/reservations/ext_reservation.h
new file mode 100644
index 000000000000..b5fedf46a2cc
--- /dev/null
+++ b/include/litmus/reservations/ext_reservation.h
@@ -0,0 +1,135 @@
#ifndef LITMUS_EXT_RESERVATION_H
#define LITMUS_EXT_RESERVATION_H

#include <linux/list.h>
#include <linux/hrtimer.h>

#include <litmus/debug_trace.h>
#include <litmus/bheap.h>
#include <litmus/rt_domain.h>
#include <litmus/reservations/budget-notifier.h>

struct reservation_environment;
struct reservation;

/* ************************************************************************** */
/* Reservation replenishes its budget. */
typedef void (*replenish_budget_t) (
	struct reservation *reservation,
	int cpu
);

/* Update the reservation's budget to reflect execution or idling. */
typedef void (*drain_budget_t) (
	struct reservation *reservation,
	lt_t how_much,
	int cpu
);

/* When reservation is scheduled. */
typedef void (*on_schedule_t) (
	struct reservation *reservation,
	lt_t now,
	int cpu
);

/* When reservation is preempted. */
typedef void (*on_preempt_t) (
	struct reservation *reservation,
	int cpu
);

/* Destructor: called before scheduler is deactivated. */
typedef void (*shutdown_t)(struct reservation *reservation);

struct reservation_ops {
	drain_budget_t drain_budget;
	replenish_budget_t replenish_budget;
	on_schedule_t on_schedule;
	on_preempt_t on_preempt;
	shutdown_t shutdown;
};

struct reservation {
	unsigned int id;
	unsigned int kind;

	/* exact meaning defined by impl. */
	lt_t priority;
	lt_t cur_budget;
	lt_t next_replenishment;

	/* budget stats */
	lt_t budget_consumed; /* how much budget consumed in this allocation cycle? */
	lt_t budget_consumed_total;

	/* list of registered budget callbacks */
	struct budget_notifier_list budget_notifiers;

	/* for memory reclamation purposes */
	struct list_head all_list;

	/* interaction with framework */
	struct reservation_ops *ops;
	struct reservation_environment* par_env;

	struct reservation_environment* env;

	/* used to enqueue into the rt_domain framework */
	struct bheap_node* heap_node;
	struct release_heap* rel_heap;
	struct list_head ln;
};

void init_ext_reservation(struct reservation* res);

void clean_up_ext_reservation(struct reservation* res);

/* ************************************************************************** */
typedef void (*env_advance_time_t) (
	struct reservation_environment* env,
	lt_t how_much,
	int cpu);

typedef struct task_struct* (*env_dispatch_t) (
	struct reservation_environment* env,
	lt_t now,
	int cpu);

typedef void (*env_resume_t) (
	struct reservation_environment* env,
	lt_t now,
	int cpu);

typedef void (*env_suspend_t) (
	struct reservation_environment* env,
	int cpu);

typedef void (*env_add_res_t) (struct reservation_environment* env,
	struct reservation* res,
	int cpu);

typedef void (*env_remove_res_t) (struct reservation_environment* env,
	struct reservation* res,
	int complete,
	int cpu);

typedef void (*env_shutdown_t) (
	struct reservation_environment* env,
	int cpu);

struct reservation_environment_ops {
	env_advance_time_t advance_time;
	env_dispatch_t dispatch;
	env_resume_t resume;
	env_suspend_t suspend;
	env_add_res_t add_res;
	env_remove_res_t remove_res;
	env_shutdown_t shutdown;
};

struct reservation_environment {
	struct reservation_environment_ops* ops;
};

#endif
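Taken together, the two halves of this header form a hierarchical scheduling contract: a reservation is the schedulable entity inside a parent environment (par_env), and may itself wrap a child environment (env) to which it forwards scheduling decisions. A minimal sketch of how a top-level scheduler might drive an environment through reservation_environment_ops; the caller-side names (demo_schedule, delta) are illustrative assumptions, not part of this patch:

/* Sketch: one scheduling decision driven through reservation_environment_ops.
 * Assumes top_env is a fully initialized environment and delta is the time
 * consumed on this cpu since the previous decision (hypothetical caller). */
static struct task_struct* demo_schedule(
	struct reservation_environment* top_env, lt_t now, lt_t delta, int cpu)
{
	/* charge elapsed time; budgets drain (and replenish) recursively
	 * through the reservation_ops callbacks */
	top_env->ops->advance_time(top_env, delta, cpu);
	/* pick the next task; dispatch recurses until a leaf environment
	 * returns an actual task_struct */
	return top_env->ops->dispatch(top_env, now, cpu);
}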
diff --git a/include/litmus/reservations/gedf_reservation.h b/include/litmus/reservations/gedf_reservation.h
new file mode 100644
index 000000000000..f00c21e37fac
--- /dev/null
+++ b/include/litmus/reservations/gedf_reservation.h
@@ -0,0 +1,58 @@
#ifndef LITMUS_GEDF_RESERVATION_H
#define LITMUS_GEDF_RESERVATION_H

#include <litmus/reservations/ext_reservation.h>

/* ************************************************************************** */
struct cpu_entry {
	int id;
	struct hrtimer timer;
	struct bheap_node* hn;
	struct gedf_reservation* linked;
	struct gedf_reservation* scheduled;
};

struct gedf_reservation {
	struct reservation res;
	struct cpu_entry* linked_on;
	struct cpu_entry* scheduled_on;
	int will_remove;
	int blocked;
	lt_t period;
	lt_t relative_deadline;
	lt_t exec_cost;
};

long alloc_gedf_reservation(
	struct reservation** _res,
	int id,
	lt_t exec_cost,
	lt_t period,
	lt_t relative_deadline
);

void gedf_reservation_init(
	struct gedf_reservation* res,
	lt_t period,
	lt_t relative_deadline,
	lt_t exec_cost
);

/* environment for scheduling reservations via gedf */
struct gedf_reservation_environment {
	struct reservation_environment env;
	/* number of active cpus in reservation */
	int num_cpus;
	/* array of gedf cpu entries */
	struct cpu_entry cpu_entries[NR_CPUS];
	/* smp_processor_id to environment cpu array offset mapping */
	int cpu_mapping[NR_CPUS];

	/* used to order cpus for gedf purposes */
	struct bheap cpu_heap;
	struct bheap_node cpu_node[NR_CPUS];

	/* operations */
	struct reservation_environment_ops ops;

	rt_domain_t domain;
};

void gedf_reservation_environment_init(
	struct gedf_reservation_environment* gedf_env
);

long alloc_gedf_reservation_environment(
	struct gedf_reservation_environment** _env
);

#endif
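The environment keeps two NR_CPUS-sized arrays: cpu_entries, indexed by an environment-local offset, and cpu_mapping, which translates a global smp_processor_id() into that offset. The .c files repeat this lookup in nearly every callback; a hedged convenience sketch (the helper name is hypothetical, not part of this patch):

/* Hypothetical helper mirroring the lookup in gedf_env_suspend/resume/
 * dispatch/advance_time. */
static inline struct cpu_entry* gedf_cpu_entry_of(
	struct gedf_reservation_environment* gedf_env, int cpu)
{
	int env_cpu = gedf_env->cpu_mapping[cpu]; /* global id -> local offset */
	return &gedf_env->cpu_entries[env_cpu];
}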
diff --git a/include/litmus/reservations/task_reservation.h b/include/litmus/reservations/task_reservation.h
new file mode 100644
index 000000000000..d54a789ff3e2
--- /dev/null
+++ b/include/litmus/reservations/task_reservation.h
@@ -0,0 +1,16 @@
#ifndef LITMUS_TASK_RESERVATION_H
#define LITMUS_TASK_RESERVATION_H

#include <litmus/reservations/ext_reservation.h>

struct task_reservation_environment {
	struct reservation_environment env;
	struct task_struct* task;
};

void task_reservation_environment_init(
	struct task_reservation_environment* task_env,
	struct task_struct* task
);

#endif
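A task reservation environment is the leaf of the hierarchy: its dispatch callback simply returns the wrapped task. A sketch of wiring a task underneath a gedf reservation, assuming gedf_res came from alloc_gedf_reservation(); the allocation and function name here are illustrative, not code from this patch:

static long demo_attach_task(struct gedf_reservation* gedf_res,
			     struct task_struct* tsk)
{
	struct task_reservation_environment* task_env;

	task_env = kzalloc(sizeof(*task_env), GFP_KERNEL);
	if (!task_env)
		return -ENOMEM;
	task_reservation_environment_init(task_env, tsk);
	/* dispatch on the gedf reservation now bottoms out at tsk */
	gedf_res->res.env = &task_env->env;
	return 0;
}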
diff --git a/litmus/ext_reservation.c b/litmus/ext_reservation.c
new file mode 100644
index 000000000000..89fe62d43c91
--- /dev/null
+++ b/litmus/ext_reservation.c
@@ -0,0 +1,19 @@
#include <linux/slab.h>
#include <linux/string.h>

#include <litmus/reservations/ext_reservation.h>

/* the caches themselves are created in litmus.c */
struct kmem_cache* bheap_node_cache;
struct kmem_cache* release_heap_cache;

void init_ext_reservation(struct reservation* res)
{
	memset(res, 0, sizeof(struct reservation));
	res->heap_node = kmem_cache_alloc(bheap_node_cache, GFP_ATOMIC);
	res->rel_heap = kmem_cache_alloc(release_heap_cache, GFP_ATOMIC);
	INIT_LIST_HEAD(&res->ln);
}

void clean_up_ext_reservation(struct reservation* res)
{
	kmem_cache_free(bheap_node_cache, res->heap_node);
	kmem_cache_free(release_heap_cache, res->rel_heap);
}
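Note that both kmem_cache_alloc() calls use GFP_ATOMIC and are not checked; atomic allocations can fail under memory pressure. A defensive variant might look like the following sketch (hypothetical, since the patch's init_ext_reservation returns void and cannot propagate an error):

int init_ext_reservation_checked(struct reservation* res)
{
	memset(res, 0, sizeof(struct reservation));
	res->heap_node = kmem_cache_alloc(bheap_node_cache, GFP_ATOMIC);
	res->rel_heap = kmem_cache_alloc(release_heap_cache, GFP_ATOMIC);
	if (!res->heap_node || !res->rel_heap) {
		if (res->heap_node)
			kmem_cache_free(bheap_node_cache, res->heap_node);
		if (res->rel_heap)
			kmem_cache_free(release_heap_cache, res->rel_heap);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&res->ln);
	return 0;
}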
diff --git a/litmus/litmus.c b/litmus/litmus.c
index bd192180fef7..218109074fda 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -40,7 +40,7 @@ atomic_t rt_task_count = ATOMIC_INIT(0);
 atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
 #endif
 
-static struct kmem_cache * bheap_node_cache;
+extern struct kmem_cache * bheap_node_cache;
 extern struct kmem_cache * release_heap_cache;
 
 struct bheap_node* bheap_node_alloc(int gfp_flags)
diff --git a/litmus/reservations/ext_reservation.c b/litmus/reservations/ext_reservation.c
new file mode 100644
index 000000000000..271c3abf5a3c
--- /dev/null
+++ b/litmus/reservations/ext_reservation.c
@@ -0,0 +1,406 @@
#include <linux/sched.h>
#include <linux/smp.h>

#include <litmus/litmus.h>
#include <litmus/bheap.h>
#include <litmus/edf_common.h>
#include <litmus/jobs.h>
#include <litmus/debug_trace.h>
#include <litmus/reservations/gedf_reservation.h>
#include <litmus/rt_domain.h>

/* ******************************************************************************* */
/* Note: priority stores the absolute deadline, so higher_prio(first, second)
 * returns 1 when first has the *later* deadline; callers that want EDF order
 * swap the arguments (see edf_ready_order). */
static int higher_prio(struct reservation* first,
		       struct reservation* second)
{
	struct reservation *first_task = first;
	struct reservation *second_task = second;

	/* There is no point in comparing a reservation to itself. */
	if (first && first == second) {
		return 0;
	}

	/* check for NULL reservations */
	if (!first || !second)
		return first && !second;

	if (first_task->priority > second_task->priority) {
		return 1;
	}
	else if (first_task->priority == second_task->priority) {
		/* Tie break by id */
		if (first_task->id < second_task->id) {
			return 1;
		}
	}
	return 0; /* fall-through. prio(second_task) > prio(first_task) */
}

/* returns 1 if res of a has an earlier deadline than res of b */
static int edf_ready_order(struct bheap_node* a, struct bheap_node* b)
{
	return higher_prio(bheap2res(b), bheap2res(a));
}

/* Functions used to maintain a heap of cpu entries in edf order
 * cpu_lower_prio is the comparator function used to enforce edf order
 *
 * The next two functions must be called under domain.ready_lock of the
 * reservation environment.
 * update_cpu_position is called when cpu->linked changes
 * lowest_prio_cpu returns the lowest prio cpu
 */
static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
	struct cpu_entry *a, *b;
	a = _a->value;
	b = _b->value;
	/* rank idle cpus, then later-deadline cpus, at the top of the heap;
	 * recall higher_prio(x, y) == 1 iff x has the later deadline */
	if (!a->linked)
		return 1;
	if (!b->linked)
		return 0;
	return higher_prio(&a->linked->res, &b->linked->res);
}

static void update_cpu_position(struct cpu_entry* entry, struct bheap* cpu_heap)
{
	if (likely(bheap_node_in_heap(entry->hn)))
		bheap_delete(cpu_lower_prio, cpu_heap, entry->hn);
	bheap_insert(cpu_lower_prio, cpu_heap, entry->hn);
}

static struct cpu_entry* lowest_prio_cpu(struct bheap* cpu_heap)
{
	struct bheap_node* hn;
	hn = bheap_peek(cpu_lower_prio, cpu_heap);
	return hn->value;
}

/* ******************************************************************************* */

static void gedf_on_preempt(
	struct reservation *res,
	int cpu)
{
	res->env->ops->suspend(res->env, cpu);
}

static void gedf_on_schedule(
	struct reservation *res,
	lt_t now,
	int cpu)
{
	res->env->ops->resume(res->env, now, cpu);
}

static void gedf_replenish_budget(
	struct reservation* res,
	int cpu)
{
	struct gedf_reservation* gedf_res =
		container_of(res, struct gedf_reservation, res);
	res->budget_consumed = 0;
	res->cur_budget = gedf_res->exec_cost;
	/* priority holds the absolute deadline under EDF */
	res->priority = res->next_replenishment + gedf_res->relative_deadline;
	res->next_replenishment += gedf_res->period;
}

static void gedf_drain_budget(
	struct reservation* res,
	lt_t how_much,
	int cpu)
{
	if (how_much > res->cur_budget)
		res->cur_budget = 0;
	else
		res->cur_budget -= how_much;
	res->budget_consumed += how_much;
	res->budget_consumed_total += how_much;

	res->env->ops->advance_time(res->env, how_much, cpu);
}

static struct reservation_ops gedf_ops = {
	.drain_budget = gedf_drain_budget,
	.replenish_budget = gedf_replenish_budget,
	.on_schedule = gedf_on_schedule,
	.on_preempt = gedf_on_preempt
	/* .shutdown is not yet implemented in this WIP copy of the file */
};

/* ******************************************************************************** */
static void preempt(struct cpu_entry* entry)
{
	if (entry->scheduled) {
		if (smp_processor_id() == entry->id)
			litmus_reschedule_local();
		else
			litmus_reschedule(entry->id);
	}
}

static enum hrtimer_restart timer_callback(struct hrtimer* timer)
{
	litmus_reschedule_local();
	return HRTIMER_NORESTART;
}

static void requeue(rt_domain_t* domain, struct reservation* res)
{
	BUG_ON(!res);
	BUG_ON(is_queued_res(res));

	if (lt_before_eq(res->next_replenishment, litmus_clock()))
		__add_ready_res(domain, res);
	else
		__add_release_res(domain, res);
}

static void link_task_to_cpu(
	struct gedf_reservation* linked,
	struct cpu_entry* entry,
	struct bheap* cpu_heap)
{
	struct cpu_entry* sched;
	struct gedf_reservation* tmp;

	if (entry->linked)
		entry->linked->linked_on = NULL;

	if (linked) {
		/* handle the case where linked is still scheduled elsewhere */
		sched = linked->scheduled_on;
		if (sched) {
			BUG_ON(sched->linked == linked);
			if (entry != sched) {
				tmp = sched->linked;
				linked->linked_on = sched;
				sched->linked = linked;
				update_cpu_position(sched, cpu_heap);
				linked = tmp;
			}
		}
		if (linked)
			linked->linked_on = entry;
	}
	entry->linked = linked;
	update_cpu_position(entry, cpu_heap);
}

static void unlink(struct gedf_reservation* res)
{
	struct gedf_reservation_environment* gedf_env =
		container_of(res->res.par_env, struct gedf_reservation_environment, env);
	BUG_ON(!res->linked_on);
	if (res->linked_on) {
		link_task_to_cpu(NULL, res->linked_on, &gedf_env->cpu_heap);
		res->linked_on = NULL;
	}
}

static void check_for_preemptions(struct gedf_reservation_environment* env)
{
	struct gedf_reservation* res;
	struct cpu_entry* last;

	for (last = lowest_prio_cpu(&env->cpu_heap);
	     edf_preemption_needed(&env->domain,
			last->linked ? &last->linked->res : NULL);
	     last = lowest_prio_cpu(&env->cpu_heap)) {
		res = container_of(__take_ready_res(&env->domain),
				   struct gedf_reservation, res);
		if (last->linked && last->linked->res.cur_budget)
			requeue(&env->domain, &last->linked->res);
		link_task_to_cpu(res, last, &env->cpu_heap);
		preempt(last);
	}
}
/* ******************************************************************************** */
/* not really called because liblitmus doesn't support removing reservations;
 * the callback is here in case someone plans on adding it in the future
 */
static void gedf_env_shutdown(
	struct reservation_environment* env,
	int cpu)
{
	env->ops->suspend(env, cpu);
}

/* must be called when res is scheduled, or from task_exit or task_block */
static void gedf_env_remove_res(
	struct reservation_environment* env,
	struct reservation* res,
	int complete,
	int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct gedf_reservation* gedf_res;
	unsigned long flags;
	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	gedf_res = container_of(res, struct gedf_reservation, res);
	gedf_res->will_remove = complete;
	gedf_res->blocked = !complete;
	raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags);
	unlink(gedf_res);
	raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags);
	preempt(gedf_res->scheduled_on);
	/* After preempt is called, schedule will update budget tracking.
	 * In advance_time, the environment will detect that res (which is
	 * scheduled) wants to be removed.
	 * If the reservation is flagged for removal, the shutdown callback is
	 * called. If the reservation is flagged as blocked, it will not be
	 * requeued back into the domain, and will invoke the on_preempt
	 * callback in env_dispatch.
	 * Because we unlinked it, after env_dispatch, res is essentially gone.
	 */
}

static void gedf_env_add_res(
	struct reservation_environment* env,
	struct reservation* res,
	int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct gedf_reservation* gedf_res;
	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	gedf_res = container_of(res, struct gedf_reservation, res);
	res->par_env = env;
	gedf_res->will_remove = 0;
	gedf_res->blocked = 0;
	requeue(&gedf_env->domain, res);
	check_for_preemptions(gedf_env);
}

static void gedf_env_suspend(
	struct reservation_environment* env,
	int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct cpu_entry* entry;
	int env_cpu;

	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	env_cpu = gedf_env->cpu_mapping[cpu];
	entry = &gedf_env->cpu_entries[env_cpu];

	/* only environment-local cpu 0 toggles the release timer state */
	if (env_cpu == 0)
		suspend_releases(&gedf_env->domain);
	hrtimer_try_to_cancel(&entry->timer);
}

static void gedf_env_resume(
	struct reservation_environment* env,
	lt_t now,
	int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct cpu_entry* entry;
	int env_cpu;

	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	env_cpu = gedf_env->cpu_mapping[cpu];
	entry = &gedf_env->cpu_entries[env_cpu];

	if (env_cpu == 0)
		resume_releases(&gedf_env->domain);
	if (entry->scheduled)
		hrtimer_start(&entry->timer,
			ns_to_ktime(now + entry->scheduled->res.cur_budget),
			HRTIMER_MODE_ABS_PINNED);
}

static struct task_struct* gedf_env_dispatch(
	struct reservation_environment* env,
	lt_t now,
	int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct cpu_entry* entry;
	int env_cpu;
	struct task_struct* next = NULL;
	unsigned long flags;

	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	env_cpu = gedf_env->cpu_mapping[cpu];
	entry = &gedf_env->cpu_entries[env_cpu];

	raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags);

	check_for_preemptions(gedf_env);
	if (entry->scheduled != entry->linked) {
		if (entry->scheduled)
			entry->scheduled->res.ops->on_preempt(&entry->scheduled->res, env_cpu);
		if (entry->linked)
			entry->linked->res.ops->on_schedule(&entry->linked->res, now, env_cpu);
		entry->scheduled = entry->linked;
	}
	raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags);

	if (entry->scheduled) {
		/* recurse into the child environment of the scheduled reservation */
		next = entry->scheduled->res.env->ops->dispatch(
			entry->scheduled->res.env, now, cpu);
		/* arm the budget-exhaustion timer for the scheduled reservation */
		hrtimer_start(&entry->timer,
			ns_to_ktime(now + entry->scheduled->res.cur_budget),
			HRTIMER_MODE_ABS_PINNED);
	}

	return next;
}

static void gedf_env_advance_time(
	struct reservation_environment* env, lt_t how_much, int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct gedf_reservation* gedf_res;
	struct cpu_entry* entry;
	int env_cpu;
	unsigned long flags;

	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	env_cpu = gedf_env->cpu_mapping[cpu];
	entry = &gedf_env->cpu_entries[env_cpu];

	BUG_ON(entry->id != cpu);
	entry->scheduled->res.ops->drain_budget(&entry->scheduled->res, how_much, env_cpu);
	gedf_res = entry->scheduled;
	/* if flagged for removal from environment, invoke shutdown callback */
	if (gedf_res->will_remove) {
		entry->scheduled->res.ops->shutdown(&entry->scheduled->res);
		entry->scheduled = NULL;
	} else if (!entry->scheduled->res.cur_budget) {
		entry->scheduled->res.ops->replenish_budget(&entry->scheduled->res, env_cpu);
		/* unlink and requeue if not blocked */
		if (!gedf_res->blocked) {
			raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags);
			unlink(entry->scheduled);
			requeue(&gedf_env->domain, &entry->scheduled->res);
			check_for_preemptions(gedf_env);
			raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags);
		}
	}
}

static void gedf_env_release_jobs(rt_domain_t* rt, struct bheap* res)
{
	unsigned long flags;
	struct gedf_reservation_environment* gedf_env =
		container_of(rt, struct gedf_reservation_environment, domain);

	raw_spin_lock_irqsave(&rt->ready_lock, flags);
	__merge_ready_res(rt, res);
	check_for_preemptions(gedf_env);
	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
}

static struct reservation_environment_ops gedf_env_ops = {
	.advance_time = gedf_env_advance_time,
	.dispatch = gedf_env_dispatch,
	.resume = gedf_env_resume,
	.suspend = gedf_env_suspend,
	.add_res = gedf_env_add_res,
	.remove_res = gedf_env_remove_res,
	.shutdown = gedf_env_shutdown
};

void gedf_reservation_environment_init(
	struct gedf_reservation_environment* gedf_env)
{
	int i;

	gedf_env->env.ops = &gedf_env_ops;
	gedf_env->num_cpus = 0;
	memset(&gedf_env->cpu_entries, 0, sizeof(struct cpu_entry) * NR_CPUS);
	memset(&gedf_env->cpu_mapping, 0, sizeof(int) * NR_CPUS);
	bheap_init(&gedf_env->cpu_heap);
	for_each_online_cpu(i) {
		gedf_env->cpu_entries[i].hn = &gedf_env->cpu_node[i];
		hrtimer_init(&gedf_env->cpu_entries[i].timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
		gedf_env->cpu_entries[i].timer.function = timer_callback;
		bheap_node_init(&gedf_env->cpu_entries[i].hn, &gedf_env->cpu_entries[i]);
	}
	rt_domain_init(&gedf_env->domain, edf_ready_order, NULL, gedf_env_release_jobs);
}
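Unlike litmus/reservations/gedf_reservation.c below, which heap-allocates its environment, this file exports an init function for an environment embedded elsewhere. A hedged usage sketch (the embedding site is hypothetical, not part of this patch):

static struct gedf_reservation_environment demo_env;

static void demo_setup(void)
{
	gedf_reservation_environment_init(&demo_env);
	/* reservations can now be added via
	 * demo_env.env.ops->add_res(&demo_env.env, res, cpu); */
}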
diff --git a/litmus/reservations/gedf_reservation.c b/litmus/reservations/gedf_reservation.c
new file mode 100644
index 000000000000..8b20be315237
--- /dev/null
+++ b/litmus/reservations/gedf_reservation.c
@@ -0,0 +1,459 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <litmus/litmus.h>
#include <litmus/edf_common.h>
#include <litmus/jobs.h>
#include <litmus/debug_trace.h>
#include <litmus/reservations/gedf_reservation.h>

/* ******************************************************************************* */
/* Note: priority stores the absolute deadline, so higher_prio(first, second)
 * returns 1 when first has the *later* deadline; callers that want EDF order
 * swap the arguments (see edf_ready_order). */
static int higher_prio(struct reservation* first,
		       struct reservation* second)
{
	struct reservation *first_task = first;
	struct reservation *second_task = second;

	/* There is no point in comparing a reservation to itself. */
	if (first && first == second) {
		return 0;
	}

	/* check for NULL reservations */
	if (!first || !second)
		return first && !second;

	if (first_task->priority > second_task->priority) {
		return 1;
	}
	else if (first_task->priority == second_task->priority) {
		/* Tie break by id */
		if (first_task->id < second_task->id) {
			return 1;
		}
	}
	return 0; /* fall-through. prio(second_task) > prio(first_task) */
}

/* returns 1 if res of a has an earlier deadline than res of b */
static int edf_ready_order(struct bheap_node* a, struct bheap_node* b)
{
	return higher_prio(bheap2res(b), bheap2res(a));
}

/* Functions used to maintain a heap of cpu entries in edf order
 * cpu_lower_prio is the comparator function used to enforce edf order
 *
 * The next two functions must be called under domain.ready_lock of the
 * reservation environment.
 * update_cpu_position is called when cpu->linked changes
 * lowest_prio_cpu returns the lowest prio cpu
 */
static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
	struct cpu_entry *a, *b;
	a = _a->value;
	b = _b->value;
	/* rank idle cpus, then later-deadline cpus, at the top of the heap;
	 * recall higher_prio(x, y) == 1 iff x has the later deadline */
	if (!a->linked)
		return 1;
	if (!b->linked)
		return 0;
	return higher_prio(&a->linked->res, &b->linked->res);
}

static void update_cpu_position(struct cpu_entry* entry, struct bheap* cpu_heap)
{
	if (likely(bheap_node_in_heap(entry->hn)))
		bheap_delete(cpu_lower_prio, cpu_heap, entry->hn);
	bheap_insert(cpu_lower_prio, cpu_heap, entry->hn);
}

static struct cpu_entry* lowest_prio_cpu(struct bheap* cpu_heap)
{
	struct bheap_node* hn;
	hn = bheap_peek(cpu_lower_prio, cpu_heap);
	return hn->value;
}

/* ******************************************************************************* */
static void gedf_shutdown(struct reservation *res)
{
	/* shutdown_t takes only the reservation; shut down the child
	 * environment on the local cpu */
	res->env->ops->shutdown(res->env, smp_processor_id());
	clean_up_ext_reservation(res);
	kfree(res);
}

static void gedf_on_preempt(
	struct reservation *res,
	int cpu)
{
	res->env->ops->suspend(res->env, cpu);
}

static void gedf_on_schedule(
	struct reservation *res,
	lt_t now,
	int cpu)
{
	res->env->ops->resume(res->env, now, cpu);
}

static void gedf_replenish_budget(
	struct reservation* res,
	int cpu)
{
	struct gedf_reservation* gedf_res =
		container_of(res, struct gedf_reservation, res);
	res->budget_consumed = 0;
	res->cur_budget = gedf_res->exec_cost;
	/* priority holds the absolute deadline under EDF */
	res->priority = res->next_replenishment + gedf_res->relative_deadline;
	res->next_replenishment += gedf_res->period;
}
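/* Worked example of gedf_replenish_budget (values in ms for readability;
 * lt_t is really nanoseconds). Assume exec_cost = 2, period = 5,
 * relative_deadline = 5, and next_replenishment = 10 on entry. Then:
 *   cur_budget         = 2   (refilled to exec_cost)
 *   priority           = 15  (absolute deadline: 10 + 5)
 *   next_replenishment = 15  (next release: 10 + 5)
 * A competing reservation with priority 12 has the earlier deadline and,
 * via edf_ready_order's argument swap, ranks above this one. */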

static void gedf_drain_budget(
	struct reservation* res,
	lt_t how_much,
	int cpu)
{
	if (how_much > res->cur_budget)
		res->cur_budget = 0;
	else
		res->cur_budget -= how_much;
	res->budget_consumed += how_much;
	res->budget_consumed_total += how_much;

	res->env->ops->advance_time(res->env, how_much, cpu);
}

static struct reservation_ops gedf_ops = {
	.drain_budget = gedf_drain_budget,
	.replenish_budget = gedf_replenish_budget,
	.on_schedule = gedf_on_schedule,
	.on_preempt = gedf_on_preempt,
	.shutdown = gedf_shutdown
};

void gedf_reservation_init(
	struct gedf_reservation* res,
	lt_t period,
	lt_t relative_deadline,
	lt_t exec_cost)
{
	init_ext_reservation(&res->res);
	res->res.ops = &gedf_ops;
	res->linked_on = NULL;
	res->scheduled_on = NULL;
	res->will_remove = 0;
	res->blocked = 0;
	res->period = period;
	res->relative_deadline = relative_deadline;
	res->exec_cost = exec_cost;
}

long alloc_gedf_reservation(
	struct reservation** _res,
	int id,
	lt_t exec_cost,
	lt_t period,
	lt_t relative_deadline)
{
	struct gedf_reservation* gedf_res;
	gedf_res = kzalloc(sizeof(*gedf_res), GFP_KERNEL);
	if (!gedf_res)
		return -ENOMEM;

	gedf_reservation_init(gedf_res, period, relative_deadline, exec_cost);
	gedf_res->res.id = id;

	*_res = &gedf_res->res;
	return 0;
}

/* ******************************************************************************** */
static void preempt(struct cpu_entry* entry)
{
	if (entry->scheduled) {
		if (smp_processor_id() == entry->id)
			litmus_reschedule_local();
		else
			litmus_reschedule(entry->id);
	}
}

static enum hrtimer_restart timer_callback(struct hrtimer* timer)
{
	litmus_reschedule_local();
	return HRTIMER_NORESTART;
}

static void requeue(rt_domain_t* domain, struct reservation* res)
{
	BUG_ON(!res);
	BUG_ON(is_queued_res(res));

	if (lt_before_eq(res->next_replenishment, litmus_clock()))
		__add_ready_res(domain, res);
	else
		__add_release_res(domain, res);
}

static void link_task_to_cpu(
	struct gedf_reservation* linked,
	struct cpu_entry* entry,
	struct bheap* cpu_heap)
{
	struct cpu_entry* sched;
	struct gedf_reservation* tmp;

	if (entry->linked)
		entry->linked->linked_on = NULL;

	if (linked) {
		/* handle the case where linked is still scheduled elsewhere */
		sched = linked->scheduled_on;
		if (sched) {
			BUG_ON(sched->linked == linked);
			if (entry != sched) {
				tmp = sched->linked;
				linked->linked_on = sched;
				sched->linked = linked;
				update_cpu_position(sched, cpu_heap);
				linked = tmp;
			}
		}
		if (linked)
			linked->linked_on = entry;
	}
	entry->linked = linked;
	update_cpu_position(entry, cpu_heap);
}

static void unlink(struct gedf_reservation* res)
{
	struct gedf_reservation_environment* gedf_env;
	gedf_env = container_of(res->res.par_env, struct gedf_reservation_environment, env);
	if (res->linked_on) {
		link_task_to_cpu(NULL, res->linked_on, &gedf_env->cpu_heap);
		res->linked_on = NULL;
	} else if (is_queued_res(&res->res)) {
		remove_res(&gedf_env->domain, &res->res);
	}
}

static void check_for_preemptions(struct gedf_reservation_environment* env)
{
	struct gedf_reservation* res;
	struct cpu_entry* last;

	for (last = lowest_prio_cpu(&env->cpu_heap);
	     edf_preemption_needed(&env->domain,
			last->linked ? &last->linked->res : NULL);
	     last = lowest_prio_cpu(&env->cpu_heap)) {
		res = container_of(__take_ready_res(&env->domain),
				   struct gedf_reservation, res);
		if (last->linked && last->linked->res.cur_budget)
			requeue(&env->domain, &last->linked->res);
		link_task_to_cpu(res, last, &env->cpu_heap);
		preempt(last);
	}
}

/* ******************************************************************************** */
static void gedf_env_shutdown(
	struct reservation_environment* env,
	int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	int i;
	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	suspend_releases(&gedf_env->domain);
	for (i = 0; i < gedf_env->num_cpus; i++) {
		hrtimer_cancel(&gedf_env->cpu_entries[i].timer);
	}
	kfree(gedf_env);
}

/* must be called when res is scheduled, or from task_exit or task_block */
static void gedf_env_remove_res(
	struct reservation_environment* env,
	struct reservation* res,
	int complete,
	int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct gedf_reservation* gedf_res;
	unsigned long flags;
	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	gedf_res = container_of(res, struct gedf_reservation, res);
	gedf_res->will_remove = complete;
	gedf_res->blocked = !complete;
	raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags);
	unlink(gedf_res);
	raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags);
	preempt(gedf_res->scheduled_on);
	/* After preempt is called, schedule will update budget tracking.
	 * In advance_time, the environment will detect that res (which is
	 * scheduled) wants to be removed.
	 * If the reservation is flagged for removal, the shutdown callback is
	 * called. If the reservation is flagged as blocked, it will not be
	 * requeued back into the domain, and will invoke the on_preempt
	 * callback in env_dispatch.
	 * Because we unlinked it, after env_dispatch, res is essentially gone.
	 */
}

static void gedf_env_add_res(
	struct reservation_environment* env,
	struct reservation* res,
	int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct gedf_reservation* gedf_res;
	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	gedf_res = container_of(res, struct gedf_reservation, res);
	res->par_env = env;
	gedf_res->will_remove = 0;
	gedf_res->blocked = 0;
	requeue(&gedf_env->domain, res);
	check_for_preemptions(gedf_env);
}

static void gedf_env_suspend(
	struct reservation_environment* env,
	int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct cpu_entry* entry;
	int env_cpu;

	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	env_cpu = gedf_env->cpu_mapping[cpu];
	entry = &gedf_env->cpu_entries[env_cpu];

	/* only environment-local cpu 0 toggles the release timer state */
	if (env_cpu == 0)
		suspend_releases(&gedf_env->domain);
	hrtimer_try_to_cancel(&entry->timer);
}

static void gedf_env_resume(
	struct reservation_environment* env,
	lt_t now,
	int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct cpu_entry* entry;
	int env_cpu;

	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	env_cpu = gedf_env->cpu_mapping[cpu];
	entry = &gedf_env->cpu_entries[env_cpu];

	if (env_cpu == 0)
		resume_releases(&gedf_env->domain);
	if (entry->scheduled)
		hrtimer_start(&entry->timer,
			ns_to_ktime(now + entry->scheduled->res.cur_budget),
			HRTIMER_MODE_ABS_PINNED);
}

static struct task_struct* gedf_env_dispatch(
	struct reservation_environment* env,
	lt_t now,
	int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct cpu_entry* entry;
	int env_cpu;
	struct task_struct* next = NULL;
	unsigned long flags;

	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	env_cpu = gedf_env->cpu_mapping[cpu];
	entry = &gedf_env->cpu_entries[env_cpu];

	raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags);

	check_for_preemptions(gedf_env);
	if (entry->scheduled != entry->linked) {
		if (entry->scheduled)
			entry->scheduled->res.ops->on_preempt(&entry->scheduled->res, cpu);
		if (entry->linked)
			entry->linked->res.ops->on_schedule(&entry->linked->res, now, cpu);
		entry->scheduled = entry->linked;
	}
	raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags);

	if (entry->scheduled) {
		/* recurse into the child environment of the scheduled reservation */
		next = entry->scheduled->res.env->ops->dispatch(
			entry->scheduled->res.env, now, cpu);
		/* arm the budget-exhaustion timer for the scheduled reservation */
		hrtimer_start(&entry->timer,
			ns_to_ktime(now + entry->scheduled->res.cur_budget),
			HRTIMER_MODE_ABS_PINNED);
	}

	return next;
}

static void gedf_env_advance_time(
	struct reservation_environment* env, lt_t how_much, int cpu)
{
	struct gedf_reservation_environment* gedf_env;
	struct gedf_reservation* gedf_res;
	struct cpu_entry* entry;
	int env_cpu;
	unsigned long flags;

	gedf_env = container_of(env, struct gedf_reservation_environment, env);
	env_cpu = gedf_env->cpu_mapping[cpu];
	entry = &gedf_env->cpu_entries[env_cpu];

	BUG_ON(entry->id != cpu);
	entry->scheduled->res.ops->drain_budget(&entry->scheduled->res, how_much, cpu);
	gedf_res = entry->scheduled;
	/* if flagged for removal from environment, invoke shutdown callback */
	if (gedf_res->will_remove) {
		entry->scheduled->res.ops->shutdown(&entry->scheduled->res);
		entry->scheduled = NULL;
	} else if (!entry->scheduled->res.cur_budget) {
		entry->scheduled->res.ops->replenish_budget(&entry->scheduled->res, cpu);
		/* unlink and requeue if not blocked */
		if (!gedf_res->blocked) {
			raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags);
			unlink(entry->scheduled);
			requeue(&gedf_env->domain, &entry->scheduled->res);
			check_for_preemptions(gedf_env);
			raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags);
		}
	}
}

static void gedf_env_release_jobs(rt_domain_t* rt, struct bheap* res)
{
	unsigned long flags;
	struct gedf_reservation_environment* gedf_env =
		container_of(rt, struct gedf_reservation_environment, domain);

	raw_spin_lock_irqsave(&rt->ready_lock, flags);
	__merge_ready_res(rt, res);
	check_for_preemptions(gedf_env);
	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
}

static struct reservation_environment_ops gedf_env_ops = {
	.advance_time = gedf_env_advance_time,
	.dispatch = gedf_env_dispatch,
	.resume = gedf_env_resume,
	.suspend = gedf_env_suspend,
	.add_res = gedf_env_add_res,
	.remove_res = gedf_env_remove_res,
	.shutdown = gedf_env_shutdown
};

long alloc_gedf_reservation_environment(
	struct gedf_reservation_environment** _env)
{
	struct gedf_reservation_environment* gedf_env;
	int i;

	gedf_env = kzalloc(sizeof(*gedf_env), GFP_KERNEL);
	if (!gedf_env)
		return -ENOMEM;

	gedf_env->env.ops = &gedf_env_ops;
	gedf_env->num_cpus = 0;
	bheap_init(&gedf_env->cpu_heap);
	for_each_online_cpu(i) {
		gedf_env->cpu_entries[i].hn = &gedf_env->cpu_node[i];
		hrtimer_init(&gedf_env->cpu_entries[i].timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
		gedf_env->cpu_entries[i].timer.function = timer_callback;
		bheap_node_init(&gedf_env->cpu_entries[i].hn, &gedf_env->cpu_entries[i]);
	}
	rt_domain_init(&gedf_env->domain, edf_ready_order, NULL, gedf_env_release_jobs);

	*_env = gedf_env;
	return 0;
}
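A hedged sketch of building a minimal hierarchy with this file's allocators; error handling is abbreviated and the parameter values are arbitrary examples:

static long demo_build(void)
{
	struct gedf_reservation_environment* env;
	struct reservation* res;
	long err;

	err = alloc_gedf_reservation_environment(&env);
	if (err)
		return err;
	/* id 1; exec_cost 2 ms, period/deadline 5 ms (lt_t is in ns) */
	err = alloc_gedf_reservation(&res, 1, 2000000ULL, 5000000ULL, 5000000ULL);
	if (err)
		return err;
	/* enqueue the reservation; cpu 0 is an arbitrary example */
	env->env.ops->add_res(&env->env, res, 0);
	return 0;
}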
diff --git a/litmus/reservations/task_reservation.c b/litmus/reservations/task_reservation.c
new file mode 100644
index 000000000000..9422598cfd15
--- /dev/null
+++ b/litmus/reservations/task_reservation.c
@@ -0,0 +1,73 @@
#include <litmus/reservations/task_reservation.h>

static void task_env_shutdown(
	struct reservation_environment* env,
	int cpu)
{
	return;
}

static void task_env_remove_res(
	struct reservation_environment* env,
	struct reservation* res,
	int complete,
	int cpu)
{
	return;
}

static void task_env_add_res(
	struct reservation_environment* env,
	struct reservation* res,
	int cpu)
{
	return;
}

static void task_env_suspend(
	struct reservation_environment* env,
	int cpu)
{
	return;
}

static void task_env_resume(
	struct reservation_environment* env,
	lt_t now, int cpu)
{
	return;
}

static struct task_struct* task_env_dispatch(
	struct reservation_environment* env,
	lt_t now,
	int cpu)
{
	struct task_reservation_environment* task_env =
		container_of(env, struct task_reservation_environment, env);
	return task_env->task;
}

static void task_env_advance_time(
	struct reservation_environment* env,
	lt_t how_much,
	int cpu)
{
	return;
}

static struct reservation_environment_ops task_env_ops = {
	.advance_time = task_env_advance_time,
	.dispatch = task_env_dispatch,
	.resume = task_env_resume,
	.suspend = task_env_suspend,
	.add_res = task_env_add_res,
	.remove_res = task_env_remove_res,
	.shutdown = task_env_shutdown
};

void task_reservation_environment_init(
	struct task_reservation_environment* task_env,
	struct task_struct* task)
{
	task_env->env.ops = &task_env_ops;
	task_env->task = task;
}
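Every callback above is a no-op except dispatch, which makes this environment the base case of the dispatch recursion started by a parent such as gedf_env_dispatch. An illustrative fragment:

/* Illustrative: given a wired-up leaf, a parent's dispatch bottoms out here. */
static struct task_struct* demo_leaf_dispatch(
	struct task_reservation_environment* task_env, lt_t now, int cpu)
{
	/* always equals task_env->task, regardless of now and cpu */
	return task_env->env.ops->dispatch(&task_env->env, now, cpu);
}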
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index f90f5718c0ad..044b3e64a2eb 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -339,9 +339,9 @@ static void arm_release_timer(rt_domain_t *_rt)
 	 */
 #ifdef CONFIG_RELEASE_MASTER
-#define arm_release_timer_res(t) arm_release_timer_res_on((t), NO_CPU)
-static void arm_release_timer_res_on(rt_domain_t *_rt , int target_cpu)
+#define arm_release_timer_res(t, i) arm_release_timer_res_on((t), (i), NO_CPU)
+static void arm_release_timer_res_on(rt_domain_t *_rt, int interrupt_release, int target_cpu)
 #else
-static void arm_release_timer_res(rt_domain_t *_rt)
+static void arm_release_timer_res(rt_domain_t *_rt, int interrupt_release)
 #endif
 {
 	rt_domain_t *rt = _rt;
@@ -392,7 +392,7 @@ static void arm_release_timer_res(rt_domain_t *_rt)
 	/* To avoid arming the timer multiple times, we only let the
 	 * owner of the new earliest release heap do the arming.
 	 */
-	if (rh == res->rel_heap) {
+	if (rh == res->rel_heap && interrupt_release) {
 		VTRACE("arming timer 0x%p\n", &rh->timer);
 
 		if (!hrtimer_is_hres_active(&rh->timer)) {
@@ -425,6 +425,7 @@ static void arm_release_timer_res(rt_domain_t *_rt)
 		VTRACE("timer 0x%p has been armed for earlier time\n", &rh->timer);
 	}
 }
+
 void rt_domain_init(rt_domain_t *rt,
 		bheap_prio_t order,
 		check_resched_needed_t check,
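The new interrupt_release flag lets callers merge queued releases without actually arming the release hrtimer. A plausible pairing, not shown in this hunk, is with the suspend_releases()/resume_releases() entry points the reservation code above relies on; the sketch below is an assumption about intended use, not part of the diff:

/* Hypothetical caller: only arm the timer when the domain is active. */
static void demo_requeue_release(rt_domain_t* rt, int domain_is_active)
{
	arm_release_timer_res(rt, /* interrupt_release = */ domain_is_active);
}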