author | Zelin Tong <ztong@ludwig.cs.unc.edu> | 2020-09-28 06:20:29 -0400 |
---|---|---|
committer | Zelin Tong <ztong@ludwig.cs.unc.edu> | 2020-09-28 06:20:29 -0400 |
commit | 983c9dae37b709efbbfc843ef78dc094c6f47fd2 (patch) | |
tree | ef527a1a050aedafd6a2d07a078c847420dec113 | |
parent | 8d1a90a5e4c7594c20c70fbb9e0ef0c77f2c3d1e (diff) |
Added support for np-sections in the GEDF reservation.
Also includes a WIP of the component allocator. The WIP is not referenced
in the Makefile, so it will not affect compilation.
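For context, here is a minimal user-space sketch of the np-section idea. The types and the `preempt_cpu`/`always_np` names are simplified stand-ins invented for illustration, not the LITMUS^RT API; only the gating logic mirrors the new `preempt()` in gedf_reservation.c below.

```c
#include <stdio.h>

/* Simplified stand-ins for the reservation structures in this commit. */
struct reservation;

struct reservation_ops {
	/* Nonzero iff the reservation is currently in a non-preemptive section. */
	int (*is_np)(struct reservation *res, int cpu);
};

struct reservation {
	struct reservation_ops *ops;
};

/* Mirrors the new preempt() logic: the reschedule IPI is only sent when the
 * scheduled reservation is absent or preemptable; an np-section is allowed
 * to finish (the real code asks the task to exit it via request_exit_np). */
static void preempt_cpu(struct reservation *scheduled, int cpu)
{
	if (!scheduled || !scheduled->ops->is_np(scheduled, cpu))
		printf("cpu %d: reschedule\n", cpu);
	else
		printf("cpu %d: np-section in progress, deferring\n", cpu);
}

static int always_np(struct reservation *res, int cpu) { return 1; }

int main(void)
{
	struct reservation_ops ops = { .is_np = always_np };
	struct reservation res = { .ops = &ops };
	preempt_cpu(&res, 0);  /* deferred */
	preempt_cpu(NULL, 1);  /* reschedule */
	return 0;
}
```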
-rw-r--r-- | include/litmus/reservations/ext_reservation.h | 36 | ||||
-rw-r--r-- | include/litmus/reservations/gedf_reservation.h | 7 | ||||
-rw-r--r-- | include/litmus/reservations/table_driven_ext_reservation.h | 23 | ||||
-rw-r--r-- | include/litmus/rt_domain.h | 7 | ||||
-rw-r--r-- | litmus/reservations/Makefile | 1 | ||||
-rw-r--r-- | litmus/reservations/gedf_reservation.c | 169 | ||||
-rw-r--r-- | litmus/reservations/table_driven_ext_reservation.c | 256 | ||||
-rw-r--r-- | litmus/rt_domain.c | 8 | ||||
-rw-r--r-- | litmus/sched_ext_res.c | 52 |
9 files changed, 457 insertions, 102 deletions
diff --git a/include/litmus/reservations/ext_reservation.h b/include/litmus/reservations/ext_reservation.h index 40c9cd9e098e..97b8bad916df 100644 --- a/include/litmus/reservations/ext_reservation.h +++ b/include/litmus/reservations/ext_reservation.h | |||
@@ -25,6 +25,7 @@ typedef void (*drain_budget_t) ( | |||
25 | 25 | ||
26 | typedef struct task_struct* (*dispatch_client_t) ( | 26 | typedef struct task_struct* (*dispatch_client_t) ( |
27 | struct reservation *reservation, | 27 | struct reservation *reservation, |
28 | lt_t* time_slice, | ||
28 | int cpu | 29 | int cpu |
29 | ); | 30 | ); |
30 | 31 | ||
@@ -40,8 +41,15 @@ typedef void (*on_preempt_t) ( | |||
40 | int cpu | 41 | int cpu |
41 | ); | 42 | ); |
42 | 43 | ||
44 | typedef int (*is_np_t) ( | ||
45 | struct reservation *reservation, | ||
46 | int cpu | ||
47 | ); | ||
48 | |||
43 | /* Destructor: called before scheduler is deactivated. */ | 49 | /* Destructor: called before scheduler is deactivated. */ |
44 | typedef void (*shutdown_t)(struct reservation *reservation); | 50 | typedef void (*shutdown_t)( |
51 | struct reservation *reservation | ||
52 | ); | ||
45 | 53 | ||
46 | struct reservation_ops { | 54 | struct reservation_ops { |
47 | drain_budget_t drain_budget; | 55 | drain_budget_t drain_budget; |
@@ -49,6 +57,7 @@ struct reservation_ops { | |||
49 | dispatch_client_t dispatch_client; | 57 | dispatch_client_t dispatch_client; |
50 | on_schedule_t on_schedule; | 58 | on_schedule_t on_schedule; |
51 | on_preempt_t on_preempt; | 59 | on_preempt_t on_preempt; |
60 | is_np_t is_np; | ||
52 | shutdown_t shutdown; | 61 | shutdown_t shutdown; |
53 | }; | 62 | }; |
54 | 63 | ||
@@ -58,6 +67,7 @@ struct reservation { | |||
58 | /* exact meaning defined by impl. */ | 67 | /* exact meaning defined by impl. */ |
59 | lt_t priority; | 68 | lt_t priority; |
60 | lt_t replenishment_time; | 69 | lt_t replenishment_time; |
70 | lt_t cur_budget; | ||
61 | 71 | ||
62 | /* budget stats */ | 72 | /* budget stats */ |
63 | lt_t budget_consumed; /* how much budget consumed in this allocation cycle? */ | 73 | lt_t budget_consumed; /* how much budget consumed in this allocation cycle? */ |
@@ -93,6 +103,7 @@ typedef void (*env_update_time_t) ( | |||
93 | 103 | ||
94 | typedef struct task_struct* (*env_dispatch_t) ( | 104 | typedef struct task_struct* (*env_dispatch_t) ( |
95 | struct reservation_environment* env, | 105 | struct reservation_environment* env, |
106 | lt_t* time_slice, | ||
96 | int cpu); | 107 | int cpu); |
97 | 108 | ||
98 | typedef void (*env_resume_t) ( | 109 | typedef void (*env_resume_t) ( |
@@ -103,12 +114,24 @@ typedef void (*env_suspend_t) ( | |||
103 | struct reservation_environment* env, | 114 | struct reservation_environment* env, |
104 | int cpu); | 115 | int cpu); |
105 | 116 | ||
106 | typedef void (*env_add_res_t) (struct reservation_environment* env, | 117 | typedef void (*env_add_res_t) ( |
107 | struct reservation* res); | 118 | struct reservation_environment* env, |
119 | struct reservation* res, | ||
120 | int cpu); | ||
108 | 121 | ||
109 | typedef void (*env_remove_res_t) (struct reservation_environment* env, | 122 | typedef void (*env_remove_res_t) ( |
123 | struct reservation_environment* env, | ||
110 | struct reservation* res, | 124 | struct reservation* res, |
111 | int complete); | 125 | int complete, |
126 | int cpu); | ||
127 | |||
128 | typedef struct reservation* (*env_find_res_t) ( | ||
129 | struct reservation_environment* env, | ||
130 | int id); | ||
131 | |||
132 | typedef int (*env_is_np_t) ( | ||
133 | struct reservation_environment* env, | ||
134 | int cpu); | ||
112 | 135 | ||
113 | typedef void (*env_shutdown_t) ( | 136 | typedef void (*env_shutdown_t) ( |
114 | struct reservation_environment* env); | 137 | struct reservation_environment* env); |
@@ -120,12 +143,15 @@ struct reservation_environment_ops { | |||
120 | env_suspend_t suspend; | 143 | env_suspend_t suspend; |
121 | env_add_res_t add_res; | 144 | env_add_res_t add_res; |
122 | env_remove_res_t remove_res; | 145 | env_remove_res_t remove_res; |
146 | env_find_res_t find_res_by_id; | ||
147 | env_is_np_t is_np; | ||
123 | env_shutdown_t shutdown; | 148 | env_shutdown_t shutdown; |
124 | }; | 149 | }; |
125 | 150 | ||
126 | struct reservation_environment { | 151 | struct reservation_environment { |
127 | struct reservation_environment_ops* ops; | 152 | struct reservation_environment_ops* ops; |
128 | struct reservation* res; | 153 | struct reservation* res; |
154 | struct list_head all_reservations; | ||
129 | }; | 155 | }; |
130 | 156 | ||
131 | static inline void env_to_res_couple( | 157 | static inline void env_to_res_couple( |
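The signature changes above thread a `time_slice` out-parameter through the dispatch chain. Below is a hedged sketch of that contract, with invented stand-in types (`lt_t` as a plain integer, `TS_UNLIMITED`, `dispatch_level`) in place of the kernel's:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lt_t;               /* stand-in for the LITMUS^RT time type */
#define TS_UNLIMITED UINT64_MAX      /* stand-in for the ULLONG_MAX sentinel */

/* Each level of the dispatch chain may only shrink *time_slice to its own
 * remaining budget, so the top-level caller ends up with the earliest point
 * at which some budget in the hierarchy expires. */
static void dispatch_level(lt_t cur_budget, lt_t *time_slice)
{
	if (*time_slice > cur_budget)
		*time_slice = cur_budget;
}

int main(void)
{
	lt_t time_slice = TS_UNLIMITED;
	dispatch_level(5000000, &time_slice);  /* outer reservation: 5 ms left */
	dispatch_level(2000000, &time_slice);  /* inner reservation: 2 ms left */
	printf("arm budget timer %llu ns from now\n",
	       (unsigned long long)time_slice);
	return 0;
}
```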
diff --git a/include/litmus/reservations/gedf_reservation.h b/include/litmus/reservations/gedf_reservation.h index 2adaaabbcaf4..e39d632262a3 100644 --- a/include/litmus/reservations/gedf_reservation.h +++ b/include/litmus/reservations/gedf_reservation.h | |||
@@ -10,15 +10,14 @@ | |||
10 | /* ************************************************************************** */ | 10 | /* ************************************************************************** */ |
11 | struct gedf_reservation { | 11 | struct gedf_reservation { |
12 | struct reservation res; | 12 | struct reservation res; |
13 | struct cpu_entry* linked_on; | 13 | struct gedf_cpu_entry* linked_on; |
14 | lt_t cur_budget; | 14 | lt_t cur_budget; |
15 | int will_remove; | 15 | int will_remove; |
16 | int blocked; | 16 | int blocked; |
17 | }; | 17 | }; |
18 | 18 | ||
19 | struct cpu_entry { | 19 | struct gedf_cpu_entry { |
20 | int id; | 20 | int id; |
21 | struct hrtimer timer; | ||
22 | struct bheap_node* hn; | 21 | struct bheap_node* hn; |
23 | struct gedf_reservation* linked; | 22 | struct gedf_reservation* linked; |
24 | struct gedf_reservation* scheduled; | 23 | struct gedf_reservation* scheduled; |
@@ -60,7 +59,7 @@ struct gedf_reservation_environment { | |||
60 | volatile int num_cpus; | 59 | volatile int num_cpus; |
61 | 60 | ||
62 | /* array of gedf cpu entries */ | 61 | /* array of gedf cpu entries */ |
63 | struct cpu_entry cpu_entries[NR_CPUS]; | 62 | struct gedf_cpu_entry cpu_entries[NR_CPUS]; |
64 | 63 | ||
65 | /* used to order cpus for gedf purposes */ | 64 | /* used to order cpus for gedf purposes */ |
66 | struct bheap cpu_heap; | 65 | struct bheap cpu_heap; |
diff --git a/include/litmus/reservations/table_driven_ext_reservation.h b/include/litmus/reservations/table_driven_ext_reservation.h index 51c32eadb419..100b4eea8cba 100644 --- a/include/litmus/reservations/table_driven_ext_reservation.h +++ b/include/litmus/reservations/table_driven_ext_reservation.h | |||
@@ -4,22 +4,14 @@ | |||
4 | #include <litmus/reservations/ext_reservation.h> | 4 | #include <litmus/reservations/ext_reservation.h> |
5 | 5 | ||
6 | /* ************************************************************************** */ | 6 | /* ************************************************************************** */ |
7 | struct cpu_entry { | ||
8 | int id; | ||
9 | struct hrtimer timer; | ||
10 | struct bheap_node* hn; | ||
11 | struct reservation* linked; | ||
12 | struct reservation* scheduled; | ||
13 | }; | ||
14 | |||
15 | struct mtd_reservation { | 7 | struct mtd_reservation { |
16 | struct reservation res; | 8 | struct reservation res; |
9 | volatile int in_env; | ||
17 | lt_t major_cycle; | 10 | lt_t major_cycle; |
18 | unsigned int interval_index[NR_CPUS]; | 11 | unsigned int interval_index[NR_CPUS]; |
19 | unsigned int num_intervals[NR_CPUS]; | 12 | unsigned int num_intervals[NR_CPUS]; |
20 | struct lt_interval* intervals[NR_CPUS]; | 13 | struct lt_interval* intervals[NR_CPUS]; |
21 | 14 | ||
22 | int num_cur_intervals; | ||
23 | struct lt_interval cur_interval[NR_CPUS]; | 15 | struct lt_interval cur_interval[NR_CPUS]; |
24 | lt_t major_cycle_start; | 16 | lt_t major_cycle_start; |
25 | }; | 17 | }; |
@@ -29,15 +21,22 @@ long alloc_mtd_reservation( | |||
29 | struct reservation** _res | 21 | struct reservation** _res |
30 | ); | 22 | ); |
31 | 23 | ||
24 | struct mtd_cpu_entry { | ||
25 | int id; | ||
26 | struct hrtimer timer; | ||
27 | lt_t cur_time; | ||
28 | struct reservation* scheduled; | ||
29 | rt_domain_t domain; | ||
30 | }; | ||
31 | |||
32 | /* environment for scheduling table-driven reservations */ | 32 |
33 | struct td_reservation_environment { | 33 | struct mtd_reservation_environment { |
34 | struct reservation_environment env; | 34 | struct reservation_environment env; |
35 | |||
35 | /* number of active cpus in reservation */ | 36 | /* number of active cpus in reservation */ |
36 | int num_cpus; | 37 | int num_cpus; |
37 | /* array of mtd cpu entries */ | 38 |
38 | struct mtd_cpu_entry cpu_entries[NR_CPUS]; | 39 |
39 | /* smp_processor_id to environment cpu array offset mapping */ | ||
40 | int cpu_mapping[NR_CPUS]; | ||
41 | 40 | ||
42 | /* used to order cpus for gedf purposes */ | 41 | /* used to order cpus for gedf purposes */ |
43 | struct bheap cpu_heap; | 42 | struct bheap cpu_heap; |
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h index 0fabd7e6ea55..45dfb1e2b76f 100644 --- a/include/litmus/rt_domain.h +++ b/include/litmus/rt_domain.h | |||
@@ -67,8 +67,11 @@ struct release_heap { | |||
67 | struct list_head list_head; | 67 | struct list_head list_head; |
68 | }; | 68 | }; |
69 | 69 | ||
70 | void suspend_releases(rt_domain_t* rt); | 70 | /* used to manually release jobs */ |
71 | void resume_releases(rt_domain_t* rt); | 71 | void release_jobs_before_now(rt_domain_t* rt); |
72 | |||
73 | void domain_suspend_releases(rt_domain_t* rt); | ||
74 | void domain_resume_releases(rt_domain_t* rt); | ||
72 | 75 | ||
73 | static inline struct task_struct* __next_ready(rt_domain_t* rt) | 76 | static inline struct task_struct* __next_ready(rt_domain_t* rt) |
74 | { | 77 | { |
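A toy model of the suspend/resume protocol these helpers implement follows. All names except `release_jobs_before_now`, `domain_suspend_releases`, and `domain_resume_releases` are invented for the sketch; the real versions operate on `rt_domain_t` and hrtimers:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lt_t;

/* While an environment runs on no cpu, its release timer is cancelled; on
 * resume, jobs whose release time has already passed are moved to the ready
 * queue before the timer is re-armed for the earliest future release. */
struct toy_domain {
	lt_t pending_release;  /* next queued release, 0 = none */
	int timer_armed;
};

static lt_t now;

static void domain_suspend_releases(struct toy_domain *d)
{
	d->timer_armed = 0;  /* hrtimer_cancel() in the real code */
}

static void release_jobs_before_now(struct toy_domain *d)
{
	if (d->pending_release && d->pending_release <= now) {
		printf("release job due at %llu (now %llu)\n",
		       (unsigned long long)d->pending_release,
		       (unsigned long long)now);
		d->pending_release = 0;
	}
}

static void domain_resume_releases(struct toy_domain *d)
{
	release_jobs_before_now(d);  /* flush overdue releases first */
	if (d->pending_release)
		d->timer_armed = 1;  /* re-arm for the earliest future release */
}

int main(void)
{
	struct toy_domain d = { .pending_release = 100, .timer_armed = 1 };
	domain_suspend_releases(&d);
	now = 150;                   /* time passes while suspended */
	domain_resume_releases(&d);  /* overdue job released immediately */
	return 0;
}
```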
diff --git a/litmus/reservations/Makefile b/litmus/reservations/Makefile index fcb3685ae72e..cda2b1605e65 100644 --- a/litmus/reservations/Makefile +++ b/litmus/reservations/Makefile | |||
@@ -4,4 +4,3 @@ obj-y += table-driven.o | |||
4 | 4 | ||
5 | obj-y += ext_reservation.o | 5 | obj-y += ext_reservation.o |
6 | obj-y += gedf_reservation.o | 6 | obj-y += gedf_reservation.o |
7 | obj-y += task_reservation.o | ||
diff --git a/litmus/reservations/gedf_reservation.c b/litmus/reservations/gedf_reservation.c index aa5eb9d4b821..2b0d37b4d52c 100644 --- a/litmus/reservations/gedf_reservation.c +++ b/litmus/reservations/gedf_reservation.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <litmus/bheap.h> | 8 | #include <litmus/bheap.h> |
9 | #include <litmus/rt_domain.h> | 9 | #include <litmus/rt_domain.h> |
10 | #include <litmus/jobs.h> | 10 | #include <litmus/jobs.h> |
11 | #include <litmus/np.h> | ||
11 | #include <litmus/sched_trace.h> | 12 | #include <litmus/sched_trace.h> |
12 | #include <litmus/debug_trace.h> | 13 | #include <litmus/debug_trace.h> |
13 | #include <litmus/reservations/gedf_reservation.h> | 14 | #include <litmus/reservations/gedf_reservation.h> |
@@ -55,21 +56,21 @@ static int edf_ready_order(struct bheap_node* a, struct bheap_node* b) | |||
55 | */ | 56 | */ |
56 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | 57 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) |
57 | { | 58 | { |
58 | struct cpu_entry *a, *b; | 59 | struct gedf_cpu_entry *a, *b; |
59 | a = _a->value; | 60 | a = _a->value; |
60 | b = _b->value; | 61 | b = _b->value; |
61 | /* use higher prio here because prio is deadline value */ | 62 | /* use higher prio here because prio is deadline value */ |
62 | return higher_prio(&a->linked->res, &b->linked->res); | 63 | return higher_prio(&a->linked->res, &b->linked->res); |
63 | } | 64 | } |
64 | 65 | ||
65 | static void update_cpu_position(struct cpu_entry* entry, struct bheap* cpu_heap) | 66 | static void update_cpu_position(struct gedf_cpu_entry* entry, struct bheap* cpu_heap) |
66 | { | 67 | { |
67 | if (likely(bheap_node_in_heap(entry->hn))) | 68 | if (likely(bheap_node_in_heap(entry->hn))) |
68 | bheap_delete(cpu_lower_prio, cpu_heap, entry->hn); | 69 | bheap_delete(cpu_lower_prio, cpu_heap, entry->hn); |
69 | bheap_insert(cpu_lower_prio, cpu_heap, entry->hn); | 70 | bheap_insert(cpu_lower_prio, cpu_heap, entry->hn); |
70 | } | 71 | } |
71 | 72 | ||
72 | static struct cpu_entry* lowest_prio_cpu(struct bheap* cpu_heap) | 73 | static struct gedf_cpu_entry* lowest_prio_cpu(struct bheap* cpu_heap) |
73 | { | 74 | { |
74 | struct bheap_node* hn; | 75 | struct bheap_node* hn; |
75 | hn = bheap_peek(cpu_lower_prio, cpu_heap); | 76 | hn = bheap_peek(cpu_lower_prio, cpu_heap); |
@@ -98,15 +99,10 @@ static int edf_preemption_needed( | |||
98 | 99 | ||
99 | /* ******************************************************************************** */ | 100 | /* ******************************************************************************** */ |
100 | //TODO: add support for checking non-preemptivity | 101 | //TODO: add support for checking non-preemptivity |
101 | static void preempt(struct cpu_entry* entry) | 102 | static void preempt(struct gedf_cpu_entry* entry) |
102 | { | 103 | { |
103 | litmus_reschedule(entry->id); | 104 | if (!entry->scheduled || !entry->scheduled->res.ops->is_np(&entry->scheduled->res, entry->id)) |
104 | } | 105 | litmus_reschedule(entry->id); |
105 | |||
106 | static enum hrtimer_restart timer_callback(struct hrtimer* timer) | ||
107 | { | ||
108 | litmus_reschedule_local(); | ||
109 | return HRTIMER_NORESTART; | ||
110 | } | 106 | } |
111 | 107 | ||
112 | static void requeue( | 108 | static void requeue( |
@@ -126,7 +122,7 @@ static void requeue( | |||
126 | static void link_task_to_cpu( | 122 | static void link_task_to_cpu( |
127 | struct gedf_reservation_environment* gedf_env, | 123 | struct gedf_reservation_environment* gedf_env, |
128 | struct gedf_reservation* linked, | 124 | struct gedf_reservation* linked, |
129 | struct cpu_entry* entry) | 125 | struct gedf_cpu_entry* entry) |
130 | { | 126 | { |
131 | 127 | ||
132 | if (entry->linked) | 128 | if (entry->linked) |
@@ -155,13 +151,13 @@ static void unlink( | |||
155 | static void check_for_preemptions(struct gedf_reservation_environment* gedf_env) | 151 | static void check_for_preemptions(struct gedf_reservation_environment* gedf_env) |
156 | { | 152 | { |
157 | struct gedf_reservation* gedf_res; | 153 | struct gedf_reservation* gedf_res; |
158 | struct cpu_entry* last; | 154 | struct gedf_cpu_entry* last; |
159 | 155 | ||
160 | for (last = lowest_prio_cpu(&gedf_env->cpu_heap); | 156 | for (last = lowest_prio_cpu(&gedf_env->cpu_heap); |
161 | edf_preemption_needed(gedf_env, last->linked); | 157 | edf_preemption_needed(gedf_env, last->linked); |
162 | last = lowest_prio_cpu(&gedf_env->cpu_heap)) { | 158 | last = lowest_prio_cpu(&gedf_env->cpu_heap)) { |
163 | gedf_res = (struct gedf_reservation*)__take_ready_res(&gedf_env->domain); | 159 | gedf_res = (struct gedf_reservation*)__take_ready_res(&gedf_env->domain); |
164 | if (last->linked && last->linked->cur_budget) | 160 | if (last->linked && last->linked->res.cur_budget) |
165 | requeue(gedf_env, last->linked); | 161 | requeue(gedf_env, last->linked); |
166 | link_task_to_cpu(gedf_env, gedf_res, last); | 162 | link_task_to_cpu(gedf_env, gedf_res, last); |
167 | preempt(last); | 163 | preempt(last); |
@@ -177,6 +173,27 @@ static void gedf_shutdown( | |||
177 | kfree(res); | 173 | kfree(res); |
178 | } | 174 | } |
179 | 175 | ||
176 | static int gedf_is_np( | ||
177 | struct reservation *res, | ||
178 | int cpu) | ||
179 | { | ||
180 | return res->env->ops->is_np(res->env, cpu); | ||
181 | } | ||
182 | |||
183 | static int gedf_task_is_np( | ||
184 | struct reservation *res, | ||
185 | int cpu) | ||
186 | { | ||
187 | struct task_struct* t = ((struct gedf_task_reservation*)res)->task; | ||
188 | if (is_user_np(t)) { | ||
189 | request_exit_np(t); | ||
190 | return 1; | ||
191 | } else if (is_kernel_np(t)) | ||
192 | return 1; | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
180 | static void gedf_task_shutdown( | 197 | static void gedf_task_shutdown( |
181 | struct reservation *res) | 198 | struct reservation *res) |
182 | { | 199 | { |
@@ -200,13 +217,15 @@ static void gedf_on_schedule( | |||
200 | 217 | ||
201 | static struct task_struct* gedf_dispatch_client( | 218 | static struct task_struct* gedf_dispatch_client( |
202 | struct reservation* res, | 219 | struct reservation* res, |
220 | lt_t* time_slice, | ||
203 | int cpu) | 221 | int cpu) |
204 | { | 222 | { |
205 | return res->env->ops->dispatch(res->env, cpu); | 223 | return res->env->ops->dispatch(res->env, time_slice, cpu); |
206 | } | 224 | } |
207 | 225 | ||
208 | static struct task_struct* gedf_task_dispatch_client( | 226 | static struct task_struct* gedf_task_dispatch_client( |
209 | struct reservation* res, | 227 | struct reservation* res, |
228 | lt_t* time_slice, | ||
210 | int cpu) | 229 | int cpu) |
211 | { | 230 | { |
212 | return ((struct gedf_task_reservation*)res)->task; | 231 | return ((struct gedf_task_reservation*)res)->task; |
@@ -219,7 +238,7 @@ static void gedf_replenish_budget( | |||
219 | struct gedf_container_reservation* gedf_cont_res = | 238 | struct gedf_container_reservation* gedf_cont_res = |
220 | (struct gedf_container_reservation*)res; | 239 | (struct gedf_container_reservation*)res; |
221 | res->budget_consumed = 0; | 240 | res->budget_consumed = 0; |
222 | gedf_cont_res->gedf_res.cur_budget = gedf_cont_res->max_budget; | 241 | res->cur_budget = gedf_cont_res->max_budget; |
223 | res->replenishment_time += gedf_cont_res->period; | 242 | res->replenishment_time += gedf_cont_res->period; |
224 | res->priority = res->replenishment_time + gedf_cont_res->relative_deadline; | 243 | res->priority = res->replenishment_time + gedf_cont_res->relative_deadline; |
225 | } | 244 | } |
@@ -229,7 +248,6 @@ static void gedf_task_replenish_budget( | |||
229 | int cpu) | 248 | int cpu) |
230 | { | 249 | { |
231 | struct task_struct* t = ((struct gedf_task_reservation*)res)->task; | 250 | struct task_struct* t = ((struct gedf_task_reservation*)res)->task; |
232 | struct gedf_reservation* gedf_res = container_of(res, struct gedf_reservation, res); | ||
233 | 251 | ||
234 | if (is_completed(t)) { | 252 | if (is_completed(t)) { |
235 | sched_trace_task_completion(t, 0); | 253 | sched_trace_task_completion(t, 0); |
@@ -245,7 +263,7 @@ static void gedf_task_replenish_budget( | |||
245 | TRACE_TASK(t, "overrun budget!\n"); | 263 | TRACE_TASK(t, "overrun budget!\n"); |
246 | } | 264 | } |
247 | res->budget_consumed = 0; | 265 | res->budget_consumed = 0; |
248 | gedf_res->cur_budget = get_exec_cost(t); | 266 | res->cur_budget = get_exec_cost(t); |
249 | } | 267 | } |
250 | 268 | ||
251 | static void gedf_drain_budget( | 269 | static void gedf_drain_budget( |
@@ -253,12 +271,10 @@ static void gedf_drain_budget( | |||
253 | lt_t how_much, | 271 | lt_t how_much, |
254 | int cpu) | 272 | int cpu) |
255 | { | 273 | { |
256 | struct gedf_reservation* gedf_res = container_of(res, struct gedf_reservation, res); | 274 | if (how_much > res->cur_budget) |
257 | 275 | res->cur_budget = 0; | |
258 | if (how_much > gedf_res->cur_budget) | ||
259 | gedf_res->cur_budget = 0; | ||
260 | else | 276 | else |
261 | gedf_res->cur_budget -= how_much; | 277 | res->cur_budget -= how_much; |
262 | res->budget_consumed += how_much; | 278 | res->budget_consumed += how_much; |
263 | res->budget_consumed_total += how_much; | 279 | res->budget_consumed_total += how_much; |
264 | 280 | ||
@@ -271,12 +287,11 @@ static void gedf_task_drain_budget( | |||
271 | int cpu) | 287 | int cpu) |
272 | { | 288 | { |
273 | struct task_struct* t = ((struct gedf_task_reservation*)res)->task; | 289 | struct task_struct* t = ((struct gedf_task_reservation*)res)->task; |
274 | struct gedf_reservation* gedf_res = container_of(res, struct gedf_reservation, res); | ||
275 | 290 | ||
276 | if (how_much > gedf_res->cur_budget || is_completed(t)) | 291 | if (how_much > res->cur_budget || is_completed(t)) |
277 | gedf_res->cur_budget = 0; | 292 | res->cur_budget = 0; |
278 | else | 293 | else |
279 | gedf_res->cur_budget -= how_much; | 294 | res->cur_budget -= how_much; |
280 | res->budget_consumed += how_much; | 295 | res->budget_consumed += how_much; |
281 | res->budget_consumed_total += how_much; | 296 | res->budget_consumed_total += how_much; |
282 | } | 297 | } |
@@ -288,6 +303,7 @@ static struct reservation_ops gedf_cont_ops = | |||
288 | .dispatch_client = gedf_dispatch_client, | 303 | .dispatch_client = gedf_dispatch_client, |
289 | .on_schedule = gedf_on_schedule, | 304 | .on_schedule = gedf_on_schedule, |
290 | .on_preempt = gedf_on_preempt, | 305 | .on_preempt = gedf_on_preempt, |
306 | .is_np = gedf_is_np, | ||
291 | .shutdown = gedf_shutdown | 307 | .shutdown = gedf_shutdown |
292 | }; | 308 | }; |
293 | 309 | ||
@@ -296,6 +312,7 @@ static struct reservation_ops gedf_task_ops = | |||
296 | .drain_budget = gedf_task_drain_budget, | 312 | .drain_budget = gedf_task_drain_budget, |
297 | .replenish_budget = gedf_task_replenish_budget, | 313 | .replenish_budget = gedf_task_replenish_budget, |
298 | .dispatch_client = gedf_task_dispatch_client, | 314 | .dispatch_client = gedf_task_dispatch_client, |
315 | .is_np = gedf_task_is_np, | ||
299 | .shutdown = gedf_task_shutdown | 316 | .shutdown = gedf_task_shutdown |
300 | }; | 317 | }; |
301 | 318 | ||
@@ -347,21 +364,17 @@ static void gedf_env_shutdown( | |||
347 | { | 364 | { |
348 | struct gedf_reservation_environment* gedf_env; | 365 | struct gedf_reservation_environment* gedf_env; |
349 | struct reservation* res; | 366 | struct reservation* res; |
350 | int i; | ||
351 | unsigned long flags; | 367 | unsigned long flags; |
352 | 368 | ||
353 | gedf_env = container_of(env, struct gedf_reservation_environment, env); | 369 | gedf_env = container_of(env, struct gedf_reservation_environment, env); |
354 | 370 | ||
355 | suspend_releases(&gedf_env->domain); | 371 | domain_suspend_releases(&gedf_env->domain); |
356 | 372 | ||
357 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); | 373 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); |
358 | /* cancel all budget tracking hrtimers */ | 374 | |
359 | for_each_online_cpu(i) { | ||
360 | hrtimer_cancel(&gedf_env->cpu_entries[i].timer); | ||
361 | } | ||
362 | /* call shutdown on all scheduled reservations */ | 375 | /* call shutdown on all scheduled reservations */ |
363 | while (!list_empty(&gedf_env->all_reservations)) { | 376 | while (!list_empty(&env->all_reservations)) { |
364 | res = list_first_entry(&gedf_env->all_reservations, | 377 | res = list_first_entry(&env->all_reservations, |
365 | struct reservation, all_list); | 378 | struct reservation, all_list); |
366 | list_del(&res->all_list); | 379 | list_del(&res->all_list); |
367 | res->ops->shutdown(res); | 380 | res->ops->shutdown(res); |
@@ -372,12 +385,37 @@ static void gedf_env_shutdown( | |||
372 | kfree(env); | 385 | kfree(env); |
373 | } | 386 | } |
374 | 387 | ||
388 | static int gedf_env_is_np( | ||
389 | struct reservation_environment* env, | ||
390 | int cpu) | ||
391 | { | ||
392 | struct gedf_reservation_environment* gedf_env = | ||
393 | container_of(env, struct gedf_reservation_environment, env); | ||
394 | struct gedf_reservation* scheduled = | ||
395 | gedf_env->cpu_entries[cpu].scheduled; | ||
396 | return scheduled && scheduled->res.ops->is_np(&scheduled->res, cpu); | ||
397 | } | ||
398 | |||
399 | static struct reservation* gedf_find_res_by_id( | ||
400 | struct reservation_environment* env, | ||
401 | int id) | ||
402 | { | ||
403 | struct reservation* res; | ||
404 | list_for_each_entry(res, &env->all_reservations, all_list) { | ||
405 | if (res->id == id) | ||
406 | return res; | ||
407 | } | ||
408 | return NULL; | ||
409 | } | ||
410 | |||
375 | /* This assumes that it is only called from res itself requesting to be removed | 411 |
412 | * This WILL cause the rt task to become lost if res is a scheduling entity | ||
376 | */ | 413 | */ |
377 | static void gedf_env_remove_res( | 414 | static void gedf_env_remove_res( |
378 | struct reservation_environment* env, | 415 | struct reservation_environment* env, |
379 | struct reservation* res, | 416 | struct reservation* res, |
380 | int complete) | 417 | int complete, |
418 | int cpu) | ||
381 | { | 419 | { |
382 | struct gedf_reservation_environment* gedf_env; | 420 | struct gedf_reservation_environment* gedf_env; |
383 | struct gedf_reservation* gedf_res; | 421 | struct gedf_reservation* gedf_res; |
@@ -406,7 +444,8 @@ static void gedf_env_remove_res( | |||
406 | 444 | ||
407 | static void gedf_env_add_res( | 445 | static void gedf_env_add_res( |
408 | struct reservation_environment* env, | 446 | struct reservation_environment* env, |
409 | struct reservation* res) | 447 | struct reservation* res, |
448 | int cpu) | ||
410 | { | 449 | { |
411 | struct gedf_reservation_environment* gedf_env; | 450 | struct gedf_reservation_environment* gedf_env; |
412 | struct gedf_reservation* gedf_res; | 451 | struct gedf_reservation* gedf_res; |
@@ -420,7 +459,7 @@ static void gedf_env_add_res( | |||
420 | gedf_res->blocked = 0; | 459 | gedf_res->blocked = 0; |
421 | 460 | ||
422 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); | 461 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); |
423 | list_add(&gedf_res->res.all_list, &gedf_env->all_reservations); | 462 | list_add_tail(&gedf_res->res.all_list, &env->all_reservations); |
424 | requeue(gedf_env, gedf_res); | 463 | requeue(gedf_env, gedf_res); |
425 | check_for_preemptions(gedf_env); | 464 | check_for_preemptions(gedf_env); |
426 | raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags); | 465 | raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags); |
@@ -435,7 +474,7 @@ static void gedf_env_suspend( | |||
435 | int cpu) | 474 | int cpu) |
436 | { | 475 | { |
437 | struct gedf_reservation_environment* gedf_env; | 476 | struct gedf_reservation_environment* gedf_env; |
438 | struct cpu_entry* entry; | 477 | struct gedf_cpu_entry* entry; |
439 | unsigned long flags; | 478 | unsigned long flags; |
440 | 479 | ||
441 | gedf_env = container_of(env, struct gedf_reservation_environment, env); | 480 | gedf_env = container_of(env, struct gedf_reservation_environment, env); |
@@ -443,6 +482,9 @@ static void gedf_env_suspend( | |||
443 | 482 | ||
444 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); | 483 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); |
445 | 484 | ||
485 | //TODO: More Graceful way to handle forbidden zone violation? | ||
486 | BUG_ON(env->ops->is_np(env, cpu)); | ||
487 | |||
446 | gedf_env->num_cpus--; | 488 | gedf_env->num_cpus--; |
447 | /* on env suspension, we need to preempt scheduled tasks, and unlink linked tasks */ | 489 | /* on env suspension, we need to preempt scheduled tasks, and unlink linked tasks */ |
448 | if (entry->linked) { | 490 | if (entry->linked) { |
@@ -459,9 +501,7 @@ static void gedf_env_suspend( | |||
459 | 501 | ||
460 | /* suspends rt_domain releases when the last core of env is preempted */ | 502 | /* suspends rt_domain releases when the last core of env is preempted */ |
461 | if (!gedf_env->num_cpus) | 503 | if (!gedf_env->num_cpus) |
462 | suspend_releases(&gedf_env->domain); | 504 | domain_suspend_releases(&gedf_env->domain); |
463 | |||
464 | hrtimer_try_to_cancel(&entry->timer); | ||
465 | } | 505 | } |
466 | 506 | ||
467 | static void gedf_env_resume( | 507 | static void gedf_env_resume( |
@@ -469,7 +509,7 @@ static void gedf_env_resume( | |||
469 | int cpu) | 509 | int cpu) |
470 | { | 510 | { |
471 | struct gedf_reservation_environment* gedf_env; | 511 | struct gedf_reservation_environment* gedf_env; |
472 | struct cpu_entry* entry; | 512 | struct gedf_cpu_entry* entry; |
473 | unsigned long flags; | 513 | unsigned long flags; |
474 | 514 | ||
475 | gedf_env = container_of(env, struct gedf_reservation_environment, env); | 515 | gedf_env = container_of(env, struct gedf_reservation_environment, env); |
@@ -477,7 +517,7 @@ static void gedf_env_resume( | |||
477 | 517 | ||
478 | /* resumes rt_domain releases when the first core of env resumes execution */ | 518 | /* resumes rt_domain releases when the first core of env resumes execution */ |
479 | if (!gedf_env->num_cpus) | 519 | if (!gedf_env->num_cpus) |
480 | resume_releases(&gedf_env->domain); | 520 | domain_resume_releases(&gedf_env->domain); |
481 | 521 | ||
482 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); | 522 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); |
483 | 523 | ||
@@ -490,12 +530,14 @@ static void gedf_env_resume( | |||
490 | 530 | ||
491 | static struct task_struct* gedf_env_dispatch( | 531 | static struct task_struct* gedf_env_dispatch( |
492 | struct reservation_environment* env, | 532 | struct reservation_environment* env, |
533 | lt_t* time_slice, | ||
493 | int cpu) | 534 | int cpu) |
494 | { | 535 | { |
495 | struct gedf_reservation_environment* gedf_env; | 536 | struct gedf_reservation_environment* gedf_env; |
496 | struct cpu_entry* entry; | 537 | struct gedf_cpu_entry* entry; |
497 | struct task_struct* next = NULL; | 538 | struct task_struct* next = NULL; |
498 | unsigned long flags; | 539 | unsigned long flags; |
540 | int np = 0; | ||
499 | 541 | ||
500 | gedf_env = container_of(env, struct gedf_reservation_environment, env); | 542 | gedf_env = container_of(env, struct gedf_reservation_environment, env); |
501 | entry = &gedf_env->cpu_entries[cpu]; | 543 | entry = &gedf_env->cpu_entries[cpu]; |
@@ -503,14 +545,18 @@ static struct task_struct* gedf_env_dispatch( | |||
503 | BUG_ON(!bheap_node_in_heap(entry->hn)); | 545 | BUG_ON(!bheap_node_in_heap(entry->hn)); |
504 | BUG_ON(entry->id != cpu); | 546 | BUG_ON(entry->id != cpu); |
505 | 547 | ||
548 | if (entry->scheduled) | ||
549 | np = entry->scheduled->res.ops->is_np(&entry->scheduled->res, cpu); | ||
550 | |||
506 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); | 551 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); |
507 | 552 | ||
553 | |||
508 | /* update linked if linked for this cpu is empty */ | 554 | /* update linked if linked for this cpu is empty */ |
509 | if (!entry->linked) | 555 | if (!entry->linked) |
510 | check_for_preemptions(gedf_env); | 556 | check_for_preemptions(gedf_env); |
511 | 557 | ||
512 | /* if linked and scheduled differ, preempt and schedule accordingly */ | 558 | /* if linked and scheduled differ, preempt and schedule accordingly */ |
513 | if (entry->scheduled != entry->linked) { | 559 | if (!np && entry->scheduled != entry->linked) { |
514 | if (entry->scheduled && entry->scheduled->res.ops->on_preempt) | 560 | if (entry->scheduled && entry->scheduled->res.ops->on_preempt) |
515 | entry->scheduled->res.ops->on_preempt(&entry->scheduled->res, cpu); | 561 | entry->scheduled->res.ops->on_preempt(&entry->scheduled->res, cpu); |
516 | if (entry->linked && entry->linked->res.ops->on_schedule) | 562 | if (entry->linked && entry->linked->res.ops->on_schedule) |
@@ -519,18 +565,14 @@ static struct task_struct* gedf_env_dispatch( | |||
519 | } | 565 | } |
520 | raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags); | 566 | raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags); |
521 | 567 | ||
522 | //TODO: Think about possible problems with entry->scheduled being changed | ||
523 | if (entry->scheduled) { | 568 | if (entry->scheduled) { |
524 | /* let scheduled reservation decide what runs next */ | 569 | /* let scheduled reservation decide what runs next */ |
525 | next = entry->scheduled->res.ops->dispatch_client(&entry->scheduled->res, cpu); | 570 | next = entry->scheduled->res.ops->dispatch_client(&entry->scheduled->res, time_slice, cpu); |
526 | /* set timer for budget expiration */ | 571 | *time_slice = (*time_slice > entry->scheduled->res.cur_budget) ? |
527 | hrtimer_start(&entry->timer, | 572 | entry->scheduled->res.cur_budget : *time_slice; |
528 | ns_to_ktime(litmus_clock() + entry->scheduled->cur_budget), | 573 | } else { |
529 | HRTIMER_MODE_ABS_PINNED); | 574 | *time_slice = ULLONG_MAX; |
530 | } | 575 | } |
531 | /* cancel budget timer when no reservation is set to be scheduled */ | ||
532 | else | ||
533 | hrtimer_try_to_cancel(&entry->timer); | ||
534 | 576 | ||
535 | return next; | 577 | return next; |
536 | } | 578 | } |
@@ -541,7 +583,7 @@ static void gedf_env_update_time( | |||
541 | int cpu) | 583 | int cpu) |
542 | { | 584 | { |
543 | struct gedf_reservation_environment* gedf_env; | 585 | struct gedf_reservation_environment* gedf_env; |
544 | struct cpu_entry* entry; | 586 | struct gedf_cpu_entry* entry; |
545 | unsigned long flags; | 587 | unsigned long flags; |
546 | 588 | ||
547 | gedf_env = container_of(env, struct gedf_reservation_environment, env); | 589 | gedf_env = container_of(env, struct gedf_reservation_environment, env); |
@@ -561,10 +603,11 @@ static void gedf_env_update_time( | |||
561 | /* assumed to already been unlinked by whatever set will_remove */ | 603 | /* assumed to already been unlinked by whatever set will_remove */ |
562 | entry->scheduled->res.ops->shutdown(&entry->scheduled->res); | 604 | entry->scheduled->res.ops->shutdown(&entry->scheduled->res); |
563 | entry->scheduled = NULL; | 605 | entry->scheduled = NULL; |
564 | } else if (!entry->scheduled->cur_budget) { | 606 | } else if (!entry->scheduled->res.cur_budget) { |
565 | entry->scheduled->res.ops->replenish_budget(&entry->scheduled->res, cpu); | 607 | entry->scheduled->res.ops->replenish_budget(&entry->scheduled->res, cpu); |
566 | /* unlink and requeue if not blocked */ | 608 | /* unlink and requeue if not blocked and not np*/ |
567 | if (!entry->scheduled->blocked) { | 609 | if (!entry->scheduled->blocked && |
610 | !entry->scheduled->res.ops->is_np(&entry->scheduled->res, cpu)) { | ||
568 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); | 611 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); |
569 | unlink(gedf_env, entry->scheduled); | 612 | unlink(gedf_env, entry->scheduled); |
570 | requeue(gedf_env, entry->scheduled); | 613 | requeue(gedf_env, entry->scheduled); |
@@ -594,6 +637,8 @@ static struct reservation_environment_ops gedf_env_ops = { | |||
594 | .suspend = gedf_env_suspend, | 637 | .suspend = gedf_env_suspend, |
595 | .add_res = gedf_env_add_res, | 638 | .add_res = gedf_env_add_res, |
596 | .remove_res = gedf_env_remove_res, | 639 | .remove_res = gedf_env_remove_res, |
640 | .find_res_by_id = gedf_find_res_by_id, | ||
641 | .is_np = gedf_env_is_np, | ||
597 | .shutdown = gedf_env_shutdown | 642 | .shutdown = gedf_env_shutdown |
598 | }; | 643 | }; |
599 | 644 | ||
@@ -612,17 +657,13 @@ long alloc_gedf_reservation_environment( | |||
612 | 657 | ||
613 | /* set environment callback actions */ | 658 | /* set environment callback actions */ |
614 | gedf_env->env.ops = &gedf_env_ops; | 659 | gedf_env->env.ops = &gedf_env_ops; |
660 | INIT_LIST_HEAD(&gedf_env->env.all_reservations); | ||
615 | 661 | ||
616 | INIT_LIST_HEAD(&gedf_env->all_reservations); | ||
617 | gedf_env->num_cpus = 0; | 662 | gedf_env->num_cpus = 0; |
618 | bheap_init(&gedf_env->cpu_heap); | 663 | bheap_init(&gedf_env->cpu_heap); |
619 | for (i = 0; i < max_cpus; i++) { | 664 | for (i = 0; i < max_cpus; i++) { |
620 | gedf_env->cpu_entries[i].id = i; | 665 | gedf_env->cpu_entries[i].id = i; |
621 | 666 | ||
622 | /* initialize cpu timer */ | ||
623 | hrtimer_init(&gedf_env->cpu_entries[i].timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); | ||
624 | gedf_env->cpu_entries[i].timer.function = timer_callback; | ||
625 | |||
626 | /* initialize cpu heap node */ | 667 | /* initialize cpu heap node */ |
627 | gedf_env->cpu_entries[i].hn = &gedf_env->cpu_node[i]; | 668 | gedf_env->cpu_entries[i].hn = &gedf_env->cpu_node[i]; |
628 | bheap_node_init(&gedf_env->cpu_entries[i].hn, &gedf_env->cpu_entries[i]); | 669 | bheap_node_init(&gedf_env->cpu_entries[i].hn, &gedf_env->cpu_entries[i]); |
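Since `cur_budget` now lives in the base `struct reservation`, the drain path no longer needs the `container_of` dance through `struct gedf_reservation`. A self-contained sketch of that clamped drain, using stand-in fields only:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lt_t;

/* Stand-in for the base reservation fields touched by this commit. */
struct reservation {
	lt_t cur_budget;
	lt_t budget_consumed;
	lt_t budget_consumed_total;
};

/* Mirrors gedf_drain_budget: never let the budget underflow, but always
 * account the full consumed time in the statistics. */
static void drain_budget(struct reservation *res, lt_t how_much)
{
	if (how_much > res->cur_budget)
		res->cur_budget = 0;  /* overrun: clamp to zero */
	else
		res->cur_budget -= how_much;
	res->budget_consumed += how_much;
	res->budget_consumed_total += how_much;
}

int main(void)
{
	struct reservation res = { .cur_budget = 1000 };
	drain_budget(&res, 400);
	drain_budget(&res, 800);  /* overruns; clamped */
	printf("left=%llu consumed=%llu\n",
	       (unsigned long long)res.cur_budget,
	       (unsigned long long)res.budget_consumed);
	return 0;
}
```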
diff --git a/litmus/reservations/table_driven_ext_reservation.c b/litmus/reservations/table_driven_ext_reservation.c new file mode 100644 index 000000000000..ba633f2d5be6 --- /dev/null +++ b/litmus/reservations/table_driven_ext_reservation.c | |||
@@ -0,0 +1,256 @@ | |||
1 | static struct task_struct* mtd_dispatch_client( | ||
2 | struct reservation* res, | ||
3 | lt_t* time_slice, | ||
4 | int cpu) | ||
5 | { | ||
6 | return res->env->ops->dispatch(res->env, time_slice, cpu); | ||
7 | } | ||
8 | |||
9 | static void mtd_replenish_budget( | ||
10 | struct reservation* res, | ||
11 | int cpu) | ||
12 | { | ||
13 | struct mtd_reservation* mtd_res = | ||
14 | container_of(res, struct mtd_reservation, res); | ||
15 | |||
16 | mtd_res->interval_index[cpu] = | ||
17 | (mtd_res->interval_index[cpu] + 1) % mtd_res->num_intervals[cpu]; | ||
18 | if (!mtd_res->interval_index[cpu]) | ||
19 | mtd_res->major_cycle_start += mtd_res->major_cycle; | ||
20 | mtd_res->cur_interval[cpu].start = mtd_res->major_cycle_start + mtd_res->intervals[cpu][mtd_res->interval_index[cpu]].start; | ||
21 | mtd_res->cur_interval[cpu].end = mtd_res->major_cycle_start + mtd_res->intervals[cpu][mtd_res->interval_index[cpu]].end; | ||
22 | res->replenishment_time = mtd_res->cur_interval[cpu].start; | ||
23 | } | ||
24 | |||
25 | static void mtd_drain_budget( | ||
26 | struct reservation* res, | ||
27 | lt_t how_much, | ||
28 | int cpu) | ||
29 | { | ||
30 | struct mtd_reservation* mtd_res; | ||
31 | lt_t now, end; | ||
32 | |||
33 | mtd_res = container_of(res, struct mtd_reservation, res[cpu]); | ||
34 | |||
35 | BUG_ON(res != &mtd_res->res[cpu]); | ||
36 | |||
37 | now = litmus_clock(); | ||
38 | end = mtd_res->cur_interval[cpu].end; | ||
39 | if (now >= end) | ||
40 | res->cur_budget = 0; | ||
41 | else | ||
42 | res->cur_budget = end - now; | ||
43 | |||
44 | res->env->ops->update_time(res->env, how_much, cpu); | ||
45 | } | ||
46 | |||
47 | static struct reservation_ops mtd_ops = { | ||
48 | .drain_budget = mtd_drain_budget, .replenish_budget = mtd_replenish_budget, .dispatch_client = mtd_dispatch_client, | ||
49 | }; | ||
50 | |||
51 | long alloc_mtd_reservation( | ||
52 | struct reservation** _res, | ||
53 | unsigned int id, | ||
54 | lt_t major_cycle) | ||
55 | { | ||
56 | struct mtd_reservation* mtd_res; | ||
57 | | ||
58 | mtd_res = kzalloc(sizeof(struct mtd_reservation), GFP_KERNEL); | ||
59 | if (!mtd_res) | ||
60 | return -ENOMEM; | ||
61 | init_ext_reservation(&mtd_res->res, id, &mtd_ops); | ||
62 | mtd_res->major_cycle = major_cycle; | ||
63 | *_res = &mtd_res->res; | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | /* ***************************************************************** */ | ||
68 | static void requeue( | ||
69 | struct mtd_cpu_entry* entry, | ||
70 | struct reservation* res) | ||
71 | { | ||
72 | BUG_ON(!res); | ||
73 | BUG_ON(is_queued_res(res)); | ||
74 | | ||
75 | if (lt_before_eq(res->replenishment_time, litmus_clock())) | ||
76 | __add_ready_res(&entry->domain, res); | ||
77 | else | ||
78 | __add_release_res_no_timer(&entry->domain, res); | ||
79 | | ||
80 | } | ||
81 | |||
82 | /* ***************************************************************** */ | ||
83 | static void mtd_env_shutdown( | ||
84 | struct reservation_environment* env) | ||
85 | { | ||
86 | struct mtd_reservation_environment* mtd_env; | ||
87 | struct reservation* res; | ||
88 | unsigned long flags; | ||
89 | int cpu; | ||
90 | |||
91 | mtd_env = container_of(env, struct mtd_reservation_environment, env); | ||
92 | |||
93 | for_each_online_cpu(cpu) { | ||
94 | domain_suspend_releases(&mtd_env->cpu_entries[cpu].domain); | ||
95 | } | ||
96 | |||
97 | raw_spin_lock_irqsave(&mtd_env->cpu_entries[0].domain.ready_lock, flags); | ||
98 | |||
99 | /* call shutdown on all scheduled reservations */ | ||
100 | while (!list_empty(&env->all_reservations)) { | ||
101 | res = list_first_entry(&env->all_reservations, | ||
102 | struct reservation, all_list); | ||
103 | list_del(&res->all_list); | ||
104 | res->ops->shutdown(res); | ||
105 | } | ||
106 | raw_spin_unlock_irqrestore(&mtd_env->cpu_entries[0].domain.ready_lock, flags); | ||
107 | |||
108 | /* free memory */ | ||
109 | kfree(env); | ||
110 | } | ||
111 | |||
112 | static struct reservation* mtd_find_res_by_id( | ||
113 | struct reservation_environment* env, | ||
114 | int id) | ||
115 | { | ||
116 | struct reservation* res; | ||
117 | list_for_each_entry(res, &env->all_reservations, all_list) { | ||
118 | if (res->id == id) | ||
119 | return res; | ||
120 | } | ||
121 | return NULL; | ||
122 | } | ||
122 | |||
123 | /* not supported */ | ||
124 | static void mtd_env_remove_res( | ||
125 | struct reservation_environment* env, | ||
126 | struct reservation* res, | ||
127 | int complete, | ||
128 | int cpu) | ||
129 | { | ||
130 | return; | ||
131 | } | ||
132 | |||
133 | /* the reservation is added one core at a time due to how the table is specified */ | ||
134 | static void mtd_env_add_res( | ||
135 | struct reservation_environment* env, | ||
136 | struct reservation* res, | ||
137 | int cpu) | ||
138 | { | ||
139 | struct mtd_reservation_environment* mtd_env; | ||
140 | struct mtd_reservation* mtd_res; | ||
141 | unsigned long flags; | ||
142 | lt_t now, tmp; | ||
143 | | ||
144 | mtd_env = container_of(env, struct mtd_reservation_environment, env); | ||
145 | mtd_res = container_of(res, struct mtd_reservation, res[cpu]); | ||
146 | mtd_res->in_env++; | ||
147 | if (mtd_res->in_env == 1) | ||
148 | list_add_tail(&mtd_res->res[0].all_list, &env->all_reservations); | ||
149 | | ||
150 | now = litmus_clock(); | ||
151 | tmp = div64_u64(now, mtd_res->major_cycle); | ||
152 | mtd_res->major_cycle_start = tmp * mtd_res->major_cycle; | ||
153 | if (mtd_res->num_intervals[cpu]) { | ||
154 | res->replenishment_time = mtd_res->major_cycle_start + mtd_res->intervals[cpu][0].start; | ||
155 | raw_spin_lock_irqsave(&mtd_env->cpu_entries[cpu].domain.ready_lock, flags); | ||
156 | requeue(&mtd_env->cpu_entries[cpu], res); | ||
157 | if (res->replenishment_time <= now) | ||
158 | litmus_reschedule_local(); | ||
159 | raw_spin_unlock_irqrestore(&mtd_env->cpu_entries[cpu].domain.ready_lock, flags); | ||
160 | } | ||
161 | } | ||
162 | |||
163 | /* not supported */ | ||
164 | static void mtd_env_suspend( | ||
165 | struct reservation_environment* env, | ||
166 | int cpu) | ||
167 | { | ||
168 | return; | ||
169 | } | ||
170 | |||
171 | /* not supported */ | ||
172 | static void mtd_env_resume( | ||
173 | struct reservation_environment* env, | ||
174 | int cpu) | ||
175 | { | ||
176 | return; | ||
177 | } | ||
178 | |||
179 | /* If two reservations have overlapping intervals on the same core, | ||
180 | * then which one is scheduled is undefined | ||
181 | */ | ||
182 | static struct task_struct* mtd_env_dispatch( | ||
183 | struct reservation_environment* env, | ||
184 | lt_t* time_slice, | ||
185 | int cpu) | ||
186 | { | ||
187 | struct mtd_reservation_environment* mtd_env; | ||
188 | struct mtd_cpu_entry* entry; | ||
189 | struct reservation* linked; | ||
190 | struct task_struct* next = NULL; | ||
191 | unsigned long flags; | ||
192 | | ||
193 | |||
194 | mtd_env = container_of(env, struct mtd_reservation_environment, env); | ||
195 | entry = &mtd_env->cpu_entries[cpu]; | ||
196 | |||
197 | |||
198 | raw_spin_lock_irqsave(&entry->domain.ready_lock, flags); | ||
199 | |||
200 | /* if linked and scheduled differ, preempt and schedule accordingly */ | ||
201 | linked = __peek_ready_res(&entry->domain); | ||
202 | if (entry->scheduled != linked) { | ||
203 | if (entry->scheduled && entry->scheduled->ops->on_preempt) { | ||
204 | entry->scheduled->ops->on_preempt(entry->scheduled, cpu); | ||
205 | } | ||
206 | if (linked && linked->ops->on_schedule) { | ||
207 | linked->ops->on_schedule(linked, cpu); | ||
208 | } | ||
209 | __take_ready_res(&entry->domain); | ||
210 | entry->scheduled = linked; | ||
211 | } | ||
212 | raw_spin_unlock_irqrestore(&entry->domain.ready_lock, flags); | ||
213 | |||
214 | if (entry->scheduled) { | ||
215 | /* let scheduled reservation decide what runs next */ | ||
216 | next = entry->scheduled->ops->dispatch_client(entry->scheduled, time_slice, cpu); | ||
217 | *time_slice = (*time_slice > entry->scheduled->cur_budget) ? | ||
218 | entry->scheduled->cur_budget : *time_slice; | ||
219 | } else | ||
220 | *time_slice = ULLONG_MAX; | ||
221 | return next; | ||
222 | } | ||
223 | |||
224 | static void mtd_env_update_time( | ||
225 | struct reservation_environment* env, | ||
226 | lt_t how_much, | ||
227 | int cpu) | ||
228 | { | ||
229 | struct mtd_reservation_environment* mtd_env; | ||
230 | struct mtd_cpu_entry* entry; | ||
231 | struct reservation* res; | ||
232 | unsigned long flags; | ||
233 | |||
234 | mtd_env = container_of(env, struct mtd_reservation_environment, env); | ||
235 | entry = &mtd_env->cpu_entries[cpu]; | ||
236 | |||
237 | /* drains budget of ready task */ | ||
238 | /* In the case that multiple tasks on this core share an execution frame, | ||
239 | * only 1 has its budget drained. However, the other tasks will be scheduled | ||
240 | * at the end of the frame for epsilon time and immediately have its budget drained | ||
241 | * before the task of the next frame is scheduled. | ||
242 | * This results in only 1 of the tasks getting actual execution. | ||
243 | */ | ||
244 | if (entry->scheduled) { | ||
245 | entry->scheduled->ops->drain_budget(entry->scheduled, how_much, cpu); | ||
246 | /* if no more budget, replenish and requeue */ | ||
247 | if (!entry->scheduled->cur_budget) { | ||
248 | entry->scheduled->ops->replenish_budget(entry->scheduled, cpu); | ||
249 | raw_spin_lock_irqsave(&entry->domain.ready_lock, flags); | ||
250 | requeue(entry, entry->scheduled); | ||
251 | raw_spin_unlock_irqrestore(&entry->domain.ready_lock, flags); | ||
252 | } | ||
253 | } | ||
254 | /* release tasks in domain */ | ||
255 | release_jobs_before_now(&entry->domain); | ||
256 | } | ||
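A toy version of the table-driven replenishment this WIP file is building toward: advance through the per-cpu interval table, wrapping into the next major cycle when the table is exhausted. `next_replenishment` is an invented helper; only the index and wrap arithmetic mirrors `mtd_replenish_budget`:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lt_t;

struct lt_interval { lt_t start, end; };  /* offsets within a major cycle */

/* Step to the next interval of one cpu's table; on wrap-around, move the
 * major cycle base forward by one full cycle. Returns the absolute time of
 * the next replenishment. */
static lt_t next_replenishment(const struct lt_interval *tbl,
			       unsigned int num, unsigned int *idx,
			       lt_t *major_cycle_start, lt_t major_cycle)
{
	*idx = (*idx + 1) % num;
	if (*idx == 0)
		*major_cycle_start += major_cycle;  /* wrapped: next cycle */
	return *major_cycle_start + tbl[*idx].start;
}

int main(void)
{
	struct lt_interval tbl[] = { {0, 10}, {40, 60} };
	unsigned int idx = 0;
	lt_t cycle_start = 0, major_cycle = 100;

	for (int i = 0; i < 3; i++)
		printf("replenish at %llu\n", (unsigned long long)
		       next_replenishment(tbl, 2, &idx, &cycle_start, major_cycle));
	/* prints 40, 100, 140 */
	return 0;
}
```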
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index 4c37341065f0..1a15e2491a65 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c | |||
@@ -62,7 +62,7 @@ int release_order(struct binheap_node *a, struct binheap_node *b) | |||
62 | binheap_entry(b, struct release_heap, node)->release_time); | 62 | binheap_entry(b, struct release_heap, node)->release_time); |
63 | } | 63 | } |
64 | 64 | ||
65 | static void release_jobs_before_now(rt_domain_t* rt) | 65 | void release_jobs_before_now(rt_domain_t* rt) |
66 | { | 66 | { |
67 | unsigned long flags; | 67 | unsigned long flags; |
68 | struct release_heap* rh; | 68 | struct release_heap* rh; |
@@ -118,12 +118,12 @@ static enum hrtimer_restart on_release_timer(struct hrtimer *timer) | |||
118 | return HRTIMER_RESTART; | 118 | return HRTIMER_RESTART; |
119 | } | 119 | } |
120 | 120 | ||
121 | void suspend_releases(rt_domain_t* rt) | 121 | void domain_suspend_releases(rt_domain_t* rt) |
122 | { | 122 | { |
123 | hrtimer_try_to_cancel(&rt->timer); | 123 | hrtimer_cancel(&rt->timer); |
124 | } | 124 | } |
125 | 125 | ||
126 | void resume_releases(rt_domain_t* rt) | 126 | void domain_resume_releases(rt_domain_t* rt) |
127 | { | 127 | { |
128 | release_jobs_before_now(rt); | 128 | release_jobs_before_now(rt); |
129 | if (rt->release_queue.earliest_release != NO_FUTURE_RELEASE) { | 129 | if (rt->release_queue.earliest_release != NO_FUTURE_RELEASE) { |
diff --git a/litmus/sched_ext_res.c b/litmus/sched_ext_res.c index dd2ae0f02fb3..63f6d821d2d4 100644 --- a/litmus/sched_ext_res.c +++ b/litmus/sched_ext_res.c | |||
@@ -18,7 +18,18 @@ | |||
18 | 18 | ||
19 | struct gedf_reservation_environment* gedf_env; | 19 | struct gedf_reservation_environment* gedf_env; |
20 | 20 | ||
21 | static DEFINE_PER_CPU(lt_t, last_update_time); | 21 | struct cpu_time { |
22 | struct hrtimer timer; | ||
23 | lt_t last_update_time; | ||
24 | }; | ||
25 | |||
26 | static DEFINE_PER_CPU(struct cpu_time, cpu_time); | ||
27 | |||
28 | static enum hrtimer_restart on_budget_timeout(struct hrtimer *timer) | ||
29 | { | ||
30 | litmus_reschedule_local(); | ||
31 | return HRTIMER_NORESTART; | ||
32 | } | ||
22 | 33 | ||
23 | /* | 34 | /* |
24 | static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) | 35 | static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) |
@@ -63,17 +74,26 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) | |||
63 | static struct task_struct* ext_res_schedule(struct task_struct * prev) | 74 | static struct task_struct* ext_res_schedule(struct task_struct * prev) |
64 | { | 75 | { |
65 | int cpu = smp_processor_id(); | 76 | int cpu = smp_processor_id(); |
66 | lt_t now, delta; | 77 | lt_t delta, time_slice; |
78 | struct cpu_time* entry; | ||
67 | struct task_struct* next; | 79 | struct task_struct* next; |
68 | 80 | ||
69 | delta = litmus_clock() - *this_cpu_ptr(&last_update_time); | 81 | entry = this_cpu_ptr(&cpu_time); |
82 | delta = litmus_clock() - entry->last_update_time; | ||
70 | 83 | ||
71 | //TODO: implement per cpu lt_t to track time | 84 | //TODO: implement per cpu lt_t to track time |
72 | 85 | ||
73 | gedf_env->env.ops->update_time(&gedf_env->env, delta, cpu); | 86 | gedf_env->env.ops->update_time(&gedf_env->env, delta, cpu); |
74 | next = gedf_env->env.ops->dispatch(&gedf_env->env, cpu); | 87 | next = gedf_env->env.ops->dispatch(&gedf_env->env, &time_slice, cpu); |
88 | |||
89 | entry->last_update_time = litmus_clock(); | ||
75 | 90 | ||
76 | *this_cpu_ptr(&last_update_time) = litmus_clock(); | 91 | if (time_slice != ULLONG_MAX) { |
92 | hrtimer_start(&entry->timer, | ||
93 | ns_to_ktime(entry->last_update_time + time_slice), | ||
94 | HRTIMER_MODE_ABS_PINNED); | ||
95 | } else | ||
96 | hrtimer_try_to_cancel(&entry->timer); | ||
77 | 97 | ||
78 | sched_state_task_picked(); | 98 | sched_state_task_picked(); |
79 | 99 | ||
@@ -89,7 +109,7 @@ static void ext_res_task_block(struct task_struct *tsk) | |||
89 | TRACE_TASK(tsk, "thread suspends at %llu \n", litmus_clock()); | 109 | TRACE_TASK(tsk, "thread suspends at %llu \n", litmus_clock()); |
90 | 110 | ||
91 | res = (struct reservation*) tsk_rt(tsk)->plugin_state; | 111 | res = (struct reservation*) tsk_rt(tsk)->plugin_state; |
92 | res->par_env->ops->remove_res(res->par_env, res, 0); | 112 | res->par_env->ops->remove_res(res->par_env, res, 0, 0); |
93 | } | 113 | } |
94 | 114 | ||
95 | 115 | ||
@@ -103,7 +123,7 @@ static void ext_res_task_resume(struct task_struct *tsk) | |||
103 | TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock()); | 123 | TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock()); |
104 | 124 | ||
105 | res = (struct reservation*) tsk_rt(tsk)->plugin_state; | 125 | res = (struct reservation*) tsk_rt(tsk)->plugin_state; |
106 | res->par_env->ops->add_res(res->par_env, res); | 126 | res->par_env->ops->add_res(res->par_env, res, 0); |
107 | } | 127 | } |
108 | 128 | ||
109 | static long ext_res_admit_task(struct task_struct *tsk) | 129 | static long ext_res_admit_task(struct task_struct *tsk) |
@@ -137,7 +157,7 @@ static void ext_res_task_new(struct task_struct *tsk, int on_runqueue, | |||
137 | release_at(tsk, now); | 157 | release_at(tsk, now); |
138 | res->replenishment_time = now; | 158 | res->replenishment_time = now; |
139 | 159 | ||
140 | res->par_env->ops->add_res(res->par_env, res); | 160 | res->par_env->ops->add_res(res->par_env, res, 0); |
141 | 161 | ||
142 | if (is_running) | 162 | if (is_running) |
143 | litmus_reschedule_local(); | 163 | litmus_reschedule_local(); |
@@ -169,7 +189,7 @@ static void ext_res_task_exit(struct task_struct *tsk) | |||
169 | res = (struct reservation*)tsk_rt(tsk)->plugin_state; | 189 | res = (struct reservation*)tsk_rt(tsk)->plugin_state; |
170 | par_env = res->par_env; | 190 | par_env = res->par_env; |
171 | 191 | ||
172 | par_env->ops->remove_res(par_env, res, 1); | 192 | par_env->ops->remove_res(par_env, res, 1, 0); |
173 | 193 | ||
174 | TRACE_TASK(tsk, "task exits at %llu \n", litmus_clock()); | 194 | TRACE_TASK(tsk, "task exits at %llu \n", litmus_clock()); |
175 | } | 195 | } |
@@ -310,13 +330,17 @@ static long ext_res_activate_plugin(void) | |||
310 | { | 330 | { |
311 | int cpu; | 331 | int cpu; |
312 | int num_cpus = num_online_cpus(); | 332 | int num_cpus = num_online_cpus(); |
333 | struct cpu_time* entry; | ||
313 | lt_t now = litmus_clock(); | 334 | lt_t now = litmus_clock(); |
314 | 335 | ||
315 | alloc_gedf_reservation_environment(&gedf_env, num_cpus); | 336 | alloc_gedf_reservation_environment(&gedf_env, num_cpus); |
316 | 337 | ||
317 | for_each_online_cpu(cpu) { | 338 | for_each_online_cpu(cpu) { |
318 | TRACE("Initializing CPU%d...\n", cpu); | 339 | TRACE("Initializing CPU%d...\n", cpu); |
319 | *this_cpu_ptr(&last_update_time) = now; | 340 | entry = per_cpu_ptr(&cpu_time, cpu); |
341 | hrtimer_init(&entry->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
342 | entry->timer.function = on_budget_timeout; | ||
343 | entry->last_update_time = now; | ||
320 | gedf_env->cpu_entries[cpu].id = cpu; | 344 | gedf_env->cpu_entries[cpu].id = cpu; |
321 | gedf_env->env.ops->resume(&gedf_env->env, cpu); | 345 | gedf_env->env.ops->resume(&gedf_env->env, cpu); |
322 | } | 346 | } |
@@ -329,8 +353,16 @@ static long ext_res_activate_plugin(void) | |||
329 | 353 | ||
330 | static long ext_res_deactivate_plugin(void) | 354 | static long ext_res_deactivate_plugin(void) |
331 | { | 355 | { |
356 | int cpu; | ||
357 | struct cpu_time* entry; | ||
358 | |||
332 | gedf_env->env.ops->shutdown(&gedf_env->env); | 359 | gedf_env->env.ops->shutdown(&gedf_env->env); |
333 | 360 | ||
361 | for_each_online_cpu(cpu) { | ||
362 | entry = per_cpu_ptr(&cpu_time, cpu); | ||
363 | hrtimer_cancel(&entry->timer); | ||
364 | } | ||
365 | |||
334 | destroy_domain_proc_info(&ext_res_domain_proc_info); | 366 | destroy_domain_proc_info(&ext_res_domain_proc_info); |
335 | return 0; | 367 | return 0; |
336 | } | 368 | } |
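Finally, a toy model of the plugin's new per-cpu timekeeping: charge elapsed wall-clock time to the environment on every scheduler invocation, then arm a one-shot budget timer for the returned time slice. `schedule_tick` and the `timer_expiry` field are invented simplifications of the hrtimer-based code above:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lt_t;
#define TS_UNLIMITED UINT64_MAX  /* stand-in for the ULLONG_MAX sentinel */

struct cpu_time {
	lt_t last_update_time;
	lt_t timer_expiry;  /* 0 = timer cancelled */
};

static lt_t clock_ns;  /* stand-in for litmus_clock() */

static void schedule_tick(struct cpu_time *entry, lt_t time_slice)
{
	/* charge time since the last invocation to the environment */
	lt_t delta = clock_ns - entry->last_update_time;
	printf("charge %llu ns to the environment\n", (unsigned long long)delta);
	entry->last_update_time = clock_ns;

	if (time_slice != TS_UNLIMITED)
		entry->timer_expiry = entry->last_update_time + time_slice;
	else
		entry->timer_expiry = 0;  /* hrtimer_try_to_cancel() */
}

int main(void)
{
	struct cpu_time entry = { .last_update_time = 0 };
	clock_ns = 1000;
	schedule_tick(&entry, 500);          /* arm budget timer at t=1500 */
	clock_ns = 1500;
	schedule_tick(&entry, TS_UNLIMITED); /* idle: cancel the timer */
	return 0;
}
```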