author    Zelin Tong <ztong@ludwig.cs.unc.edu>  2020-09-30 19:20:00 -0400
committer Zelin Tong <ztong@ludwig.cs.unc.edu>  2020-09-30 19:20:00 -0400
commit    b9de3914e3c26f30c8bd25fb909fc243e65f9a34 (patch)
tree      2cc4665260049d6d0da9827b7daa3b9d25d9ed52
parent    3f90761ec77c8d75e23d694e8fea8d5a905e7a53 (diff)
EXT-RES Checkpoint 2 - Hardly Tested.
 include/litmus/reservations/table_driven_ext_reservation.h |  2
 litmus/reservations/table_driven_ext_reservation.c         | 49
 2 files changed, 32 insertions(+), 19 deletions(-)
diff --git a/include/litmus/reservations/table_driven_ext_reservation.h b/include/litmus/reservations/table_driven_ext_reservation.h
index 15ba8b968fd7..706aa07be8be 100644
--- a/include/litmus/reservations/table_driven_ext_reservation.h
+++ b/include/litmus/reservations/table_driven_ext_reservation.h
@@ -31,7 +31,7 @@ long alloc_mtd_reservation(
 
 struct mtd_cpu_entry {
 	int id;
-	struct mtd_reservation* scheduled;
+	struct reservation* scheduled;
 	rt_domain_t domain;
 };
 
diff --git a/litmus/reservations/table_driven_ext_reservation.c b/litmus/reservations/table_driven_ext_reservation.c
index 16ce74c6a6f3..71b01b7090c1 100644
--- a/litmus/reservations/table_driven_ext_reservation.c
+++ b/litmus/reservations/table_driven_ext_reservation.c
@@ -71,8 +71,12 @@ static void mtd_replenish_budget(
 
 	BUG_ON(res != &mtd_res->res[cpu]);
 
+	/* calculate next interval index */
 	mtd_res->interval_index[cpu] =
 		(mtd_res->interval_index[cpu] + 1) % mtd_res->num_intervals[cpu];
+	/* on wrap-around, advance the start of the major cycle */
+	if (!mtd_res->interval_index[cpu])
+		mtd_res->major_cycle_start += mtd_res->major_cycle;
 
 	mtd_res->cur_interval[cpu].start = mtd_res->intervals[cpu][mtd_res->interval_index[cpu]].start;
 	mtd_res->cur_interval[cpu].end = mtd_res->intervals[cpu][mtd_res->interval_index[cpu]].end;
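The hunk above implements the core table-driven step: a per-CPU index walks a fixed interval table modulo its length, and each wrap-around shifts the absolute base time forward by one major cycle. A minimal userspace sketch of that bookkeeping, with illustrative names and table contents rather than the kernel's types:

```c
#include <stdio.h>

typedef unsigned long long lt_t;

/* one scheduling window, as an offset within the major cycle */
struct interval { lt_t start, end; };

struct table {
	struct interval iv[3];
	unsigned num;     /* number of intervals in the table */
	unsigned idx;     /* current interval index */
	lt_t major_cycle; /* length of the repeating schedule */
	lt_t cycle_start; /* absolute start time of the current cycle */
};

/* advance to the next interval; on wrap-around, start the next major cycle */
static void advance(struct table *t)
{
	t->idx = (t->idx + 1) % t->num;
	if (!t->idx)
		t->cycle_start += t->major_cycle;
}

int main(void)
{
	struct table t = {
		.iv = { {0, 10}, {30, 40}, {70, 90} },
		.num = 3, .major_cycle = 100,
	};
	for (int i = 0; i < 7; i++) {
		printf("interval %u: [%llu, %llu)\n", t.idx,
		       t.cycle_start + t.iv[t.idx].start,
		       t.cycle_start + t.iv[t.idx].end);
		advance(&t);
	}
	return 0;
}
```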
@@ -110,6 +114,8 @@ static struct reservation_ops mtd_ops = {
 	.dispatch_client = mtd_dispatch_client,
 	.on_schedule = mtd_on_schedule,
 	.on_preempt = mtd_on_preempt,
+	.is_np = mtd_is_np,
+	.shutdown = mtd_shutdown
 };
 
 /* cannot handle installing table of same core from different threads simultaneously.
@@ -130,11 +136,13 @@ long mtd_res_install_table(
 	mtd_res->major_cycle = major_cycle;
 	mtd_res->num_intervals[cpu] = num_intervals;
 
+	/* allocate kernel memory for the intervals */
 	if (mtd_res->num_intervals[cpu])
 		kfree(&mtd_res->intervals[cpu]);
-	kzalloc
+	if (!(mtd_res->intervals[cpu] = kzalloc(sizeof(struct lt_interval) * num_intervals, GFP_KERNEL)))
+		return -ENOMEM;
 	/* copy from user space to kernel space */
-	err = copy_from_user(mtd_res->intervals[cpu], intervals);
+	err = copy_from_user(mtd_res->intervals[cpu], intervals, sizeof(struct lt_interval) * num_intervals);
 	if (err)
 		return err;
 
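This allocate-then-copy sequence is the standard pattern for importing a user-supplied table, and it has well-known pitfalls: the kzalloc() result must be stored somewhere, failure returns are negative errnos by kernel convention, and copy_from_user() returns the number of bytes left uncopied rather than an errno. A hedged sketch of the complete pattern, using a hypothetical helper name (a real implementation might simply use memdup_user()):

```c
#include <linux/slab.h>
#include <linux/uaccess.h>

/* hypothetical helper; field names follow the diff, and intervals[cpu] is
 * assumed to start out NULL so the kfree() of the old table is always safe */
static long install_intervals(struct mtd_reservation *mtd_res, int cpu,
			      struct lt_interval __user *user_tbl,
			      unsigned int num_intervals)
{
	size_t bytes = sizeof(struct lt_interval) * num_intervals;
	struct lt_interval *tbl;

	tbl = kzalloc(bytes, GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;
	if (copy_from_user(tbl, user_tbl, bytes)) {
		kfree(tbl);
		return -EFAULT; /* do not return the bytes-not-copied count */
	}
	kfree(mtd_res->intervals[cpu]); /* drop any previously installed table */
	mtd_res->intervals[cpu] = tbl;
	mtd_res->num_intervals[cpu] = num_intervals;
	return 0;
}
```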
@@ -161,6 +169,8 @@ long alloc_mtd_reservation(
 		init_ext_reservation(&mtd_res->res[i], id, &mtd_ops);
 	}
 	mtd_res->major_cycle = major_cycle;
+
+	return 0;
 }
 
 /* ***************************************************************** */
@@ -195,7 +205,7 @@ static void mtd_env_shutdown(
 	mtd_env = container_of(env, struct mtd_reservation_environment, env);
 
 	for_each_online_cpu(cpu) {
-		domain_suspend_releases(&mtd_env->cpu_entry[cpu]->domain);
+		domain_suspend_releases(&mtd_env->cpu_entries[cpu].domain);
 	}
 
 	raw_spin_lock_irqsave(&mtd_env->insert_lock, flags);
@@ -227,7 +237,7 @@ static struct reservation* mtd_find_res_by_id(
 	struct reservation* res;
 	unsigned long flags;
 	struct mtd_reservation_environment* mtd_env =
-		container_of(res, struct mtd_reservation_environment, res);
+		container_of(env, struct mtd_reservation_environment, env);
 
 	raw_spin_lock_irqsave(&mtd_env->insert_lock, flags);
 	list_for_each_entry(res, &env->all_reservations, all_list) {
@@ -259,13 +269,18 @@ static void mtd_env_add_res(
 	lt_t tmp;
 	unsigned long flags;
 
+	mtd_env = container_of(env, struct mtd_reservation_environment, env);
+	mtd_res = container_of(res, struct mtd_reservation, res[cpu]);
+
+	BUG_ON(&mtd_res->res[cpu] != res);
+
 	/* only add to list in reservation if is first core of res to be added */
 	raw_spin_lock_irqsave(&mtd_env->insert_lock, flags);
-	if (!mtd_res->num_cpus) {
+	if (!mtd_env->num_cpus) {
 		mtd_res->res[0].par_env = env;
 		list_add_tail(&mtd_res->res[0].all_list, &env->all_reservations);
 	}
-	mtd_res->num_cpus++;
+	mtd_env->num_cpus++;
 	raw_spin_unlock_irqrestore(&mtd_env->insert_lock, flags);
 
 	tmp = div64_u64(litmus_clock(), mtd_res->major_cycle);
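The two container_of() calls added above recover the enclosing objects from embedded members; notably, res[cpu] as the member argument is legal because an array element is a valid offsetof() member designator, which is exactly what the BUG_ON re-checks. A self-contained userspace sketch of that pattern (the layout and array size are illustrative):

```c
#include <stddef.h>
#include <assert.h>

/* same definition the kernel uses, minus type-checking sugar */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct reservation { int id; };

struct mtd_reservation {
	int num_intervals[4];
	struct reservation res[4]; /* one reservation view per CPU */
};

int main(void)
{
	struct mtd_reservation mtd;
	int cpu = 2;
	struct reservation *res = &mtd.res[cpu];

	/* subtracting the element's offset yields the enclosing struct */
	struct mtd_reservation *back =
		container_of(res, struct mtd_reservation, res[cpu]);
	assert(back == &mtd && &back->res[cpu] == res);
	return 0;
}
```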
@@ -274,13 +289,13 @@ static void mtd_env_add_res(
 	res->par_env = env;
 	if (mtd_res->num_intervals[cpu]) {
 		tmp = mtd_res->major_cycle_start;
-		res->replenishment_time = tmp + mtd_res->cur_interval.start;
+		res->replenishment_time = tmp + mtd_res->cur_interval[cpu].start;
 
-		raw_spin_lock_irqsave(&mtd_env->cpu_entries[cpu]->timer, flags);
+		raw_spin_lock_irqsave(&mtd_env->cpu_entries[cpu].domain.ready_lock, flags);
 		requeue(&mtd_env->cpu_entries[cpu], &mtd_res->res[cpu]);
-		if (mtd_res->res[cpu].replenish_time <= litmus_clock())
+		if (mtd_res->res[cpu].replenishment_time <= litmus_clock())
 			litmus_reschedule_local();
-		raw_spin_unlock_irqrestore(&mtd_env->cpu_entries[cpu]->timer, flags);
+		raw_spin_unlock_irqrestore(&mtd_env->cpu_entries[cpu].domain.ready_lock, flags);
 	}
 }
 
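Between the div64_u64() call (the kernel's 64-by-64-bit unsigned division helper; a plain '/' on u64 operands does not link on 32-bit kernels) and the replenishment_time assignment above, the elided lines presumably align major_cycle_start down to a cycle boundary, so that each reservation is released at an absolute time of cycle start plus interval offset. A userspace sketch of that arithmetic, with made-up numbers:

```c
#include <stdio.h>

typedef unsigned long long lt_t;

/* div64_u64(now, major_cycle) in the kernel; plain '/' is fine in userspace */
static lt_t replenishment_time(lt_t now, lt_t major_cycle, lt_t interval_start)
{
	lt_t cycle_start = (now / major_cycle) * major_cycle; /* align down */
	return cycle_start + interval_start; /* absolute interval start */
}

int main(void)
{
	/* now = 12345, major cycle = 1000, interval offset = 300 -> 12300 */
	printf("%llu\n", replenishment_time(12345, 1000, 300));
	return 0;
}
```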
@@ -310,10 +325,9 @@ static struct task_struct* mtd_env_dispatch(
 {
 	struct mtd_reservation_environment* mtd_env;
 	struct mtd_cpu_entry* entry;
-	struct mtd_reservation* linked;
-	struct task_struct* next;
+	struct reservation* linked;
+	struct task_struct* next = NULL;
 	unsigned long flags;
-	lt_t budget;
 
 	mtd_env = container_of(env, struct mtd_reservation_environment, env);
 	entry = &mtd_env->cpu_entries[cpu];
@@ -337,12 +351,13 @@ static struct task_struct* mtd_env_dispatch(
 
 	if (entry->scheduled) {
 		/* let scheduled reservation decide what runs next */
-		next = entry->scheduled->ops->dispatch_client(entry->scheduled, cpu);
+		next = entry->scheduled->ops->dispatch_client(entry->scheduled, time_slice, cpu);
 		*time_slice = (*time_slice > entry->scheduled->cur_budget) ?
 			entry->scheduled->cur_budget : *time_slice;
 	} else
 		*time_slice = ULLONG_MAX;
 
+	return next;
 }
 
 static void mtd_env_update_time(
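The dispatch contract in the hunk above is worth spelling out: the environment forwards the decision to the scheduled reservation and clamps the caller's *time_slice to that reservation's remaining budget, so the next scheduler invocation fires no later than budget exhaustion; with nothing scheduled, the slice is left unbounded. A minimal sketch of that contract, with illustrative types in place of task_struct:

```c
#include <stdio.h>
#include <limits.h>

typedef unsigned long long lt_t;

struct reservation { lt_t cur_budget; };

/* returns a client id; the real code returns a struct task_struct* */
static int dispatch_client(struct reservation *res, lt_t *time_slice)
{
	if (res->cur_budget < *time_slice)
		*time_slice = res->cur_budget; /* clamp to remaining budget */
	return 42;
}

int main(void)
{
	struct reservation r = { .cur_budget = 500 };
	lt_t slice = ULLONG_MAX; /* "no constraint" until something clamps it */
	int client = dispatch_client(&r, &slice);
	printf("client %d may run for at most %llu ns\n", client, slice);
	return 0;
}
```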
@@ -352,7 +367,6 @@ static void mtd_env_update_time(
 {
 	struct mtd_reservation_environment* mtd_env;
 	struct mtd_cpu_entry* entry;
-	struct reservation* res;
 	unsigned long flags;
 
 	mtd_env = container_of(env, struct mtd_reservation_environment, env);
@@ -420,13 +434,12 @@ long alloc_mtd_reservation_environment(
 	INIT_LIST_HEAD(&mtd_env->env.all_reservations);
 
 	mtd_env->num_cpus = num_cpus;
-	bheap_init(&mtd_env->cpu_heap);
 	for (i = 0; i < num_cpus; i++) {
 		mtd_env->cpu_entries[i].id = i;
 		/* initialize per cpu domain */
-		rt_domain_init(&gedf_env->domain, mtd_ready_order, NULL, mtd_env_release_jobs);
+		rt_domain_init(&mtd_env->cpu_entries[i].domain, mtd_ready_order, NULL, mtd_env_release_jobs);
 	}
 
-	*_env = gedf_env;
+	*_env = mtd_env;
 	return 0;
 }