author     Jonathan Herman <hermanjl@cs.unc.edu>    2011-03-30 01:18:47 -0400
committer  Jonathan Herman <hermanjl@cs.unc.edu>    2011-03-30 01:18:47 -0400
commit     c23c28f7bdbdbe3f614a37813ef0fa268df6aa76 (patch)
tree       6f4aa2f66b00dc06f68122b4d5d0e03b09b4b1cb /litmus/sched_edf_hsb.c
parent     631137a60abda0e4b052912dd2f6d36d5a38f69c (diff)
HRT and SRT both working together.
Diffstat (limited to 'litmus/sched_edf_hsb.c')
-rw-r--r--   litmus/sched_edf_hsb.c   150
1 file changed, 80 insertions, 70 deletions
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c
index 9aeed5d413a8..ac8994e87cca 100644
--- a/litmus/sched_edf_hsb.c
+++ b/litmus/sched_edf_hsb.c
@@ -238,6 +238,8 @@ static inline void update_cpu_position(cpu_entry_t *entry)
 }
 
 /*
+
+
  * Caller must hold global lock.
  */
 static inline cpu_entry_t* lowest_prio_cpu(void)
@@ -255,6 +257,7 @@ static inline cpu_entry_t* lowest_prio_cpu(void)
  */
 static void slack_timer_arm(hrt_server_t *server)
 {
+        cpu_entry_t *entry = container_of(server, cpu_entry_t, hrt_server);
         struct hrtimer *timer = &server->slack_timer;
         lt_t when_to_fire = get_server_deadline(server) -
                 get_server_budget(server);
@@ -273,15 +276,16 @@ static void slack_timer_arm(hrt_server_t *server)
         }
 
         BUG_ON(when_to_fire >= get_server_deadline(server));
+        BUG_ON(entry->cpu != smp_processor_id());
 
         if (lt_after_eq(litmus_clock(), when_to_fire)) {
                 TRACE_SUB("slack timer fired immediately, time was %llu,",
                           TIME(when_to_fire));
                 server->no_slack = 1;
         } else {
-                TRACE_SUB("slack timer armed to fire at %llu,", TIME(when_to_fire));
-                __hrtimer_start_range_ns(timer,
-                                         ns_to_ktime(when_to_fire),
+                TRACE_SUB("slack timer armed to fire at %llu on %d",
+                          TIME(when_to_fire), entry->cpu);
+                __hrtimer_start_range_ns(timer, ns_to_ktime(when_to_fire),
                                          0 /* delta */,
                                          HRTIMER_MODE_ABS_PINNED,
                                          0 /* no wakeup */);
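The slack timer logic above reduces to a single fire point: the timer is set to deadline minus remaining budget, i.e. the latest instant at which the server can still consume its full budget before its deadline, and it is pinned to the server's own CPU. Below is a minimal, self-contained sketch of that arithmetic using toy types (plain `unsigned long long` nanosecond timestamps standing in for `lt_t`, and a simplified server struct invented for illustration), not the plugin's own code:

```c
#include <stdio.h>

typedef unsigned long long lt_t;        /* toy stand-in for LITMUS^RT's lt_t */

/* hypothetical, simplified view of an HRT server's timing state */
struct toy_server {
        lt_t deadline;  /* absolute deadline, ns */
        lt_t budget;    /* remaining budget, ns */
        int no_slack;
};

/* latest time the server can still idle and fit its whole budget */
static lt_t slack_fire_time(const struct toy_server *s)
{
        return s->deadline - s->budget;
}

int main(void)
{
        struct toy_server s = { .deadline = 10000000, .budget = 4000000, .no_slack = 0 };
        lt_t now = 7000000;
        lt_t when_to_fire = slack_fire_time(&s);        /* 6000000 */

        if (now >= when_to_fire) {
                /* mirrors the immediate-fire branch: no slack is left */
                s.no_slack = 1;
                printf("no slack at %llu (fire point was %llu)\n", now, when_to_fire);
        } else {
                printf("arm pinned timer for %llu\n", when_to_fire);
        }
        return 0;
}
```

With `now` at 7 ms and a fire point of 6 ms, the example takes the immediate branch, just as slack_timer_arm() does when litmus_clock() has already passed when_to_fire.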
@@ -293,6 +297,8 @@ static void slack_timer_arm(hrt_server_t *server)
  */
 static inline void slack_timer_cancel(hrt_server_t *server)
 {
+        cpu_entry_t *entry = container_of(server, cpu_entry_t, hrt_server);
+        TRACE_SUB("cancelling slack timer on P%d", entry->cpu);
         if (hrtimer_active(&server->slack_timer))
                 hrtimer_try_to_cancel(&server->slack_timer);
 }
@@ -329,8 +335,6 @@ static noinline void link_server(cpu_entry_t *entry,
 {
         srt_state_t *server_state;
 
-        slack_timer_cancel(&entry->hrt_server);
-
         if (is_srt(entry->linked)) {
                 server_state = tsk_rt(entry->linked)->plugin_data;
 
@@ -361,6 +365,9 @@ static noinline void link_server(cpu_entry_t *entry,
 
                 entry->linked_server = (server_t*)&entry->srt_server;
         } else if (is_hrt(entry->linked)) {
+                /* HRT servers should never, ever migrate */
+                BUG_ON(entry->cpu != task_cpu(entry->linked));
+
                 entry->linked_server = (server_t*)&entry->hrt_server;
         } else { /* BE */
                 BUG_ON(!be_server);
@@ -368,9 +375,6 @@ static noinline void link_server(cpu_entry_t *entry,
         }
 
         server_run(entry->linked_server, entry->linked);
-
-        if (!is_hrt(entry->linked))
-                slack_timer_arm(&entry->hrt_server);
 }
 
 /*
@@ -382,9 +386,6 @@ static noinline void unlink_server(cpu_entry_t *entry)
 {
         srt_state_t *server_state;
 
-        if (!entry->linked || !is_hrt(entry->linked))
-                slack_timer_cancel(&entry->hrt_server);
-
         if (entry->linked_server) {
                 server_stop(entry->linked_server);
 
@@ -422,8 +423,6 @@ static noinline void unlink_server(cpu_entry_t *entry)
 
                 entry->linked_server = NULL;
         }
-
-        slack_timer_arm(&entry->hrt_server);
 }
 
 /* Update the link of a CPU.
@@ -441,6 +440,7 @@ static noinline void link_task_to_cpu(cpu_entry_t *entry,
         int on_cpu;
 
         BUG_ON(linked && !is_realtime(linked));
+        BUG_ON(linked && is_hrt(linked) && entry->cpu != task_cpu(linked));
 
         /* Currently linked task is set to be unlinked. */
         if (entry->linked) {
@@ -458,18 +458,21 @@ static noinline void link_task_to_cpu(cpu_entry_t *entry,
                 /* this should only happen if not linked already */
                 BUG_ON(sched->linked == linked);
 
-                /* If we are already scheduled on the CPU to which we
-                 * wanted to link, we don't need to do the swap --
-                 * we just link ourselves to the CPU and depend on
-                 * the caller to get things right.
+                /* If we are already scheduled on a CPU to which
+                 * we are not linked, then a task is linked but not
+                 * scheduled on another CPU. We can link that task here
+                 * and just re-link ourselves to the other CPU for the
+                 * same effect. Note that since HRT tasks cannot migrate,
+                 * they can never participate in this swap.
                  */
-                if (entry != sched) {
+                if (entry != sched && !is_hrt(linked) &&
+                    (!sched->linked || !is_hrt(sched->linked))) {
                         TRACE_TASK_SUB(linked,
                                        "already scheduled on %d, updating link.",
                                        sched->cpu);
+
                         tmp_task = sched->linked;
                         tmp_server = sched->linked_server;
-
                         unlink_server(sched);
 
                         linked->rt_param.linked_on = sched->cpu;
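The rewritten swap condition packs three requirements into one test: the target CPU differs from the CPU where the task is currently scheduled, the task being linked is not HRT, and whatever is already linked on that other CPU is not HRT either. A small stand-alone sketch of just that predicate, with plain booleans in place of the real task and CPU state (all names here are illustrative only):

```c
#include <stdbool.h>
#include <stdio.h>

/* illustrative predicate: may the two CPUs swap their linked tasks? */
static bool can_swap_links(bool on_other_cpu, bool linked_is_hrt,
                           bool other_has_link, bool other_link_is_hrt)
{
        return on_other_cpu &&                          /* entry != sched        */
               !linked_is_hrt &&                        /* !is_hrt(linked)       */
               (!other_has_link || !other_link_is_hrt); /* sched side is not HRT */
}

int main(void)
{
        /* SRT task scheduled elsewhere, other CPU links an SRT: swap allowed */
        printf("%d\n", can_swap_links(true, false, true, false));      /* 1 */
        /* an HRT task on either side pins the link, so no swap */
        printf("%d\n", can_swap_links(true, true,  true, false));      /* 0 */
        printf("%d\n", can_swap_links(true, false, true, true));       /* 0 */
        return 0;
}
```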
@@ -489,6 +492,7 @@ static noinline void link_task_to_cpu(cpu_entry_t *entry,
         if (linked)
                 link_server(entry, be_server);
 
+
         if (linked)
                 TRACE_TASK_SUB(linked, "linked to %d.", entry->cpu);
         else
@@ -542,6 +546,8 @@ static noinline void unlink(struct task_struct* t)
  */
 static inline int is_eligible(struct task_struct *task, hrt_server_t *server)
 {
+        TRACE_SUB("slack: %d, svd: %llu, tdu: %llu", server->no_slack,
+                  get_server_deadline(server), get_deadline(task));
         return server->no_slack ||
                 lt_after_eq(get_server_deadline(server), get_deadline(task));
 }
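is_eligible() gates HRT execution inside the server: an HRT job may run either once the server has no slack left (the slack timer has fired) or when the server's deadline is no earlier than the job's, so running the job cannot endanger the server's own guarantee. A toy rendering of that rule, assuming `lt_after_eq(a, b)` simply means `a >= b` on nanosecond timestamps:

```c
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long lt_t;

/* toy version of the eligibility test */
static bool toy_is_eligible(bool no_slack, lt_t server_deadline, lt_t task_deadline)
{
        return no_slack || server_deadline >= task_deadline;
}

int main(void)
{
        /* slack remains and the server's deadline is earlier: defer the job */
        printf("%d\n", toy_is_eligible(false, 5000000, 8000000));      /* 0 */
        /* same deadlines but the slack timer already fired: run it now */
        printf("%d\n", toy_is_eligible(true, 5000000, 8000000));       /* 1 */
        /* server deadline at or past the job's deadline: run it now */
        printf("%d\n", toy_is_eligible(false, 9000000, 8000000));      /* 1 */
        return 0;
}
```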
@@ -618,30 +624,6 @@ static void preempt(cpu_entry_t *entry, struct task_struct *next,
         preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
-/*
- * Correct local link after a change to the local HRT domain.
- */
-static void check_for_hrt_preempt(cpu_entry_t *entry)
-{
-        hrt_server_t *server = &entry->hrt_server;
-        struct task_struct *next_hrt = next_eligible_hrt(server);
-
-        TRACE_SUB("checking for HRT preempt");
-
-        if (next_hrt &&
-            (!entry->linked || !is_hrt(entry->linked) ||
-             edf_preemption_needed(&server->hrt_domain, entry->linked))) {
-
-                TRACE_TASK_SUB(next_hrt, "preempting on CPU %d", entry->cpu);
-                remove(&server->hrt_domain, next_hrt);
-                preempt(entry, next_hrt, NULL);
-        } else {
-                TRACE_SUB("not preempting, Next: %d, Scheduled: %d, is_srt: %d",
-                          !!next_hrt, !!entry->linked,
-                          (entry->linked) ? is_srt(entry->linked) : 0);
-        }
-}
-
 static struct task_struct* check_for_srt_preempt(cpu_entry_t *entry)
 {
         be_server_t *linked_server;
@@ -765,13 +747,44 @@ static void check_for_global_preempt(void)
 }
 
 /*
+ * Correct local link after a change to the local HRT domain.
+ */
+static void check_for_hrt_preempt(cpu_entry_t *entry)
+{
+        hrt_server_t *server = &entry->hrt_server;
+        struct task_struct *next_hrt = next_eligible_hrt(server);
+        struct task_struct *curr = entry->linked;
+
+        TRACE_SUB("checking for HRT preempt on P%d", entry->cpu);
+
+        if (next_hrt &&
+            (!entry->linked || !is_hrt(entry->linked) ||
+             edf_preemption_needed(&server->hrt_domain, entry->linked))) {
+
+                TRACE_TASK_SUB(next_hrt, "preempting on CPU %d", entry->cpu);
+                remove(&server->hrt_domain, next_hrt);
+                preempt(entry, next_hrt, NULL);
+
+                /* We might have just kicked off an SRT. Check to see if it
+                 * preempts anything.
+                 */
+                if (curr && !is_hrt(curr))
+                        check_for_global_preempt();
+        } else {
+                TRACE_SUB("not preempting, Next: %d, Scheduled: %d, is_srt: %d",
+                          !!next_hrt, !!entry->linked,
+                          (entry->linked) ? is_srt(entry->linked) : 0);
+        }
+}
+
+/*
  * Assumes called with local irqs disabled.
  */
 static void job_arrival(struct task_struct *task, cpu_entry_t *entry)
 {
         int was_empty;
         BUG_ON(task_cpu(task) == NO_CPU);
-        TRACE_TASK_SUB(task, "Arriving");
+        TRACE_TASK_SUB(task, "Arriving on P%d", entry->cpu);
 
         if (is_hrt(task)) {
                 requeue(task, &entry->hrt_server.hrt_domain);
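check_for_hrt_preempt() now works in two steps: link the eligible HRT job to the local CPU, then, if that displaced an SRT or BE task, run the global check so the displaced task can be considered for another CPU. The self-contained sketch below shows only that ordering; the toy_* types and helpers are invented for illustration and stand in for cpu_entry_t and the real preemption routines:

```c
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
        const char *name;
        bool is_hrt;
};

struct toy_cpu {
        struct toy_task *linked;
};

/* placeholder for the global pass that scans all CPUs for the
 * lowest-priority link and preempts it if the candidate beats it */
static void toy_check_global_preempt(struct toy_task *candidate)
{
        printf("global check: try to place '%s' elsewhere\n", candidate->name);
}

static void toy_hrt_preempt(struct toy_cpu *cpu, struct toy_task *next_hrt)
{
        struct toy_task *displaced = cpu->linked;

        cpu->linked = next_hrt;         /* the HRT job is pinned to this CPU */

        /* a displaced non-HRT task goes back to the global pool */
        if (displaced && !displaced->is_hrt)
                toy_check_global_preempt(displaced);
}

int main(void)
{
        struct toy_task srt = { "srt job", false };
        struct toy_task hrt = { "hrt job", true };
        struct toy_cpu cpu = { .linked = &srt };

        toy_hrt_preempt(&cpu, &hrt);
        return 0;
}
```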
@@ -860,8 +873,6 @@ static void release_hrt_jobs(rt_domain_t *domain, struct bheap *tasks)
         first = (struct task_struct*)bheap_peek(edf_ready_order, tasks)->value;
         entry = task_sched_entry(first);
 
-        raw_spin_lock_irqsave(global_lock, flags);
-
         BUG_ON(!first || !is_hrt(first));
         TRACE_TASK(first, "HRTs released at %llu on %d\n",
                    TIME(litmus_clock()), task_cpu(first));
@@ -1002,9 +1013,7 @@ static void server_completed(server_t *server, struct task_struct *was_running)
 
                 /* Otherwise we need to pick another task to run */
                 requeue(was_running, get_rt_domain(entry, was_running));
-
                 edf_hsb_pick_next(entry);
-
                 preempt_if_preemptable(entry->scheduled, entry->cpu);
         }
 }
@@ -1023,8 +1032,7 @@ static void hrt_server_released(pserver_t *_server)
 
         server->no_slack = 0;
         check_for_hrt_preempt(entry);
-        if (!entry->linked || !is_hrt(entry->linked))
-                slack_timer_arm(server);
+        slack_timer_arm(server);
 }
 
 /*
@@ -1243,33 +1251,35 @@ static void stop_hrt_servers(void)
  */
 static void start_servers(lt_t time)
 {
-        /* int cpu; */
-        /* lt_t slack_fire; */
-        /* cpu_entry_t *entry; */
-        /* pserver_t *server; */
+        int cpu;
+        lt_t slack_fire;
+        cpu_entry_t *entry;
+        pserver_t *server;
         /* be_server_t *be_server; */
         /* struct list_head *pos; */
 
-        /* /\* Start HRT servers *\/ */
-        /* for_each_online_cpu(cpu) { */
-        /*      entry = &per_cpu(cpu_entries, cpu); */
-        /*      server = &entry->hrt_server.pserver; */
+        /* Start HRT servers */
+        for_each_online_cpu(cpu) {
+                entry = &per_cpu(cpu_entries, cpu);
+                server = &entry->hrt_server.pserver;
+
 
-        /*      TRACE("Setting up cpu %d to have timer deadline %llu\n", */
-        /*            cpu, TIME(get_server_deadline(server))); */
+                /* This should never be armed at server release */
+                slack_timer_cancel(&entry->hrt_server);
 
-        /*      /\* This should never be armed at server release *\/ */
-        /*      slack_timer_cancel(&entry->hrt_server); */
+                TRACE("Setting up cpu %d to have timer deadline %llu\n",
+                      cpu, TIME(get_server_deadline(server)));
 
-        /*      pserver_start_cpu_releasing(server, time, cpu); */
+                pserver_start_cpu_releasing(server, time, cpu);
 
-        /*      slack_fire = get_server_deadline(server) - */
-        /*              get_server_budget(server); */
-        /*      hrtimer_start_on(cpu, &per_cpu(slack_timer_infos, cpu), */
-        /*                       &entry->hrt_server.slack_timer, */
-        /*                       ns_to_ktime(slack_fire), */
-        /*                       HRTIMER_MODE_ABS_PINNED); */
-        /* } */
+                slack_fire = get_server_deadline(server) -
+                        get_server_budget(server);
+                TRACE_SUB("slack armed to fire at %llu", TIME(slack_fire));
+                hrtimer_start_on(cpu, &per_cpu(slack_timer_infos, cpu),
+                                 &entry->hrt_server.slack_timer,
+                                 ns_to_ktime(slack_fire),
+                                 HRTIMER_MODE_ABS_PINNED);
+        }
 
         /* /\* Start BE servers *\/ */
         /* list_for_each(pos, &be_servers) { */