author | Jonathan Herman <hermanjl@cs.unc.edu> | 2011-08-22 16:45:04 -0400
---|---|---
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2011-08-22 16:45:04 -0400
commit | f32634d476829990f28a868c41e58f2839620b2a (patch) |
tree | 2edaa3112483900d28d17841963f253e5be67cd4 |
parent | 78eaa72a6d6d39bdc567a1dee29a5f31fbae863b (diff) |
last tested version | wip-edf-hsb |
-rw-r--r-- | litmus/Makefile | 3
-rw-r--r-- | litmus/sched_edf_hsb.c | 376
-rw-r--r-- | litmus/sched_edf_hsb_noslack.c | 2556
-rw-r--r-- | litmus/servers.c | 92
4 files changed, 14 insertions, 3013 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index 9468312b39e4..62c2bb064581 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -18,8 +18,7 @@ obj-y = sched_plugin.o litmus.o \ | |||
18 | ctrldev.o \ | 18 | ctrldev.o \ |
19 | servers.o \ | 19 | servers.o \ |
20 | sched_gsn_edf.o \ | 20 | sched_gsn_edf.o \ |
21 | sched_edf_hsb.o \ | 21 | sched_edf_hsb.o |
22 | sched_edf_hsb_noslack.o | ||
23 | 22 | ||
24 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o | 23 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o |
25 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o | 24 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o |
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c
index d74f4337ac2b..4bc6c1382171 100644
--- a/litmus/sched_edf_hsb.c
+++ b/litmus/sched_edf_hsb.c
@@ -1,29 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * litmus/sched_edf_hsb.c | 2 | * litmus/sched_edf_hsb.c |
3 | * | 3 | * |
4 | * Implentation of the EDF-HSB scheduling algorithm. | 4 | * Implementation of the EDF-HSB scheduling algorithm. |
5 | * | ||
6 | * The following 6 events are fired by timers and not handled by | ||
7 | * the plugin infrastructure itself: | ||
8 | * | ||
9 | * release_[hrt|srt|be]_jobs | ||
10 | * [hrt|be]_server_released | ||
11 | * server_completed (for HRT, SRT, and BE) | ||
12 | * | ||
13 | * The following 4 events are caused by a write to the proc entry | ||
14 | * and should never be run when the plugin is already running: | ||
15 | * stop_[hrt|be]_servers | ||
16 | * admit_[hrt|be]_server | ||
17 | * | ||
18 | * TODO system for removing tasks from their release queues | ||
19 | * TODO clean up link_to_cpu and check_slack args | ||
20 | * TODO move slack completion into release | ||
21 | * TODO fix concurrent arms | ||
22 | * TODO slack and BE servers, include slack higher prio | ||
23 | TODO start servers should no longer be necessary | ||
24 | * TODO harmonize order of method arguments | ||
25 | * TODO test crazy task_new hack | ||
26 | * TODO remove bheap_node_in_heap check in litmus_exit_task | ||
27 | */ | 5 | */ |
28 | #include <linux/module.h> | 6 | #include <linux/module.h> |
29 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
@@ -43,32 +21,17 @@ | |||
43 | #include <litmus/servers.h> | 21 | #include <litmus/servers.h> |
44 | #define DEBUG_EDF_HSB | 22 | #define DEBUG_EDF_HSB |
45 | 23 | ||
46 | /* DOES NOT WORK */ | ||
47 | //#define SLACK_ON_MASTER | ||
48 | |||
49 | #define BE_PROC_NAME "be_servers" | 24 | #define BE_PROC_NAME "be_servers" |
50 | #define HRT_PROC_NAME "hrt_servers" | 25 | #define HRT_PROC_NAME "hrt_servers" |
51 | #define BE_SERVER_BASE 100 | 26 | #define BE_SERVER_BASE 100 |
52 | #define IDLE_SLACK_BASE 1000 | 27 | #define IDLE_SLACK_BASE 1000 |
53 | #define SLACK_MIN NSEC_PER_MSEC | 28 | #define SLACK_MIN NSEC_PER_MSEC |
54 | 29 | ||
55 | /* SCHED_TRACE action events */ | ||
56 | #define SERVER_COMPLETED_ACTION 1 | ||
57 | #define SERVER_RELEASED_ACTION 2 | ||
58 | #define NO_SLACK_ACTION 3 | ||
59 | #define SLACK_RUN_ACTION 4 | ||
60 | #define SLACK_STOP_ACTION 5 | ||
61 | #define SLACK_RECLAIM_ACTION 6 | ||
62 | #define SLACK_EXPIRED_ACTION 7 | ||
63 | #define SLACK_DONATED_ACTION 8 | ||
64 | #define CANDIDATE_ADDED_ACTION 9 | ||
65 | |||
66 | /* Uncomment for human readable time */ | 30 | /* Uncomment for human readable time */ |
67 | #define TIME(x) \ | 31 | #define TIME(x) \ |
68 | (x) | 32 | ({lt_t y = x; \ |
69 | /* ({lt_t y = x; \ */ | 33 | do_div(y, NSEC_PER_MSEC); \ |
70 | /* do_div(y, NSEC_PER_MSEC); \ */ | 34 | y;}) |
71 | /* y;}) */ | ||
72 | #define TRACE_TIMER(fmt, args...) \ | 35 | #define TRACE_TIMER(fmt, args...) \ |
73 | sched_trace_log_message("%d P%d*[%s@%s:%d]: " fmt " at %d\n", \ | 36 | sched_trace_log_message("%d P%d*[%s@%s:%d]: " fmt " at %d\n", \ |
74 | TRACE_ARGS, ## args, TIME(litmus_clock())) | 37 | TRACE_ARGS, ## args, TIME(litmus_clock())) |
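
A note on the TIME() hunk above: the previously commented-out millisecond conversion is now active. Reassembled from the new (right-hand) column, the macro is a GCC statement expression; do_div() is used because it divides a 64-bit value in place, which is required where plain 64-bit division is unavailable in the kernel:

```c
/* TIME() after this change, reassembled from the new column of the
 * diff: converts a 64-bit nanosecond timestamp to milliseconds for
 * human-readable trace output. */
#define TIME(x)				\
	({lt_t y = x;			\
	  do_div(y, NSEC_PER_MSEC);	\
	  y;})
```
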
@@ -77,28 +40,6 @@ | |||
77 | (t)->rt_param.job_params.job_no, ## args) | 40 | (t)->rt_param.job_params.job_no, ## args) |
78 | 41 | ||
79 | /* | 42 | /* |
80 | * Useful debugging macros. Remove for actual use as they cause | ||
81 | * a lot of lock contention. | ||
82 | */ | ||
83 | #ifdef DEBUG_EDF_HSB | ||
84 | |||
85 | #define TRACE_SUB(fmt, args...) \ | ||
86 | sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt "\n", \ | ||
87 | TRACE_ARGS, ## args) | ||
88 | #define TRACE_TASK_SUB(t, fmt, args...) \ | ||
89 | TRACE_SUB(TASK_FMT " " fmt, TASK_ARGS(t), ##args) | ||
90 | #define TRACE_SERVER_SUB(s, fmt, args...) \ | ||
91 | TRACE_SUB(SERVER_FMT " " fmt, SERVER_ARGS(s), ##args) | ||
92 | #define TRACE_TASK_SERVER_SUB(t, s, fmt, args...) \ | ||
93 | TRACE_TASK_SUB(t, SERVER_FMT " " fmt, SERVER_ARGS(s), ##args) | ||
94 | #else | ||
95 | #define TRACE_SUB(fmt, args...) | ||
96 | #define TRACE_TASK_SUB(t, fmt, args...) | ||
97 | #define TRACE_SERVER_SUB(s, fmt, args...) | ||
98 | #define TRACE_TASK_SERVER_SUB(t, s, fmt, args...) | ||
99 | #endif | ||
100 | |||
101 | /* | ||
102 | * Different types of servers | 43 | * Different types of servers |
103 | */ | 44 | */ |
104 | typedef enum { | 45 | typedef enum { |
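
The large deletion above removes the plugin's conditional trace macros. They followed a common kernel idiom: each TRACE_*_SUB macro expands to a sched_trace_log_message() call only when DEBUG_EDF_HSB is defined, and to nothing otherwise, so the tracing disappears at compile time. The removed header comment gives the motivation for deleting them outright: the log calls cause heavy lock contention. The core of the removed pattern, for reference:

```c
/* Shape of the removed debug macros (taken from the deleted lines):
 * compiled in only under DEBUG_EDF_HSB, otherwise they expand to
 * nothing and cost nothing at runtime. */
#ifdef DEBUG_EDF_HSB
#define TRACE_SUB(fmt, args...)					\
	sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt "\n",	\
				TRACE_ARGS, ## args)
#else
#define TRACE_SUB(fmt, args...)
#endif
```
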
@@ -186,7 +127,6 @@ static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp; | |||
186 | */ | 127 | */ |
187 | static inline int head_in_list(struct list_head *head) | 128 | static inline int head_in_list(struct list_head *head) |
188 | { | 129 | { |
189 | BUG_ON(!head); | ||
190 | return !(head->next == head->prev && head->prev == head); | 130 | return !(head->next == head->prev && head->prev == head); |
191 | } | 131 | } |
192 | 132 | ||
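
head_in_list() above loses its BUG_ON(!head) assertion, leaving only the self-reference test. For context (an inference from standard <linux/list.h> semantics, not stated in the diff): a detached node initialized with INIT_LIST_HEAD() points at itself, which is exactly the condition the function rejects.

```c
/* Illustration only; `some_list` is a hypothetical list head. */
struct list_head node;

INIT_LIST_HEAD(&node);		/* node.next == node.prev == &node */
				/* head_in_list(&node) -> 0        */
list_add(&node, &some_list);	/* node now points into some_list  */
				/* head_in_list(&node) -> 1        */
```
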
@@ -242,8 +182,6 @@ static void remove_slack(server_t *slack) | |||
242 | { | 182 | { |
243 | if (!slack) | 183 | if (!slack) |
244 | return; | 184 | return; |
245 | TRACE_SERVER_SUB(slack, "slack removed"); | ||
246 | //////sched_trace_action(NULL, SLACK_EXPIRED_ACTION); | ||
247 | 185 | ||
248 | if (head_in_list(&slack->list)) | 186 | if (head_in_list(&slack->list)) |
249 | list_del_init(&slack->list); | 187 | list_del_init(&slack->list); |
@@ -260,12 +198,8 @@ static void add_slack(server_t *slack) | |||
260 | struct list_head *pos; | 198 | struct list_head *pos; |
261 | server_t *queued; | 199 | server_t *queued; |
262 | 200 | ||
263 | TRACE_SERVER_SUB(slack, "slack added"); | 201 | if (head_in_list(&slack->list)) |
264 | |||
265 | if (head_in_list(&slack->list)) { | ||
266 | TRACE_SERVER_SUB(slack, "already in list"); | ||
267 | return; | 202 | return; |
268 | } | ||
269 | 203 | ||
270 | list_for_each_prev(pos, &slack_queue) { | 204 | list_for_each_prev(pos, &slack_queue) { |
271 | queued = list_entry(pos, server_t, list); | 205 | queued = list_entry(pos, server_t, list); |
@@ -303,8 +237,6 @@ static void add_slack_candidate(struct task_struct *task) | |||
303 | struct list_head *pos; | 237 | struct list_head *pos; |
304 | struct task_struct *queued; | 238 | struct task_struct *queued; |
305 | 239 | ||
306 | TRACE_TASK_SUB(task, "candidate added"); | ||
307 | |||
308 | list_for_each_prev(pos, &slack_candidates) { | 240 | list_for_each_prev(pos, &slack_candidates) { |
309 | queued = get_candidate(pos); | 241 | queued = get_candidate(pos); |
310 | if (lt_before_eq(real_deadline(queued), real_deadline(task))) { | 242 | if (lt_before_eq(real_deadline(queued), real_deadline(task))) { |
@@ -321,11 +253,8 @@ static void donate_slack(server_t *donator) | |||
321 | server_t *slack = (server_t*)donator->data; | 253 | server_t *slack = (server_t*)donator->data; |
322 | hrt_server_t *hrt_server; | 254 | hrt_server_t *hrt_server; |
323 | 255 | ||
324 | TRACE_SERVER_SUB(donator, "%llu slack donated", TIME(donator->budget)); | ||
325 | |||
326 | if (donator->type == S_HRT) { | 256 | if (donator->type == S_HRT) { |
327 | hrt_server = container_of(donator, hrt_server_t, server); | 257 | hrt_server = container_of(donator, hrt_server_t, server); |
328 | BUG_ON(!hrt_server->ready); | ||
329 | } | 258 | } |
330 | 259 | ||
331 | slack->wcet = donator->budget; | 260 | slack->wcet = donator->budget; |
@@ -345,20 +274,15 @@ static noinline void check_donate_slack(server_t *donator, struct task_struct *w | |||
345 | hrt_server_t *hrt_server; | 274 | hrt_server_t *hrt_server; |
346 | int donate = 0; | 275 | int donate = 0; |
347 | 276 | ||
348 | TRACE_SERVER_SUB(donator, "checking donation"); | ||
349 | |||
350 | if (!slack) | 277 | if (!slack) |
351 | return; | 278 | return; |
352 | 279 | ||
353 | /* Donating small amounts of slack will result in excess migrations */ | 280 | /* Donating small amounts of slack will result in excess migrations */ |
354 | if (donator->budget < SLACK_MIN) | 281 | if (donator->budget < SLACK_MIN || server_has_slack(donator)) |
355 | return; | 282 | return; |
356 | 283 | ||
357 | if (server_has_slack(donator)) { | 284 | if (server_has_slack(donator)) |
358 | TRACE_SERVER_SUB(donator, "dead: %d, rel: %d, job: %d already donated", | ||
359 | slack->deadline, slack->release, slack->job_no); | ||
360 | return; | 285 | return; |
361 | } | ||
362 | 286 | ||
363 | if (donator->type == S_HRT) { | 287 | if (donator->type == S_HRT) { |
364 | hrt_server = container_of(donator, hrt_server_t, server); | 288 | hrt_server = container_of(donator, hrt_server_t, server); |
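
Two things are worth noting in the check_donate_slack() hunk above. First, with SLACK_MIN defined as NSEC_PER_MSEC near the top of the file, the guard means a server never donates less than a millisecond of budget, which (per the surviving comment) avoids migrations that cost more than they recover. Second, the new column folds server_has_slack() into the first early return yet keeps the stand-alone server_has_slack() check right after it, so the condition is now tested twice; harmless, but redundant:

```c
/* Combined guard from the new column; the separate
 * server_has_slack() check that follows it is now redundant. */
if (donator->budget < SLACK_MIN ||	/* < 1 ms: migration not worth it */
    server_has_slack(donator))		/* slack already donated          */
	return;
```
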
@@ -378,8 +302,6 @@ static noinline void check_donate_slack(server_t *donator, struct task_struct *w | |||
378 | if (!donate) | 302 | if (!donate) |
379 | return; | 303 | return; |
380 | 304 | ||
381 | ////sched_trace_action(was_scheduled, SLACK_DONATED_ACTION); | ||
382 | |||
383 | donate_slack(donator); | 305 | donate_slack(donator); |
384 | } | 306 | } |
385 | 307 | ||
@@ -392,7 +314,6 @@ static noinline void check_donate_slack(server_t *donator, struct task_struct *w | |||
392 | */ | 314 | */ |
393 | static void check_slack_candidate(struct task_struct *task) | 315 | static void check_slack_candidate(struct task_struct *task) |
394 | { | 316 | { |
395 | TRACE_TASK_SUB(task, "checking for candidate"); | ||
396 | if (is_srt(task) && | 317 | if (is_srt(task) && |
397 | /* The task has been synchronously released */ | 318 | /* The task has been synchronously released */ |
398 | task_job_no(task) > 2 && | 319 | task_job_no(task) > 2 && |
@@ -405,7 +326,6 @@ static void check_slack_candidate(struct task_struct *task) | |||
405 | } else if (is_srt(task) && | 326 | } else if (is_srt(task) && |
406 | is_released(task, litmus_clock()) && | 327 | is_released(task, litmus_clock()) && |
407 | !is_queued(task)) { | 328 | !is_queued(task)) { |
408 | TRACE_TASK_SUB(task, "candidate has been released!"); | ||
409 | __add_ready(&srt_domain, task); | 329 | __add_ready(&srt_domain, task); |
410 | } | 330 | } |
411 | } | 331 | } |
@@ -421,7 +341,6 @@ static noinline server_t* next_eligible_slack_server(void) | |||
421 | 341 | ||
422 | while (!list_empty(&slack_queue)) { | 342 | while (!list_empty(&slack_queue)) { |
423 | next_slack = list_entry(slack_queue.next, server_t, list); | 343 | next_slack = list_entry(slack_queue.next, server_t, list); |
424 | BUG_ON(!next_slack); | ||
425 | 344 | ||
426 | if (lt_after(next_slack->deadline, now) && | 345 | if (lt_after(next_slack->deadline, now) && |
427 | lt_after(next_slack->budget, SLACK_MIN) && | 346 | lt_after(next_slack->budget, SLACK_MIN) && |
@@ -429,7 +348,6 @@ static noinline server_t* next_eligible_slack_server(void) | |||
429 | break; | 348 | break; |
430 | } else { | 349 | } else { |
431 | /* Slack has expired or has too little time */ | 350 | /* Slack has expired or has too little time */ |
432 | BUG_ON(next_slack->id == 1001); | ||
433 | remove_slack(next_slack); | 351 | remove_slack(next_slack); |
434 | next_slack = NULL; | 352 | next_slack = NULL; |
435 | } | 353 | } |
@@ -552,10 +470,8 @@ static void slack_timer_arm(hrt_server_t *hrt_server) | |||
552 | struct hrtimer *timer; | 470 | struct hrtimer *timer; |
553 | lt_t now = litmus_clock(), when_to_fire; | 471 | lt_t now = litmus_clock(), when_to_fire; |
554 | 472 | ||
555 | if (!check_hrt_server_initialized(hrt_server)) { | 473 | if (!check_hrt_server_initialized(hrt_server)) |
556 | TRACE_SERVER_SUB(&hrt_server->server, "not initialized"); | ||
557 | return; | 474 | return; |
558 | } | ||
559 | 475 | ||
560 | timer = &hrt_server->slack_timer; | 476 | timer = &hrt_server->slack_timer; |
561 | entry = container_of(hrt_server, cpu_entry_t, hrt_server); | 477 | entry = container_of(hrt_server, cpu_entry_t, hrt_server); |
@@ -573,27 +489,12 @@ static void slack_timer_arm(hrt_server_t *hrt_server) | |||
573 | if (hrtimer_active(timer) || hrt_server->server.deadline == 0 || | 489 | if (hrtimer_active(timer) || hrt_server->server.deadline == 0 || |
574 | hrt_server->no_slack || hrt_server->server.budget == 0 || | 490 | hrt_server->no_slack || hrt_server->server.budget == 0 || |
575 | !hrt_server->ready) { | 491 | !hrt_server->ready) { |
576 | TRACE_SERVER_SUB(&hrt_server->server, | ||
577 | "not arming slack timer on P%d, %d %d %d %d %d", | ||
578 | entry->cpu, | ||
579 | hrtimer_active(timer), hrt_server->server.deadline == 0, | ||
580 | hrt_server->no_slack, hrt_server->server.budget == 0, | ||
581 | !hrt_server->ready); | ||
582 | return; | 492 | return; |
583 | } | 493 | } |
584 | 494 | ||
585 | if (when_to_fire >= hrt_server->server.deadline) { | ||
586 | TRACE_SUB("wtf: %llu, dead: %llu, bud: %llu", | ||
587 | when_to_fire, hrt_server->server.deadline, | ||
588 | hrt_server->server.budget); | ||
589 | BUG_ON(1); | ||
590 | } | ||
591 | |||
592 | /* Arm timer */ | 495 | /* Arm timer */ |
593 | if (lt_after_eq(now, when_to_fire)) { | 496 | if (lt_after_eq(now, when_to_fire)) { |
594 | /* 'Fire' immediately */ | 497 | /* 'Fire' immediately */ |
595 | TRACE_SERVER_SUB(&hrt_server->server, | ||
596 | "immediate: %llu", when_to_fire); | ||
597 | hrt_server->no_slack = 1; | 498 | hrt_server->no_slack = 1; |
598 | } else if (cpu != smp_processor_id()) { | 499 | } else if (cpu != smp_processor_id()) { |
599 | err = hrtimer_start_on(cpu, | 500 | err = hrtimer_start_on(cpu, |
@@ -601,15 +502,10 @@ static void slack_timer_arm(hrt_server_t *hrt_server) | |||
601 | &hrt_server->slack_timer, | 502 | &hrt_server->slack_timer, |
602 | ns_to_ktime(when_to_fire), | 503 | ns_to_ktime(when_to_fire), |
603 | HRTIMER_MODE_ABS_PINNED); | 504 | HRTIMER_MODE_ABS_PINNED); |
604 | if (err) | ||
605 | TRACE_SERVER_SUB(&hrt_server->server, "failed to arm slack"); | ||
606 | } else { | 505 | } else { |
607 | __hrtimer_start_range_ns(timer, ns_to_ktime(when_to_fire), | 506 | __hrtimer_start_range_ns(timer, ns_to_ktime(when_to_fire), |
608 | 0, HRTIMER_MODE_ABS_PINNED, 0); | 507 | 0, HRTIMER_MODE_ABS_PINNED, 0); |
609 | } | 508 | } |
610 | |||
611 | TRACE_SUB("slack timer 0x%x armed to fire at %llu on P%d", | ||
612 | timer, TIME(when_to_fire), entry->cpu); | ||
613 | } | 509 | } |
614 | 510 | ||
615 | /* | 511 | /* |
@@ -620,16 +516,7 @@ static inline void slack_timer_cancel(hrt_server_t *hrt_server) | |||
620 | int ret; | 516 | int ret; |
621 | if (hrtimer_active(&hrt_server->slack_timer)) { | 517 | if (hrtimer_active(&hrt_server->slack_timer)) { |
622 | ret = hrtimer_try_to_cancel(&hrt_server->slack_timer); | 518 | ret = hrtimer_try_to_cancel(&hrt_server->slack_timer); |
623 | if (ret == -1) { | 519 | } |
624 | TRACE_SERVER_SUB(&hrt_server->server, | ||
625 | "slack timer was running concurrently"); | ||
626 | } else { | ||
627 | TRACE_SERVER_SUB(&hrt_server->server, | ||
628 | "slack timer cancelled"); | ||
629 | } | ||
630 | } else { | ||
631 | TRACE_SERVER_SUB(&hrt_server->server, "slack not active"); | ||
632 | } | ||
633 | } | 520 | } |
634 | 521 | ||
635 | /* | 522 | /* |
@@ -664,18 +551,13 @@ static void requeue_server(server_t *server, lt_t now) | |||
664 | if (!added) { | 551 | if (!added) { |
665 | /* Mark servers as released */ | 552 | /* Mark servers as released */ |
666 | if (server->type == S_HRT) { | 553 | if (server->type == S_HRT) { |
667 | TRACE_SERVER_SUB(server, "P%d now ready at %llu", now); | ||
668 | hrt_server = container_of(server, hrt_server_t, server); | 554 | hrt_server = container_of(server, hrt_server_t, server); |
669 | hrt_server->ready = 1; | 555 | hrt_server->ready = 1; |
670 | remove_slack(server_slack(server)); | 556 | remove_slack(server_slack(server)); |
671 | hrt_server->no_slack = 0; | 557 | hrt_server->no_slack = 0; |
672 | ////sched_trace_action(NULL, SERVER_RELEASED_ACTION); | ||
673 | } else if (server->type == S_BE) { | 558 | } else if (server->type == S_BE) { |
674 | TRACE_SERVER_SUB(server, "BE added to ready"); | ||
675 | bheap_insert(server_order, &be_ready_servers, server->hn); | 559 | bheap_insert(server_order, &be_ready_servers, server->hn); |
676 | } | 560 | } |
677 | } else { | ||
678 | BUG_ON(bheap_node_in_heap(server->hn)); | ||
679 | } | 561 | } |
680 | } | 562 | } |
681 | 563 | ||
@@ -691,22 +573,9 @@ static void reclaim_slack(server_t *slack) | |||
691 | return; | 573 | return; |
692 | 574 | ||
693 | /* SRT servers do not ever reclaim slack */ | 575 | /* SRT servers do not ever reclaim slack */ |
694 | ////sched_trace_action(NULL, SLACK_RECLAIM_ACTION); | ||
695 | |||
696 | exec = slack->wcet - slack->budget; | 576 | exec = slack->wcet - slack->budget; |
697 | TRACE_SERVER_SUB(donator, "reclaiming %llu slack", TIME(exec)); | ||
698 | |||
699 | BUG_ON(is_server_linked(donator)); | ||
700 | BUG_ON(!slack->wcet); | ||
701 | BUG_ON(!donator->budget); | ||
702 | |||
703 | donator->budget = lt_subtract(donator->budget, exec); | 577 | donator->budget = lt_subtract(donator->budget, exec); |
704 | slack->wcet = slack->budget; | 578 | slack->wcet = slack->budget; |
705 | |||
706 | /* If budget exhausted, server needs to wait for next release */ | ||
707 | if (!donator->budget) { | ||
708 | TRACE_SERVER_SUB(donator, "exhausted by slack"); | ||
709 | } | ||
710 | } | 579 | } |
711 | 580 | ||
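
The reclaim arithmetic above is easy to miss in two-column form: the slack server's wcet holds the budget it was donated and budget holds what is left, so their difference is the time actually consumed while running as slack, and that time is charged back to the donating server. A hypothetical walk-through (numbers invented for illustration; lt_subtract() is assumed to be LITMUS^RT's non-negative subtraction):

```c
/* Hypothetical reclaim_slack() example:
 *   donated slack:  wcet = 5ms, budget remaining = 2ms
 *   exec = 5ms - 2ms = 3ms consumed while running as slack
 *   donator->budget = lt_subtract(donator->budget, 3ms)
 *   slack->wcet = slack->budget = 2ms  (leftover can be re-donated)
 * If this exhausts the donator, it simply waits for its next
 * release, as the removed comment noted. */
```
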
712 | /* | 581 | /* |
@@ -717,31 +586,6 @@ static noinline void link_server(cpu_entry_t *entry, | |||
717 | { | 586 | { |
718 | 587 | ||
719 | if (entry->linked) { | 588 | if (entry->linked) { |
720 | /* Massive state check */ | ||
721 | if (next_server->type == S_SRT) { | ||
722 | /* SRT task cannot get ahead of its server */ | ||
723 | BUG_ON(next_server->job_no + 1 < task_job_no(entry->linked)); | ||
724 | BUG_ON(lt_after(get_deadline(entry->linked), | ||
725 | next_server->deadline)); | ||
726 | } else if (next_server->type == S_HRT) { | ||
727 | /* HRT servers should never, ever migrate */ | ||
728 | BUG_ON(entry->cpu != task_cpu(entry->linked)); | ||
729 | BUG_ON(!entry->hrt_server.ready); | ||
730 | } else if (next_server->type == S_SLACK) { | ||
731 | /* Should have already been removed from slack list */ | ||
732 | BUG_ON(head_in_list(&task_data(entry->linked)->candidate_list)); | ||
733 | BUG_ON(is_be(entry->linked) && is_queued(entry->linked)); | ||
734 | ////sched_trace_action(entry->linked, SLACK_RUN_ACTION); | ||
735 | BUG_ON(is_srt(entry->linked) && | ||
736 | task_srt_server(entry->linked)->job_no <= | ||
737 | task_job_no(entry->linked)); | ||
738 | } else { /* BE */ | ||
739 | /* Should have already been removed from ready heap */ | ||
740 | BUG_ON(bheap_node_in_heap(next_server->hn)); | ||
741 | BUG_ON(is_queued(entry->linked)); | ||
742 | ////sched_trace_action(entry->linked, next_server->id); | ||
743 | } | ||
744 | |||
745 | if (next_server->type != S_SLACK && | 589 | if (next_server->type != S_SLACK && |
746 | (head_in_list(&server_slack(next_server)->list))) { | 590 | (head_in_list(&server_slack(next_server)->list))) { |
747 | remove_slack(server_slack(next_server)); | 591 | remove_slack(server_slack(next_server)); |
@@ -765,9 +609,6 @@ static noinline void link_server(cpu_entry_t *entry, | |||
765 | static noinline void unlink_server(cpu_entry_t *entry, int requeue) | 609 | static noinline void unlink_server(cpu_entry_t *entry, int requeue) |
766 | { | 610 | { |
767 | server_t *server = entry->linked_server; | 611 | server_t *server = entry->linked_server; |
768 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
769 | |||
770 | BUG_ON(!entry->linked_server); | ||
771 | 612 | ||
772 | server_stop(entry->linked_server); | 613 | server_stop(entry->linked_server); |
773 | server = entry->linked_server; | 614 | server = entry->linked_server; |
@@ -778,16 +619,12 @@ static noinline void unlink_server(cpu_entry_t *entry, int requeue) | |||
778 | 619 | ||
779 | if (server->type == S_SLACK && server->deadline) { | 620 | if (server->type == S_SLACK && server->deadline) { |
780 | add_slack(server); | 621 | add_slack(server); |
781 | ////sched_trace_action(entry->linked, SLACK_STOP_ACTION); | ||
782 | 622 | ||
783 | /* Donator needs to absorb slack execution time */ | 623 | /* Donator needs to absorb slack execution time */ |
784 | reclaim_slack(server); | 624 | reclaim_slack(server); |
785 | } else if (server->type != S_SRT) { | 625 | } else if (server->type != S_SRT) { |
786 | requeue_server(server, litmus_clock()); | 626 | requeue_server(server, litmus_clock()); |
787 | } | 627 | } |
788 | |||
789 | if (server->type == S_HRT && hrt_server->ready) | ||
790 | BUG_ON(head_in_list(&server_slack(server)->list)); | ||
791 | } | 628 | } |
792 | 629 | ||
793 | static void requeue(struct task_struct *task, rt_domain_t *domain); | 630 | static void requeue(struct task_struct *task, rt_domain_t *domain); |
@@ -807,14 +644,6 @@ static noinline void link_to_cpu(cpu_entry_t *entry, | |||
807 | struct task_struct *tmp_task; | 644 | struct task_struct *tmp_task; |
808 | int on_cpu; | 645 | int on_cpu; |
809 | 646 | ||
810 | BUG_ON(linked && !is_realtime(linked)); | ||
811 | BUG_ON(linked && is_hrt(linked) && entry->cpu != task_cpu(linked)); | ||
812 | BUG_ON(entry->cpu == edf_hsb_release_master); | ||
813 | |||
814 | if (linked) | ||
815 | TRACE_TASK_SERVER_SUB(linked, next_server, "linking to P%d", | ||
816 | entry->cpu); | ||
817 | |||
818 | /* Currently linked task is set to be unlinked. */ | 647 | /* Currently linked task is set to be unlinked. */ |
819 | if (entry->linked) { | 648 | if (entry->linked) { |
820 | unlink_server(entry, 1); | 649 | unlink_server(entry, 1); |
@@ -835,10 +664,6 @@ static noinline void link_to_cpu(cpu_entry_t *entry, | |||
835 | if (entry != sched && | 664 | if (entry != sched && |
836 | sched->linked && is_hrt(sched->linked)) { | 665 | sched->linked && is_hrt(sched->linked)) { |
837 | /* We are already scheduled on a CPU with an HRT */ | 666 | /* We are already scheduled on a CPU with an HRT */ |
838 | TRACE_TASK_SUB(linked, | ||
839 | "cannot move to scheduled CPU P%d", | ||
840 | sched->cpu); | ||
841 | |||
842 | requeue_server(next_server, litmus_clock()); | 667 | requeue_server(next_server, litmus_clock()); |
843 | requeue(linked, get_rt_domain(entry, linked)); | 668 | requeue(linked, get_rt_domain(entry, linked)); |
844 | 669 | ||
@@ -848,11 +673,6 @@ static noinline void link_to_cpu(cpu_entry_t *entry, | |||
848 | /* Link to the CPU we are scheduled on by swapping | 673 | /* Link to the CPU we are scheduled on by swapping |
849 | * with that CPU's linked task. | 674 | * with that CPU's linked task. |
850 | */ | 675 | */ |
851 | BUG_ON(is_hrt(linked)); | ||
852 | |||
853 | TRACE_TASK_SUB(linked,"already scheduled on P%d", | ||
854 | sched->cpu); | ||
855 | |||
856 | tmp_task = sched->linked; | 676 | tmp_task = sched->linked; |
857 | tmp_server = sched->linked_server; | 677 | tmp_server = sched->linked_server; |
858 | 678 | ||
@@ -875,14 +695,6 @@ static noinline void link_to_cpu(cpu_entry_t *entry, | |||
875 | entry->linked = linked; | 695 | entry->linked = linked; |
876 | link_server(entry, next_server); | 696 | link_server(entry, next_server); |
877 | update_cpu_position(entry); | 697 | update_cpu_position(entry); |
878 | |||
879 | BUG_ON(!entry->linked && entry->linked_server); | ||
880 | |||
881 | if (linked) | ||
882 | TRACE_TASK_SERVER_SUB(linked, next_server, | ||
883 | "linked to %d", entry->cpu); | ||
884 | else | ||
885 | TRACE_SUB("NULL linked to %d", entry->cpu); | ||
886 | } | 698 | } |
887 | 699 | ||
888 | /* | 700 | /* |
@@ -906,8 +718,6 @@ static noinline void unlink(struct task_struct* t) | |||
906 | { | 718 | { |
907 | cpu_entry_t *entry; | 719 | cpu_entry_t *entry; |
908 | 720 | ||
909 | BUG_ON(!t); | ||
910 | |||
911 | if (t->rt_param.linked_on != NO_CPU) { | 721 | if (t->rt_param.linked_on != NO_CPU) { |
912 | /* Unlink */ | 722 | /* Unlink */ |
913 | entry = task_linked_entry(t); | 723 | entry = task_linked_entry(t); |
@@ -925,7 +735,7 @@ static noinline void unlink(struct task_struct* t) | |||
925 | entry->scheduled_server && /* Can be NULL on task_new */ | 735 | entry->scheduled_server && /* Can be NULL on task_new */ |
926 | entry->scheduled_server->type == S_SLACK) { | 736 | entry->scheduled_server->type == S_SLACK) { |
927 | 737 | ||
928 | TRACE_TASK_SUB(t, "unlinked on slack server"); | 738 | TRACE_TASK(t, "unlinked on slack server\n"); |
929 | 739 | ||
930 | } else if (is_released(t, litmus_clock())) { | 740 | } else if (is_released(t, litmus_clock())) { |
931 | /* This is an interesting situation: t is scheduled, | 741 | /* This is an interesting situation: t is scheduled, |
@@ -935,7 +745,6 @@ static noinline void unlink(struct task_struct* t) | |||
935 | * been preempted but completes before it is | 745 | * been preempted but completes before it is |
936 | * descheduled. | 746 | * descheduled. |
937 | */ | 747 | */ |
938 | TRACE_TASK_SUB(t, "removing from domain"); | ||
939 | remove(get_rt_domain(entry, t), t); | 748 | remove(get_rt_domain(entry, t), t); |
940 | BUG_ON(is_queued(t)); | 749 | BUG_ON(is_queued(t)); |
941 | } | 750 | } |
@@ -944,7 +753,6 @@ static noinline void unlink(struct task_struct* t) | |||
944 | if (head_in_list(&task_data(t)->candidate_list)) { | 753 | if (head_in_list(&task_data(t)->candidate_list)) { |
945 | list_del_init(&task_data(t)->candidate_list); | 754 | list_del_init(&task_data(t)->candidate_list); |
946 | } | 755 | } |
947 | |||
948 | } | 756 | } |
949 | 757 | ||
950 | /* | 758 | /* |
@@ -955,10 +763,6 @@ static noinline void unlink(struct task_struct* t) | |||
955 | static inline int is_eligible(struct task_struct *task, | 763 | static inline int is_eligible(struct task_struct *task, |
956 | hrt_server_t *hrt_server) | 764 | hrt_server_t *hrt_server) |
957 | { | 765 | { |
958 | TRACE_TASK_SUB(task, "%d %d %llu %llu", | ||
959 | hrt_server->ready, hrt_server->no_slack, | ||
960 | hrt_server->server.deadline, | ||
961 | get_deadline(task)); | ||
962 | return hrt_server->ready && !is_server_linked(&hrt_server->server) && | 766 | return hrt_server->ready && !is_server_linked(&hrt_server->server) && |
963 | (hrt_server->no_slack || | 767 | (hrt_server->no_slack || |
964 | lt_after_eq(hrt_server->server.deadline, get_deadline(task))); | 768 | lt_after_eq(hrt_server->server.deadline, get_deadline(task))); |
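
With the trace call stripped, the eligibility rule above reads cleanly: an HRT task may consume its server's budget only if the server is released and not already linked, and either the server has run out of slack (it must run now) or the server's deadline still covers the task's. The same logic from the new column, reformatted with comments:

```c
return hrt_server->ready &&			  /* server released    */
       !is_server_linked(&hrt_server->server) &&  /* and not running   */
       (hrt_server->no_slack ||			  /* must run now, or   */
	lt_after_eq(hrt_server->server.deadline,  /* server deadline    */
		    get_deadline(task)));	  /* covers the task's  */
```
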
@@ -970,12 +774,9 @@ static inline int is_eligible(struct task_struct *task, | |||
970 | static inline void catchup_server(server_t *server, lt_t time) | 774 | static inline void catchup_server(server_t *server, lt_t time) |
971 | { | 775 | { |
972 | lt_t diff, sub; | 776 | lt_t diff, sub; |
973 | |||
974 | diff = time - server->deadline; | 777 | diff = time - server->deadline; |
975 | sub = diff % server->period; | 778 | sub = diff % server->period; |
976 | |||
977 | server_release_at(server, time - sub); | 779 | server_release_at(server, time - sub); |
978 | TRACE_SERVER_SUB(server, "catching up to %llu", time); | ||
979 | } | 780 | } |
980 | 781 | ||
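
catchup_server() above advances a server whose deadline has passed to the boundary of the period containing `time`. A worked example with invented numbers:

```c
/* Hypothetical: period = 10ms, deadline missed at t = 50ms, caught
 * up at t = 73ms.
 *
 *   diff = 73 - 50 = 23ms behind
 *   sub  = 23 % 10 = 3ms into the current period
 *   server_release_at(server, 73 - 3 = 70ms)
 *
 * The server is re-released at the most recent period boundary,
 * skipping the releases it missed in between. */
```
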
981 | static noinline int catchup_srt_server(struct task_struct *task) | 782 | static noinline int catchup_srt_server(struct task_struct *task) |
@@ -994,11 +795,6 @@ static noinline int catchup_srt_server(struct task_struct *task) | |||
994 | /* Get the new release */ | 795 | /* Get the new release */ |
995 | release = srt_server->release + jobs * srt_server->period; | 796 | release = srt_server->release + jobs * srt_server->period; |
996 | 797 | ||
997 | TRACE_SERVER_SUB(srt_server, "catching up to %llu, job %d", | ||
998 | release, srt_server->job_no + jobs); | ||
999 | |||
1000 | BUG_ON(jobs < 1); | ||
1001 | |||
1002 | /* Update server state */ | 798 | /* Update server state */ |
1003 | server_release_at(srt_server, release); | 799 | server_release_at(srt_server, release); |
1004 | srt_server->job_no += jobs - 1; | 800 | srt_server->job_no += jobs - 1; |
@@ -1008,9 +804,6 @@ static noinline int catchup_srt_server(struct task_struct *task) | |||
1008 | tsk_rt(task)->job_params.deadline = srt_server->deadline; | 804 | tsk_rt(task)->job_params.deadline = srt_server->deadline; |
1009 | 805 | ||
1010 | rv = 1; | 806 | rv = 1; |
1011 | |||
1012 | ////sched_trace_action(task, SERVER_RELEASED_ACTION); | ||
1013 | |||
1014 | } else if (lt_before(srt_server->deadline, now) && | 807 | } else if (lt_before(srt_server->deadline, now) && |
1015 | srt_server->job_no <= 1) { | 808 | srt_server->job_no <= 1) { |
1016 | 809 | ||
@@ -1018,8 +811,6 @@ static noinline int catchup_srt_server(struct task_struct *task) | |||
1018 | srt_server->job_no = task_job_no(task); | 811 | srt_server->job_no = task_job_no(task); |
1019 | } | 812 | } |
1020 | 813 | ||
1021 | BUG_ON(srt_server->job_no == 0); | ||
1022 | |||
1023 | return rv; | 814 | return rv; |
1024 | } | 815 | } |
1025 | 816 | ||
@@ -1042,44 +833,28 @@ static noinline struct task_struct* next_eligible_hrt(hrt_server_t *hrt_server) | |||
1042 | budget = hrt_server->server.budget; | 833 | budget = hrt_server->server.budget; |
1043 | slack = lt_subtract(dead, budget); | 834 | slack = lt_subtract(dead, budget); |
1044 | 835 | ||
1045 | TRACE_SERVER_SUB(&hrt_server->server, "dead: %llu, budget: %llu" | ||
1046 | "now: %llu, slack: %llu", | ||
1047 | TIME(dead), TIME(budget), TIME(now), TIME(slack)); | ||
1048 | |||
1049 | if (!head_in_list(&hrt_server->server.release_list) && | 836 | if (!head_in_list(&hrt_server->server.release_list) && |
1050 | lt_before_eq(dead, now)) { | 837 | lt_before_eq(dead, now)) { |
1051 | /* The server missed a release */ | 838 | /* The server missed a release */ |
1052 | catchup_server(&hrt_server->server, now); | 839 | catchup_server(&hrt_server->server, now); |
1053 | TRACE_SERVER_SUB(&hrt_server->server, "now ready"); | ||
1054 | hrt_server->ready = 1; | 840 | hrt_server->ready = 1; |
1055 | remove_slack(server_slack(&hrt_server->server)); | 841 | remove_slack(server_slack(&hrt_server->server)); |
1056 | hrt_server->no_slack = 0; | 842 | hrt_server->no_slack = 0; |
1057 | 843 | ||
1058 | slack = lt_subtract(hrt_server->server.deadline, | 844 | slack = lt_subtract(hrt_server->server.deadline, |
1059 | hrt_server->server.budget); | 845 | hrt_server->server.budget); |
1060 | |||
1061 | ////sched_trace_action(task, SERVER_RELEASED_ACTION); | ||
1062 | } | 846 | } |
1063 | 847 | ||
1064 | /* If the slack timer is active, this is not necessary */ | 848 | /* If the slack timer is active, this is not necessary */ |
1065 | if (!hrtimer_active(&hrt_server->slack_timer) && hrt_server->ready) { | 849 | if (!hrtimer_active(&hrt_server->slack_timer) && hrt_server->ready) { |
1066 | if (lt_before_eq(slack, now) && !hrt_server->no_slack) { | 850 | if (lt_before_eq(slack, now) && !hrt_server->no_slack) { |
1067 | /* The server missed the shift to no slack */ | 851 | /* The server missed the shift to no slack */ |
1068 | TRACE_SERVER_SUB(&hrt_server->server, "no slack: %llu", | ||
1069 | TIME(slack)); | ||
1070 | hrt_server->no_slack = 1; | 852 | hrt_server->no_slack = 1; |
1071 | ////sched_trace_action(task, NO_SLACK_ACTION); | ||
1072 | } else { | 853 | } else { |
1073 | slack_timer_arm(hrt_server); | 854 | slack_timer_arm(hrt_server); |
1074 | } | 855 | } |
1075 | } | 856 | } |
1076 | 857 | ||
1077 | } else { | ||
1078 | TRACE_SERVER_SUB(&hrt_server->server, "%llu %d %llu %d %d", | ||
1079 | hrt_server->server.deadline, | ||
1080 | is_server_linked(&hrt_server->server), | ||
1081 | now, check_hrt_server_initialized(hrt_server), | ||
1082 | !is_server_linked(&hrt_server->server)); | ||
1083 | } | 858 | } |
1084 | 859 | ||
1085 | if (!hrt_server->server.budget || | 860 | if (!hrt_server->server.budget || |
@@ -1087,16 +862,11 @@ static noinline struct task_struct* next_eligible_hrt(hrt_server_t *hrt_server) | |||
1087 | 862 | ||
1088 | if (!hrt_server->server.budget && | 863 | if (!hrt_server->server.budget && |
1089 | !head_in_list(&hrt_server->server.release_list)) { | 864 | !head_in_list(&hrt_server->server.release_list)) { |
1090 | TRACE_SERVER_SUB(&hrt_server->server, "requeueing"); | ||
1091 | catchup_server(&hrt_server->server, now); | 865 | catchup_server(&hrt_server->server, now); |
1092 | requeue_server(&hrt_server->server, now); | 866 | requeue_server(&hrt_server->server, now); |
1093 | slack_timer_arm(hrt_server); | 867 | slack_timer_arm(hrt_server); |
1094 | } | 868 | } |
1095 | 869 | ||
1096 | if (task) { | ||
1097 | TRACE_TASK_SUB(task, "not eligible, budget: %llu", | ||
1098 | TIME(hrt_server->server.budget)); | ||
1099 | } | ||
1100 | task = NULL; | 870 | task = NULL; |
1101 | 871 | ||
1102 | /* Donate slack if we have nothing to schedule */ | 872 | /* Donate slack if we have nothing to schedule */ |
@@ -1126,7 +896,6 @@ static noinline struct task_struct* next_eligible_srt(void) | |||
1126 | while (next_srt && (get_rt_flags(next_srt) == RT_F_BLOCK || | 896 | while (next_srt && (get_rt_flags(next_srt) == RT_F_BLOCK || |
1127 | unlikely(!is_realtime(next_srt)) || | 897 | unlikely(!is_realtime(next_srt)) || |
1128 | tsk_rt(next_srt)->linked_on != NO_CPU)) { | 898 | tsk_rt(next_srt)->linked_on != NO_CPU)) { |
1129 | TRACE_TASK_SUB(next_srt, "removing finished task"); | ||
1130 | remove(&srt_domain, next_srt); | 899 | remove(&srt_domain, next_srt); |
1131 | next_srt = __peek_ready(&srt_domain); | 900 | next_srt = __peek_ready(&srt_domain); |
1132 | } | 901 | } |
@@ -1170,14 +939,12 @@ static noinline server_t* next_eligible_be_server(void) | |||
1170 | while (be_server && (lt_before_eq(be_server->deadline, now) || | 939 | while (be_server && (lt_before_eq(be_server->deadline, now) || |
1171 | is_server_linked(be_server))) { | 940 | is_server_linked(be_server))) { |
1172 | if (!be_server->deadline) { | 941 | if (!be_server->deadline) { |
1173 | TRACE_SERVER_SUB(be_server, "not initialized"); | ||
1174 | return NULL; | 942 | return NULL; |
1175 | } | 943 | } |
1176 | bheap_delete(server_order, &be_ready_servers, | 944 | bheap_delete(server_order, &be_ready_servers, |
1177 | be_server->hn); | 945 | be_server->hn); |
1178 | 946 | ||
1179 | if (is_server_linked(be_server)) { | 947 | if (is_server_linked(be_server)) { |
1180 | TRACE_SERVER_SUB(be_server, "linked"); | ||
1181 | be_server = next_be_server(); | 948 | be_server = next_be_server(); |
1182 | return NULL; | 949 | return NULL; |
1183 | } | 950 | } |
@@ -1187,22 +954,12 @@ static noinline server_t* next_eligible_be_server(void) | |||
1187 | bheap_insert(server_order, &be_ready_servers, | 954 | bheap_insert(server_order, &be_ready_servers, |
1188 | be_server->hn); | 955 | be_server->hn); |
1189 | be_server = next_be_server(); | 956 | be_server = next_be_server(); |
1190 | TRACE_SERVER_SUB(be_server, "catching up BE server"); | ||
1191 | ////sched_trace_action(NULL, SERVER_RELEASED_ACTION); /* Release */ | ||
1192 | } | 957 | } |
1193 | 958 | ||
1194 | if (be_server && lt_before(now, be_server->release)) { | 959 | if (be_server && lt_before(now, be_server->release)) { |
1195 | TRACE_SERVER_SUB(be_server, "not released"); | ||
1196 | be_server = NULL; | 960 | be_server = NULL; |
1197 | } | 961 | } |
1198 | 962 | ||
1199 | if (be_server) { | ||
1200 | TRACE_SERVER_SUB(be_server, "dead: %llu, rel: %llu, budget: %llu", | ||
1201 | be_server->deadline, be_server->release, | ||
1202 | be_server->budget); | ||
1203 | |||
1204 | } | ||
1205 | |||
1206 | return be_server; | 963 | return be_server; |
1207 | } | 964 | } |
1208 | 965 | ||
@@ -1214,7 +971,6 @@ static noinline void requeue(struct task_struct *task, rt_domain_t *domain) | |||
1214 | lt_t now = litmus_clock(); | 971 | lt_t now = litmus_clock(); |
1215 | int was_added; | 972 | int was_added; |
1216 | 973 | ||
1217 | BUG_ON(!is_realtime(task)); | ||
1218 | if (head_in_list(&task_data(task)->candidate_list)) { | 974 | if (head_in_list(&task_data(task)->candidate_list)) { |
1219 | list_del_init(&task_data(task)->candidate_list); | 975 | list_del_init(&task_data(task)->candidate_list); |
1220 | } | 976 | } |
@@ -1222,23 +978,17 @@ static noinline void requeue(struct task_struct *task, rt_domain_t *domain) | |||
1222 | check_slack_candidate(task); | 978 | check_slack_candidate(task); |
1223 | 979 | ||
1224 | if (is_queued(task)) { | 980 | if (is_queued(task)) { |
1225 | TRACE_TASK_SUB(task, "not requeueing, already queued"); | 981 | TRACE_TASK(task, "not requeueing, already queued\n"); |
1226 | } else if (is_released(task, now)) { | 982 | } else if (is_released(task, now)) { |
1227 | TRACE_TASK_SUB(task, "requeuing on ready %llu %llu %llu %llu", | ||
1228 | get_release(task), get_deadline(task), | ||
1229 | get_rt_period(task), now); | ||
1230 | __add_ready(domain, task); | 983 | __add_ready(domain, task); |
1231 | } else { | 984 | } else { |
1232 | /* Task needs to wait until it is released */ | 985 | /* Task needs to wait until it is released */ |
1233 | TRACE_TASK_SUB(task, "requeuing on release"); | ||
1234 | |||
1235 | was_added = add_release(domain, task); | 986 | was_added = add_release(domain, task); |
1236 | 987 | ||
1237 | /* The release time happened before we added ourselves | 988 | /* The release time happened before we added ourselves |
1238 | * to the heap. We can now add to ready. | 989 | * to the heap. We can now add to ready. |
1239 | */ | 990 | */ |
1240 | if (!was_added) { | 991 | if (!was_added) { |
1241 | TRACE_TASK_SUB(task, "missed release, going to ready"); | ||
1242 | __add_ready(domain, task); | 992 | __add_ready(domain, task); |
1243 | } | 993 | } |
1244 | } | 994 | } |
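
The tail of requeue() above also handles a race: the release time can pass between the is_released() test and the insertion into the release heap. add_release() reports whether the task actually made it onto the heap; when it did not, the task is already due and goes straight to the ready queue. Condensed:

```c
/* Decision tree of requeue(), condensed from the hunk above:
 *
 *   is_queued(task)           -> nothing to do
 *   is_released(task, now)    -> __add_ready(domain, task)
 *   otherwise                 -> add_release(domain, task);
 *     release fired meanwhile ->   __add_ready(domain, task)
 */
```
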
@@ -1285,13 +1035,6 @@ static void next_global_task(cpu_entry_t *entry, | |||
1285 | next_slack = next_eligible_slack(); | 1035 | next_slack = next_eligible_slack(); |
1286 | slack_server = next_eligible_slack_server(); | 1036 | slack_server = next_eligible_slack_server(); |
1287 | 1037 | ||
1288 | TRACE_SUB("be_server: %d, next_be: %d, next_srt: %d, slack_server: %d " | ||
1289 | "next_slack: %d", (be_server) ? be_server->id : -1, | ||
1290 | (next_be) ? next_be->pid : -1, | ||
1291 | (next_srt) ? next_srt->pid : -1, | ||
1292 | (slack_server) ? slack_server->id : -1, | ||
1293 | (next_slack) ? next_slack->pid : -1); | ||
1294 | |||
1295 | /* Check if the servers can schedule the task linked to entry */ | 1038 | /* Check if the servers can schedule the task linked to entry */ |
1296 | if (entry && entry->linked) { | 1039 | if (entry && entry->linked) { |
1297 | 1040 | ||
@@ -1336,8 +1079,6 @@ static void next_global_task(cpu_entry_t *entry, | |||
1336 | earlier_server_task(*next_server, *next_task, | 1079 | earlier_server_task(*next_server, *next_task, |
1337 | slack_server, next_slack, | 1080 | slack_server, next_slack, |
1338 | next_server, next_task); | 1081 | next_server, next_task); |
1339 | |||
1340 | //BUG_ON(*next_server && lt_before(litmus_clock(), *next_server->release)); | ||
1341 | } | 1082 | } |
1342 | 1083 | ||
1343 | /* | 1084 | /* |
@@ -1348,23 +1089,17 @@ static void remove_from_ready(server_t *server, struct task_struct *task, | |||
1348 | { | 1089 | { |
1349 | server_t *slack; | 1090 | server_t *slack; |
1350 | rt_domain_t *domain; | 1091 | rt_domain_t *domain; |
1351 | BUG_ON(!server); | ||
1352 | BUG_ON(!entry); | ||
1353 | BUG_ON(!task); | ||
1354 | 1092 | ||
1355 | if (server->type == S_SLACK) { | 1093 | if (server->type == S_SLACK) { |
1356 | TRACE_SERVER_SUB(server, "removed from slack list"); | ||
1357 | list_del_init(&server->list); | 1094 | list_del_init(&server->list); |
1358 | 1095 | ||
1359 | /* Remove from consideration of BE servers */ | 1096 | /* Remove from consideration of BE servers */ |
1360 | if (is_be(task) && is_queued(task)) { | 1097 | if (is_be(task) && is_queued(task)) { |
1361 | TRACE_TASK_SUB(task, "BE removed from ready"); | ||
1362 | remove(&be_domain, task); | 1098 | remove(&be_domain, task); |
1363 | } | 1099 | } |
1364 | 1100 | ||
1365 | /* Remove from consideration of slack servers */ | 1101 | /* Remove from consideration of slack servers */ |
1366 | if (head_in_list(&task_data(task)->candidate_list)) { | 1102 | if (head_in_list(&task_data(task)->candidate_list)) { |
1367 | TRACE_TASK_SUB(task, "deleting candidate"); | ||
1368 | list_del_init(&task_data(task)->candidate_list); | 1103 | list_del_init(&task_data(task)->candidate_list); |
1369 | } | 1104 | } |
1370 | } else { | 1105 | } else { |
@@ -1373,20 +1108,14 @@ static void remove_from_ready(server_t *server, struct task_struct *task, | |||
1373 | remove_slack(slack); | 1108 | remove_slack(slack); |
1374 | } | 1109 | } |
1375 | if (server->type == S_BE) { | 1110 | if (server->type == S_BE) { |
1376 | TRACE_SERVER_SUB(server, "server removed from ready"); | ||
1377 | BUG_ON(!server->hn); | ||
1378 | bheap_delete(server_order, &be_ready_servers, | 1111 | bheap_delete(server_order, &be_ready_servers, |
1379 | server->hn); | 1112 | server->hn); |
1380 | } | 1113 | } |
1381 | if (is_queued(task)) { | 1114 | if (is_queued(task)) { |
1382 | domain = get_rt_domain(entry, task); | 1115 | domain = get_rt_domain(entry, task); |
1383 | BUG_ON(!domain); | ||
1384 | TRACE_TASK_SUB(task, "removed from ready"); | ||
1385 | remove(domain, task); | 1116 | remove(domain, task); |
1386 | } | 1117 | } |
1387 | } | 1118 | } |
1388 | |||
1389 | BUG_ON(!task_data(task)); | ||
1390 | } | 1119 | } |
1391 | 1120 | ||
1392 | static void check_for_slack_preempt(struct task_struct*,server_t*,cpu_entry_t*, int); | 1121 | static void check_for_slack_preempt(struct task_struct*,server_t*,cpu_entry_t*, int); |
@@ -1411,8 +1140,6 @@ static void edf_hsb_pick_next(cpu_entry_t *entry) | |||
1411 | if (next_task) { | 1140 | if (next_task) { |
1412 | remove_from_ready(next_server, next_task, entry); | 1141 | remove_from_ready(next_server, next_task, entry); |
1413 | check_for_slack_preempt(next_task, next_server, entry, 1); | 1142 | check_for_slack_preempt(next_task, next_server, entry, 1); |
1414 | TRACE_TASK_SERVER_SUB(next_task, next_server, | ||
1415 | "removing and picked"); | ||
1416 | 1143 | ||
1417 | /* A slack preemption could cause something that was already | 1144 | /* A slack preemption could cause something that was already |
1418 | * running to be 'swapped' to this CPU in link_to_cpu. | 1145 | * running to be 'swapped' to this CPU in link_to_cpu. |
@@ -1421,7 +1148,6 @@ static void edf_hsb_pick_next(cpu_entry_t *entry) | |||
1421 | linked = entry->linked; | 1148 | linked = entry->linked; |
1422 | unlink(entry->linked); | 1149 | unlink(entry->linked); |
1423 | requeue(linked, get_rt_domain(entry, linked)); | 1150 | requeue(linked, get_rt_domain(entry, linked)); |
1424 | TRACE_TASK_SUB(linked, "preempted next pick"); | ||
1425 | } | 1151 | } |
1426 | link_to_cpu(entry, next_task, next_server); | 1152 | link_to_cpu(entry, next_task, next_server); |
1427 | } | 1153 | } |
@@ -1437,9 +1163,6 @@ static void preempt(cpu_entry_t *entry, struct task_struct *next, | |||
1437 | struct task_struct *linked; | 1163 | struct task_struct *linked; |
1438 | rt_domain_t *domain; | 1164 | rt_domain_t *domain; |
1439 | 1165 | ||
1440 | TRACE_TASK_SERVER_SUB(next, next_server, | ||
1441 | "preempting on P%d", entry->cpu); | ||
1442 | |||
1443 | remove_from_ready(next_server, next, entry); | 1166 | remove_from_ready(next_server, next, entry); |
1444 | 1167 | ||
1445 | check_for_slack_preempt(next, next_server, entry, slack_resched); | 1168 | check_for_slack_preempt(next, next_server, entry, slack_resched); |
@@ -1475,14 +1198,7 @@ static noinline void check_for_slack_preempt(struct task_struct *task, | |||
1475 | entry = task_linked_entry(task); | 1198 | entry = task_linked_entry(task); |
1476 | 1199 | ||
1477 | if (entry != next_entry) { | 1200 | if (entry != next_entry) { |
1478 | TRACE_TASK_SUB(task, "was on P%d", entry->cpu); | ||
1479 | |||
1480 | unlink(task); | 1201 | unlink(task); |
1481 | |||
1482 | /* if (resched) { */ | ||
1483 | /* edf_hsb_pick_next(entry); */ | ||
1484 | /* preempt_if_preemptable(entry->scheduled, entry->cpu); */ | ||
1485 | /* } */ | ||
1486 | } | 1202 | } |
1487 | } | 1203 | } |
1488 | 1204 | ||
@@ -1496,16 +1212,9 @@ static noinline void check_for_slack_preempt(struct task_struct *task, | |||
1496 | requeue(slack_task, get_rt_domain(entry, slack_task)); | 1212 | requeue(slack_task, get_rt_domain(entry, slack_task)); |
1497 | 1213 | ||
1498 | if (entry != next_entry && resched) { | 1214 | if (entry != next_entry && resched) { |
1499 | TRACE_SERVER_SUB(slack, "was on P%d", entry->cpu); | ||
1500 | /* Force a reschedule */ | 1215 | /* Force a reschedule */ |
1501 | edf_hsb_pick_next(entry); | 1216 | edf_hsb_pick_next(entry); |
1502 | preempt_if_preemptable(entry->scheduled, entry->cpu); | 1217 | preempt_if_preemptable(entry->scheduled, entry->cpu); |
1503 | } else { | ||
1504 | /* This can only happen on a preemption. If a preemption | ||
1505 | * happens, the task will be requeued elsewhere. | ||
1506 | * Obviously the next task has already been chosen. | ||
1507 | */ | ||
1508 | TRACE_SERVER_SUB(slack, "was on local P%d", entry->cpu); | ||
1509 | } | 1218 | } |
1510 | } | 1219 | } |
1511 | } | 1220 | } |
@@ -1521,8 +1230,6 @@ static void check_for_global_preempt(void) | |||
1521 | struct task_struct *next_task = (struct task_struct*)1; /* Not NULL */ | 1230 | struct task_struct *next_task = (struct task_struct*)1; /* Not NULL */ |
1522 | 1231 | ||
1523 | for (entry = lowest_prio_cpu(); entry; entry = lowest_prio_cpu()) { | 1232 | for (entry = lowest_prio_cpu(); entry; entry = lowest_prio_cpu()) { |
1524 | /* HRT cpus should not be in this heap */ | ||
1525 | BUG_ON(entry->linked && is_hrt(entry->linked)); | ||
1526 | 1233 | ||
1527 | next_global_task(entry, &next_server, &next_task); | 1234 | next_global_task(entry, &next_server, &next_task); |
1528 | 1235 | ||
@@ -1548,10 +1255,6 @@ static void check_for_global_preempt(void) | |||
1548 | 1255 | ||
1549 | if (sched != entry && sched->linked && | 1256 | if (sched != entry && sched->linked && |
1550 | is_hrt(sched->linked)) { | 1257 | is_hrt(sched->linked)) { |
1551 | |||
1552 | TRACE_TASK_SUB(next_task, | ||
1553 | "Already on P%d", | ||
1554 | sched->cpu); | ||
1555 | break; | 1258 | break; |
1556 | } | 1259 | } |
1557 | } | 1260 | } |
@@ -1580,8 +1283,6 @@ static void check_for_hrt_preempt(cpu_entry_t *entry) | |||
1580 | 1283 | ||
1581 | preempt(entry, next_hrt, &hrt_server->server, 1); | 1284 | preempt(entry, next_hrt, &hrt_server->server, 1); |
1582 | 1285 | ||
1583 | } else { | ||
1584 | TRACE_SERVER_SUB(&hrt_server->server, "not HRT preempting"); | ||
1585 | } | 1286 | } |
1586 | } | 1287 | } |
1587 | 1288 | ||
@@ -1594,8 +1295,6 @@ static void job_arrival(struct task_struct *task, cpu_entry_t *entry) | |||
1594 | 1295 | ||
1595 | BUG_ON(task_cpu(task) == NO_CPU); | 1296 | BUG_ON(task_cpu(task) == NO_CPU); |
1596 | 1297 | ||
1597 | TRACE_TASK_SUB(task, "arriving on P%d", entry->cpu); | ||
1598 | |||
1599 | if (is_hrt(task)) { | 1298 | if (is_hrt(task)) { |
1600 | requeue(task, &entry->hrt_server.hrt_domain); | 1299 | requeue(task, &entry->hrt_server.hrt_domain); |
1601 | check_for_hrt_preempt(entry); | 1300 | check_for_hrt_preempt(entry); |
@@ -1633,7 +1332,6 @@ static void release_hrt_jobs(rt_domain_t *domain, struct bheap *tasks) | |||
1633 | first = (struct task_struct*)bheap_peek(edf_ready_order, tasks)->value; | 1332 | first = (struct task_struct*)bheap_peek(edf_ready_order, tasks)->value; |
1634 | entry = task_sched_entry(first); | 1333 | entry = task_sched_entry(first); |
1635 | 1334 | ||
1636 | BUG_ON(!first || !is_hrt(first)); | ||
1637 | TRACE_TASK(first, "HRT tasks released at %llu on P%d\n", | 1335 | TRACE_TASK(first, "HRT tasks released at %llu on P%d\n", |
1638 | TIME(litmus_clock()), task_cpu(first)); | 1336 | TIME(litmus_clock()), task_cpu(first)); |
1639 | 1337 | ||
@@ -1698,7 +1396,6 @@ static enum hrtimer_restart slack_timer_fire(struct hrtimer *timer) | |||
1698 | 1396 | ||
1699 | TRACE_TIMER("slack timer fired for P%d", entry->cpu); | 1397 | TRACE_TIMER("slack timer fired for P%d", entry->cpu); |
1700 | BUG_ON(!server->ready); | 1398 | BUG_ON(!server->ready); |
1701 | ////sched_trace_action(entry->linked, NO_SLACK_ACTION); | ||
1702 | 1399 | ||
1703 | /* Set new state of entry */ | 1400 | /* Set new state of entry */ |
1704 | server->no_slack = 1; | 1401 | server->no_slack = 1; |
@@ -1720,8 +1417,6 @@ static void job_completion(cpu_entry_t *entry, struct task_struct* task) | |||
1720 | server_t *server = entry->linked_server; | 1417 | server_t *server = entry->linked_server; |
1721 | set_rt_flags(task, RT_F_SLEEP); | 1418 | set_rt_flags(task, RT_F_SLEEP); |
1722 | 1419 | ||
1723 | TRACE_TASK_SUB(task, "completed"); | ||
1724 | |||
1725 | unlink(task); | 1420 | unlink(task); |
1726 | check_donate_slack(server, task); | 1421 | check_donate_slack(server, task); |
1727 | 1422 | ||
@@ -1731,30 +1426,19 @@ static void job_completion(cpu_entry_t *entry, struct task_struct* task) | |||
1731 | if (server->type == S_SLACK && is_srt(task)) { | 1426 | if (server->type == S_SLACK && is_srt(task)) { |
1732 | tsk_rt(task)->job_params.job_no++; | 1427 | tsk_rt(task)->job_params.job_no++; |
1733 | sched_trace_task_release(task); | 1428 | sched_trace_task_release(task); |
1734 | TRACE_TASK_SERVER_SUB(task, server, "catching up SRT, " | ||
1735 | "rel: %llu, dead: %llu", | ||
1736 | TIME(get_release(task)), | ||
1737 | TIME(get_deadline(task))); | ||
1738 | } else if (server->type == S_SRT) { | 1429 | } else if (server->type == S_SRT) { |
1739 | /* If the task is behind the server it must release immediately, | 1430 | /* If the task is behind the server it must release immediately, |
1740 | * leaving its release time and deadline unchanged. | 1431 | * leaving its release time and deadline unchanged. |
1741 | */ | 1432 | */ |
1742 | if (server->job_no > tsk_rt(task)->job_params.job_no) { | 1433 | if (server->job_no > tsk_rt(task)->job_params.job_no) { |
1743 | TRACE_TASK_SUB(task, "catching up"); | ||
1744 | tsk_rt(task)->job_params.job_no++; | 1434 | tsk_rt(task)->job_params.job_no++; |
1745 | } else { | 1435 | } else { |
1746 | /* Otherwise release them both */ | 1436 | /* Otherwise release them both */ |
1747 | prepare_for_next_period(task); | 1437 | prepare_for_next_period(task); |
1748 | TRACE_TASK_SUB(task, "next release: %llu, dead: %llu", | ||
1749 | TIME(get_release(task)), | ||
1750 | TIME(get_deadline(task))); | ||
1751 | server_release(server); | 1438 | server_release(server); |
1752 | } | 1439 | } |
1753 | } else { | 1440 | } else { |
1754 | prepare_for_next_period(task); | 1441 | prepare_for_next_period(task); |
1755 | TRACE_TASK_SUB(task, "next release: %llu, dead: %llu", | ||
1756 | TIME(get_release(task)), | ||
1757 | TIME(get_deadline(task))); | ||
1758 | } | 1442 | } |
1759 | 1443 | ||
1760 | if (is_released(task, litmus_clock())) | 1444 | if (is_released(task, litmus_clock())) |
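
The completion logic above distinguishes three cases once the tracing is removed; a paraphrase, not new behavior:

```c
/* job_completion(), condensed:
 *
 *   slack server running an SRT task: the job ran early on donated
 *       slack, so only advance job_no and log the release.
 *   SRT server ahead of its task (server->job_no > task job_no):
 *       release the task immediately, keeping its current release
 *       time and deadline.
 *   otherwise: prepare_for_next_period(task); for SRT servers the
 *       server is released together with the task.
 */
```
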
@@ -1776,11 +1460,8 @@ static void server_completed(server_t *server, struct task_struct *task) | |||
1776 | cpu_entry_t *entry = task_linked_entry(task); | 1460 | cpu_entry_t *entry = task_linked_entry(task); |
1777 | 1461 | ||
1778 | BUG_ON(entry->linked != task); | 1462 | BUG_ON(entry->linked != task); |
1779 | BUG_ON(entry->linked_server != server); | ||
1780 | 1463 | ||
1781 | if (server->type == S_SRT) { | 1464 | if (server->type == S_SRT) { |
1782 | TRACE_TASK_SUB(task, "must wait on server"); | ||
1783 | |||
1784 | /* The job must now take the priority and release time | 1465 | /* The job must now take the priority and release time |
1785 | * of the next server. We do this so that we can still | 1466 | * of the next server. We do this so that we can still |
1786 | * use rt_domain and other handy methods to still work | 1467 | * use rt_domain and other handy methods to still work |
@@ -1791,15 +1472,10 @@ static void server_completed(server_t *server, struct task_struct *task) | |||
1791 | tsk_rt(task)->job_params.release = server->deadline; | 1472 | tsk_rt(task)->job_params.release = server->deadline; |
1792 | tsk_rt(task)->job_params.deadline = server->deadline + | 1473 | tsk_rt(task)->job_params.deadline = server->deadline + |
1793 | get_rt_period(task); | 1474 | get_rt_period(task); |
1794 | TRACE_TASK_SUB(task, "waiting, new dead: %llu, new rel: %llu", | ||
1795 | TIME(get_deadline(task)), | ||
1796 | TIME(get_release(task))); | ||
1797 | |||
1798 | } else if (server->type == S_HRT) { | 1475 | } else if (server->type == S_HRT) { |
1799 | /* Update state of HRT server */ | 1476 | /* Update state of HRT server */ |
1800 | hrt_server = container_of(server, hrt_server_t, server); | 1477 | hrt_server = container_of(server, hrt_server_t, server); |
1801 | hrt_server->ready = 0; | 1478 | hrt_server->ready = 0; |
1802 | TRACE_SERVER_SUB(server, "P%d no longer ready", entry->cpu); | ||
1803 | 1479 | ||
1804 | if (hrtimer_active(&hrt_server->slack_timer)) | 1480 | if (hrtimer_active(&hrt_server->slack_timer)) |
1805 | slack_timer_cancel(hrt_server); | 1481 | slack_timer_cancel(hrt_server); |
@@ -1809,8 +1485,6 @@ static void server_completed(server_t *server, struct task_struct *task) | |||
1809 | server_release(server); | 1485 | server_release(server); |
1810 | } | 1486 | } |
1811 | 1487 | ||
1812 | sched_trace_action(task, SERVER_COMPLETED_ACTION); | ||
1813 | |||
1814 | unlink(task); | 1488 | unlink(task); |
1815 | requeue(task, get_rt_domain(entry, task)); | 1489 | requeue(task, get_rt_domain(entry, task)); |
1816 | 1490 | ||
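
When an SRT server exhausts its budget before its task finishes (the first branch above), the task inherits the server's next window so the generic rt_domain machinery can keep handling it. With invented numbers:

```c
/* Hypothetical: SRT server with period 10ms and deadline t = 40ms
 * exhausts its budget at t = 37ms while its task still has work:
 *
 *   task release  = server->deadline           = 40ms
 *   task deadline = 40ms + get_rt_period(task) = 50ms
 *
 * The task is then unlinked and requeued, waiting until the
 * server's replenishment at t = 40ms. */
```
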
@@ -1832,7 +1506,6 @@ static void hrt_server_released(server_t *server) | |||
1832 | cpu_entry_t *entry = container_of(hrt_server, cpu_entry_t, hrt_server); | 1506 | cpu_entry_t *entry = container_of(hrt_server, cpu_entry_t, hrt_server); |
1833 | 1507 | ||
1834 | BUG_ON(hrtimer_active(&hrt_server->slack_timer)); | 1508 | BUG_ON(hrtimer_active(&hrt_server->slack_timer)); |
1835 | TRACE_SERVER_SUB(server, "HRT server released on P%d", entry->cpu); | ||
1836 | 1509 | ||
1837 | hrt_server->no_slack = 0; | 1510 | hrt_server->no_slack = 0; |
1838 | hrt_server->ready = 1; | 1511 | hrt_server->ready = 1; |
@@ -1858,7 +1531,6 @@ static void servers_released(struct list_head *servers) | |||
1858 | 1531 | ||
1859 | raw_spin_lock_irqsave(global_lock, flags); | 1532 | raw_spin_lock_irqsave(global_lock, flags); |
1860 | 1533 | ||
1861 | ////sched_trace_action(NULL, SERVER_RELEASED_ACTION); | ||
1862 | TRACE_TIMER("Servers released"); | 1534 | TRACE_TIMER("Servers released"); |
1863 | 1535 | ||
1864 | list_for_each_safe(pos, safe, servers) { | 1536 | list_for_each_safe(pos, safe, servers) { |
@@ -1870,7 +1542,6 @@ static void servers_released(struct list_head *servers) | |||
1870 | check_donate_slack(server, NULL); | 1542 | check_donate_slack(server, NULL); |
1871 | was_be = 1; | 1543 | was_be = 1; |
1872 | BUG_ON(bheap_node_in_heap(server->hn)); | 1544 | BUG_ON(bheap_node_in_heap(server->hn)); |
1873 | TRACE_SERVER_SUB(server, "inserting BE server"); | ||
1874 | bheap_insert(server_order, &be_ready_servers, | 1545 | bheap_insert(server_order, &be_ready_servers, |
1875 | server->hn); | 1546 | server->hn); |
1876 | check_donate_slack(server, NULL); | 1547 | check_donate_slack(server, NULL); |
@@ -1913,8 +1584,6 @@ static int admit_be_server(unsigned long long wcet, | |||
1913 | be_server->type = S_BE; | 1584 | be_server->type = S_BE; |
1914 | server_slack_create(be_server); | 1585 | server_slack_create(be_server); |
1915 | 1586 | ||
1916 | TRACE_SERVER_SUB(be_server, "admitted BE server"); | ||
1917 | |||
1918 | list_add(&be_server->list, &be_servers); | 1587 | list_add(&be_server->list, &be_servers); |
1919 | bheap_insert(server_order, &be_ready_servers, be_server->hn); | 1588 | bheap_insert(server_order, &be_ready_servers, be_server->hn); |
1920 | 1589 | ||
@@ -2037,8 +1706,6 @@ static void start_servers(lt_t time) | |||
2037 | server_t *be_server; | 1706 | server_t *be_server; |
2038 | struct list_head *pos; | 1707 | struct list_head *pos; |
2039 | 1708 | ||
2040 | TRACE_SUB("starting servers at %llu", time); | ||
2041 | |||
2042 | /* Start HRT servers */ | 1709 | /* Start HRT servers */ |
2043 | for_each_online_cpu(cpu) { | 1710 | for_each_online_cpu(cpu) { |
2044 | entry = &per_cpu(cpu_entries, cpu); | 1711 | entry = &per_cpu(cpu_entries, cpu); |
@@ -2068,7 +1735,6 @@ static void start_servers(lt_t time) | |||
2068 | server_release_at(be_server, time - be_server->period); | 1735 | server_release_at(be_server, time - be_server->period); |
2069 | 1736 | ||
2070 | TRACE("Releasing BE server %d\n", be_server->id); | 1737 | TRACE("Releasing BE server %d\n", be_server->id); |
2071 | TRACE_SERVER_SUB(be_server, "inserting be server"); | ||
2072 | } | 1738 | } |
2073 | } | 1739 | } |
2074 | 1740 | ||
@@ -2167,7 +1833,6 @@ static void edf_hsb_task_block(struct task_struct *task) | |||
2167 | 1833 | ||
2168 | unlink(task); | 1834 | unlink(task); |
2169 | 1835 | ||
2170 | /* TODO: necessary? */ | ||
2171 | if (task == linked) { | 1836 | if (task == linked) { |
2172 | check_donate_slack(linked_server, task); | 1837 | check_donate_slack(linked_server, task); |
2173 | } | 1838 | } |
@@ -2229,17 +1894,6 @@ static struct task_struct* edf_hsb_schedule(struct task_struct *prev) | |||
2229 | 1894 | ||
2230 | curr = entry->scheduled; | 1895 | curr = entry->scheduled; |
2231 | 1896 | ||
2232 | if (entry->scheduled && !is_realtime(prev)) { | ||
2233 | TRACE_TASK_SUB(entry->scheduled, "Stack deadlock!"); | ||
2234 | } | ||
2235 | |||
2236 | TRACE("server_budget: %llu, server_deadline: %llu, " | ||
2237 | "curr_time: %llu, no_slack: %d, ready: %d\n", | ||
2238 | TIME(entry->hrt_server.server.budget), | ||
2239 | TIME(entry->hrt_server.server.deadline), | ||
2240 | TIME(litmus_clock()), entry->hrt_server.no_slack, | ||
2241 | entry->hrt_server.ready); | ||
2242 | |||
2243 | /* Determine state */ | 1897 | /* Determine state */ |
2244 | blocks = curr && !is_running(curr); | 1898 | blocks = curr && !is_running(curr); |
2245 | preempted = entry->scheduled != entry->linked; | 1899 | preempted = entry->scheduled != entry->linked; |
@@ -2295,10 +1949,6 @@ static struct task_struct* edf_hsb_schedule(struct task_struct *prev) | |||
2295 | 1949 | ||
2296 | raw_spin_unlock_irqrestore(global_lock, flags); | 1950 | raw_spin_unlock_irqrestore(global_lock, flags); |
2297 | 1951 | ||
2298 | if (!entry->scheduled && !next_eligible_slack_server()) { | ||
2299 | TRACE_SUB("A slack server has dissapeared!"); | ||
2300 | } | ||
2301 | |||
2302 | return entry->scheduled; | 1952 | return entry->scheduled; |
2303 | } | 1953 | } |
2304 | 1954 | ||
@@ -2343,7 +1993,6 @@ static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running) | |||
2343 | * This tends to happen when the first tasks enter the system. | 1993 | * This tends to happen when the first tasks enter the system. |
2344 | */ | 1994 | */ |
2345 | if (running) { | 1995 | if (running) { |
2346 | //BUG_ON(entry->scheduled); | ||
2347 | 1996 | ||
2348 | #ifdef CONFIG_RELEASE_MASTER | 1997 | #ifdef CONFIG_RELEASE_MASTER |
2349 | if (entry->cpu != edf_hsb_release_master) { | 1998 | if (entry->cpu != edf_hsb_release_master) { |
@@ -2547,7 +2196,6 @@ static void exit_edf_hsb(void) | |||
2547 | 2196 | ||
2548 | if (edf_hsb_proc_dir) { | 2197 | if (edf_hsb_proc_dir) { |
2549 | remove_plugin_proc_dir(&edf_hsb_plugin); | 2198 | remove_plugin_proc_dir(&edf_hsb_plugin); |
2550 | /* TODO: is this wrong? */ | ||
2551 | edf_hsb_proc_dir = NULL; | 2199 | edf_hsb_proc_dir = NULL; |
2552 | } | 2200 | } |
2553 | } | 2201 | } |
diff --git a/litmus/sched_edf_hsb_noslack.c b/litmus/sched_edf_hsb_noslack.c deleted file mode 100644 index 4d91f99d4094..000000000000 --- a/litmus/sched_edf_hsb_noslack.c +++ /dev/null | |||
@@ -1,2556 +0,0 @@ | |||
1 | /* | ||
2 | * litmus/sched_edf_hsb.c | ||
3 | * | ||
4 | * Implementation of the EDF-HSB scheduling algorithm. | ||
5 | * | ||
6 | * The following 6 events are fired by timers and not handled by | ||
7 | * the plugin infrastructure itself: | ||
8 | * | ||
9 | * release_[hrt|srt|be]_jobs | ||
10 | * [hrt|be]_server_released | ||
11 | * server_completed (for HRT, SRT, and BE) | ||
12 | * | ||
13 | * The following 4 events are caused by a write to the proc entry | ||
14 | * and should never be run when the plugin is already running: | ||
15 | * stop_[hrt|be]_servers | ||
16 | * admit_[hrt|be]_server | ||
17 | * | ||
18 | * TODO system for removing tasks from their release queues | ||
19 | * TODO clean up link_to_cpu and check_slack args | ||
20 | * TODO move slack completion into release | ||
21 | * TODO fix concurrent arms | ||
22 | * TODO slack and BE servers, include slack higher prio | ||
23 | * TODO start servers should no longer be necessary | ||
24 | * TODO harmonize order of method arguments | ||
25 | * TODO test crazy task_new hack | ||
26 | * TODO remove bheap_node_in_heap check in litmus_exit_task | ||
27 | */ | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/uaccess.h> | ||
30 | #include <linux/percpu.h> | ||
31 | #include <linux/spinlock.h> | ||
32 | #include <linux/ctype.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/hrtimer.h> | ||
35 | |||
36 | #include <litmus/litmus.h> | ||
37 | #include <litmus/bheap.h> | ||
38 | #include <litmus/jobs.h> | ||
39 | #include <litmus/litmus_proc.h> | ||
40 | #include <litmus/sched_plugin.h> | ||
41 | #include <litmus/edf_common.h> | ||
42 | #include <litmus/sched_trace.h> | ||
43 | #include <litmus/servers.h> | ||
44 | #define DEBUG_EDF_HSB | ||
45 | |||
46 | /* DOES NOT WORK */ | ||
47 | //#define SLACK_ON_MASTER | ||
48 | |||
49 | #define BE_PROC_NAME "be_servers" | ||
50 | #define HRT_PROC_NAME "hrt_servers" | ||
51 | #define BE_SERVER_BASE 100 | ||
52 | #define IDLE_SLACK_BASE 1000 | ||
53 | #define SLACK_MIN NSEC_PER_MSEC | ||
54 | |||
55 | /* SCHED_TRACE action events */ | ||
56 | #define SERVER_COMPLETED_ACTION 1 | ||
57 | #define SERVER_RELEASED_ACTION 2 | ||
58 | #define NO_SLACK_ACTION 3 | ||
59 | #define SLACK_RUN_ACTION 4 | ||
60 | #define SLACK_STOP_ACTION 5 | ||
61 | #define SLACK_RECLAIM_ACTION 6 | ||
62 | #define SLACK_EXPIRED_ACTION 7 | ||
63 | #define SLACK_DONATED_ACTION 8 | ||
64 | #define CANDIDATE_ADDED_ACTION 9 | ||
65 | |||
66 | /* Uncomment for human readable time */ | ||
67 | #define TIME(x) \ | ||
68 | (x) | ||
69 | /* ({lt_t y = x; \ */ | ||
70 | /* do_div(y, NSEC_PER_MSEC); \ */ | ||
71 | /* y;}) */ | ||
72 | #define TRACE_TIMER(fmt, args...) \ | ||
73 | sched_trace_log_message("%d P%d*[%s@%s:%d]: " fmt " at %llu\n", \ | ||
74 | TRACE_ARGS, ## args, TIME(litmus_clock())) | ||
75 | #define TRACE_TASK_TIMER(t, fmt, args...) \ | ||
76 | TRACE_TIMER("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \ | ||
77 | (t)->rt_param.job_params.job_no, ## args) | ||
78 | |||
79 | /* | ||
80 | * Useful debugging macros. Remove for actual use as they cause | ||
81 | * a lot of lock contention. | ||
82 | */ | ||
83 | #ifdef DEBUG_EDF_HSB | ||
84 | |||
85 | #define TRACE_SUB(fmt, args...) \ | ||
86 | sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt "\n", \ | ||
87 | TRACE_ARGS, ## args) | ||
88 | #define TRACE_TASK_SUB(t, fmt, args...) \ | ||
89 | TRACE_SUB(TASK_FMT " " fmt, TASK_ARGS(t), ##args) | ||
90 | #define TRACE_SERVER_SUB(s, fmt, args...) \ | ||
91 | TRACE_SUB(SERVER_FMT " " fmt, SERVER_ARGS(s), ##args) | ||
92 | #define TRACE_TASK_SERVER_SUB(t, s, fmt, args...) \ | ||
93 | TRACE_TASK_SUB(t, SERVER_FMT " " fmt, SERVER_ARGS(s), ##args) | ||
94 | #else | ||
95 | #define TRACE_SUB(fmt, args...) | ||
96 | #define TRACE_TASK_SUB(t, fmt, args...) | ||
97 | #define TRACE_SERVER_SUB(s, fmt, args...) | ||
98 | #define TRACE_TASK_SERVER_SUB(t, s, fmt, args...) | ||
99 | #endif | ||
100 | |||
101 | /* | ||
102 | * Different types of servers | ||
103 | */ | ||
104 | typedef enum { | ||
105 | S_HRT, | ||
106 | S_SRT, | ||
107 | S_BE, | ||
108 | S_SLACK | ||
109 | } server_type_t; | ||
110 | |||
111 | /* | ||
112 | * A server running HRT tasks | ||
113 | */ | ||
114 | typedef struct { | ||
115 | server_t server; | ||
116 | rt_domain_t hrt_domain; /* EDF for HRT tasks assigned here */ | ||
117 | int ready; /* False if waiting for next release */ | ||
118 | int no_slack; | ||
119 | struct hrtimer slack_timer; /* Server has no slack when: | ||
120 | * (deadline - budget) <= current_time. | ||
121 | */ | ||
122 | struct hrtimer_start_on_info slack_timer_info; | ||
123 | } hrt_server_t; | ||
124 | |||
125 | /* | ||
126 | * State of a single CPU | ||
127 | */ | ||
128 | typedef struct { | ||
129 | int cpu; | ||
130 | struct task_struct* scheduled; /* Task that is actually running */ | ||
131 | struct task_struct* linked; /* Task that should be running */ | ||
132 | server_t *scheduled_server; | ||
133 | server_t *linked_server; /* The server running on this cpu. | ||
134 | * Note that what it is 'running' is | ||
135 | * linked, not scheduled. | ||
136 | */ | ||
137 | hrt_server_t hrt_server; /* One HRT server per CPU */ | ||
138 | struct bheap_node* hn; /* For the cpu_heap */ | ||
139 | } cpu_entry_t; | ||
140 | |||
141 | /* | ||
142 | * Data assigned to each task | ||
143 | */ | ||
144 | typedef struct task_data { | ||
145 | server_t *srt_server; /* If the task is SRT, its server */ | ||
146 | struct list_head candidate_list; /* List of slack candidates */ | ||
147 | struct task_struct *owner; | ||
148 | } task_data_t; | ||
149 | |||
150 | /* CPU state */ | ||
151 | DEFINE_PER_CPU_SHARED_ALIGNED(cpu_entry_t, noslack_cpu_entries); | ||
152 | static struct bheap cpu_heap; | ||
153 | static struct bheap_node cpu_heap_node[NR_CPUS]; | ||
154 | /* Task domains */ | ||
155 | static rt_domain_t srt_domain; | ||
156 | static rt_domain_t be_domain; | ||
157 | /* Useful tools for server scheduling */ | ||
158 | static server_domain_t server_domain; | ||
159 | /* BE server support */ | ||
160 | static struct list_head be_servers; | ||
161 | static struct bheap be_ready_servers; | ||
162 | /* Slack support */ | ||
163 | static struct list_head slack_queue; | ||
164 | static struct list_head slack_candidates; | ||
165 | /* CPU which will release tasks and global servers */ | ||
166 | static int edf_hsb_release_master; | ||
167 | /* Cache to store task_data structs */ | ||
168 | static struct kmem_cache *task_data_cache; | ||
169 | |||
170 | static struct proc_dir_entry *edf_hsb_proc_dir = NULL; | ||
171 | static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp; | ||
172 | |||
173 | #define task_sched_entry(task) (&per_cpu(noslack_cpu_entries, task_cpu(task))) | ||
174 | #define task_linked_entry(task) (&per_cpu(noslack_cpu_entries, task->rt_param.linked_on)) | ||
175 | #define task_job_no(task) (tsk_rt(task)->job_params.job_no) | ||
176 | #define task_data(task) ((task_data_t*)tsk_rt(task)->plugin_data) | ||
177 | #define task_srt_server(task) ((server_t*)task_data(task)->srt_server) | ||
178 | #define server_slack(s) ((server_t*)(s)->data) | ||
179 | #define server_has_slack(s) (server_slack(s)->deadline != 0) | ||
180 | #define local_cpu_entry (&__get_cpu_var(noslack_cpu_entries)) | ||
181 | #define global_lock (&srt_domain.ready_lock) | ||
182 | #define is_active_plugin (litmus == &edf_hsb_plugin) | ||
183 | |||
184 | /* | ||
185 | * This only works if items are deleted with list_del_init. | ||
186 | */ | ||
187 | static inline int head_in_list(struct list_head *head) | ||
188 | { | ||
189 | BUG_ON(!head); | ||
190 | return !(head->next == head->prev && head->prev == head); | ||
191 | } | ||
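The check above relies on the list_del_init() invariant noted in the comment: a detached node's links point back at the node itself, so membership is decidable without a search (plain list_del would poison the pointers and break this). A minimal userspace sketch of the same idea, assuming a stripped-down list_head; all names here are local to the example, not LITMUS code:

    #include <assert.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h)
    {
            h->next = h;              /* a detached node points at itself */
            h->prev = h;
    }

    static void list_del_init(struct list_head *e)
    {
            e->prev->next = e->next;  /* unlink from neighbors */
            e->next->prev = e->prev;
            INIT_LIST_HEAD(e);        /* restore the self-pointing state */
    }

    static int head_in_list(struct list_head *head)
    {
            return !(head->next == head->prev && head->prev == head);
    }

    int main(void)
    {
            struct list_head queue, node;
            INIT_LIST_HEAD(&queue);
            INIT_LIST_HEAD(&node);
            assert(!head_in_list(&node));   /* never inserted */

            node.next = queue.next;         /* equivalent of list_add() */
            node.prev = &queue;
            queue.next->prev = &node;
            queue.next = &node;
            assert(head_in_list(&node));

            list_del_init(&node);           /* plain list_del would break the test */
            assert(!head_in_list(&node));
            return 0;
    }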
192 | |||
193 | /* | ||
194 | * Returns slack server running the task or NULL if N/A. | ||
195 | */ | ||
196 | static inline server_t* task_slack_server(struct task_struct *task) | ||
197 | { | ||
198 | server_t *slack_server = NULL; | ||
199 | if (task->rt_param.linked_on != NO_CPU) { | ||
200 | slack_server = task_linked_entry(task)->linked_server; | ||
201 | if (slack_server->type != S_SLACK) | ||
202 | slack_server = NULL; | ||
203 | } | ||
204 | return slack_server; | ||
205 | } | ||
206 | |||
207 | static task_data_t* task_data_alloc(int gfp_flags) | ||
208 | { | ||
209 | return kmem_cache_alloc(task_data_cache, gfp_flags); | ||
210 | } | ||
211 | |||
212 | static void task_data_free(task_data_t* data) | ||
213 | { | ||
214 | kmem_cache_free(task_data_cache, data); | ||
215 | } | ||
216 | |||
217 | /* | ||
218 | * Donating servers pre-allocate a server for slack to avoid runtime | ||
219 | * calls to kmalloc. | ||
220 | */ | ||
221 | static void server_slack_create(server_t *donator) | ||
222 | { | ||
223 | server_t *slack = server_alloc(GFP_ATOMIC); | ||
224 | |||
225 | server_init(slack, &server_domain, -donator->id, 0, 0, 1); | ||
226 | slack->type = S_SLACK; | ||
227 | slack->data = donator; | ||
228 | donator->data = slack; | ||
229 | } | ||
230 | |||
231 | |||
232 | static void server_slack_destroy(server_t *donator) | ||
233 | { | ||
234 | server_t *slack = (server_t*)donator->data; | ||
235 | |||
236 | donator->data = NULL; | ||
237 | server_destroy(slack); | ||
238 | server_free(slack); | ||
239 | } | ||
240 | |||
241 | static void remove_slack(server_t *slack) | ||
242 | { | ||
243 | if (!slack) | ||
244 | return; | ||
245 | TRACE_SERVER_SUB(slack, "slack removed"); | ||
246 | //////sched_trace_action(NULL, SLACK_EXPIRED_ACTION); | ||
247 | |||
248 | if (head_in_list(&slack->list)) | ||
249 | list_del_init(&slack->list); | ||
250 | slack->deadline = 0; | ||
251 | slack->budget = 0; | ||
252 | slack->wcet = 0; | ||
253 | } | ||
254 | |||
255 | /* | ||
256 | * Slack queue is EDF. | ||
257 | */ | ||
258 | static void add_slack(server_t *slack) | ||
259 | { | ||
260 | struct list_head *pos; | ||
261 | server_t *queued; | ||
262 | |||
263 | TRACE_SERVER_SUB(slack, "slack added"); | ||
264 | |||
265 | if (head_in_list(&slack->list)) { | ||
266 | TRACE_SERVER_SUB(slack, "already in list"); | ||
267 | return; | ||
268 | } | ||
269 | |||
270 | list_for_each_prev(pos, &slack_queue) { | ||
271 | queued = list_entry(pos, server_t, list); | ||
272 | if (lt_before_eq(queued->deadline, slack->deadline)) { | ||
273 | __list_add(&slack->list, pos, pos->next); | ||
274 | return; | ||
275 | } | ||
276 | } | ||
277 | list_add(&slack->list, &slack_queue); | ||
278 | } | ||
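add_slack() walks the queue backwards and splices the new server in front of the first entry whose deadline is no later, keeping the slack queue deadline-ordered (EDF) without a full sort. Scanning from the tail is presumably the common-case win: freshly donated slack carries its donator's current deadline, which tends to be late. A self-contained sketch of the same backward-scan insert on plain values (hypothetical names, not LITMUS code):

    #include <stdio.h>

    struct node { unsigned long long deadline; struct node *next, *prev; };

    /* Keep the circular list rooted at 'queue' sorted by deadline. */
    static void add_sorted(struct node *queue, struct node *n)
    {
            struct node *pos = queue->prev;          /* start at the tail */
            while (pos != queue && pos->deadline > n->deadline)
                    pos = pos->prev;
            n->prev = pos;                           /* splice after 'pos' */
            n->next = pos->next;
            pos->next->prev = n;
            pos->next = n;
    }

    int main(void)
    {
            struct node queue = { 0, &queue, &queue };
            struct node a = { 30 }, b = { 10 }, c = { 20 };
            add_sorted(&queue, &a);
            add_sorted(&queue, &b);
            add_sorted(&queue, &c);
            for (struct node *p = queue.next; p != &queue; p = p->next)
                    printf("%llu ", p->deadline);    /* prints: 10 20 30 */
            return 0;
    }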
279 | |||
280 | static inline struct task_struct* get_candidate(struct list_head *pos) | ||
281 | { | ||
282 | struct task_struct *task = NULL; | ||
283 | task_data_t *data; | ||
284 | if (!list_empty(pos)) { | ||
285 | data = list_entry(pos, task_data_t, candidate_list); | ||
286 | task = data->owner; | ||
287 | } | ||
288 | return task; | ||
289 | } | ||
290 | |||
291 | static inline lt_t real_deadline(struct task_struct *task) | ||
292 | { | ||
293 | server_t *server = task_srt_server(task); | ||
294 | int job_diff = server->job_no - task_job_no(task); | ||
295 | return get_deadline(task) - job_diff * get_rt_period(task); | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * Candidate queue is EDF. | ||
300 | */ | ||
301 | static void add_slack_candidate(struct task_struct *task) | ||
302 | { | ||
303 | struct list_head *pos; | ||
304 | struct task_struct *queued; | ||
305 | |||
306 | TRACE_TASK_SUB(task, "candidate added"); | ||
307 | |||
308 | list_for_each_prev(pos, &slack_candidates) { | ||
309 | queued = get_candidate(pos); | ||
310 | if (lt_before_eq(real_deadline(queued), real_deadline(task))) { | ||
311 | __list_add(&task_data(task)->candidate_list, | ||
312 | pos, pos->next); | ||
313 | return; | ||
314 | } | ||
315 | } | ||
316 | list_add(&task_data(task)->candidate_list, &slack_candidates); | ||
317 | } | ||
318 | |||
319 | static void donate_slack(server_t *donator) | ||
320 | { | ||
321 | server_t *slack = (server_t*)donator->data; | ||
322 | hrt_server_t *hrt_server; | ||
323 | |||
324 | TRACE_SERVER_SUB(donator, "%llu slack donated", TIME(donator->budget)); | ||
325 | |||
326 | if (donator->type == S_HRT) { | ||
327 | hrt_server = container_of(donator, hrt_server_t, server); | ||
328 | BUG_ON(!hrt_server->ready); | ||
329 | } | ||
330 | |||
331 | slack->wcet = donator->budget; | ||
332 | slack->budget = donator->budget; | ||
333 | slack->deadline = donator->deadline; | ||
334 | |||
335 | add_slack(slack); | ||
336 | } | ||
337 | |||
338 | #if 0 | ||
339 | /* | ||
340 | * Donate any available slack from a server. | ||
341 | */ | ||
342 | static noinline void check_donate_slack(server_t *donator, struct task_struct *was_scheduled) | ||
343 | { | ||
344 | server_t *slack = server_slack(donator); | ||
345 | hrt_server_t *hrt_server; | ||
346 | int donate = 0; | ||
347 | |||
348 | TRACE_SERVER_SUB(donator, "checking donation"); | ||
349 | |||
350 | if (!slack) | ||
351 | return; | ||
352 | |||
353 | /* Donating small amounts of slack will result in excess migrations */ | ||
354 | if (donator->budget < SLACK_MIN) | ||
355 | return; | ||
356 | |||
357 | if (server_has_slack(donator)) { | ||
358 | TRACE_SERVER_SUB(donator, "dead: %llu, rel: %llu, job: %d already donated", | ||
359 | slack->deadline, slack->release, slack->job_no); | ||
360 | return; | ||
361 | } | ||
362 | |||
363 | if (donator->type == S_HRT) { | ||
364 | hrt_server = container_of(donator, hrt_server_t, server); | ||
365 | } | ||
366 | |||
367 | /* Donate if the server is waiting for a task release */ | ||
368 | if ((donator->type == S_SRT && | ||
369 | donator->job_no <= task_job_no(was_scheduled)) || | ||
370 | (donator->type == S_HRT && | ||
371 | hrt_server->no_slack && hrt_server->ready && | ||
372 | !__jobs_pending(&hrt_server->hrt_domain)) || | ||
373 | (donator->type == S_BE && | ||
374 | !__jobs_pending(&be_domain))) { | ||
375 | donate = 1; | ||
376 | } | ||
377 | |||
378 | if (!donate) | ||
379 | return; | ||
380 | |||
381 | ////sched_trace_action(was_scheduled, SLACK_DONATED_ACTION); | ||
382 | |||
383 | donate_slack(donator); | ||
384 | } | ||
385 | |||
386 | #else | ||
387 | #define check_donate_slack(a, b) | ||
388 | #endif | ||
389 | |||
390 | /* | ||
391 | * Adds the task to the candidate queue if it is eligible for slack stealing. | ||
392 | */ | ||
393 | static void check_slack_candidate(struct task_struct *task) | ||
394 | { | ||
395 | TRACE_TASK_SUB(task, "checking for candidate"); | ||
396 | if (is_srt(task) && | ||
397 | /* The task has been synchronously released */ | ||
398 | task_job_no(task) > 2 && | ||
399 | /* The SRT task is behind its server */ | ||
400 | task_srt_server(task)->job_no > task_job_no(task) && | ||
401 | /* The task hasn't already been added to the list */ | ||
402 | !head_in_list(&task_data(task)->candidate_list)) { | ||
403 | |||
404 | add_slack_candidate(task); | ||
405 | } else if (is_srt(task) && | ||
406 | is_released(task, litmus_clock()) && | ||
407 | !is_queued(task)) { | ||
408 | TRACE_TASK_SUB(task, "candidate has been released!"); | ||
409 | __add_ready(&srt_domain, task); | ||
410 | } | ||
411 | } | ||
412 | |||
413 | /* | ||
414 | * Returns the next eligible slack server. This will remove any expired | ||
415 | * slack servers still present in the list. | ||
416 | */ | ||
417 | static noinline server_t* next_eligible_slack_server(void) | ||
418 | { | ||
419 | server_t *next_slack = NULL; | ||
420 | lt_t now = litmus_clock(); | ||
421 | |||
422 | while (!list_empty(&slack_queue)) { | ||
423 | next_slack = list_entry(slack_queue.next, server_t, list); | ||
424 | BUG_ON(!next_slack); | ||
425 | |||
426 | if (lt_after(next_slack->deadline, now) && | ||
427 | lt_after(next_slack->budget, SLACK_MIN) && | ||
428 | !is_server_linked(next_slack)) { | ||
429 | break; | ||
430 | } else { | ||
431 | /* Slack has expired or has too little time */ | ||
432 | BUG_ON(next_slack->id == 1001); | ||
433 | remove_slack(next_slack); | ||
434 | next_slack = NULL; | ||
435 | } | ||
436 | } | ||
437 | |||
438 | return next_slack; | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * Returns the next SRT task that is tardy or will be tardy. If none | ||
443 | * are available, it will return a tardy BE task if present. | ||
444 | */ | ||
445 | static noinline struct task_struct* next_eligible_slack(void) | ||
446 | { | ||
447 | struct task_struct *next = get_candidate(slack_candidates.next); | ||
448 | |||
449 | while (next && task_srt_server(next)->job_no <= task_job_no(next)) { | ||
450 | list_del_init(&task_data(next)->candidate_list); | ||
451 | next = get_candidate(slack_candidates.next); | ||
452 | } | ||
453 | |||
454 | /* We couldn't find an SRT to schedule. Find a BE which is | ||
455 | * either tardy or cannot run due to a lack of servers. | ||
456 | */ | ||
457 | if (!next) { | ||
458 | next = __peek_ready(&be_domain); | ||
459 | } | ||
460 | |||
461 | return next; | ||
462 | } | ||
463 | |||
464 | /* | ||
465 | * Order BE tasks FIFO. | ||
466 | */ | ||
467 | static inline int be_higher_prio(struct task_struct *first, struct task_struct *second) | ||
468 | { | ||
469 | return lt_before(get_release(first), get_release(second)) || | ||
470 | |||
471 | /* Break by PID */ | ||
472 | (get_release(first) == get_release(second) && | ||
473 | (first->pid < second->pid)); | ||
474 | } | ||
475 | |||
476 | static int be_ready_order(struct bheap_node *a, struct bheap_node *b) | ||
477 | { | ||
478 | struct task_struct *first, *second; | ||
479 | first = bheap2task(a); | ||
480 | second = bheap2task(b); | ||
481 | if (!first || !second) | ||
482 | return first && !second; | ||
483 | return be_higher_prio(first, second); | ||
484 | } | ||
485 | |||
486 | /* | ||
487 | * Order servers by EDF. | ||
488 | */ | ||
489 | static inline int server_higher_prio(server_t *first, server_t *second) | ||
490 | { | ||
491 | return lt_before(first->deadline, second->deadline) || | ||
492 | /* Break by id */ | ||
493 | (first->deadline == second->deadline && | ||
494 | first->id < second->id); | ||
495 | } | ||
496 | |||
497 | static int server_order(struct bheap_node *a, struct bheap_node *b) | ||
498 | { | ||
499 | server_t *first, *second; | ||
500 | first = a->value; | ||
501 | second = b->value; | ||
502 | return server_higher_prio(first, second); | ||
503 | } | ||
504 | |||
505 | /* | ||
506 | * Order CPU's by deadlines of their servers. | ||
507 | */ | ||
508 | static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b) | ||
509 | { | ||
510 | cpu_entry_t *first, *second; | ||
511 | first = a->value; | ||
512 | second = b->value; | ||
513 | if (first->linked && second->linked) { | ||
514 | return !server_higher_prio(first->linked_server, | ||
515 | second->linked_server); | ||
516 | } | ||
517 | return second->linked && !first->linked; | ||
518 | } | ||
519 | |||
520 | /* | ||
521 | * Move the CPU entry to the correct position in the queue. | ||
522 | */ | ||
523 | static inline void update_cpu_position(cpu_entry_t *entry) | ||
524 | { | ||
525 | if (likely(bheap_node_in_heap(entry->hn))) | ||
526 | bheap_delete(cpu_lower_prio, &cpu_heap, entry->hn); | ||
527 | /* Don't leave HRT CPUs in the heap as its order only matters | ||
528 | * for global preempts. | ||
529 | */ | ||
530 | if (!entry->linked || !is_hrt(entry->linked)) | ||
531 | bheap_insert(cpu_lower_prio, &cpu_heap, entry->hn); | ||
532 | } | ||
533 | |||
534 | static inline cpu_entry_t* lowest_prio_cpu(void) | ||
535 | { | ||
536 | struct bheap_node *hn = bheap_peek(cpu_lower_prio, &cpu_heap); | ||
537 | return (hn) ? hn->value : NULL; | ||
538 | } | ||
539 | |||
540 | static inline int check_hrt_server_initialized(hrt_server_t *hrt_server) | ||
541 | { | ||
542 | return hrt_server->server.wcet && hrt_server->server.period; | ||
543 | } | ||
544 | |||
545 | /* | ||
546 | * Arms the slack timer for the server, if necessary. | ||
547 | */ | ||
548 | static void slack_timer_arm(hrt_server_t *hrt_server) | ||
549 | { | ||
550 | int cpu, err; | ||
551 | cpu_entry_t *entry; | ||
552 | struct hrtimer *timer; | ||
553 | lt_t now = litmus_clock(), when_to_fire; | ||
554 | |||
555 | if (!check_hrt_server_initialized(hrt_server)) { | ||
556 | TRACE_SERVER_SUB(&hrt_server->server, "not initialized"); | ||
557 | return; | ||
558 | } | ||
559 | |||
560 | timer = &hrt_server->slack_timer; | ||
561 | entry = container_of(hrt_server, cpu_entry_t, hrt_server); | ||
562 | |||
563 | #ifdef SLACK_ON_MASTER | ||
564 | if (edf_hsb_release_master != NO_CPU) | ||
565 | cpu = edf_hsb_release_master; | ||
566 | else | ||
567 | #endif | ||
568 | cpu = entry->cpu; | ||
569 | |||
570 | when_to_fire = hrt_server->server.deadline - hrt_server->server.budget; | ||
571 | |||
572 | /* Ensure the timer is needed */ | ||
573 | if (hrtimer_active(timer) || hrt_server->server.deadline == 0 || | ||
574 | hrt_server->no_slack || hrt_server->server.budget == 0 || | ||
575 | !hrt_server->ready) { | ||
576 | TRACE_SERVER_SUB(&hrt_server->server, | ||
577 | "not arming slack timer on P%d, %d %d %d %d %d", | ||
578 | entry->cpu, | ||
579 | hrtimer_active(timer), hrt_server->server.deadline == 0, | ||
580 | hrt_server->no_slack, hrt_server->server.budget == 0, | ||
581 | !hrt_server->ready); | ||
582 | return; | ||
583 | } | ||
584 | |||
585 | if (when_to_fire >= hrt_server->server.deadline) { | ||
586 | TRACE_SUB("wtf: %llu, dead: %llu, bud: %llu", | ||
587 | when_to_fire, hrt_server->server.deadline, | ||
588 | hrt_server->server.budget); | ||
589 | BUG_ON(1); | ||
590 | } | ||
591 | |||
592 | /* Arm timer */ | ||
593 | if (lt_after_eq(now, when_to_fire)) { | ||
594 | /* 'Fire' immediately */ | ||
595 | TRACE_SERVER_SUB(&hrt_server->server, | ||
596 | "immediate: %llu", when_to_fire); | ||
597 | hrt_server->no_slack = 1; | ||
598 | } else if (cpu != smp_processor_id()) { | ||
599 | err = hrtimer_start_on(cpu, | ||
600 | &hrt_server->slack_timer_info, | ||
601 | &hrt_server->slack_timer, | ||
602 | ns_to_ktime(when_to_fire), | ||
603 | HRTIMER_MODE_ABS_PINNED); | ||
604 | if (err) | ||
605 | TRACE_SERVER_SUB(&hrt_server->server, "failed to arm slack"); | ||
606 | } else { | ||
607 | __hrtimer_start_range_ns(timer, ns_to_ktime(when_to_fire), | ||
608 | 0, HRTIMER_MODE_ABS_PINNED, 0); | ||
609 | } | ||
610 | |||
611 | TRACE_SUB("slack timer 0x%x armed to fire at %llu on P%d", | ||
612 | timer, TIME(when_to_fire), entry->cpu); | ||
613 | } | ||
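The arming rule encodes the HRT server's slack boundary: slack runs out at deadline - budget, the last instant at which the remaining budget can still execute back-to-back before the deadline. Past that point the code sets no_slack immediately instead of arming a timer. A standalone worked example of just that arithmetic (values illustrative, ns units):

    #include <stdio.h>

    #define NSEC_PER_MSEC 1000000ULL

    int main(void)
    {
            unsigned long long deadline = 100 * NSEC_PER_MSEC;
            unsigned long long budget   =  30 * NSEC_PER_MSEC;
            unsigned long long now      =  80 * NSEC_PER_MSEC;

            /* Last moment the leftover budget still fits before the deadline. */
            unsigned long long when_to_fire = deadline - budget;    /* 70 ms */

            if (now >= when_to_fire)
                    printf("past the boundary: set no_slack now\n"); /* this case */
            else
                    printf("arm slack timer for %llu ns\n", when_to_fire);
            return 0;
    }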
614 | |||
615 | /* | ||
616 | * Does nothing if the slack timer is not armed. | ||
617 | */ | ||
618 | static inline void slack_timer_cancel(hrt_server_t *hrt_server) | ||
619 | { | ||
620 | int ret; | ||
621 | if (hrtimer_active(&hrt_server->slack_timer)) { | ||
622 | ret = hrtimer_try_to_cancel(&hrt_server->slack_timer); | ||
623 | if (ret == -1) { | ||
624 | TRACE_SERVER_SUB(&hrt_server->server, | ||
625 | "slack timer was running concurrently"); | ||
626 | } else { | ||
627 | TRACE_SERVER_SUB(&hrt_server->server, | ||
628 | "slack timer cancelled"); | ||
629 | } | ||
630 | } else { | ||
631 | TRACE_SERVER_SUB(&hrt_server->server, "slack not active"); | ||
632 | } | ||
633 | } | ||
634 | |||
635 | /* | ||
636 | * Handles subtraction of lt_t without underflows. | ||
637 | */ | ||
638 | static inline lt_t lt_subtract(lt_t a, lt_t b) | ||
639 | { | ||
640 | long long sub = (long long)a - (long long)b; | ||
641 | if (sub >= 0) | ||
642 | return sub; | ||
643 | else | ||
644 | return 0; | ||
645 | } | ||
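Because lt_t is unsigned, a bare a - b would wrap to a huge value whenever b > a; lt_subtract() clamps at zero instead. A quick standalone check, with lt_t approximated by unsigned long long:

    #include <assert.h>

    typedef unsigned long long lt_t;

    static lt_t lt_subtract(lt_t a, lt_t b)
    {
            long long sub = (long long)a - (long long)b;
            return sub >= 0 ? (lt_t)sub : 0;
    }

    int main(void)
    {
            assert(lt_subtract(7, 5) == 2);
            assert(lt_subtract(5, 7) == 0);   /* clamped, not wrapped near 2^64 */
            assert((lt_t)(5 - 7) != 0);       /* what plain subtraction would do */
            return 0;
    }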
646 | |||
647 | static void requeue_server(server_t *server, lt_t now) | ||
648 | { | ||
649 | int added = 0; | ||
650 | hrt_server_t *hrt_server; | ||
651 | |||
652 | if (server->type == S_SRT) | ||
653 | return; | ||
654 | |||
655 | if (server->type == S_SLACK) { | ||
656 | add_slack(server); | ||
657 | return; | ||
658 | } | ||
659 | |||
660 | if (lt_before(now, server->release)) { | ||
661 | added = add_server_release(server, &server_domain); | ||
662 | } | ||
663 | |||
664 | if (!added) { | ||
665 | /* Mark servers as released */ | ||
666 | if (server->type == S_HRT) { | ||
667 | TRACE_SERVER_SUB(server, "P%d now ready at %llu", now); | ||
668 | hrt_server = container_of(server, hrt_server_t, server); | ||
669 | hrt_server->ready = 1; | ||
670 | remove_slack(server_slack(server)); | ||
671 | hrt_server->no_slack = 0; | ||
672 | ////sched_trace_action(NULL, SERVER_RELEASED_ACTION); | ||
673 | } else if (server->type == S_BE) { | ||
674 | TRACE_SERVER_SUB(server, "BE added to ready"); | ||
675 | bheap_insert(server_order, &be_ready_servers, server->hn); | ||
676 | } | ||
677 | } else { | ||
678 | BUG_ON(bheap_node_in_heap(server->hn)); | ||
679 | } | ||
680 | } | ||
681 | |||
682 | /* | ||
683 | * Absorbs a task's execution time into its donator. | ||
684 | */ | ||
685 | static void reclaim_slack(server_t *slack) | ||
686 | { | ||
687 | lt_t exec; | ||
688 | server_t *donator = server_slack(slack); | ||
689 | |||
690 | if (!donator || lt_before_eq(slack->deadline, litmus_clock())) | ||
691 | return; | ||
692 | |||
693 | /* SRT servers do not ever reclaim slack */ | ||
694 | ////sched_trace_action(NULL, SLACK_RECLAIM_ACTION); | ||
695 | |||
696 | exec = slack->wcet - slack->budget; | ||
697 | TRACE_SERVER_SUB(donator, "reclaiming %llu slack", TIME(exec)); | ||
698 | |||
699 | BUG_ON(is_server_linked(donator)); | ||
700 | BUG_ON(!slack->wcet); | ||
701 | BUG_ON(!donator->budget); | ||
702 | |||
703 | donator->budget = lt_subtract(donator->budget, exec); | ||
704 | slack->wcet = slack->budget; | ||
705 | |||
706 | /* If budget exhausted, server needs to wait for next release */ | ||
707 | if (!donator->budget) { | ||
708 | TRACE_SERVER_SUB(donator, "exhausted by slack"); | ||
709 | } | ||
710 | } | ||
711 | |||
712 | /* | ||
713 | * Begins server execution and arms any timers necessary. | ||
714 | */ | ||
715 | static noinline void link_server(cpu_entry_t *entry, | ||
716 | server_t *next_server) | ||
717 | { | ||
718 | |||
719 | if (entry->linked) { | ||
720 | /* Massive state check */ | ||
721 | if (next_server->type == S_SRT) { | ||
722 | /* SRT task cannot get ahead of its server */ | ||
723 | BUG_ON(next_server->job_no + 1 < task_job_no(entry->linked)); | ||
724 | BUG_ON(lt_after(get_deadline(entry->linked), | ||
725 | next_server->deadline)); | ||
726 | } else if (next_server->type == S_HRT) { | ||
727 | /* HRT servers should never, ever migrate */ | ||
728 | BUG_ON(entry->cpu != task_cpu(entry->linked)); | ||
729 | BUG_ON(!entry->hrt_server.ready); | ||
730 | } else if (next_server->type == S_SLACK) { | ||
731 | /* Should have already been removed from slack list */ | ||
732 | BUG_ON(head_in_list(&task_data(entry->linked)->candidate_list)); | ||
733 | BUG_ON(is_be(entry->linked) && is_queued(entry->linked)); | ||
734 | ////sched_trace_action(entry->linked, SLACK_RUN_ACTION); | ||
735 | BUG_ON(is_srt(entry->linked) && | ||
736 | task_srt_server(entry->linked)->job_no <= | ||
737 | task_job_no(entry->linked)); | ||
738 | } else { /* BE */ | ||
739 | /* Should have already been removed from ready heap */ | ||
740 | BUG_ON(bheap_node_in_heap(next_server->hn)); | ||
741 | BUG_ON(is_queued(entry->linked)); | ||
742 | ////sched_trace_action(entry->linked, next_server->id); | ||
743 | } | ||
744 | |||
745 | if (next_server->type != S_SLACK && | ||
746 | (head_in_list(&server_slack(next_server)->list))) { | ||
747 | remove_slack(server_slack(next_server)); | ||
748 | } | ||
749 | |||
750 | entry->linked_server = next_server; | ||
751 | server_run(entry->linked_server, entry->linked); | ||
752 | } | ||
753 | |||
754 | /* Timer necessary whenever an HRT is not running */ | ||
755 | if (!entry->linked || !is_hrt(entry->linked)) | ||
756 | slack_timer_arm(&entry->hrt_server); | ||
757 | else | ||
758 | slack_timer_cancel(&entry->hrt_server); | ||
759 | } | ||
760 | |||
761 | /* | ||
762 | * Stops server execution and timers. This will also re-add servers | ||
763 | * to any collections they should be members of. | ||
764 | */ | ||
765 | static noinline void unlink_server(cpu_entry_t *entry, int requeue) | ||
766 | { | ||
767 | server_t *server = entry->linked_server; | ||
768 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
769 | |||
770 | BUG_ON(!entry->linked_server); | ||
771 | |||
772 | server_stop(entry->linked_server); | ||
773 | server = entry->linked_server; | ||
774 | entry->linked_server = NULL; | ||
775 | |||
776 | if (!requeue) | ||
777 | return; | ||
778 | |||
779 | if (server->type == S_SLACK && server->deadline) { | ||
780 | add_slack(server); | ||
781 | ////sched_trace_action(entry->linked, SLACK_STOP_ACTION); | ||
782 | |||
783 | /* Donator needs to absorb slack execution time */ | ||
784 | reclaim_slack(server); | ||
785 | } else if (server->type != S_SRT) { | ||
786 | requeue_server(server, litmus_clock()); | ||
787 | } | ||
788 | |||
789 | if (server->type == S_HRT && hrt_server->ready) | ||
790 | BUG_ON(head_in_list(&server_slack(server)->list)); | ||
791 | } | ||
792 | |||
793 | static void requeue(struct task_struct *task, rt_domain_t *domain); | ||
794 | static inline rt_domain_t* get_rt_domain(cpu_entry_t *entry, struct task_struct *task); | ||
795 | |||
796 | /* Update the link of a CPU. | ||
797 | * Handles the case where the to-be-linked task is already | ||
798 | * scheduled on a different CPU. The last argument is only needed | ||
799 | * for BE tasks as their servers can't be determined here. | ||
800 | */ | ||
801 | static noinline void link_to_cpu(cpu_entry_t *entry, | ||
802 | struct task_struct* linked, | ||
803 | server_t* next_server) | ||
804 | { | ||
805 | cpu_entry_t *sched; | ||
806 | server_t *tmp_server; | ||
807 | struct task_struct *tmp_task; | ||
808 | int on_cpu; | ||
809 | |||
810 | BUG_ON(linked && !is_realtime(linked)); | ||
811 | BUG_ON(linked && is_hrt(linked) && entry->cpu != task_cpu(linked)); | ||
812 | BUG_ON(entry->cpu == edf_hsb_release_master); | ||
813 | |||
814 | if (linked) | ||
815 | TRACE_TASK_SERVER_SUB(linked, next_server, "linking to P%d", | ||
816 | entry->cpu); | ||
817 | |||
818 | /* Currently linked task is set to be unlinked. */ | ||
819 | if (entry->linked) { | ||
820 | unlink_server(entry, 1); | ||
821 | entry->linked->rt_param.linked_on = NO_CPU; | ||
822 | entry->linked = NULL; | ||
823 | } | ||
824 | |||
825 | /* Link new task to CPU. */ | ||
826 | if (linked) { | ||
827 | set_rt_flags(linked, RT_F_RUNNING); | ||
828 | /* Handle task is already scheduled somewhere! */ | ||
829 | on_cpu = linked->rt_param.scheduled_on; | ||
830 | if (on_cpu != NO_CPU) { | ||
831 | sched = &per_cpu(noslack_cpu_entries, on_cpu); | ||
832 | /* This should only happen if not linked already */ | ||
833 | BUG_ON(sched->linked == linked); | ||
834 | |||
835 | if (entry != sched && | ||
836 | sched->linked && is_hrt(sched->linked)) { | ||
837 | /* We are already scheduled on a CPU with an HRT */ | ||
838 | TRACE_TASK_SUB(linked, | ||
839 | "cannot move to scheduled CPU P%d", | ||
840 | sched->cpu); | ||
841 | |||
842 | requeue_server(next_server, litmus_clock()); | ||
843 | requeue(linked, get_rt_domain(entry, linked)); | ||
844 | |||
845 | linked = NULL; | ||
846 | next_server = NULL; | ||
847 | } else if (entry != sched) { | ||
848 | /* Link to the CPU we are scheduled on by swapping | ||
849 | * with that CPU's linked task. | ||
850 | */ | ||
851 | BUG_ON(is_hrt(linked)); | ||
852 | |||
853 | TRACE_TASK_SUB(linked,"already scheduled on P%d", | ||
854 | sched->cpu); | ||
855 | |||
856 | tmp_task = sched->linked; | ||
857 | tmp_server = sched->linked_server; | ||
858 | |||
859 | if (tmp_task) | ||
860 | unlink_server(sched, 0); | ||
861 | |||
862 | linked->rt_param.linked_on = sched->cpu; | ||
863 | sched->linked = linked; | ||
864 | link_server(sched, next_server); | ||
865 | |||
866 | update_cpu_position(sched); | ||
867 | |||
868 | linked = tmp_task; | ||
869 | next_server = tmp_server; | ||
870 | } | ||
871 | } | ||
872 | if (linked) /* Might be NULL due to swap */ | ||
873 | linked->rt_param.linked_on = entry->cpu; | ||
874 | } | ||
875 | entry->linked = linked; | ||
876 | link_server(entry, next_server); | ||
877 | update_cpu_position(entry); | ||
878 | |||
879 | BUG_ON(!entry->linked && entry->linked_server); | ||
880 | |||
881 | if (linked) | ||
882 | TRACE_TASK_SERVER_SUB(linked, next_server, | ||
883 | "linked to %d", entry->cpu); | ||
884 | else | ||
885 | TRACE_SUB("NULL linked to %d", entry->cpu); | ||
886 | } | ||
887 | |||
888 | /* | ||
889 | * Grab the local HRT or global SRT or BE domain for the task. | ||
890 | */ | ||
891 | static inline rt_domain_t* get_rt_domain(cpu_entry_t *entry, | ||
892 | struct task_struct *task) | ||
893 | { | ||
894 | if (is_hrt(task)) | ||
895 | return &entry->hrt_server.hrt_domain; | ||
896 | else if (is_srt(task)) | ||
897 | return &srt_domain; | ||
898 | else /* BE */ | ||
899 | return &be_domain; | ||
900 | } | ||
901 | |||
902 | /* | ||
903 | * Ensures the task is not linked anywhere nor present in any ready queues. | ||
904 | */ | ||
905 | static noinline void unlink(struct task_struct* t) | ||
906 | { | ||
907 | cpu_entry_t *entry; | ||
908 | |||
909 | BUG_ON(!t); | ||
910 | |||
911 | if (t->rt_param.linked_on != NO_CPU) { | ||
912 | /* Unlink */ | ||
913 | entry = task_linked_entry(t); | ||
914 | link_to_cpu(entry, NULL, NULL); | ||
915 | } else if (is_queued(t)) { | ||
916 | entry = task_sched_entry(t); | ||
917 | |||
918 | /* A task that is unlinked due to a slack server must be treated | ||
919 | * differently. It is probably queued in a release_queue, but | ||
920 | * a race condition could allow is_released() to return true | ||
921 | * even when the task has not yet been released. Attempting | ||
922 | * to remove the task in this case would be disastrous. | ||
923 | */ | ||
924 | if (entry->scheduled == t && | ||
925 | entry->scheduled_server && /* Can be NULL on task_new */ | ||
926 | entry->scheduled_server->type == S_SLACK) { | ||
927 | |||
928 | TRACE_TASK_SUB(t, "unlinked on slack server"); | ||
929 | |||
930 | } else if (is_released(t, litmus_clock())) { | ||
931 | /* This is an interesting situation: t is scheduled, | ||
932 | * but has already been unlinked. It was re-added to | ||
933 | * a ready queue of some sort but now needs to | ||
934 | * be removed. This usually happens when a job has | ||
935 | * been preempted but completes before it is | ||
936 | * descheduled. | ||
937 | */ | ||
938 | TRACE_TASK_SUB(t, "removing from domain"); | ||
939 | remove(get_rt_domain(entry, t), t); | ||
940 | BUG_ON(is_queued(t)); | ||
941 | } | ||
942 | } | ||
943 | |||
944 | if (head_in_list(&task_data(t)->candidate_list)) { | ||
945 | list_del_init(&task_data(t)->candidate_list); | ||
946 | } | ||
947 | |||
948 | } | ||
949 | |||
950 | /* | ||
951 | * A job generated by an HRT task is eligible if either the job's deadline | ||
952 | * is no later than the server's next deadline, or the server has zero slack | ||
953 | * time in its current period. | ||
954 | */ | ||
955 | static inline int is_eligible(struct task_struct *task, | ||
956 | hrt_server_t *hrt_server) | ||
957 | { | ||
958 | TRACE_TASK_SUB(task, "%d %d %llu %llu", | ||
959 | hrt_server->ready, hrt_server->no_slack, | ||
960 | hrt_server->server.deadline, | ||
961 | get_deadline(task)); | ||
962 | return hrt_server->ready && !is_server_linked(&hrt_server->server) && | ||
963 | (hrt_server->no_slack || | ||
964 | lt_after_eq(hrt_server->server.deadline, get_deadline(task))); | ||
965 | } | ||
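To make the rule concrete: a ready, unlinked server with deadline 50 accepts a job with deadline 40 (the job cannot wait for the next server period), defers one with deadline 60 while slack remains, but accepts even the deadline-60 job once no_slack is set. A standalone restatement of the predicate (hypothetical helper, not the LITMUS version):

    #include <assert.h>

    /* Mirrors is_eligible(): ready, not linked, and either out of slack
     * or the job's deadline does not exceed the server's. */
    static int eligible(int ready, int linked, int no_slack,
                        unsigned long long srv_deadline,
                        unsigned long long job_deadline)
    {
            return ready && !linked && (no_slack || srv_deadline >= job_deadline);
    }

    int main(void)
    {
            assert( eligible(1, 0, 0, 50, 40));  /* early job: run it now */
            assert(!eligible(1, 0, 0, 50, 60));  /* late job: can wait */
            assert( eligible(1, 0, 1, 50, 60));  /* zero slack: run anyway */
            assert(!eligible(0, 0, 1, 50, 40));  /* server not ready */
            return 0;
    }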
966 | |||
967 | /* | ||
968 | * Set the server to release at the closest preceding deadline to time. | ||
969 | */ | ||
970 | static inline void catchup_server(server_t *server, lt_t time) | ||
971 | { | ||
972 | lt_t diff, sub; | ||
973 | |||
974 | diff = time - server->deadline; | ||
975 | sub = diff % server->period; | ||
976 | |||
977 | server_release_at(server, time - sub); | ||
978 | TRACE_SERVER_SUB(server, "catching up to %llu", time); | ||
979 | } | ||
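The modulo arithmetic snaps a late server back onto its period grid: the overshoot past the old deadline is reduced modulo the period, and the server is re-released at the last grid point not after 'time'. A worked standalone example (litmus types approximated; assumes time >= deadline, as the callers guarantee):

    #include <assert.h>

    typedef unsigned long long lt_t;

    /* Latest release boundary <= time on the grid anchored at 'deadline'
     * with spacing 'period' -- the same arithmetic as catchup_server(). */
    static lt_t catchup_release(lt_t deadline, lt_t period, lt_t time)
    {
            lt_t diff = time - deadline;
            return time - (diff % period);
    }

    int main(void)
    {
            /* Old deadline 100, period 25, now 160: re-release at 150
             * (new deadline presumably 150 + period = 175, just past now). */
            assert(catchup_release(100, 25, 160) == 150);
            /* Exactly on a boundary: re-release right at 'time'. */
            assert(catchup_release(100, 25, 150) == 150);
            return 0;
    }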
980 | |||
981 | static noinline int catchup_srt_server(struct task_struct *task) | ||
982 | { | ||
983 | int jobs, rv = 0; | ||
984 | lt_t release; | ||
985 | lt_t now = litmus_clock(); | ||
986 | server_t *srt_server = task_srt_server(task); | ||
987 | |||
988 | if (lt_before(srt_server->deadline, now) && | ||
989 | srt_server->job_no > 1) { | ||
990 | /* Calculate the number of jobs the server is behind */ | ||
991 | jobs = lt_subtract(now, srt_server->deadline) / | ||
992 | srt_server->period + 1; | ||
993 | |||
994 | /* Get the new release */ | ||
995 | release = srt_server->release + jobs * srt_server->period; | ||
996 | |||
997 | TRACE_SERVER_SUB(srt_server, "catching up to %llu, job %d", | ||
998 | release, srt_server->job_no + jobs); | ||
999 | |||
1000 | BUG_ON(jobs < 1); | ||
1001 | |||
1002 | /* Update server state */ | ||
1003 | server_release_at(srt_server, release); | ||
1004 | srt_server->job_no += jobs - 1; | ||
1005 | |||
1006 | /* Force task to take characteristics of server */ | ||
1007 | tsk_rt(task)->job_params.release = srt_server->release; | ||
1008 | tsk_rt(task)->job_params.deadline = srt_server->deadline; | ||
1009 | |||
1010 | rv = 1; | ||
1011 | |||
1012 | ////sched_trace_action(task, SERVER_RELEASED_ACTION); | ||
1013 | |||
1014 | } else if (lt_before(srt_server->deadline, now) && | ||
1015 | srt_server->job_no <= 1) { | ||
1016 | |||
1017 | server_release_at(srt_server, get_release(task)); | ||
1018 | srt_server->job_no = task_job_no(task); | ||
1019 | } | ||
1020 | |||
1021 | BUG_ON(srt_server->job_no == 0); | ||
1022 | |||
1023 | return rv; | ||
1024 | } | ||
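The jobs computation counts how many whole periods the server's deadline lags behind now, plus one, and jumps the release forward by that many periods; job_no then advances by jobs - 1, presumably because server_release_at() accounts for one release itself. A quick numeric check of the jump (standalone, illustrative values):

    #include <assert.h>

    typedef unsigned long long lt_t;

    int main(void)
    {
            /* Server state: release 75, deadline 100, period 25; now = 160. */
            lt_t release = 75, deadline = 100, period = 25, now = 160;

            int jobs = (int)((now - deadline) / period) + 1;   /* (60/25)+1 = 3 */
            lt_t new_release = release + jobs * period;        /* 75 + 75 = 150 */

            assert(jobs == 3);
            assert(new_release == 150);
            /* New deadline 150 + 25 = 175 > now: the server is caught up. */
            return 0;
    }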
1025 | |||
1026 | /* | ||
1027 | * If the server is eligible, return the next eligible job. If the server is | ||
1028 | * ineligible or there are no eligible jobs, returns NULL. This will re-release | ||
1029 | * any servers that are behind. | ||
1030 | */ | ||
1031 | static noinline struct task_struct* next_eligible_hrt(hrt_server_t *hrt_server) | ||
1032 | { | ||
1033 | lt_t now = litmus_clock(); | ||
1034 | lt_t dead, slack, budget; | ||
1035 | struct task_struct *task = __peek_ready(&hrt_server->hrt_domain); | ||
1036 | |||
1037 | /* Catch up server if it is initialized, not running, and late */ | ||
1038 | if (check_hrt_server_initialized(hrt_server) && | ||
1039 | !is_server_linked(&hrt_server->server)) { | ||
1040 | |||
1041 | dead = hrt_server->server.deadline; | ||
1042 | budget = hrt_server->server.budget; | ||
1043 | slack = lt_subtract(dead, budget); | ||
1044 | |||
1045 | TRACE_SERVER_SUB(&hrt_server->server, "dead: %llu, budget: %llu" | ||
1046 | "now: %llu, slack: %llu", | ||
1047 | TIME(dead), TIME(budget), TIME(now), TIME(slack)); | ||
1048 | |||
1049 | if (!head_in_list(&hrt_server->server.release_list) && | ||
1050 | lt_before_eq(dead, now)) { | ||
1051 | /* The server missed a release */ | ||
1052 | catchup_server(&hrt_server->server, now); | ||
1053 | TRACE_SERVER_SUB(&hrt_server->server, "now ready"); | ||
1054 | hrt_server->ready = 1; | ||
1055 | remove_slack(server_slack(&hrt_server->server)); | ||
1056 | hrt_server->no_slack = 0; | ||
1057 | |||
1058 | slack = lt_subtract(hrt_server->server.deadline, | ||
1059 | hrt_server->server.budget); | ||
1060 | |||
1061 | ////sched_trace_action(task, SERVER_RELEASED_ACTION); | ||
1062 | } | ||
1063 | |||
1064 | /* If the slack timer is active, this is not necessary */ | ||
1065 | if (!hrtimer_active(&hrt_server->slack_timer) && hrt_server->ready) { | ||
1066 | if (lt_before_eq(slack, now) && !hrt_server->no_slack) { | ||
1067 | /* The server missed the shift to no slack */ | ||
1068 | TRACE_SERVER_SUB(&hrt_server->server, "no slack: %llu", | ||
1069 | TIME(slack)); | ||
1070 | hrt_server->no_slack = 1; | ||
1071 | ////sched_trace_action(task, NO_SLACK_ACTION); | ||
1072 | } else { | ||
1073 | slack_timer_arm(hrt_server); | ||
1074 | } | ||
1075 | } | ||
1076 | |||
1077 | } else { | ||
1078 | TRACE_SERVER_SUB(&hrt_server->server, "%llu %d %llu %d %d", | ||
1079 | hrt_server->server.deadline, | ||
1080 | is_server_linked(&hrt_server->server), | ||
1081 | now, check_hrt_server_initialized(hrt_server), | ||
1082 | !is_server_linked(&hrt_server->server)); | ||
1083 | } | ||
1084 | |||
1085 | if (!hrt_server->server.budget || | ||
1086 | (task && !is_eligible(task, hrt_server))) { | ||
1087 | |||
1088 | if (!hrt_server->server.budget && | ||
1089 | !head_in_list(&hrt_server->server.release_list)) { | ||
1090 | TRACE_SERVER_SUB(&hrt_server->server, "requeing"); | ||
1091 | catchup_server(&hrt_server->server, now); | ||
1092 | requeue_server(&hrt_server->server, now); | ||
1093 | slack_timer_arm(hrt_server); | ||
1094 | } | ||
1095 | |||
1096 | if (task) { | ||
1097 | TRACE_TASK_SUB(task, "not eligible, budget: %llu", | ||
1098 | TIME(hrt_server->server.budget)); | ||
1099 | } | ||
1100 | task = NULL; | ||
1101 | |||
1102 | /* Donate slack if we have nothing to schedule */ | ||
1103 | if (hrt_server->ready && hrt_server->no_slack) { | ||
1104 | check_donate_slack(&hrt_server->server, NULL); | ||
1105 | } | ||
1106 | } | ||
1107 | |||
1108 | return task; | ||
1109 | } | ||
1110 | |||
1111 | /* | ||
1112 | * This will catch up the SRT's server if it is behind. | ||
1113 | */ | ||
1114 | static noinline struct task_struct* next_eligible_srt(void) | ||
1115 | { | ||
1116 | int done = 0; | ||
1117 | struct task_struct *next_srt; | ||
1118 | |||
1119 | while (!done) { | ||
1120 | next_srt = __peek_ready(&srt_domain); | ||
1121 | |||
1122 | /* A blocking task might pollute the SRT domain if the | ||
1123 | * task blocked while it was being run by a slack server. | ||
1124 | * Remove and ignore this task. | ||
1125 | */ | ||
1126 | while (next_srt && (get_rt_flags(next_srt) == RT_F_BLOCK || | ||
1127 | unlikely(!is_realtime(next_srt)) || | ||
1128 | tsk_rt(next_srt)->linked_on != NO_CPU)) { | ||
1129 | TRACE_TASK_SUB(next_srt, "removing finished task"); | ||
1130 | remove(&srt_domain, next_srt); | ||
1131 | next_srt = __peek_ready(&srt_domain); | ||
1132 | } | ||
1133 | |||
1134 | /* If the task blocked for a while or has otherwise not been | ||
1135 | * accessed, its server could have fallen behind. | ||
1136 | */ | ||
1137 | if (next_srt) { | ||
1138 | done = !catchup_srt_server(next_srt); | ||
1139 | |||
1140 | /* The parameters were modified. Re-insert the task. */ | ||
1141 | if (!done) { | ||
1142 | remove(&srt_domain, next_srt); | ||
1143 | __add_ready(&srt_domain, next_srt); | ||
1144 | } else if (is_server_linked(task_srt_server(next_srt))){ | ||
1145 | remove(&srt_domain, next_srt); | ||
1146 | done = 0; | ||
1147 | } | ||
1148 | } else { | ||
1149 | done = 1; | ||
1150 | } | ||
1151 | } | ||
1152 | |||
1153 | return next_srt; | ||
1154 | } | ||
1155 | |||
1156 | static inline server_t* next_be_server(void) | ||
1157 | { | ||
1158 | struct bheap_node *hn = bheap_peek(server_order, &be_ready_servers); | ||
1159 | return (hn) ? hn->value : NULL; | ||
1160 | } | ||
1161 | |||
1162 | static noinline server_t* next_eligible_be_server(void) | ||
1163 | { | ||
1164 | server_t *be_server = next_be_server(); | ||
1165 | lt_t now = litmus_clock(); | ||
1166 | |||
1167 | /* Catch up any late BE servers. This happens when the servers could | ||
1168 | * not find tasks to schedule or if the system is overutilized. | ||
1169 | */ | ||
1170 | while (be_server && (lt_before_eq(be_server->deadline, now) || | ||
1171 | is_server_linked(be_server))) { | ||
1172 | if (!be_server->deadline) { | ||
1173 | TRACE_SERVER_SUB(be_server, "not intialized"); | ||
1174 | return NULL; | ||
1175 | } | ||
1176 | bheap_delete(server_order, &be_ready_servers, | ||
1177 | be_server->hn); | ||
1178 | |||
1179 | if (is_server_linked(be_server)) { | ||
1180 | TRACE_SERVER_SUB(be_server, "linked"); | ||
1181 | be_server = next_be_server(); | ||
1182 | return NULL; | ||
1183 | } | ||
1184 | |||
1185 | catchup_server(be_server, now); | ||
1186 | check_donate_slack(be_server, NULL); | ||
1187 | bheap_insert(server_order, &be_ready_servers, | ||
1188 | be_server->hn); | ||
1189 | TRACE_SERVER_SUB(be_server, "catching up BE server"); | ||
1190 | be_server = next_be_server(); | ||
1191 | ////sched_trace_action(NULL, SERVER_RELEASED_ACTION); /* Release */ | ||
1192 | } | ||
1193 | |||
1194 | if (be_server && lt_before(now, be_server->release)) { | ||
1195 | TRACE_SERVER_SUB(be_server, "not released"); | ||
1196 | be_server = NULL; | ||
1197 | } | ||
1198 | |||
1199 | if (be_server) { | ||
1200 | TRACE_SERVER_SUB(be_server, "dead: %llu, rel: %llu, budget: %llu", | ||
1201 | be_server->deadline, be_server->release, | ||
1202 | be_server->budget); | ||
1203 | |||
1204 | } | ||
1205 | |||
1206 | return be_server; | ||
1207 | } | ||
1208 | |||
1209 | /* | ||
1210 | * Adds a task to the appropriate queue (ready / release) in a domain. | ||
1211 | */ | ||
1212 | static noinline void requeue(struct task_struct *task, rt_domain_t *domain) | ||
1213 | { | ||
1214 | lt_t now = litmus_clock(); | ||
1215 | int was_added; | ||
1216 | |||
1217 | BUG_ON(!is_realtime(task)); | ||
1218 | if (head_in_list(&task_data(task)->candidate_list)) { | ||
1219 | list_del_init(&task_data(task)->candidate_list); | ||
1220 | } | ||
1221 | |||
1222 | check_slack_candidate(task); | ||
1223 | |||
1224 | if (is_queued(task)) { | ||
1225 | TRACE_TASK_SUB(task, "not requeueing, already queued"); | ||
1226 | } else if (is_released(task, now)) { | ||
1227 | TRACE_TASK_SUB(task, "requeuing on ready %llu %llu %llu %llu", | ||
1228 | get_release(task), get_deadline(task), | ||
1229 | get_rt_period(task), now); | ||
1230 | __add_ready(domain, task); | ||
1231 | } else { | ||
1232 | /* Task needs to wait until it is released */ | ||
1233 | TRACE_TASK_SUB(task, "requeuing on release"); | ||
1234 | |||
1235 | was_added = add_release(domain, task); | ||
1236 | |||
1237 | /* The release time happened before we added ourselves | ||
1238 | * to the heap. We can now add to ready. | ||
1239 | */ | ||
1240 | if (!was_added) { | ||
1241 | TRACE_TASK_SUB(task, "missed release, going to ready"); | ||
1242 | __add_ready(domain, task); | ||
1243 | } | ||
1244 | } | ||
1245 | } | ||
1246 | |||
1247 | static inline void earlier_server_task(server_t *first, | ||
1248 | struct task_struct *first_task, | ||
1249 | server_t *second, | ||
1250 | struct task_struct *second_task, | ||
1251 | server_t **server, | ||
1252 | struct task_struct **task) | ||
1253 | { | ||
1254 | if (!first || | ||
1255 | (second && lt_before_eq(second->deadline, first->deadline))) { | ||
1256 | *server = second; | ||
1257 | *task = second_task; | ||
1258 | } else { | ||
1259 | *server = first; | ||
1260 | *task = first_task; | ||
1261 | } | ||
1262 | } | ||
1263 | |||
1264 | /* | ||
1265 | * Set server and task to the next server and task respectively. | ||
1266 | * If entry is not null, the next server will see if it can schedule | ||
1267 | * entry's linked task. | ||
1268 | */ | ||
1269 | static void next_global_task(cpu_entry_t *entry, | ||
1270 | server_t **next_server, | ||
1271 | struct task_struct **next_task) | ||
1272 | { | ||
1273 | struct task_struct *next_srt, *next_be, *next_slack; | ||
1274 | server_t *be_server, *slack_server, *srt_server; | ||
1275 | |||
1276 | *next_server = NULL; | ||
1277 | *next_task = NULL; | ||
1278 | |||
1279 | next_srt = next_eligible_srt(); | ||
1280 | srt_server = (next_srt) ? task_srt_server(next_srt) : NULL; | ||
1281 | |||
1282 | next_be = __peek_ready(&be_domain); | ||
1283 | be_server = next_eligible_be_server(); | ||
1284 | |||
1285 | next_slack = next_eligible_slack(); | ||
1286 | slack_server = next_eligible_slack_server(); | ||
1287 | |||
1288 | TRACE_SUB("be_server: %d, next_be: %d, next_srt: %d, slack_server: %d " | ||
1289 | "next_slack: %d", (be_server) ? be_server->id : -1, | ||
1290 | (next_be) ? next_be->pid : -1, | ||
1291 | (next_srt) ? next_srt->pid : -1, | ||
1292 | (slack_server) ? slack_server->id : -1, | ||
1293 | (next_slack) ? next_slack->pid : -1); | ||
1294 | |||
1295 | /* Check if the servers can schedule the task linked to entry */ | ||
1296 | if (entry && entry->linked) { | ||
1297 | |||
1298 | if (entry->linked_server->type == S_BE && | ||
1299 | (!next_be || | ||
1300 | lt_before(get_release(entry->linked), | ||
1301 | get_release(next_be)))) { | ||
1302 | |||
1303 | next_be = entry->linked; | ||
1304 | } else if (entry->linked_server->type == S_SLACK && | ||
1305 | (!next_slack || | ||
1306 | lt_before(get_deadline(entry->linked), | ||
1307 | get_deadline(next_slack)))) { | ||
1308 | |||
1309 | next_slack = entry->linked; | ||
1310 | } | ||
1311 | } | ||
1312 | |||
1313 | /* Remove tasks without servers and vice versa from contention */ | ||
1314 | if (!next_be || !be_server) { | ||
1315 | next_be = NULL; | ||
1316 | be_server = NULL; | ||
1317 | } | ||
1318 | if (!next_slack || !slack_server) { | ||
1319 | next_slack = NULL; | ||
1320 | slack_server = NULL; | ||
1321 | } | ||
1322 | |||
1323 | /* Favor BE servers. If we don't, then a BE server might lose | ||
1324 | * out to its own slack. | ||
1325 | */ | ||
1326 | if (slack_server && be_server && | ||
1327 | be_server->deadline == slack_server->deadline) { | ||
1328 | next_slack = NULL; | ||
1329 | slack_server = NULL; | ||
1330 | } | ||
1331 | |||
1332 | /* There is probably a better way to do this */ | ||
1333 | earlier_server_task(srt_server, next_srt, | ||
1334 | be_server, next_be, | ||
1335 | next_server, next_task); | ||
1336 | earlier_server_task(*next_server, *next_task, | ||
1337 | slack_server, next_slack, | ||
1338 | next_server, next_task); | ||
1339 | |||
1340 | //BUG_ON(*next_server && lt_before(litmus_clock(), (*next_server)->release)); | ||
1341 | } | ||
1342 | |||
1343 | /* | ||
1344 | * Remove the task and server from any ready queues. | ||
1345 | */ | ||
1346 | static void remove_from_ready(server_t *server, struct task_struct *task, | ||
1347 | cpu_entry_t *entry) | ||
1348 | { | ||
1349 | server_t *slack; | ||
1350 | rt_domain_t *domain; | ||
1351 | BUG_ON(!server); | ||
1352 | BUG_ON(!entry); | ||
1353 | BUG_ON(!task); | ||
1354 | |||
1355 | if (server->type == S_SLACK) { | ||
1356 | TRACE_SERVER_SUB(server, "removed from slack list"); | ||
1357 | list_del_init(&server->list); | ||
1358 | |||
1359 | /* Remove from consideration of BE servers */ | ||
1360 | if (is_be(task) && is_queued(task)) { | ||
1361 | TRACE_TASK_SUB(task, "BE removed from ready"); | ||
1362 | remove(&be_domain, task); | ||
1363 | } | ||
1364 | |||
1365 | /* Remove from consideration of slack servers */ | ||
1366 | if (head_in_list(&task_data(task)->candidate_list)) { | ||
1367 | TRACE_TASK_SUB(task, "deleting candidate"); | ||
1368 | list_del_init(&task_data(task)->candidate_list); | ||
1369 | } | ||
1370 | } else { | ||
1371 | slack = server_slack(server); | ||
1372 | if (slack && head_in_list(&slack->list)) { | ||
1373 | remove_slack(slack); | ||
1374 | } | ||
1375 | if (server->type == S_BE) { | ||
1376 | TRACE_SERVER_SUB(server, "server removed from ready"); | ||
1377 | BUG_ON(!server->hn); | ||
1378 | bheap_delete(server_order, &be_ready_servers, | ||
1379 | server->hn); | ||
1380 | } | ||
1381 | if (is_queued(task)) { | ||
1382 | domain = get_rt_domain(entry, task); | ||
1383 | BUG_ON(!domain); | ||
1384 | TRACE_TASK_SUB(task, "removed from ready"); | ||
1385 | remove(domain, task); | ||
1386 | } | ||
1387 | } | ||
1388 | |||
1389 | BUG_ON(!task_data(task)); | ||
1390 | } | ||
1391 | |||
1392 | static void check_for_slack_preempt(struct task_struct*,server_t*,cpu_entry_t*, int); | ||
1393 | |||
1394 | /* | ||
1395 | * Finds and links the next server and task to an entry with no linked task. | ||
1396 | */ | ||
1397 | static void edf_hsb_pick_next(cpu_entry_t *entry) | ||
1398 | { | ||
1399 | struct task_struct *next_task, *linked; | ||
1400 | server_t *next_server; | ||
1401 | |||
1402 | BUG_ON(entry->linked); | ||
1403 | |||
1404 | next_task = next_eligible_hrt(&entry->hrt_server); | ||
1405 | if (next_task) | ||
1406 | next_server = &entry->hrt_server.server; | ||
1407 | else | ||
1408 | next_global_task(NULL, &next_server, &next_task); | ||
1409 | |||
1410 | |||
1411 | if (next_task) { | ||
1412 | remove_from_ready(next_server, next_task, entry); | ||
1413 | check_for_slack_preempt(next_task, next_server, entry, 1); | ||
1414 | TRACE_TASK_SERVER_SUB(next_task, next_server, | ||
1415 | "removing and picked"); | ||
1416 | |||
1417 | /* A slack preemption could cause something that was already | ||
1418 | * running to be 'swapped' to this CPU in link_to_cpu. | ||
1419 | */ | ||
1420 | if (entry->linked) { | ||
1421 | linked = entry->linked; | ||
1422 | unlink(entry->linked); | ||
1423 | requeue(linked, get_rt_domain(entry, linked)); | ||
1424 | TRACE_TASK_SUB(linked, "preempted next pick"); | ||
1425 | } | ||
1426 | link_to_cpu(entry, next_task, next_server); | ||
1427 | } | ||
1428 | } | ||
1429 | |||
1430 | /* | ||
1431 | * Preempt the currently running server and task with new ones. | ||
1432 | * It is possible that either only the server or the task is different here. | ||
1433 | */ | ||
1434 | static void preempt(cpu_entry_t *entry, struct task_struct *next, | ||
1435 | server_t *next_server, int slack_resched) | ||
1436 | { | ||
1437 | struct task_struct *linked; | ||
1438 | rt_domain_t *domain; | ||
1439 | |||
1440 | TRACE_TASK_SERVER_SUB(next, next_server, | ||
1441 | "preempting on P%d", entry->cpu); | ||
1442 | |||
1443 | remove_from_ready(next_server, next, entry); | ||
1444 | |||
1445 | check_for_slack_preempt(next, next_server, entry, slack_resched); | ||
1446 | linked = entry->linked; | ||
1447 | link_to_cpu(entry, next, next_server); | ||
1448 | |||
1449 | /* No need for this if only the server was preempted */ | ||
1450 | if (!linked || linked != entry->linked) { | ||
1451 | if (linked) { | ||
1452 | domain = get_rt_domain(entry, linked); | ||
1453 | requeue(linked, domain); | ||
1454 | } | ||
1455 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1456 | } | ||
1457 | } | ||
1458 | |||
1459 | /* | ||
1460 | * Causes a preemption if: | ||
1461 | * 1. task is being run by a slack server on a different CPU | ||
1462 | * 2. slack donated by server is running a task on a different CPU | ||
1463 | */ | ||
1464 | static noinline void check_for_slack_preempt(struct task_struct *task, | ||
1465 | server_t *server, | ||
1466 | cpu_entry_t *next_entry, | ||
1467 | int resched) | ||
1468 | { | ||
1469 | cpu_entry_t *entry = NULL; | ||
1470 | server_t *slack = server_slack(server); | ||
1471 | struct task_struct *slack_task; | ||
1472 | |||
1473 | /* The task is currently being run by another server */ | ||
1474 | if (tsk_rt(task)->linked_on != NO_CPU) { | ||
1475 | entry = task_linked_entry(task); | ||
1476 | |||
1477 | if (entry != next_entry) { | ||
1478 | TRACE_TASK_SUB(task, "was on P%d", entry->cpu); | ||
1479 | |||
1480 | unlink(task); | ||
1481 | |||
1486 | } | ||
1487 | } | ||
1488 | |||
1489 | /* The server's slack is currently being run */ | ||
1490 | if (slack && is_server_linked(slack)) { | ||
1491 | entry = &per_cpu(noslack_cpu_entries, slack->cpu); | ||
1492 | slack_task = server_task(slack); | ||
1493 | |||
1494 | unlink(slack_task); | ||
1495 | remove_slack(slack); | ||
1496 | requeue(slack_task, get_rt_domain(entry, slack_task)); | ||
1497 | |||
1498 | if (entry != next_entry && resched) { | ||
1499 | TRACE_SERVER_SUB(slack, "was on P%d", entry->cpu); | ||
1500 | /* Force a reschedule */ | ||
1501 | edf_hsb_pick_next(entry); | ||
1502 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1503 | } else { | ||
1504 | /* This can only happen on a preemption. If a preemption | ||
1505 | * happens, the task will be requeued elsewhere. | ||
1506 | * Obviously the next task has already been chosen. | ||
1507 | */ | ||
1508 | TRACE_SERVER_SUB(slack, "was on local P%d", entry->cpu); | ||
1509 | } | ||
1510 | } | ||
1511 | } | ||
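Stripped of the unlink/requeue mechanics, the function reduces to two placement tests. A sketch, where NO_CPU and the placement struct are invented for illustration:

    #define NO_CPU (-1)

    /* Where the task and the server's donated slack currently run. */
    struct placement { int task_cpu; int slack_cpu; };

    static int slack_preempt_needed(struct placement p, int target_cpu)
    {
            /* 1. the task already runs in a slack server elsewhere */
            if (p.task_cpu != NO_CPU && p.task_cpu != target_cpu)
                    return 1;
            /* 2. slack donated by this server runs a task elsewhere */
            if (p.slack_cpu != NO_CPU && p.slack_cpu != target_cpu)
                    return 1;
            return 0;
    }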
1512 | |||
1513 | /* | ||
1514 | * Check for any necessary non-hrt preemptions. | ||
1515 | */ | ||
1516 | static void check_for_global_preempt(void) | ||
1517 | { | ||
1518 | cpu_entry_t *entry, *sched; | ||
1519 | server_t *next_server; | ||
1520 | int on_cpu; | ||
1521 | struct task_struct *next_task = (struct task_struct*)1; /* non-NULL sentinel, overwritten below */ | ||
1522 | |||
1523 | for (entry = lowest_prio_cpu(); entry; entry = lowest_prio_cpu()) { | ||
1524 | /* HRT cpus should not be in this heap */ | ||
1525 | BUG_ON(entry->linked && is_hrt(entry->linked)); | ||
1526 | |||
1527 | next_global_task(entry, &next_server, &next_task); | ||
1528 | |||
1529 | if (!next_server) | ||
1530 | break; | ||
1531 | |||
1532 | /* Preempt only if we have an earlier deadline */ | ||
1533 | if (entry->linked && | ||
1534 | !lt_before(next_server->deadline, | ||
1535 | entry->linked_server->deadline)) { | ||
1536 | break; | ||
1537 | } | ||
1538 | |||
1539 | /* If we are scheduled on another CPU, the link code | ||
1540 | * will force us to link to that CPU and try to link | ||
1541 | * that CPU's task to this CPU. This is impossible | ||
1542 | * if that CPU has linked HRT tasks which cannot | ||
1543 | * migrate. | ||
1544 | */ | ||
1545 | on_cpu = next_task->rt_param.scheduled_on; | ||
1546 | if (on_cpu != NO_CPU) { | ||
1547 | sched = &per_cpu(noslack_cpu_entries, on_cpu); | ||
1548 | |||
1549 | if (sched != entry && sched->linked && | ||
1550 | is_hrt(sched->linked)) { | ||
1551 | |||
1552 | TRACE_TASK_SUB(next_task, | ||
1553 | "Already on P%d", | ||
1554 | sched->cpu); | ||
1555 | break; | ||
1556 | } | ||
1557 | } | ||
1558 | |||
1559 | /* We do not reschedule if this causes a slack preemption | ||
1560 | * because we will detect if we should reschedule on the | ||
1561 | * next iteration of the loop. | ||
1562 | */ | ||
1563 | preempt(entry, next_task, next_server, | ||
1564 | 0 /* Don't reschedule on a slack preemption */); | ||
1565 | } | ||
1566 | } | ||
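The preemption test inside the loop is plain EDF: take the globally lowest-priority CPU and displace its link only for a strictly earlier server deadline (lt_before() compares lt_t timestamps; plain '<' stands in below). A sketch:

    typedef unsigned long long lt_t;

    static int should_preempt(int cpu_is_idle, lt_t next_deadline,
                              lt_t linked_deadline)
    {
            if (cpu_is_idle)
                    return 1;               /* always fill an idle CPU */
            return next_deadline < linked_deadline; /* earlier deadline wins */
    }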
1567 | |||
1568 | /* | ||
1569 | * Correct local link after a change to the local HRT domain. | ||
1570 | */ | ||
1571 | static void check_for_hrt_preempt(cpu_entry_t *entry) | ||
1572 | { | ||
1573 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
1574 | struct task_struct *next_hrt = next_eligible_hrt(hrt_server); | ||
1575 | |||
1576 | if (next_hrt && | ||
1577 | (!entry->linked || !is_hrt(entry->linked) || | ||
1578 | !is_eligible(entry->linked, hrt_server) || | ||
1579 | edf_preemption_needed(&hrt_server->hrt_domain, entry->linked))) { | ||
1580 | |||
1581 | preempt(entry, next_hrt, &hrt_server->server, 1); | ||
1582 | |||
1583 | } else { | ||
1584 | TRACE_SERVER_SUB(&hrt_server->server, "not HRT preempting"); | ||
1585 | } | ||
1586 | } | ||
1587 | |||
1588 | /* | ||
1589 | * Assumes called with local irqs disabled. | ||
1590 | */ | ||
1591 | static void job_arrival(struct task_struct *task, cpu_entry_t *entry) | ||
1592 | { | ||
1593 | int was_empty; | ||
1594 | |||
1595 | BUG_ON(task_cpu(task) == NO_CPU); | ||
1596 | |||
1597 | TRACE_TASK_SUB(task, "arriving on P%d", entry->cpu); | ||
1598 | |||
1599 | if (is_hrt(task)) { | ||
1600 | requeue(task, &entry->hrt_server.hrt_domain); | ||
1601 | check_for_hrt_preempt(entry); | ||
1602 | } else if (is_srt(task)) { | ||
1603 | requeue(task, &srt_domain); | ||
1604 | check_for_global_preempt(); | ||
1605 | } else /* BE */ { | ||
1606 | was_empty = !__jobs_pending(&be_domain); | ||
1607 | requeue(task, &be_domain); | ||
1608 | |||
1609 | /* The only way this could cause a preemption is if an eligible | ||
1610 | * BE server could not queue up a task. | ||
1611 | */ | ||
1612 | if (was_empty && __jobs_pending(&be_domain)) | ||
1613 | check_for_global_preempt(); | ||
1614 | } | ||
1615 | } | ||
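Each task class routes to a different queue and triggers a different preemption check; only BE arrivals get the was-empty fast path, since an already-backlogged BE queue means no ready BE server is sitting idle. As a standalone sketch:

    #include <stdbool.h>

    enum task_class { CLS_HRT, CLS_SRT, CLS_BE };

    /* Returns true when a *global* preemption check is warranted;
     * HRT arrivals only ever recheck their own CPU.
     */
    static bool arrival_needs_global_check(enum task_class cls,
                                           bool be_queue_was_empty)
    {
            switch (cls) {
            case CLS_HRT:
                    return false;
            case CLS_SRT:
                    return true;
            case CLS_BE:
                    return be_queue_was_empty;
            }
            return false;
    }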
1616 | |||
1617 | /****************************************************************************** | ||
1618 | * Timer methods | ||
1619 | ******************************************************************************/ | ||
1620 | |||
1621 | /* | ||
1622 | * Merges a group of released HRT tasks into a ready queue and checks | ||
1623 | * for preemptions. | ||
1624 | */ | ||
1625 | static void release_hrt_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1626 | { | ||
1627 | unsigned long flags; | ||
1628 | struct task_struct *first; | ||
1629 | cpu_entry_t *entry; | ||
1630 | |||
1631 | raw_spin_lock_irqsave(global_lock, flags); | ||
1632 | |||
1633 | first = (struct task_struct*)bheap_peek(edf_ready_order, tasks)->value; | ||
1634 | BUG_ON(!first || !is_hrt(first)); | ||
1635 | entry = task_sched_entry(first); | ||
1636 | |||
1637 | TRACE_TASK(first, "HRT tasks released at %llu on P%d\n", | ||
1638 | TIME(litmus_clock()), task_cpu(first)); | ||
1639 | |||
1640 | __merge_ready(domain, tasks); | ||
1641 | check_for_hrt_preempt(entry); | ||
1642 | |||
1643 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1644 | } | ||
1645 | |||
1646 | /* | ||
1647 | * Merges a group of released tasks into a ready queue and checks to see | ||
1648 | * if the scheduler needs to be invoked. | ||
1649 | */ | ||
1650 | static void release_srt_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1651 | { | ||
1652 | unsigned long flags; | ||
1653 | struct task_struct *first = (bheap_peek(edf_ready_order, tasks)->value); | ||
1654 | |||
1655 | raw_spin_lock_irqsave(global_lock, flags); | ||
1656 | |||
1657 | TRACE_TASK(first, "SRT tasks released at %llu\n", TIME(litmus_clock())); | ||
1658 | |||
1659 | __merge_ready(domain, tasks); | ||
1660 | check_for_global_preempt(); | ||
1661 | |||
1662 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1663 | } | ||
1664 | |||
1665 | /* | ||
1666 | * Merges a group of released tasks into a ready queue and checks to see | ||
1667 | * if the scheduler needs to be invoked. | ||
1668 | */ | ||
1669 | static void release_be_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1670 | { | ||
1671 | unsigned long flags; | ||
1672 | int was_empty; | ||
1673 | struct task_struct *first = (bheap_peek(edf_ready_order, tasks)->value); | ||
1674 | |||
1675 | TRACE_TASK(first, "BE tasks released at %llu\n", TIME(litmus_clock())); | ||
1676 | |||
1677 | raw_spin_lock_irqsave(global_lock, flags); | ||
1678 | |||
1679 | was_empty = !__jobs_pending(domain); | ||
1680 | __merge_ready(domain, tasks); | ||
1681 | if (was_empty) { | ||
1682 | /* The only way this could cause a preemption is if a BE server | ||
1683 | * could not find a task to run. | ||
1684 | */ | ||
1685 | check_for_global_preempt(); | ||
1686 | } | ||
1687 | |||
1688 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1689 | } | ||
1690 | |||
1691 | static enum hrtimer_restart slack_timer_fire(struct hrtimer *timer) | ||
1692 | { | ||
1693 | unsigned long flags; | ||
1694 | hrt_server_t *server = container_of(timer, hrt_server_t, slack_timer); | ||
1695 | cpu_entry_t *entry = container_of(server, cpu_entry_t, hrt_server); | ||
1696 | |||
1697 | raw_spin_lock_irqsave(global_lock, flags); | ||
1698 | |||
1699 | TRACE_TIMER("slack timer fired for P%d", entry->cpu); | ||
1700 | BUG_ON(!server->ready); | ||
1702 | |||
1703 | /* Set new state of entry */ | ||
1704 | server->no_slack = 1; | ||
1705 | check_for_hrt_preempt(entry); | ||
1706 | |||
1707 | /* Donate slack if the HRT server cannot run anything */ | ||
1708 | if (!entry->linked || !is_hrt(entry->linked)) { | ||
1709 | check_donate_slack(&server->server, NULL); | ||
1710 | check_for_global_preempt(); | ||
1711 | } | ||
1712 | |||
1713 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1714 | |||
1715 | return HRTIMER_NORESTART; | ||
1716 | } | ||
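The timer marks the end of the server's slack window: after it fires, the HRT server is in no-slack mode, and its remaining budget may be offered to other work only if no HRT job is linked locally. A reduced sketch of that state change, with locking and the preemption calls omitted and the struct invented here:

    #include <stdbool.h>

    /* Reduction of hrt_server_t to the flags the timer touches. */
    struct hrt_state { bool ready; bool no_slack; bool linked_is_hrt; };

    static bool on_slack_timer(struct hrt_state *s)
    {
            s->no_slack = true;
            /* true => caller should try to donate slack globally */
            return !s->linked_is_hrt;
    }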
1717 | |||
1718 | static void job_completion(cpu_entry_t *entry, struct task_struct* task) | ||
1719 | { | ||
1720 | server_t *server = entry->linked_server; | ||
1721 | set_rt_flags(task, RT_F_SLEEP); | ||
1722 | |||
1723 | TRACE_TASK_SUB(task, "completed"); | ||
1724 | |||
1725 | unlink(task); | ||
1726 | check_donate_slack(server, task); | ||
1727 | |||
1728 | /* If a slack server completed an SRT task, the work for the | ||
1729 | * next job arrival has already been done. | ||
1730 | */ | ||
1731 | if (server->type == S_SLACK && is_srt(task)) { | ||
1732 | tsk_rt(task)->job_params.job_no++; | ||
1733 | sched_trace_task_release(task); | ||
1734 | TRACE_TASK_SERVER_SUB(task, server, "catching up SRT, " | ||
1735 | "rel: %llu, dead: %llu", | ||
1736 | TIME(get_release(task)), | ||
1737 | TIME(get_deadline(task))); | ||
1738 | } else if (server->type == S_SRT) { | ||
1739 | /* If the task is behind the server it must release immediately, | ||
1740 | * leaving its release time and deadline unchanged. | ||
1741 | */ | ||
1742 | if (server->job_no > tsk_rt(task)->job_params.job_no) { | ||
1743 | TRACE_TASK_SUB(task, "catching up"); | ||
1744 | tsk_rt(task)->job_params.job_no++; | ||
1745 | } else { | ||
1746 | /* Otherwise release them both */ | ||
1747 | prepare_for_next_period(task); | ||
1748 | TRACE_TASK_SUB(task, "next release: %llu, dead: %llu", | ||
1749 | TIME(get_release(task)), | ||
1750 | TIME(get_deadline(task))); | ||
1751 | server_release(server); | ||
1752 | } | ||
1753 | } else { | ||
1754 | prepare_for_next_period(task); | ||
1755 | TRACE_TASK_SUB(task, "next release: %llu, dead: %llu", | ||
1756 | TIME(get_release(task)), | ||
1757 | TIME(get_deadline(task))); | ||
1758 | } | ||
1759 | |||
1760 | if (is_released(task, litmus_clock())) | ||
1761 | sched_trace_task_release(task); | ||
1762 | |||
1763 | /* Don't requeue a blocking task */ | ||
1764 | if (is_running(task)) | ||
1765 | job_arrival(task, entry); | ||
1766 | |||
1767 | sched_trace_task_completion(task, 1); | ||
1768 | } | ||
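Which bookkeeping runs at completion depends only on the server type and on whether the task's job count trails its server's. A standalone decision-table sketch (the enum names are invented to keep it self-contained):

    enum srv_type { SRV_HRT, SRV_SRT, SRV_BE, SRV_SLACK };
    enum action   { CATCH_UP_JOB_ONLY, RELEASE_TASK_AND_SERVER,
                    RELEASE_TASK_ONLY };

    static enum action completion_action(enum srv_type st, int task_is_srt,
                                         int server_job_no, int task_job_no)
    {
            if (st == SRV_SLACK && task_is_srt)
                    return CATCH_UP_JOB_ONLY;   /* next job already set up */
            if (st == SRV_SRT)
                    return (server_job_no > task_job_no)
                            ? CATCH_UP_JOB_ONLY          /* task lags server */
                            : RELEASE_TASK_AND_SERVER;   /* release both     */
            return RELEASE_TASK_ONLY;
    }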
1769 | |||
1770 | /* | ||
1771 | * Assumes called with local irqs disabled. | ||
1772 | */ | ||
1773 | static void server_completed(server_t *server, struct task_struct *task) | ||
1774 | { | ||
1775 | hrt_server_t *hrt_server; | ||
1776 | cpu_entry_t *entry = task_linked_entry(task); | ||
1777 | |||
1778 | BUG_ON(entry->linked != task); | ||
1779 | BUG_ON(entry->linked_server != server); | ||
1780 | |||
1781 | if (server->type == S_SRT) { | ||
1782 | TRACE_TASK_SUB(task, "must wait on server"); | ||
1783 | |||
1784 | /* The job must now take the priority and release time | ||
1785 | * of the next server. We do this so that rt_domain and other | ||
1786 | * handy methods can still be used with SRT jobs. Because this | ||
1787 | * can ONLY happen if the | ||
1788 | * task's job number gets behind the server's, we can | ||
1789 | * easily detect the job catching up later. | ||
1790 | */ | ||
1791 | tsk_rt(task)->job_params.release = server->deadline; | ||
1792 | tsk_rt(task)->job_params.deadline = server->deadline + | ||
1793 | get_rt_period(task); | ||
1794 | TRACE_TASK_SUB(task, "waiting, new dead: %llu, new rel: %llu", | ||
1795 | TIME(get_deadline(task)), | ||
1796 | TIME(get_release(task))); | ||
1797 | |||
1798 | } else if (server->type == S_HRT) { | ||
1799 | /* Update state of HRT server */ | ||
1800 | hrt_server = container_of(server, hrt_server_t, server); | ||
1801 | hrt_server->ready = 0; | ||
1802 | TRACE_SERVER_SUB(server, "P%d no longer ready", entry->cpu); | ||
1803 | |||
1804 | if (hrtimer_active(&hrt_server->slack_timer)) | ||
1805 | slack_timer_cancel(hrt_server); | ||
1806 | } | ||
1807 | |||
1808 | if (server->type != S_SLACK) { | ||
1809 | server_release(server); | ||
1810 | } | ||
1811 | |||
1812 | sched_trace_action(task, SERVER_COMPLETED_ACTION); | ||
1813 | |||
1814 | unlink(task); | ||
1815 | requeue(task, get_rt_domain(entry, task)); | ||
1816 | |||
1817 | /* We know this CPU needs to pick its next task */ | ||
1818 | edf_hsb_pick_next(entry); | ||
1819 | |||
1820 | /* Only cause a reschedule if something new was scheduled. A task | ||
1821 | * could merely have swapped servers. | ||
1822 | */ | ||
1823 | if (entry->linked != task) | ||
1824 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1825 | else | ||
1826 | entry->scheduled_server = entry->linked_server; | ||
1827 | } | ||
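For a concrete instance of the postponement in the S_SRT branch: an SRT task with a 10 ms period whose server exhausts its budget at deadline 40 ms gets release = 40 ms and deadline = 50 ms. The arithmetic in isolation, as a sketch:

    typedef unsigned long long lt_t;

    struct srt_job { lt_t release, deadline, period; };

    /* Same assignments as the S_SRT branch above */
    static void postpone_to_server(struct srt_job *j, lt_t server_deadline)
    {
            j->release  = server_deadline;                /* e.g. 40 ms */
            j->deadline = server_deadline + j->period;    /* e.g. 50 ms */
    }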
1828 | |||
1829 | static void hrt_server_released(server_t *server) | ||
1830 | { | ||
1831 | hrt_server_t *hrt_server = container_of(server, hrt_server_t, server); | ||
1832 | cpu_entry_t *entry = container_of(hrt_server, cpu_entry_t, hrt_server); | ||
1833 | |||
1834 | BUG_ON(hrtimer_active(&hrt_server->slack_timer)); | ||
1835 | TRACE_SERVER_SUB(server, "HRT server released on P%d", entry->cpu); | ||
1836 | |||
1837 | hrt_server->no_slack = 0; | ||
1838 | hrt_server->ready = 1; | ||
1839 | remove_slack(server_slack(&hrt_server->server)); | ||
1840 | |||
1841 | check_for_hrt_preempt(entry); | ||
1842 | |||
1843 | /* Ensure slack timer is only running if the current | ||
1844 | * job is not HRT. | ||
1845 | */ | ||
1846 | if (entry->linked && is_hrt(entry->linked)) | ||
1847 | slack_timer_cancel(hrt_server); | ||
1848 | else | ||
1849 | slack_timer_arm(hrt_server); | ||
1850 | } | ||
1851 | |||
1852 | static void servers_released(struct list_head *servers) | ||
1853 | { | ||
1854 | int was_be = 0; | ||
1855 | unsigned long flags; | ||
1856 | struct list_head *pos, *safe; | ||
1857 | server_t *server; | ||
1858 | |||
1859 | raw_spin_lock_irqsave(global_lock, flags); | ||
1860 | |||
1862 | TRACE_TIMER("Servers released"); | ||
1863 | |||
1864 | list_for_each_safe(pos, safe, servers) { | ||
1865 | server = list_entry(pos, server_t, release_list); | ||
1866 | |||
1867 | list_del_init(pos); | ||
1868 | |||
1869 | if (server->type == S_BE) { | ||
1871 | was_be = 1; | ||
1872 | BUG_ON(bheap_node_in_heap(server->hn)); | ||
1873 | TRACE_SERVER_SUB(server, "inserting BE server"); | ||
1874 | bheap_insert(server_order, &be_ready_servers, | ||
1875 | server->hn); | ||
1876 | check_donate_slack(server, NULL); | ||
1877 | } else { /* HRT server */ | ||
1878 | hrt_server_released(server); | ||
1879 | } | ||
1880 | } | ||
1881 | |||
1882 | if (was_be) | ||
1883 | check_for_global_preempt(); | ||
1884 | |||
1885 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1886 | } | ||
1887 | |||
1888 | /****************************************************************************** | ||
1889 | * Server management methods | ||
1890 | ******************************************************************************/ | ||
1891 | |||
1892 | static int curr_be = 0; | ||
1893 | |||
1894 | /* | ||
1895 | * A BE server has been added in a proc entry. | ||
1896 | */ | ||
1897 | static int admit_be_server(unsigned long long wcet, | ||
1898 | unsigned long long period, | ||
1899 | int cpu) | ||
1900 | { | ||
1901 | int rv = 0; | ||
1902 | server_t *be_server; | ||
1903 | |||
1904 | if (cpu != NO_CPU) { | ||
1905 | rv = -EINVAL; | ||
1906 | goto out; | ||
1907 | } | ||
1908 | |||
1909 | be_server = server_alloc(GFP_ATOMIC); | ||
1910 | server_init(be_server, &server_domain, | ||
1911 | BE_SERVER_BASE + ++curr_be, | ||
1912 | wcet, period, 1); | ||
1913 | be_server->type = S_BE; | ||
1914 | server_slack_create(be_server); | ||
1915 | |||
1916 | TRACE_SERVER_SUB(be_server, "admitted BE server"); | ||
1917 | |||
1918 | list_add(&be_server->list, &be_servers); | ||
1919 | bheap_insert(server_order, &be_ready_servers, be_server->hn); | ||
1920 | |||
1921 | out: | ||
1922 | return rv; | ||
1923 | } | ||
1924 | |||
1925 | /* | ||
1926 | * Output all BE servers to a proc entry. | ||
1927 | */ | ||
1928 | static void list_be_servers(server_proc_t *proc) | ||
1929 | { | ||
1930 | struct list_head *pos; | ||
1931 | server_t *be_server; | ||
1932 | |||
1933 | list_for_each(pos, &be_servers) { | ||
1934 | be_server = list_entry(pos, server_t, list); | ||
1935 | list_server(be_server, NO_CPU, proc); | ||
1936 | } | ||
1937 | } | ||
1938 | |||
1939 | /* | ||
1940 | * Halts and destroys all BE servers. | ||
1941 | */ | ||
1942 | static void stop_be_servers(void) | ||
1943 | { | ||
1944 | server_t *be_server; | ||
1945 | struct list_head *pos, *safe; | ||
1946 | |||
1947 | list_for_each_safe(pos, safe, &be_servers) { | ||
1948 | be_server = list_entry(pos, server_t, list); | ||
1949 | |||
1950 | list_del_init(pos); | ||
1951 | if (bheap_node_in_heap(be_server->hn)) | ||
1952 | bheap_delete(server_order, &be_ready_servers, | ||
1953 | be_server->hn); | ||
1954 | server_slack_destroy(be_server); | ||
1955 | server_destroy(be_server); | ||
1956 | server_free(be_server); | ||
1957 | } | ||
1958 | } | ||
1959 | |||
1960 | /* | ||
1961 | * An HRT server has been added in a proc entry. | ||
1962 | */ | ||
1963 | static int admit_hrt_server(unsigned long long wcet, | ||
1964 | unsigned long long period, | ||
1965 | int cpu) | ||
1966 | { | ||
1967 | cpu_entry_t *entry = &per_cpu(noslack_cpu_entries, cpu); | ||
1968 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
1969 | struct hrtimer *slack_timer = &hrt_server->slack_timer; | ||
1970 | |||
1971 | server_init(&hrt_server->server, &server_domain, | ||
1972 | cpu, wcet, period, 1); | ||
1973 | server_slack_create(&hrt_server->server); | ||
1974 | hrt_server->no_slack = 0; | ||
1975 | hrt_server->ready = 1; | ||
1976 | hrt_server->server.type = S_HRT; | ||
1977 | |||
1978 | edf_domain_init(&hrt_server->hrt_domain, NULL, | ||
1979 | release_hrt_jobs); | ||
1980 | |||
1981 | hrtimer_init(slack_timer, | ||
1982 | CLOCK_MONOTONIC, | ||
1983 | HRTIMER_MODE_ABS); | ||
1984 | slack_timer->function = slack_timer_fire; | ||
1985 | |||
1986 | return 0; | ||
1987 | } | ||
1988 | |||
1989 | /* | ||
1990 | * Print all HRT servers to a proc entry. | ||
1991 | */ | ||
1992 | static void list_hrt_servers(server_proc_t *proc) | ||
1993 | { | ||
1994 | cpu_entry_t *entry; | ||
1995 | hrt_server_t *hrt_server; | ||
1996 | int cpu; | ||
1997 | |||
1998 | for_each_online_cpu(cpu) { | ||
1999 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2000 | hrt_server = &entry->hrt_server; | ||
2001 | list_server(&hrt_server->server, cpu, proc); | ||
2002 | } | ||
2003 | } | ||
2004 | |||
2005 | /* | ||
2006 | * Stops all HRT server timers and resets all fields to 0. | ||
2007 | */ | ||
2008 | static void stop_hrt_servers(void) | ||
2009 | { | ||
2010 | int cpu; | ||
2011 | cpu_entry_t *entry; | ||
2012 | hrt_server_t *hrt_server; | ||
2013 | |||
2014 | for_each_online_cpu(cpu) { | ||
2015 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2016 | hrt_server = &entry->hrt_server; | ||
2017 | |||
2018 | if (hrt_server->server.data) | ||
2019 | server_slack_destroy(&hrt_server->server); | ||
2020 | slack_timer_cancel(hrt_server); | ||
2021 | |||
2022 | hrt_server->no_slack = 0; | ||
2023 | hrt_server->ready = 0; | ||
2024 | hrt_server->server.period = 0; | ||
2025 | hrt_server->server.wcet = 0; | ||
2026 | } | ||
2027 | } | ||
2028 | |||
2029 | /* | ||
2030 | * Starts timers used to manage servers. | ||
2031 | */ | ||
2032 | static void start_servers(lt_t time) | ||
2033 | { | ||
2034 | int cpu; | ||
2035 | cpu_entry_t *entry; | ||
2036 | server_t *server; | ||
2037 | server_t *be_server; | ||
2038 | struct list_head *pos; | ||
2039 | |||
2040 | TRACE_SUB("starting servers at %llu", time); | ||
2041 | |||
2042 | /* Start HRT servers */ | ||
2043 | for_each_online_cpu(cpu) { | ||
2044 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2045 | server = &entry->hrt_server.server; | ||
2046 | |||
2047 | if (!check_hrt_server_initialized(&entry->hrt_server)) | ||
2048 | continue; | ||
2049 | |||
2050 | /* Cause a catchup later */ | ||
2051 | server_release_at(server, time - server->period); | ||
2052 | entry->hrt_server.ready = 1; | ||
2053 | |||
2054 | TRACE("Setting up cpu %d to have timer deadline %llu\n", | ||
2055 | cpu, TIME(server->deadline)); | ||
2056 | } | ||
2059 | |||
2060 | /* Start BE servers */ | ||
2061 | list_for_each(pos, &be_servers) { | ||
2062 | be_server = list_entry(pos, server_t, list); | ||
2063 | |||
2064 | if (!bheap_node_in_heap(be_server->hn)) | ||
2065 | bheap_insert(server_order, &be_ready_servers, be_server->hn); | ||
2066 | |||
2067 | /* Cause a catchup later */ | ||
2068 | server_release_at(be_server, time - be_server->period); | ||
2069 | |||
2070 | TRACE("Releasing BE server %d\n", be_server->id); | ||
2071 | TRACE_SERVER_SUB(be_server, "inserting be server"); | ||
2072 | } | ||
2073 | } | ||
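The "catchup" releases work because server_release_at() sets the deadline and server_release() then advances it by one period (see litmus/servers.c later in this patch): releasing at time - period leaves release = time - period and deadline = time, so the server's first real period starts exactly at start-up rather than one period late. The bookkeeping, restated in plain C:

    typedef unsigned long long lt_t;

    struct server { lt_t wcet, period, budget, release, deadline; int job_no; };

    /* Same arithmetic as server_release()/server_release_at() in
     * litmus/servers.c.
     */
    static void server_release(struct server *s)
    {
            s->budget    = s->wcet;
            s->release   = s->deadline;
            s->deadline += s->period;
            ++s->job_no;
    }

    static void server_release_at(struct server *s, lt_t time)
    {
            s->deadline = time;
            server_release(s);
    }

    /* start_servers() does: server_release_at(s, now - s->period);
     * which yields release = now - period and deadline = now.
     */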
2074 | |||
2075 | /****************************************************************************** | ||
2076 | * Plugin methods | ||
2077 | ******************************************************************************/ | ||
2078 | |||
2079 | static long edf_hsb_activate_plugin(void) | ||
2080 | { | ||
2081 | int cpu; | ||
2082 | cpu_entry_t *entry; | ||
2083 | #ifdef CONFIG_RELEASE_MASTER | ||
2084 | edf_hsb_release_master = atomic_read(&release_master_cpu); | ||
2085 | #else | ||
2086 | edf_hsb_release_master = NO_CPU; | ||
2087 | #endif | ||
2088 | server_domain.release_master = edf_hsb_release_master; | ||
2089 | |||
2090 | for_each_online_cpu(cpu) { | ||
2091 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2092 | #ifdef CONFIG_RELEASE_MASTER | ||
2093 | if (cpu != edf_hsb_release_master) | ||
2094 | #endif | ||
2095 | update_cpu_position(entry); | ||
2096 | } | ||
2097 | |||
2098 | start_servers(litmus_clock()); | ||
2099 | |||
2100 | TRACE("activating EDF-HSB plugin.\n"); | ||
2101 | return 0; | ||
2102 | } | ||
2103 | |||
2104 | /* | ||
2105 | * Requires a processor be specified for any HRT task run on the system. | ||
2106 | */ | ||
2107 | static long edf_hsb_admit_task(struct task_struct *task) | ||
2108 | { | ||
2109 | cpu_entry_t *entry = task_sched_entry(task); | ||
2110 | |||
2111 | TRACE_TASK(task, "Admitting\n"); | ||
2112 | |||
2113 | if (is_hrt(task)) { | ||
2114 | return check_hrt_server_initialized(&entry->hrt_server) && | ||
2115 | ((task_cpu(task) == task->rt_param.task_params.cpu) && | ||
2116 | (task_cpu(task) == entry->cpu)) ? 0 : -EINVAL; | ||
2117 | } else { | ||
2118 | /* If the task is not HRT, we don't want to force the user | ||
2119 | * to specify a CPU. | ||
2120 | */ | ||
2121 | return 0; | ||
2122 | } | ||
2123 | } | ||
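Only HRT tasks face an admission test: the CPU named in the task parameters must match where the task already runs, and that CPU's server must be configured. As an isolated predicate (a sketch; the -EINVAL error path is omitted):

    #include <stdbool.h>

    static bool admit_ok(bool task_is_hrt, bool server_initialized,
                         int task_cpu, int param_cpu, int entry_cpu)
    {
            if (!task_is_hrt)
                    return true;    /* SRT/BE tasks need not name a CPU */
            return server_initialized &&
                   task_cpu == param_cpu && task_cpu == entry_cpu;
    }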
2124 | |||
2125 | /* | ||
2126 | * Stops all servers from running. | ||
2127 | */ | ||
2128 | static long edf_hsb_deactivate_plugin(void) | ||
2129 | { | ||
2130 | cpu_entry_t *cpu_entry; | ||
2131 | hrt_server_t *hrt_server; | ||
2132 | unsigned long flags; | ||
2133 | int cpu; | ||
2134 | |||
2135 | local_irq_save(flags); | ||
2136 | |||
2137 | for_each_online_cpu(cpu) { | ||
2138 | cpu_entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2139 | hrt_server = &cpu_entry->hrt_server; | ||
2140 | |||
2141 | slack_timer_cancel(hrt_server); | ||
2142 | |||
2143 | if (likely(bheap_node_in_heap(cpu_entry->hn))) | ||
2144 | bheap_delete(server_order, &cpu_heap, cpu_entry->hn); | ||
2145 | } | ||
2146 | |||
2147 | local_irq_restore(flags); | ||
2148 | |||
2149 | return 0; | ||
2150 | } | ||
2151 | |||
2152 | static void edf_hsb_task_block(struct task_struct *task) | ||
2153 | { | ||
2154 | unsigned long flags; | ||
2155 | cpu_entry_t *entry = task_sched_entry(task); | ||
2156 | struct task_struct *linked; | ||
2157 | server_t *linked_server; | ||
2158 | |||
2159 | TRACE_TASK(task, "block at %llu in state %ld\n", | ||
2160 | litmus_clock(), task->state); | ||
2161 | set_rt_flags(task, RT_F_BLOCK); | ||
2162 | |||
2163 | raw_spin_lock_irqsave(global_lock, flags); | ||
2164 | |||
2165 | linked = entry->linked; | ||
2166 | linked_server = entry->linked_server; | ||
2167 | |||
2168 | unlink(task); | ||
2169 | |||
2170 | /* TODO: necessary? */ | ||
2171 | if (task == linked) { | ||
2172 | check_donate_slack(linked_server, task); | ||
2173 | } | ||
2174 | |||
2175 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2176 | } | ||
2177 | |||
2178 | /* | ||
2179 | * A task leaves the system. | ||
2180 | */ | ||
2181 | static void edf_hsb_task_exit(struct task_struct *task) | ||
2182 | { | ||
2183 | unsigned long flags; | ||
2184 | cpu_entry_t *entry = task_sched_entry(task); | ||
2185 | |||
2186 | BUG_ON(!is_realtime(task)); | ||
2187 | TRACE_TASK(task, "RIP at %llu on P%d\n", | ||
2188 | TIME(litmus_clock()), tsk_rt(task)->scheduled_on); | ||
2189 | |||
2190 | raw_spin_lock_irqsave(global_lock, flags); | ||
2191 | |||
2192 | unlink(task); | ||
2193 | if (tsk_rt(task)->scheduled_on != NO_CPU) { | ||
2194 | entry->scheduled = NULL; | ||
2195 | tsk_rt(task)->scheduled_on = NO_CPU; | ||
2196 | } | ||
2197 | if (is_srt(task)) { | ||
2198 | server_slack_destroy(task_srt_server(task)); | ||
2199 | server_destroy(task_srt_server(task)); | ||
2200 | server_free(task_srt_server(task)); | ||
2201 | task_data_free(tsk_rt(task)->plugin_data); | ||
2202 | } | ||
2203 | |||
2204 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2205 | } | ||
2206 | |||
2207 | /* | ||
2208 | * Attempts to determine the current scheduler state, then selects the | ||
2209 | * next task and updates the scheduler state. | ||
2210 | */ | ||
2211 | static struct task_struct* edf_hsb_schedule(struct task_struct *prev) | ||
2212 | { | ||
2213 | unsigned long flags; | ||
2214 | int blocks, preempted, sleep, was_slack, np, hrt_preempt, donated; | ||
2215 | struct task_struct *curr; | ||
2216 | cpu_entry_t *entry = local_cpu_entry; | ||
2217 | |||
2218 | #ifdef CONFIG_RELEASE_MASTER | ||
2219 | /* Bail out early if we are the release master. | ||
2220 | * The release master never schedules any real-time tasks. | ||
2221 | */ | ||
2222 | if (edf_hsb_release_master == entry->cpu) { | ||
2223 | sched_state_task_picked(); | ||
2224 | return NULL; | ||
2225 | } | ||
2226 | #endif | ||
2227 | |||
2228 | raw_spin_lock_irqsave(global_lock, flags); | ||
2229 | |||
2230 | curr = entry->scheduled; | ||
2231 | |||
2232 | if (entry->scheduled && !is_realtime(prev)) { | ||
2233 | TRACE_TASK_SUB(entry->scheduled, "Stack deadlock!"); | ||
2234 | } | ||
2235 | |||
2236 | TRACE("server_budget: %llu, server_deadline: %llu, " | ||
2237 | "curr_time: %llu, no_slack: %d, ready: %d\n", | ||
2238 | TIME(entry->hrt_server.server.budget), | ||
2239 | TIME(entry->hrt_server.server.deadline), | ||
2240 | TIME(litmus_clock()), entry->hrt_server.no_slack, | ||
2241 | entry->hrt_server.ready); | ||
2242 | |||
2243 | /* Determine state */ | ||
2244 | blocks = curr && !is_running(curr); | ||
2245 | preempted = entry->scheduled != entry->linked; | ||
2246 | sleep = curr && get_rt_flags(curr) == RT_F_SLEEP; | ||
2247 | was_slack = !list_empty(&slack_queue); | ||
2248 | np = curr && is_np(curr); | ||
2249 | |||
2250 | TRACE("blocks: %d, preempted: %d, sleep: %d, np: %d\n", | ||
2251 | blocks, preempted, sleep, np); | ||
2252 | if (blocks) | ||
2253 | unlink(entry->scheduled); | ||
2254 | |||
2255 | /* If the task has gone to sleep or exhausted its budget, it | ||
2256 | * must complete its current job. | ||
2257 | */ | ||
2258 | if (sleep && !blocks && !preempted) | ||
2259 | job_completion(entry, entry->scheduled); | ||
2260 | |||
2261 | /* Pick the next task if there isn't one currently */ | ||
2262 | if (!entry->linked) | ||
2263 | edf_hsb_pick_next(entry); | ||
2264 | |||
2265 | /* Set task states */ | ||
2266 | if (entry->linked != entry->scheduled) { | ||
2267 | if (entry->linked) | ||
2268 | entry->linked->rt_param.scheduled_on = entry->cpu; | ||
2269 | if (entry->scheduled) | ||
2270 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
2271 | } | ||
2272 | |||
2273 | entry->scheduled = entry->linked; | ||
2274 | entry->scheduled_server = entry->linked_server; | ||
2275 | sched_state_task_picked(); | ||
2276 | |||
2277 | /* A non-HRT task was preempted by an HRT task. Because of the way linking | ||
2278 | * works, it cannot link itself to anything else until the non-migratory | ||
2279 | * HRT task is scheduled. | ||
2280 | */ | ||
2281 | hrt_preempt = preempted && entry->linked && curr && | ||
2282 | is_hrt(entry->linked) && !is_hrt(curr); | ||
2283 | /* A server just donated slack */ | ||
2284 | donated = entry->linked && entry->linked_server->type != S_SLACK && | ||
2285 | head_in_list(&server_slack(entry->linked_server)->list); | ||
2286 | |||
2287 | if (hrt_preempt || donated) | ||
2288 | check_for_global_preempt(); | ||
2289 | |||
2290 | if (entry->scheduled) | ||
2291 | TRACE_TASK(entry->scheduled, "scheduled at %llu\n", | ||
2292 | TIME(litmus_clock())); | ||
2293 | else | ||
2294 | TRACE("NULL scheduled at %llu\n", TIME(litmus_clock())); | ||
2295 | |||
2296 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2297 | |||
2298 | if (!entry->scheduled && !next_eligible_slack_server()) { | ||
2299 | TRACE_SUB("A slack server has disappeared!"); | ||
2300 | } | ||
2301 | |||
2302 | return entry->scheduled; | ||
2303 | } | ||
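The state bits derived at the top of the function drive one key decision: a job is completed here only when it signalled completion itself and was neither blocked (blocked tasks are simply unlinked) nor preempted away. In miniature:

    #include <stdbool.h>

    static bool must_complete_job(bool sleep, bool blocks, bool preempted)
    {
            return sleep && !blocks && !preempted;
    }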
2304 | |||
2305 | /* | ||
2306 | * Prepare a task for running in RT mode | ||
2307 | */ | ||
2308 | static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running) | ||
2309 | { | ||
2310 | unsigned long flags; | ||
2311 | task_data_t *data; | ||
2312 | server_t *srt_server = NULL; | ||
2313 | cpu_entry_t *entry = task_sched_entry(task); | ||
2314 | |||
2315 | TRACE_TASK(task, "edf_hsb: task new at %llu\n", TIME(litmus_clock())); | ||
2316 | |||
2317 | raw_spin_lock_irqsave(global_lock, flags); | ||
2318 | |||
2319 | /* Setup job parameters */ | ||
2320 | release_at(task, litmus_clock()); | ||
2321 | |||
2322 | /* Create SRT server */ | ||
2323 | if (is_srt(task)) { | ||
2325 | srt_server = server_alloc(GFP_ATOMIC); | ||
2326 | server_init(srt_server, &server_domain, | ||
2327 | task->pid, get_exec_cost(task), | ||
2328 | get_rt_period(task), 0); | ||
2329 | srt_server->type = S_SRT; | ||
2330 | |||
2331 | server_slack_create(srt_server); | ||
2332 | |||
2333 | } | ||
2334 | |||
2335 | /* Create task plugin data */ | ||
2336 | data = task_data_alloc(GFP_ATOMIC); | ||
2337 | data->owner = task; | ||
2338 | data->srt_server = srt_server; | ||
2339 | INIT_LIST_HEAD(&data->candidate_list); | ||
2340 | tsk_rt(task)->plugin_data = data; | ||
2341 | |||
2342 | /* Already running, update the cpu entry. | ||
2343 | * This tends to happen when the first tasks enter the system. | ||
2344 | */ | ||
2345 | if (running) { | ||
2347 | |||
2348 | #ifdef CONFIG_RELEASE_MASTER | ||
2349 | if (entry->cpu != edf_hsb_release_master) { | ||
2350 | #endif | ||
2351 | entry->scheduled = task; | ||
2352 | tsk_rt(task)->scheduled_on = task_cpu(task); | ||
2353 | #ifdef CONFIG_RELEASE_MASTER | ||
2354 | } else { | ||
2355 | /* do not schedule on release master */ | ||
2356 | /* Cannot preempt! Causing a preemption with a BE task | ||
2357 | * somehow leads to that task never blocking during | ||
2358 | * a synchronous release. This is a bug! | ||
2359 | */ | ||
2360 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
2361 | tsk_rt(task)->scheduled_on = NO_CPU; | ||
2362 | } | ||
2363 | #endif | ||
2364 | } else { | ||
2365 | task->rt_param.scheduled_on = NO_CPU; | ||
2366 | } | ||
2367 | |||
2368 | task->rt_param.linked_on = NO_CPU; | ||
2369 | job_arrival(task, entry); | ||
2370 | |||
2371 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2372 | } | ||
2373 | |||
2374 | static void edf_hsb_task_wake_up(struct task_struct *task) | ||
2375 | { | ||
2376 | lt_t now; | ||
2377 | unsigned long flags; | ||
2378 | cpu_entry_t *entry = task_sched_entry(task); | ||
2379 | |||
2381 | TRACE_TASK(task, "wake_up at %llu on %d, %d\n", TIME(litmus_clock()), | ||
2382 | task_cpu(task), task->rt_param.task_params.cpu); | ||
2383 | |||
2384 | raw_spin_lock_irqsave(global_lock, flags); | ||
2385 | |||
2386 | if (!is_be(task)) { | ||
2387 | if (is_srt(task)) { | ||
2388 | catchup_srt_server(task); | ||
2389 | } | ||
2390 | |||
2391 | /* Non-BE tasks are not sporadic in this model */ | ||
2392 | set_rt_flags(task, RT_F_RUNNING); | ||
2393 | /* The job blocked while it was being run by a slack server */ | ||
2394 | if (is_queued(task)) { | ||
2395 | check_slack_candidate(task); | ||
2396 | goto out; | ||
2397 | } | ||
2398 | } else { | ||
2399 | /* Re-release all BE tasks on wake-up */ | ||
2400 | now = litmus_clock(); | ||
2401 | |||
2402 | if (is_tardy(task, now)) { | ||
2403 | release_at(task, now); | ||
2404 | sched_trace_task_release(task); | ||
2405 | } | ||
2406 | } | ||
2407 | |||
2408 | job_arrival(task, entry); | ||
2409 | |||
2410 | out: | ||
2411 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2412 | } | ||
2413 | |||
2414 | /* | ||
2415 | * Unused. | ||
2416 | */ | ||
2417 | static void edf_hsb_tick(struct task_struct *t) | ||
2418 | { | ||
2419 | } | ||
2420 | |||
2421 | |||
2422 | /****************************************************************************** | ||
2423 | * Plugin | ||
2424 | ******************************************************************************/ | ||
2425 | |||
2426 | static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp = { | ||
2427 | .plugin_name = "EDF-HSB-NOSLACK", | ||
2428 | |||
2429 | .activate_plugin = edf_hsb_activate_plugin, | ||
2430 | .deactivate_plugin = edf_hsb_deactivate_plugin, | ||
2431 | |||
2432 | .schedule = edf_hsb_schedule, | ||
2433 | .admit_task = edf_hsb_admit_task, | ||
2434 | .task_block = edf_hsb_task_block, | ||
2435 | .task_exit = edf_hsb_task_exit, | ||
2436 | .task_new = edf_hsb_task_new, | ||
2437 | .task_wake_up = edf_hsb_task_wake_up, | ||
2438 | .tick = edf_hsb_tick, | ||
2439 | |||
2440 | /* From jobs.h */ | ||
2441 | .complete_job = complete_job, | ||
2442 | .release_at = release_at, | ||
2443 | }; | ||
2444 | |||
2445 | static int __init init_edf_hsb(void) | ||
2446 | { | ||
2447 | cpu_entry_t *entry; | ||
2448 | hrt_server_t *hrt_server; | ||
2449 | server_t *idle_slack; | ||
2450 | int rv, cpu; | ||
2451 | |||
2452 | rv = register_sched_plugin(&edf_hsb_plugin); | ||
2453 | if (rv) { | ||
2454 | printk(KERN_ERR "Could not register plugin %s.\n", | ||
2455 | edf_hsb_plugin.plugin_name); | ||
2456 | goto out; | ||
2457 | } | ||
2458 | |||
2459 | rv = make_plugin_proc_dir(&edf_hsb_plugin, &edf_hsb_proc_dir); | ||
2460 | if (rv) { | ||
2461 | printk(KERN_ERR "Could not create %s procfs dir.\n", | ||
2462 | edf_hsb_plugin.plugin_name); | ||
2463 | goto out; | ||
2464 | } | ||
2465 | |||
2466 | |||
2467 | task_data_cache = KMEM_CACHE(task_data, SLAB_PANIC); | ||
2468 | |||
2469 | /* Global domains */ | ||
2470 | edf_domain_init(&srt_domain, NULL, release_srt_jobs); | ||
2471 | rt_domain_init(&be_domain, be_ready_order, | ||
2472 | NULL, release_be_jobs); | ||
2473 | server_domain_init(&server_domain, servers_released, | ||
2474 | server_completed, NO_CPU, global_lock); | ||
2475 | |||
2476 | /* Server proc interfaces */ | ||
2477 | server_proc_init(&server_domain, | ||
2478 | edf_hsb_proc_dir, BE_PROC_NAME, | ||
2479 | admit_be_server, list_be_servers, | ||
2480 | stop_be_servers); | ||
2481 | server_proc_init(&server_domain, | ||
2482 | edf_hsb_proc_dir, HRT_PROC_NAME, | ||
2483 | admit_hrt_server, list_hrt_servers, | ||
2484 | stop_hrt_servers); | ||
2485 | |||
2486 | |||
2487 | /* Global collections */ | ||
2488 | bheap_init(&cpu_heap); | ||
2489 | bheap_init(&be_ready_servers); | ||
2490 | INIT_LIST_HEAD(&be_servers); | ||
2491 | INIT_LIST_HEAD(&slack_queue); | ||
2492 | INIT_LIST_HEAD(&slack_candidates); | ||
2493 | |||
2494 | for_each_online_cpu(cpu) { | ||
2495 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2496 | hrt_server = &entry->hrt_server; | ||
2497 | |||
2498 | idle_slack = server_alloc(GFP_ATOMIC); | ||
2499 | server_init(idle_slack, &server_domain, | ||
2500 | IDLE_SLACK_BASE + cpu, | ||
2501 | LLONG_MAX, LLONG_MAX, 1); | ||
2502 | idle_slack->deadline = LLONG_MAX; | ||
2503 | idle_slack->budget = LLONG_MAX; | ||
2504 | idle_slack->job_no = 1; | ||
2505 | idle_slack->release = 1; | ||
2506 | idle_slack->type = S_SLACK; | ||
2507 | add_slack(idle_slack); | ||
2508 | |||
2509 | entry->cpu = cpu; | ||
2510 | entry->linked = NULL; | ||
2511 | entry->scheduled = NULL; | ||
2512 | entry->linked_server = NULL; | ||
2513 | |||
2514 | /* HRT server */ | ||
2515 | hrt_server->server.id = cpu; | ||
2516 | hrt_server->server.deadline = 0; | ||
2517 | hrt_server->server.period = 0; | ||
2518 | hrt_server->server.wcet = 0; | ||
2519 | hrt_server->ready = 0; | ||
2520 | |||
2521 | hrtimer_start_on_info_init(&hrt_server->slack_timer_info); | ||
2522 | |||
2523 | /* CPU entry bheap nodes */ | ||
2524 | entry->hn = &cpu_heap_node[cpu]; | ||
2525 | bheap_node_init(&entry->hn, entry); | ||
2526 | } | ||
2527 | |||
2528 | out: | ||
2529 | return rv; | ||
2530 | } | ||
2531 | |||
2532 | static void exit_edf_hsb(void) | ||
2533 | { | ||
2534 | int cpu; | ||
2535 | cpu_entry_t *entry; | ||
2536 | |||
2537 | stop_be_servers(); | ||
2538 | stop_hrt_servers(); | ||
2539 | |||
2540 | server_domain_destroy(&server_domain); | ||
2541 | |||
2542 | for_each_online_cpu(cpu) { | ||
2543 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2544 | server_slack_destroy(&entry->hrt_server.server); | ||
2545 | server_destroy(&entry->hrt_server.server); | ||
2546 | } | ||
2547 | |||
2548 | if (edf_hsb_proc_dir) { | ||
2549 | remove_plugin_proc_dir(&edf_hsb_plugin); | ||
2550 | /* TODO: is this wrong? */ | ||
2551 | edf_hsb_proc_dir = NULL; | ||
2552 | } | ||
2553 | } | ||
2554 | |||
2555 | module_init(init_edf_hsb); | ||
2556 | module_exit(exit_edf_hsb); | ||
diff --git a/litmus/servers.c b/litmus/servers.c index 5aebbe5ec1c1..37af270b5b23 100644 --- a/litmus/servers.c +++ b/litmus/servers.c | |||
@@ -1,6 +1,3 @@ | |||
1 | /* | ||
2 | * TODO: change from destroy to exit, rename server proc stuff | ||
3 | */ | ||
4 | #include <linux/hrtimer.h> | 1 | #include <linux/hrtimer.h> |
5 | #include <linux/percpu.h> | 2 | #include <linux/percpu.h> |
6 | #include <linux/sched.h> | 3 | #include <linux/sched.h> |
@@ -15,28 +12,11 @@ | |||
15 | 12 | ||
16 | #define DEBUG_SERVERS | 13 | #define DEBUG_SERVERS |
17 | 14 | ||
18 | /* Not working */ | ||
19 | /* #define COMPLETION_ON_MASTER */ | ||
20 | |||
21 | #define TIME(x) \ | 15 | #define TIME(x) \ |
22 | ({lt_t y = x; \ | 16 | ({lt_t y = x; \ |
23 | do_div(y, NSEC_PER_MSEC); \ | 17 | do_div(y, NSEC_PER_MSEC); \ |
24 | y;}) | 18 | y;}) |
25 | #ifdef DEBUG_SERVERS | 19 | #ifdef DEBUG_SERVERS |
26 | #define _TRACE_SUB(fmt, args...) \ | ||
27 | sched_trace_log_message("%d P%d -[%s@%s:%d]: " fmt "\n", \ | ||
28 | TRACE_ARGS, ## args) | ||
29 | #define TRACE_SUB(s, fmt, args...) \ | ||
30 | do {\ | ||
31 | if (is_server_linked(s)) \ | ||
32 | _TRACE_SUB(TASK_FMT " " SERVER_FMT " " fmt, \ | ||
33 | TASK_ARGS(server_task(s)), \ | ||
34 | SERVER_ARGS(s), ##args); \ | ||
35 | else \ | ||
36 | _TRACE_SUB("(NULL) " SERVER_FMT " " fmt, \ | ||
37 | SERVER_ARGS(s), ##args); \ | ||
38 | } while(0) | ||
39 | |||
40 | #define _TRACE_TIMER(fmt, args...) \ | 20 | #define _TRACE_TIMER(fmt, args...) \ |
41 | sched_trace_log_message("%d P%d*[%s@%s:%d]: " fmt " at %d\n", \ | 21 | sched_trace_log_message("%d P%d*[%s@%s:%d]: " fmt " at %d\n", \ |
42 | TRACE_ARGS, ## args, TIME(litmus_clock())) | 22 | TRACE_ARGS, ## args, TIME(litmus_clock())) |
@@ -51,8 +31,6 @@ | |||
51 | SERVER_ARGS(s), ##args); \ | 31 | SERVER_ARGS(s), ##args); \ |
52 | } while(0) | 32 | } while(0) |
53 | #else | 33 | #else |
54 | #define _TRACE_SUB(fmt, args...) | ||
55 | #define TRACE_SUB(s, fmt, args...) | ||
56 | #define TRACE_TIMER(s, fmt, args...) | 34 | #define TRACE_TIMER(s, fmt, args...) |
57 | #define _TRACE_TIMER(fmt, args...) | 35 | #define _TRACE_TIMER(fmt, args...) |
58 | #endif | 36 | #endif |
@@ -89,19 +67,15 @@ static int completion_timer_arm(server_domain_t* domain, int cpu) | |||
89 | * the server is still running / was run again. | 67 | * the server is still running / was run again. |
90 | */ | 68 | */ |
91 | if (hrtimer_active(&timer->timer)) { | 69 | if (hrtimer_active(&timer->timer)) { |
92 | TRACE_SUB(server, "cannot arm completion, already active"); | ||
93 | return 0; | 70 | return 0; |
94 | } | 71 | } |
95 | if (timer->armed) { | 72 | if (timer->armed) { |
96 | TRACE_SUB(server, "cannot arm completion, waiting for arm"); | ||
97 | return 0; | 73 | return 0; |
98 | } | 74 | } |
99 | 75 | ||
100 | if (lt_after(budget_exhausted, server->deadline)) | 76 | if (lt_after(budget_exhausted, server->deadline)) |
101 | budget_exhausted = server->deadline; | 77 | budget_exhausted = server->deadline; |
102 | 78 | ||
103 | TRACE_SUB(server, "start time: %llu", domain->start_times[cpu]); | ||
104 | |||
105 | #ifdef COMPLETION_ON_MASTER | 79 | #ifdef COMPLETION_ON_MASTER |
106 | if (domain->release_master != NO_CPU) | 80 | if (domain->release_master != NO_CPU) |
107 | on_cpu = domain->release_master; | 81 | on_cpu = domain->release_master; |
@@ -114,11 +88,6 @@ static int completion_timer_arm(server_domain_t* domain, int cpu) | |||
114 | err = hrtimer_start_on(on_cpu, &timer->info, &timer->timer, | 88 | err = hrtimer_start_on(on_cpu, &timer->info, &timer->timer, |
115 | ns_to_ktime(budget_exhausted), | 89 | ns_to_ktime(budget_exhausted), |
116 | HRTIMER_MODE_ABS_PINNED); | 90 | HRTIMER_MODE_ABS_PINNED); |
117 | if (err) { | ||
118 | TRACE_SUB(server, "failed to arm completion"); | ||
119 | } else { | ||
120 | TRACE_SUB(server, "success on P%d!", on_cpu); | ||
121 | } | ||
122 | } else if (atomic_read(&timer->info.state) == HRTIMER_START_ON_INACTIVE) { | 91 | } else if (atomic_read(&timer->info.state) == HRTIMER_START_ON_INACTIVE) {
123 | err = __hrtimer_start_range_ns(&timer->timer, | 92 | err = __hrtimer_start_range_ns(&timer->timer, |
124 | ns_to_ktime(budget_exhausted), | 93 | ns_to_ktime(budget_exhausted), |
@@ -129,11 +98,6 @@ static int completion_timer_arm(server_domain_t* domain, int cpu) | |||
129 | 98 | ||
130 | timer->armed = (err) ? 0 : 1; | 99 | timer->armed = (err) ? 0 : 1; |
131 | 100 | ||
132 | TRACE_SUB(server, "completion 0x%x and %p armed to fire at %llu, err: %d", | ||
133 | &timer->timer, | ||
134 | &timer->timer, | ||
135 | TIME(budget_exhausted), err); | ||
136 | |||
137 | return !err; | 101 | return !err; |
138 | } | 102 | } |
139 | 103 | ||
@@ -161,8 +125,6 @@ static enum hrtimer_restart completion_timer_fire(struct hrtimer *timer) | |||
161 | /* We got the lock before someone tried to re-arm. Proceed. */ | 125 | /* We got the lock before someone tried to re-arm. Proceed. */ |
162 | if (completion_timer->armed) { | 126 | if (completion_timer->armed) { |
163 | server = domain->linked_servers[cpu]; | 127 | server = domain->linked_servers[cpu]; |
164 | TRACE_SUB(server, "completed"); | ||
165 | |||
166 | was_running = server_task(server); | 128 | was_running = server_task(server); |
167 | 129 | ||
168 | server->budget = 0; | 130 | server->budget = 0; |
@@ -185,9 +147,6 @@ static enum hrtimer_restart completion_timer_fire(struct hrtimer *timer) | |||
185 | hrtimer_set_expires(timer, ns_to_ktime(budget_exhausted)); | 147 | hrtimer_set_expires(timer, ns_to_ktime(budget_exhausted)); |
186 | completion_timer->armed = 1; | 148 | completion_timer->armed = 1; |
187 | 149 | ||
188 | TRACE_SUB(server, "rearming on P%d at %llu", | ||
189 | cpu, TIME(budget_exhausted)); | ||
190 | |||
191 | rv = HRTIMER_RESTART; | 150 | rv = HRTIMER_RESTART; |
192 | } else { | 151 | } else { |
193 | completion_timer->armed = 0; | 152 | completion_timer->armed = 0; |
@@ -283,14 +242,6 @@ void server_run(server_t *server, struct task_struct *task) | |||
283 | int armed, cpu = task->rt_param.linked_on; | 242 | int armed, cpu = task->rt_param.linked_on; |
284 | server_domain_t *domain = server->domain; | 243 | server_domain_t *domain = server->domain; |
285 | 244 | ||
286 | TRACE_SUB(server, "running on cpu P%d", task->rt_param.linked_on); | ||
287 | |||
288 | BUG_ON(is_server_linked(server)); | ||
289 | BUG_ON(server->cpu != NO_CPU); | ||
290 | BUG_ON(cpu == NO_CPU); | ||
291 | BUG_ON(domain->linked_servers[cpu]); | ||
292 | BUG_ON(domain->linked_tasks[cpu]); | ||
293 | |||
294 | server->cpu = cpu; | 245 | server->cpu = cpu; |
295 | 246 | ||
296 | domain->linked_servers[cpu] = server; | 247 | domain->linked_servers[cpu] = server; |
@@ -309,25 +260,18 @@ void server_stop(server_t *server) | |||
309 | server_domain_t *domain = server->domain; | 260 | server_domain_t *domain = server->domain; |
310 | 261 | ||
311 | if (!is_server_linked(server)) { | 262 | if (!is_server_linked(server)) { |
312 | TRACE_SUB(server, "already stopped"); | ||
313 | return; | 263 | return; |
314 | } | 264 | } |
315 | 265 | ||
316 | cpu = server->cpu; | 266 | cpu = server->cpu; |
317 | BUG_ON(cpu == NO_CPU); | 267 | BUG_ON(cpu == NO_CPU); |
318 | 268 | ||
319 | TRACE_SUB(server, "stopping server, start: %llu, end: %llu", | ||
320 | domain->start_times[cpu], now); | ||
321 | |||
322 | /* Calculate remaining budget */ | 269 | /* Calculate remaining budget */ |
323 | elapsed_time = lt_subtract(now, domain->start_times[cpu]); | 270 | elapsed_time = lt_subtract(now, domain->start_times[cpu]); |
324 | server->budget = lt_subtract(server->budget, elapsed_time); | 271 | server->budget = lt_subtract(server->budget, elapsed_time); |
325 | 272 | ||
326 | server->cpu = NO_CPU; | 273 | server->cpu = NO_CPU; |
327 | 274 | ||
328 | TRACE_SUB(server, "new budget: %llu", TIME(server->budget)); | ||
329 | BUG_ON(domain->linked_servers[cpu] != server); | ||
330 | |||
331 | /* Set domain state */ | 275 | /* Set domain state */ |
332 | domain->completion_timers[cpu].armed = 0; | 276 | domain->completion_timers[cpu].armed = 0; |
333 | domain->linked_servers[cpu] = NULL; | 277 | domain->linked_servers[cpu] = NULL; |
@@ -337,18 +281,11 @@ void server_stop(server_t *server) | |||
337 | 281 | ||
338 | void server_release(server_t *server) | 282 | void server_release(server_t *server) |
339 | { | 283 | { |
340 | BUG_ON(!server->deadline); | ||
341 | |||
342 | server->budget = server->wcet; | 284 | server->budget = server->wcet; |
343 | server->release = server->deadline; | 285 | server->release = server->deadline; |
344 | server->deadline += server->period; | 286 | server->deadline += server->period; |
345 | ++server->job_no; | 287 | ++server->job_no; |
346 | 288 | ||
347 | TRACE_SUB(server, "budget: %llu, release: %llu," | ||
348 | "deadline: %llu, period: %llu, job: %d", | ||
349 | TIME(server->budget), TIME(server->release), TIME(server->deadline), | ||
350 | TIME(server->period), server->job_no); | ||
351 | |||
352 | /* Need to reset for budget calculations */ | 289 | /* Need to reset for budget calculations */ |
353 | if (is_server_linked(server)) | 290 | if (is_server_linked(server)) |
354 | server->domain->start_times[server->cpu] = litmus_clock(); | 291 | server->domain->start_times[server->cpu] = litmus_clock(); |
@@ -358,8 +295,6 @@ void server_release_at(server_t *server, lt_t time) | |||
358 | { | 295 | { |
359 | server->deadline = time; | 296 | server->deadline = time; |
360 | server_release(server); | 297 | server_release(server); |
361 | |||
362 | TRACE_SUB(server, "releasing at %llu", time); | ||
363 | } | 298 | } |
364 | 299 | ||
365 | /****************************************************************************** | 300 | /****************************************************************************** |
@@ -643,24 +578,17 @@ static enum hrtimer_restart release_servers_fire(struct hrtimer *timer) | |||
643 | unsigned long flags; | 578 | unsigned long flags; |
644 | server_release_heap_t *rh; | 579 | server_release_heap_t *rh; |
645 | 580 | ||
646 | _TRACE_SUB("on_release_timer(0x%p) starts.", timer); | ||
647 | |||
648 | rh = container_of(timer, server_release_heap_t, timer); | 581 | rh = container_of(timer, server_release_heap_t, timer); |
649 | 582 | ||
650 | raw_spin_lock_irqsave(&rh->domain->release_lock, flags); | 583 | raw_spin_lock_irqsave(&rh->domain->release_lock, flags); |
651 | _TRACE_SUB("CB has the release_lock"); | ||
652 | 584 | ||
653 | /* Remove from release queue */ | 585 | /* Remove from release queue */ |
654 | list_del(&rh->list); | 586 | list_del(&rh->list); |
655 | 587 | ||
656 | raw_spin_unlock_irqrestore(&rh->domain->release_lock, flags); | 588 | raw_spin_unlock_irqrestore(&rh->domain->release_lock, flags); |
657 | _TRACE_SUB("CB returned release_lock"); | ||
658 | 589 | ||
659 | /* Call release callback */ | 590 | /* Call release callback */ |
660 | rh->domain->servers_released(&rh->servers); | 591 | rh->domain->servers_released(&rh->servers); |
661 | /* WARNING: rh can be referenced from other CPUs from now on. */ | ||
662 | |||
663 | _TRACE_SUB("on_release_timer(0x%p) ends.", timer); | ||
664 | 592 | ||
665 | return HRTIMER_NORESTART; | 593 | return HRTIMER_NORESTART; |
666 | } | 594 | } |
@@ -680,8 +608,6 @@ static server_release_heap_t* get_release_heap(server_domain_t *rt, | |||
680 | lt_t release_time = server->release; | 608 | lt_t release_time = server->release; |
681 | unsigned int slot = time2slot(release_time); | 609 | unsigned int slot = time2slot(release_time); |
682 | 610 | ||
683 | _TRACE_SUB("searching for release time %llu", release_time); | ||
684 | |||
685 | /* Initialize pos for the case that the list is empty */ | 611 | /* Initialize pos for the case that the list is empty */ |
686 | pos = rt->release_queue[slot].next; | 612 | pos = rt->release_queue[slot].next; |
687 | list_for_each(pos, &rt->release_queue[slot]) { | 613 | list_for_each(pos, &rt->release_queue[slot]) { |
@@ -739,8 +665,6 @@ static int reinit_release_heap(server_t *server) | |||
739 | /* Under no circumstances should the timer have been active | 665 | /* Under no circumstances should the timer have been active
740 | * but not running. | 666 | * but not running. |
741 | */ | 667 | */ |
742 | /* TODO: stop living dangerously */ | ||
743 | //BUG_ON(rv == 1); | ||
744 | rv = 1; | 668 | rv = 1; |
745 | 669 | ||
746 | /* initialize */ | 670 | /* initialize */ |
@@ -761,7 +685,6 @@ static int arm_release_timer(server_domain_t *domain) | |||
761 | server_t *server; | 685 | server_t *server; |
762 | server_release_heap_t *rh; | 686 | server_release_heap_t *rh; |
763 | 687 | ||
764 | _TRACE_SUB("arm_release_timer() at %llu", litmus_clock()); | ||
765 | list_replace_init(&domain->tobe_released, &list); | 688 | list_replace_init(&domain->tobe_released, &list); |
766 | 689 | ||
767 | list_for_each_safe(pos, safe, &list) { | 690 | list_for_each_safe(pos, safe, &list) { |
@@ -771,45 +694,34 @@ static int arm_release_timer(server_domain_t *domain) | |||
771 | 694 | ||
772 | /* Put into release heap while holding release_lock */ | 695 | /* Put into release heap while holding release_lock */ |
773 | raw_spin_lock(&domain->release_lock); | 696 | raw_spin_lock(&domain->release_lock); |
774 | TRACE_SUB(server, "I have the release_lock"); | ||
775 | 697 | ||
776 | rh = get_release_heap(domain, server, 0); | 698 | rh = get_release_heap(domain, server, 0); |
777 | if (!rh) { | 699 | if (!rh) { |
778 | /* Need to use our own, but drop lock first */ | 700 | /* Need to use our own, but drop lock first */ |
779 | raw_spin_unlock(&domain->release_lock); | 701 | raw_spin_unlock(&domain->release_lock); |
780 | TRACE_SUB(server, "Dropped release_lock"); | ||
781 | 702 | ||
782 | rv = reinit_release_heap(server); | 703 | rv = reinit_release_heap(server); |
783 | 704 | ||
784 | /* Bail! We missed the release time */ | 705 | /* Bail! We missed the release time */ |
785 | if (!rv) { | 706 | if (!rv) { |
786 | TRACE_SUB(server, "missed release"); | ||
787 | rv = 0; | 707 | rv = 0; |
788 | goto out; | 708 | goto out; |
789 | } | 709 | } |
790 | 710 | ||
791 | TRACE_SUB(server, "release_heap ready"); | ||
792 | |||
793 | raw_spin_lock(&domain->release_lock); | 711 | raw_spin_lock(&domain->release_lock); |
794 | TRACE_SUB(server, "Re-acquired release_lock"); | ||
795 | 712 | ||
796 | rh = get_release_heap(domain, server, 1); | 713 | rh = get_release_heap(domain, server, 1); |
797 | } | 714 | } |
798 | 715 | ||
799 | list_add(&server->release_list, &rh->servers); | 716 | list_add(&server->release_list, &rh->servers); |
800 | TRACE_SUB(server, "arm_release_timer(): added to release heap"); | ||
801 | 717 | ||
802 | raw_spin_unlock(&domain->release_lock); | 718 | raw_spin_unlock(&domain->release_lock); |
803 | TRACE_SUB(server, "Returned the release_lock"); | ||
804 | 719 | ||
805 | /* To avoid arming the timer multiple times, we only let the | 720 | /* To avoid arming the timer multiple times, we only let the |
806 | * owner do the arming (which is the "first" task to reference | 721 | * owner do the arming (which is the "first" task to reference |
807 | * this release_heap anyway). | 722 | * this release_heap anyway). |
808 | */ | 723 | */ |
809 | if (rh == server->release_heap) { | 724 | if (rh == server->release_heap) { |
810 | TRACE_SUB(server, "arming timer 0x%p at %llu on P%d", | ||
811 | &rh->timer, rh->release_time, | ||
812 | domain->release_master); | ||
813 | /* We cannot arm the timer using hrtimer_start() | 725 | /* We cannot arm the timer using hrtimer_start() |
814 | * as it may deadlock on rq->lock | 726 | * as it may deadlock on rq->lock |
815 | * | 727 | * |
@@ -825,8 +737,7 @@ static int arm_release_timer(server_domain_t *domain) | |||
825 | ns_to_ktime(rh->release_time), | 737 | ns_to_ktime(rh->release_time), |
826 | HRTIMER_MODE_ABS_PINNED); | 738 | HRTIMER_MODE_ABS_PINNED); |
827 | } | 739 | } |
828 | } else | 740 | } |
829 | TRACE_SUB(server, "0x%p is not my timer", &rh->timer); | ||
830 | } | 741 | } |
831 | out: | 742 | out: |
832 | return rv; | 743 | return rv; |
@@ -834,7 +745,6 @@ static int arm_release_timer(server_domain_t *domain) | |||
834 | 745 | ||
835 | int add_server_release(server_t *server, server_domain_t *domain) | 746 | int add_server_release(server_t *server, server_domain_t *domain) |
836 | { | 747 | { |
837 | TRACE_SUB(server, "adding to release at %llu", server->release); | ||
838 | list_add(&server->release_list, &domain->tobe_released); | 748 | list_add(&server->release_list, &domain->tobe_released); |
839 | return arm_release_timer(domain); | 749 | return arm_release_timer(domain); |
840 | } | 750 | } |
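The "only the owner arms" rule in arm_release_timer() generalizes: when many objects funnel into one shared timer structure, electing a single owner prevents double-arming without extra locking. A reduced sketch (the real code compares rh against server->release_heap; the owner field here is an invented stand-in):

    struct release_heap_sketch { const void *owner; int armed; };

    static void maybe_arm(struct release_heap_sketch *rh, const void *me)
    {
            if (rh->owner != me || rh->armed)
                    return;         /* someone else owns the timer */
            rh->armed = 1;          /* hrtimer_start_on(...) would go here */
    }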