author     Jonathan Herman <hermanjl@cs.unc.edu>    2011-04-14 15:47:40 -0400
committer  Jonathan Herman <hermanjl@cs.unc.edu>    2011-04-14 15:47:40 -0400
commit     0317947a4d7fa886d90ec500444446894726f680 (patch)
tree       075bfcb0d38490db8212dcab906e032b8b104d57
parent     150425d2e08f46dd25547d397b70835598dfffe1 (diff)
Bug fixes
-rw-r--r--  kernel/hrtimer.c       |  4
-rw-r--r--  litmus/litmus.c        | 12
-rw-r--r--  litmus/sched_edf_hsb.c | 86
-rw-r--r--  litmus/servers.c       |  7
4 files changed, 62 insertions, 47 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cb49883b64e5..805ac8701723 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1062,7 +1062,7 @@ void hrtimer_pull(void)
 {
 	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_start_on_info *info;
-	struct list_head *pos, *safe, list;
+	struct list_head *pos, *safe, *prev, list;
 
 	raw_spin_lock(&base->lock);
 	list_replace_init(&base->to_pull, &list);
@@ -1073,6 +1073,8 @@ void hrtimer_pull(void)
 		TRACE("pulled timer 0x%x\n", info->timer);
 		list_del(pos);
 		hrtimer_start(info->timer, info->time, info->mode);
+		atomic_set(&info->state, HRTIMER_START_ON_INACTIVE);
+		prev = pos;
 	}
 }
 
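
Note: the atomic_set() added here is the counterpart of the one removed from
completion_timer_arm() in litmus/servers.c below. Resetting the start_on state
only after hrtimer_pull() has re-armed the pulled timer means an info slot can
no longer be handed back to hrtimer_start_on() while it still sits on a remote
CPU's to_pull list. (The new *prev variable is assigned but not read within
this hunk.) A minimal userspace sketch of the hand-off this ordering enforces,
using C11 atomics; INFO_PENDING, try_arm() and pull() are illustrative names,
not kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { INFO_INACTIVE, INFO_PENDING };

    struct start_on_info {
            atomic_int state;
    };

    /* arming side: claim the slot only if it is free */
    static int try_arm(struct start_on_info *info)
    {
            int expected = INFO_INACTIVE;
            return atomic_compare_exchange_strong(&info->state, &expected,
                                                  INFO_PENDING);
    }

    /* pulling side: re-arm the timer first, release the slot second */
    static void pull(struct start_on_info *info)
    {
            /* hrtimer_start(info->timer, info->time, info->mode) goes here */
            atomic_store(&info->state, INFO_INACTIVE);
    }

    int main(void)
    {
            struct start_on_info info = { .state = INFO_INACTIVE };
            printf("arm while free:    %d\n", try_arm(&info)); /* 1 */
            printf("arm while pending: %d\n", try_arm(&info)); /* 0 */
            pull(&info);
            printf("arm after pull:    %d\n", try_arm(&info)); /* 1 */
            return 0;
    }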
diff --git a/litmus/litmus.c b/litmus/litmus.c
index c4f899510900..38f276d67095 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -368,12 +368,12 @@ void litmus_exit_task(struct task_struct* tsk)
 
 		litmus->task_exit(tsk);
 
-		BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node));
-		bheap_node_free(tsk_rt(tsk)->heap_node);
-		release_heap_free(tsk_rt(tsk)->rel_heap);
-
-		atomic_dec(&rt_task_count);
-		reinit_litmus_state(tsk, 1);
+		if (!bheap_node_in_heap(tsk_rt(tsk)->heap_node)) {
+			bheap_node_free(tsk_rt(tsk)->heap_node);
+			release_heap_free(tsk_rt(tsk)->rel_heap);
+			atomic_dec(&rt_task_count);
+			reinit_litmus_state(tsk, 1);
+		}
 	}
 }
 
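
Note: the hard assertion becomes a guard: if an exiting task's heap node is
still enqueued somewhere, its per-task state is now skipped (leaked) rather
than freed under a live heap or crashed on via BUG_ON. The TODO added at the
top of litmus/sched_edf_hsb.c marks this as a workaround to be removed once
tasks are reliably pulled from their release queues on exit. A runnable toy of
the assert-to-guard shift; the one-field list node stands in for
bheap_node_in_heap():

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    /* stand-in for bheap_node_in_heap() */
    static bool node_in_heap(const struct node *n) { return n->next != NULL; }

    static void exit_task(struct node *n)
    {
            if (!node_in_heap(n)) {      /* was: BUG_ON(node_in_heap(n)); */
                    free(n);
                    printf("state freed\n");
            } else {
                    printf("still enqueued; skipping free\n");
            }
    }

    int main(void)
    {
            struct node *done = calloc(1, sizeof *done);
            exit_task(done);             /* normal exit path */

            struct node *queued = calloc(1, sizeof *queued);
            queued->next = queued;       /* simulate: still on a release heap */
            exit_task(queued);           /* old code would have panicked here */
            free(queued);
            return 0;
    }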
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c
index 7f944d1d97f0..70e254981861 100644
--- a/litmus/sched_edf_hsb.c
+++ b/litmus/sched_edf_hsb.c
@@ -15,14 +15,15 @@
  * stop_[hrt|be]_servers
  * admit_[hrt|be]_server
  *
- * TODO BE SERVER TASK PREEMPTGION A;SDIFHSAKEUHFLKH
- * TODO BE server heap needed?
+ * TODO system for removing tasks from their release queues
+ * TODO clean up link_task_to_cpu and check_slack args
  * TODO move slack completion into release
  * TODO fix concurrent arms
  * TODO slack and BE servers, include slack higher prio
  * TODO start servers should no longer be cessary
  * TODO harmonize order of method arguments
  * TODO test crazy task_new hack
+ * TODO remove bheap_node_in_heap check in litmus_exit_task
  */
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -361,7 +362,8 @@ static void check_slack_candidate(struct task_struct *task)
 	    /* The task has yet to be released */
 	    lt_after(get_release(task), litmus_clock()) &&
 	    /* The task didn't just complete */
-	    get_rt_flags(task) != RT_F_SLEEP &&
+	    !(get_rt_flags(task) == RT_F_SLEEP && task_srt_server(task)->job_no ==
+	      task_job_no(task)) &&
 	    /* The task hasn't already been added to the list */
 	    !head_in_list(&task_data(task)->candidate_list)) {
 
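
Note: the old condition rejected every task with RT_F_SLEEP set, which also
filtered out sleepers left over from older jobs that could still donate slack.
The refined test rejects a sleeper only when its SRT server's job_no has caught
up with the task's current job, i.e. the completion really just happened.
Runnable restatement with simplified stand-in types (the field names mirror the
patch; the structs are not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    struct server { unsigned job_no; };
    struct task   { bool sleeping; unsigned job_no; struct server *srt; };

    /* "the task didn't just complete": exclude a sleeper only when its
     * server already reached the same job number */
    static bool just_completed(const struct task *t)
    {
            return t->sleeping && t->srt->job_no == t->job_no;
    }

    int main(void)
    {
            struct server srv   = { .job_no = 4 };
            struct task   stale = { .sleeping = true, .job_no = 3, .srt = &srv };
            struct task   fresh = { .sleeping = true, .job_no = 4, .srt = &srv };

            printf("stale sleeper rejected?    %d\n", just_completed(&stale)); /* 0 */
            printf("fresh completion rejected? %d\n", just_completed(&fresh)); /* 1 */
            return 0;
    }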
@@ -598,33 +600,33 @@ static void reclaim_slack(server_t *slack)
 	BUG_ON(!slack->wcet);
 	BUG_ON(!donator->budget);
 
-	donator->budget -= exec;
+	donator->budget = lt_subtract(donator->budget, exec);
 	slack->wcet = slack->budget;
 
 	/* If budget exhausted, server needs to wait for next release */
 	if (!donator->budget) {
 		TRACE_SERVER_SUB(donator, "exhausted by slack");
-		if (donator->type == S_HRT) {
-			hrt_server = container_of(donator,
-						  hrt_server_t,
-						  server);
-			BUG_ON(!hrt_server->ready);
-			TRACE_SERVER_SUB(donator, "no longer ready");
-			hrt_server->ready = 0;
-			slack_timer_cancel(hrt_server);
-		} else if (donator->type == S_BE) {
-			TRACE_SERVER_SUB(donator, "BE removed from ready");
-			bheap_delete(server_order, &be_ready_servers,
-				     donator->hn);
-		}
+		/* if (donator->type == S_HRT) { */
+		/* 	hrt_server = container_of(donator, */
+		/* 				  hrt_server_t, */
+		/* 				  server); */
+		/* 	/\* BUG_ON(!hrt_server->ready); *\/ */
+		/* 	/\* TRACE_SERVER_SUB(donator, "exhausted"); *\/ */
+		/* 	/\* hrt_server->ready = 0; *\/ */
+		/* 	/\* slack_timer_cancel(hrt_server); *\/ */
+		/* } else if (donator->type == S_BE) { */
+		/* 	TRACE_SERVER_SUB(donator, "BE removed from ready"); */
+		/* 	/\* bheap_delete(server_order, &be_ready_servers, *\/ */
+		/* 	/\* 	     donator->hn); *\/ */
+		/* } */
 
-		/* Prepare servers for their next period. SRT servers are
-		 * handled with their SRT tasks and don't need assistance.
-		 */
-		if (donator->type != S_SRT) {
-			server_release(donator);
-			requeue_server(donator);
-		}
+		/* /\* Prepare servers for their next period. SRT servers are */
+		/*  * handled with their SRT tasks and don't need assistance. */
+		/*  *\/ */
+		/* if (donator->type != S_SRT) { */
+		/* 	/\* server_release(donator); *\/ */
+		/* 	/\* requeue_server(donator); *\/ */
+		/* } */
 	}
 }
 
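
Note: with unsigned time values, donator->budget -= exec wraps around whenever
the slack server consumed more than the donator had left, so the
!donator->budget exhaustion test just below could never fire. lt_subtract()
clamps at zero instead. The exhaustion handling itself is commented out
wholesale here rather than deleted, which reads as debugging scaffolding
rather than final design. A sketch of the saturating behaviour relied on (the
kernel's exact lt_subtract() definition may differ in detail):

    #include <stdio.h>

    typedef unsigned long long lt_t;   /* LITMUS^RT time value, simplified */

    static lt_t lt_subtract(lt_t a, lt_t b)
    {
            return a > b ? a - b : 0;  /* saturate instead of wrapping */
    }

    int main(void)
    {
            lt_t budget = 100;
            printf("%llu\n", lt_subtract(budget, 40));   /* 60 */
            printf("%llu\n", lt_subtract(budget, 150));  /* 0, not ~2^64 */
            /* with plain subtraction, budget - 150 wraps to a huge value and
             * the caller's '!donator->budget' check never sees zero */
            return 0;
    }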
@@ -721,7 +723,7 @@ static noinline void link_task_to_cpu(cpu_entry_t *entry,
 
 	BUG_ON(linked && !is_realtime(linked));
 	BUG_ON(linked && is_hrt(linked) && entry->cpu != task_cpu(linked));
-	BUG_ON(entry->cpu == edf_hsb_release_master);
+	BUG_ON(entry->cpu == edf_hsb_release_master);
 
 	if (linked)
 		TRACE_TASK_SERVER_SUB(linked, next_server, "linking to P%d",
@@ -963,7 +965,9 @@ static struct task_struct* next_eligible_srt(void)
 	 * task blocked while it was being run by a slack server.
 	 * Remove and ignore this task.
 	 */
-	while (next_srt && !is_running(next_srt)) {
+	while (next_srt && (!is_running(next_srt) ||
+			    unlikely(!is_realtime(next_srt)))) {
+		TRACE_TASK_SUB(next_srt, "removing blocking task");
 		remove(&srt_domain, next_srt);
 		next_srt = __peek_ready(&srt_domain);
 	}
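
Note: the discard loop now also drops queue heads that are no longer real-time
(the task exited while queued), not just blocked ones, and traces each removal.
The peek-and-discard pattern in miniature, with a plain array standing in for
the release queue and a sign test standing in for is_running()/is_realtime():

    #include <stdio.h>

    static int queue[] = { -3, -1, 5, 7 };  /* negative = ineligible entry */
    static int head, len = 4;

    static int *peek(void)    { return head < len ? &queue[head] : NULL; }
    static void discard(void) { head++; }

    int main(void)
    {
            int *next = peek();
            while (next && *next < 0) {     /* blocked or no longer real-time */
                    printf("discarding %d\n", *next);
                    discard();
                    next = peek();
            }
            if (next)
                    printf("next eligible: %d\n", *next);  /* 5 */
            return 0;
    }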
@@ -1191,7 +1195,7 @@ static void check_for_slack_preempt(struct task_struct*,server_t*,cpu_entry_t*,
  */
 static void edf_hsb_pick_next(cpu_entry_t *entry)
 {
-	struct task_struct *next_task;
+	struct task_struct *next_task, *linked;
 	server_t *next_server;
 
 	BUG_ON(entry->linked);
@@ -1209,11 +1213,16 @@ static void edf_hsb_pick_next(cpu_entry_t *entry)
 		TRACE_TASK_SERVER_SUB(next_task, next_server,
 				      "removing and picked");
 
-		/* Force the link to go to the CPU we ask for since we are
-		 * trying to reschedule for this CPU only.
+		/* A slack preemption could cause something that was already
+		 * running to be 'swapped' to this CPU in link_task_to_cpu.
 		 */
-		link_task_to_cpu(entry, next_task, next_server,
-				 0 /* Always link to what we want */);
+		if (entry->linked) {
+			linked = entry->linked;
+			unlink(entry->linked);
+			requeue(linked, get_rt_domain(entry, linked));
+			TRACE_TASK_SUB(linked, "preempted next pick");
+		}
+		link_task_to_cpu(entry, next_task, next_server, 1);
 	}
 }
 
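
Note: edf_hsb_pick_next() used to force the link (final argument 0, no
swapping). It now lets link_task_to_cpu() swap, but first evicts and requeues
whatever a concurrent slack preemption may already have linked to this entry,
so that task goes back onto its ready queue instead of being silently dropped.
The shape of the fix with toy types; requeue() and the linked field are
stand-ins for the scheduler's helpers:

    #include <stdio.h>

    struct entry { const char *linked; };

    static void requeue(const char *t) { printf("requeue %s\n", t); }

    static void pick_next(struct entry *e, const char *next)
    {
            if (e->linked) {              /* linked behind our back */
                    requeue(e->linked);   /* back onto its ready queue */
                    e->linked = NULL;     /* unlink */
            }
            e->linked = next;             /* now safe to link our pick */
    }

    int main(void)
    {
            struct entry e = { .linked = "slack-preempted task" };
            pick_next(&e, "picked task");
            printf("linked: %s\n", e.linked);
            return 0;
    }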
@@ -1224,14 +1233,16 @@ static void edf_hsb_pick_next(cpu_entry_t *entry)
 static void preempt(cpu_entry_t *entry, struct task_struct *next,
 		    server_t *next_server, int slack_resched)
 {
-	struct task_struct *linked = entry->linked;
+	struct task_struct *linked;
 	rt_domain_t *domain;
 
 	TRACE_TASK_SERVER_SUB(next, next_server,
 			      "preempting on P%d", entry->cpu);
 
 	remove_from_ready(next_server, next, entry);
+
 	check_for_slack_preempt(next, next_server, entry, slack_resched);
+	linked = entry->linked;
 	link_task_to_cpu(entry, next, next_server, 1);
 
 	/* No need for this if only the server was preempted */
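
Note: check_for_slack_preempt() can itself relink this entry, so the old
snapshot of entry->linked taken before the call could go stale; the assignment
now happens after the call. The hazard in miniature:

    #include <stdio.h>

    struct entry { int linked; };

    /* stands in for check_for_slack_preempt(), which may relink the CPU */
    static void maybe_relink(struct entry *e) { e->linked = 42; }

    int main(void)
    {
            struct entry e = { .linked = 7 };

            int stale = e.linked;   /* old code: snapshot before the call */
            maybe_relink(&e);
            int fresh = e.linked;   /* new code: read after the call */

            printf("stale=%d fresh=%d\n", stale, fresh);  /* stale=7 fresh=42 */
            return 0;
    }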
@@ -1250,9 +1261,9 @@ static void preempt(cpu_entry_t *entry, struct task_struct *next,
  * 2. slack donated by server is running a task on a different CPU
  */
 static void check_for_slack_preempt(struct task_struct *task,
-				     server_t *server,
-				     cpu_entry_t *next_entry,
-				     int resched)
+				    server_t *server,
+				    cpu_entry_t *next_entry,
+				    int resched)
 {
 	cpu_entry_t *entry = NULL;
 	server_t *slack = server_slack(server);
@@ -1986,8 +1997,10 @@ static struct task_struct* edf_hsb_schedule(struct task_struct *prev)
 	/* Bail out early if we are the release master.
 	 * The release master never schedules any real-time tasks.
 	 */
-	if (edf_hsb_release_master == entry->cpu)
+	if (edf_hsb_release_master == entry->cpu) {
+		sched_state_task_picked();
 		return NULL;
+	}
 #endif
 
 	raw_spin_lock_irqsave(global_lock, flags);
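
Note: every pass through a LITMUS^RT plugin's schedule() is expected to end
with sched_state_task_picked() so the per-CPU preemption state machine leaves
WILL_SCHEDULE; the release-master early return skipped that and left the CPU's
scheduling state stale. Toy model of the invariant (the real machine in
litmus/preempt.c has more states than shown):

    #include <stdio.h>

    enum sched_state { WILL_SCHEDULE, TASK_PICKED };
    static enum sched_state cpu_state;

    static void sched_state_task_picked(void) { cpu_state = TASK_PICKED; }

    /* is_release_master: bail out early; fixed: include the added call */
    static void schedule(int is_release_master, int fixed)
    {
            cpu_state = WILL_SCHEDULE;  /* entering the scheduler */
            if (is_release_master) {
                    if (fixed)
                            sched_state_task_picked();
                    return;             /* release master runs no RT tasks */
            }
            sched_state_task_picked();
    }

    int main(void)
    {
            schedule(1, 0);
            printf("unfixed: %s\n", cpu_state == TASK_PICKED
                   ? "TASK_PICKED" : "stuck in WILL_SCHEDULE");
            schedule(1, 1);
            printf("fixed:   %s\n", cpu_state == TASK_PICKED
                   ? "TASK_PICKED" : "stuck in WILL_SCHEDULE");
            return 0;
    }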
@@ -2129,6 +2142,7 @@ static void edf_hsb_task_wake_up(struct task_struct *task)
 	unsigned long flags;
 	cpu_entry_t *entry = task_sched_entry(task);
 
+
 	TRACE_TASK(task, "wake_up at %llu on %d, %d\n", TIME(litmus_clock()),
 		   task_cpu(task), task->rt_param.task_params.cpu);
 
diff --git a/litmus/servers.c b/litmus/servers.c
index 5bd0e3c937a9..577c83662357 100644
--- a/litmus/servers.c
+++ b/litmus/servers.c
@@ -91,7 +91,6 @@ static int completion_timer_arm(server_domain_t* domain, int cpu)
 		TRACE_SUB(server, "cannot arm completion, already active");
 		return 0;
 	}
-
 	if (domain->completion_timers[cpu].armed) {
 		TRACE_SUB(server, "cannot arm completion, waiting for arm");
 		return 0;
@@ -110,8 +109,6 @@ static int completion_timer_arm(server_domain_t* domain, int cpu)
 	on_cpu = cpu;
 
 	if (cpu != smp_processor_id()) {
-		atomic_set(&domain->completion_timers[cpu].info.state,
-			   HRTIMER_START_ON_INACTIVE);
 		err = hrtimer_start_on(on_cpu,
 				       &domain->completion_timers[cpu].info,
 				       &domain->completion_timers[cpu].timer,
@@ -132,7 +129,9 @@ static int completion_timer_arm(server_domain_t* domain, int cpu)
 
 	domain->completion_timers[cpu].armed = 1;
 
-	TRACE_SUB(server, "completion armed to fire at %llu",
+	TRACE_SUB(server, "completion 0x%x and %p armed to fire at %llu",
+		  &domain->completion_timers[cpu].timer,
+		  &domain->completion_timers[cpu].timer,
 		  TIME(budget_exhausted));
 
 	return 1;
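
Note: the expanded TRACE_SUB passes the same timer address twice, once for
"0x%x" and once for "%p" — evidently scaffolding for chasing the timer bug
fixed in kernel/hrtimer.c above, where TRACE() prints info->timer with the
same 0x%x pattern. Worth remembering that %x with a pointer argument
truncates (and is undefined behaviour) on 64-bit targets; %p is the portable
conversion:

    #include <stdio.h>

    int main(void)
    {
            int x = 0;
            void *p = &x;

            printf("%p\n", p);         /* correct: pointer conversion */
            /* printf("0x%x\n", p); */ /* wrong: %x expects unsigned int;
                                          truncates / UB on LP64 targets */
            return 0;
    }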