author    Jonathan Herman <hermanjl@cs.unc.edu>  2012-10-10 22:13:30 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2012-10-10 22:13:30 -0400
commit    fbbd0ae9f4589d35265a9b85e5b2e4614b432525 (patch)
tree      403a66093be20cedb8351b7af065f2e5f883661f
parent    0a84a160ea79d9479d17b02b5332413159f52be2 (diff)
Allow for concurrent blocking / user-space job completions.
-rw-r--r--  include/litmus/litmus.h    |  5
-rw-r--r--  include/litmus/sched_mc.h  |  8
-rw-r--r--  litmus/rt_domain.c         |  3
-rw-r--r--  litmus/sched_mc.c          | 67
4 files changed, 51 insertions, 32 deletions
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 93961d08efd..a78f7f94344 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -51,8 +51,9 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_user_deadline(t) (tsk_rt(t)->user_job.deadline)
 
 /* Realtime utility macros */
 #define get_rt_flags(t) (tsk_rt(t)->flags)
-#define set_rt_flags(t,f) (tsk_rt(t)->flags=(f))
+#define set_rt_flags(t,f) (tsk_rt(t)->flags|=(f))
+#define clear_rt_flags(t,f) (tsk_rt(t)->flags&=(~f))
 #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted)
 #define get_boost_start(t) (tsk_rt(t)->boost_start_time)
 
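The litmus.h hunk is the crux of the commit: task-state flags change from a single exclusive value (flags = f) to a bitmask (flags |= f, plus an explicit clear_rt_flags()), so a job that completes from user space (RT_F_SLEEP) can still be marked as blocked on a resource (RT_F_BLOCKED) at the same time. A minimal user-space sketch of the difference; the flag values here are hypothetical stand-ins, not the kernel's definitions:

    /* Sketch only: why set_rt_flags() must OR bits in once two flags
     * can be held concurrently. Flag values are made up. */
    #include <assert.h>

    #define RT_F_SLEEP   0x1 /* hypothetical value */
    #define RT_F_BLOCKED 0x2 /* hypothetical value */

    int main(void)
    {
            unsigned int flags = RT_F_BLOCKED; /* blocked on a group lock */

            /* Old macro: plain assignment clobbers the blocked bit when
             * user space signals completion. */
            unsigned int old = flags;
            old = RT_F_SLEEP;
            assert(!(old & RT_F_BLOCKED)); /* blocked state lost */

            /* New macros: OR-in preserves it; clearing is explicit. */
            flags |= RT_F_SLEEP;           /* set_rt_flags()   */
            assert(flags & RT_F_BLOCKED);  /* both states coexist */
            flags &= ~RT_F_BLOCKED;        /* clear_rt_flags() */
            return 0;
    }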
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
index 5473ee30cb5..327f476aad7 100644
--- a/include/litmus/sched_mc.h
+++ b/include/litmus/sched_mc.h
@@ -28,14 +28,14 @@ struct mc_data {
 #define tsk_mc_data(t) (tsk_rt(t)->mc_data)
 #define tsk_mc_crit(t) \
 	(tsk_mc_data(t) ? tsk_mc_data(t)->mc_task.crit : CRIT_LEVEL_C)
-#define is_ghost(t) \
-	((get_rt_flags(t) & RT_F_BLOCKED) || \
-	 (get_rt_job(t) < get_user_job(t) && (get_rt_flags(t) & RT_F_SLEEP)))
+#define is_cblocked(t) (get_rt_flags(t) & RT_F_BLOCKED)
+#define is_ghost(t) \
+	(get_rt_job(t) < get_user_job(t) && (get_rt_flags(t) & RT_F_SLEEP))
 #define ce_sid(ce) \
 	(-((ce)->level * num_online_cpus() + crit_cpu(ce)->cpu + 1))
 
 #define TS "(%s/%d:%d:%d:%s)"
-#define TA(t) (t) ? tsk_mc_data(t) ? is_ghost(t) ? "ghost" : t->comm \
+#define TA(t) (t) ? tsk_mc_data(t) ? is_ghost(t) ? "ghost" : is_cblocked(t) ? "blocked" : t->comm \
 	: t->comm : "NULL", \
 	(t) ? t->pid : 1, \
 	(t) ? get_rt_job(t) : 1, \
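With flags as a bitmask, "blocked on a resource" and "ghost job" become independent conditions: is_cblocked() tests RT_F_BLOCKED alone, and is_ghost() is narrowed to the completed-but-still-budgeted case it was named for. Paraphrased as functions for readability (same semantics as the macros above, not code from the patch):

    /* Paraphrase of the sched_mc.h predicates, for reading only. */
    static inline int is_cblocked(struct task_struct *t)
    {
            /* Waiting on a pending group-lock request: ineligible to
             * run, but the current job has not logically completed. */
            return get_rt_flags(t) & RT_F_BLOCKED;
    }

    static inline int is_ghost(struct task_struct *t)
    {
            /* User space has completed a job the server has not: the
             * leftover server budget is consumed as a "ghost" job. */
            return get_rt_job(t) < get_user_job(t) &&
                   (get_rt_flags(t) & RT_F_SLEEP);
    }

The TA() trace macro gains a matching "blocked" label so both states show up distinctly in traces.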
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 687dc129bc2..6e6a1236bb2 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -25,7 +25,7 @@
 #define TRACE(fmt, args...) STRACE(fmt, ## args)
 
 /* Uncomment when debugging timer races... */
-#if 0
+#if 1
 #define VTRACE_TASK TRACE_TASK
 #define VTRACE TRACE
 #else
@@ -440,6 +440,7 @@ static void pd_requeue(domain_t *dom, struct task_struct *task)
 			task->comm, task->pid, get_exec_cost(task), get_rt_period(task),
 			get_release(task), litmus_clock());
 	} else {
+		tsk_rt(task)->flags = RT_F_RUNNING;
 		/* task has to wait for next release */
 		VTRACE_TASK(task, "add release(), rel=%llu\n", get_release(task));
 		add_release(domain, task);
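Two separate changes here: the #if 0 -> #if 1 flip leaves the VTRACE timer-race debugging macros enabled, as the comment above it suggests, and pd_requeue() now wipes a task's flags before queuing it for its next release. The direct assignment matters: with the new OR-based macro, setting RT_F_RUNNING would no longer clear anything. A two-line sketch, assuming RT_F_RUNNING is the empty flag state (which the assignment itself implies):

    set_rt_flags(task, RT_F_RUNNING);   /* now flags |= 0: a no-op */
    tsk_rt(task)->flags = RT_F_RUNNING; /* actually clears stale
                                         * SLEEP/BLOCKED bits */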
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 33adc860b6b..0ee7be46f1e 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -123,6 +123,10 @@ static void release_resources(struct task_struct *t)
 	TRACE_MC_TASK(t, "Releasing resources\n");
 
 	raw_spin_lock(&dgl_lock);
+	if (get_rt_flags(t) & RT_F_BLOCKED) {
+		TRACE_MC_TASK(t, "Forced off before we got our stuff\n");
+		clear_rt_flags(t, RT_F_BLOCKED);
+	}
 	remove_group_req(&group_lock, tsk_rt(t)->req);
 	raw_spin_unlock(&dgl_lock);
 	take_np(t);
@@ -312,6 +316,8 @@ static void link_task_to_crit(struct crit_entry *ce,
 	/* Unlink last task */
 	if (ce->linked) {
 		ce->domain->release_resources(ce->linked);
+		if (ce->state == CS_BLOCKED)
+			ce->state = CS_ACTIVE;
 
 		TRACE_MC_TASK(ce->linked, "Unlinking\n");
 		ce->linked->rt_param.linked_on = NO_CPU;
@@ -439,7 +445,7 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task)
 	struct crit_entry *ce;
 	TRACE_MC_TASK(task, "Linking to P%d\n", entry->cpu);
 	BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu);
-	BUG_ON(task && is_ghost(task));
+	BUG_ON(task && (is_ghost(task) || is_cblocked(task)));
 
 	if (entry->linked) {
 		sched_trace_server_switch_away(-entry->linked->pid,
@@ -449,7 +455,7 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task)
 	}
 
 	if (task) {
-		set_rt_flags(task, RT_F_RUNNING);
+		tsk_rt(task)->flags = RT_F_RUNNING;
 		sched_trace_server_switch_to(-task->pid,
 					     get_rt_job(task),
 					     task->pid,
@@ -500,7 +506,7 @@ static void preempt_crit(struct domain *dom, struct crit_entry *ce)
 	 * running on this CPU as otherwise the crit_entry would have
 	 * been disabled and a preemption could not have occurred
 	 */
-	if (!is_ghost(task)) {
+	if (!is_ghost(task) && !is_cblocked(task)) {
 		preempt_cpu(entry, task);
 	} else if (old && old == entry->linked) {
 		/* Preempted running task with ghost job. Nothing should run */
@@ -563,7 +569,7 @@ static void update_crit_levels(struct cpu_entry *entry)
  */
 static void check_for_preempt(struct domain *dom)
 {
-	int recheck = 1, higher_prio, was_ghost;
+	int recheck = 1, higher_prio, was_inelig, update = 0;
 	struct cpu_entry *entry;
 	struct crit_entry *ce;
 
@@ -599,22 +605,24 @@ static void check_for_preempt(struct domain *dom)
 		raw_spin_lock(&entry->lock);
 
 		if (can_use(ce)) {
-			was_ghost = ce->linked && !is_ghost(ce->linked) &&
-				ce->linked != entry->linked;
+			was_inelig = ce->linked && !is_ghost(ce->linked) &&
+				ce->linked != entry->linked && !is_cblocked(ce->linked);
 			higher_prio = mc_preempt_needed(dom, ce->linked);
 
-			if (was_ghost) {
+			if (was_inelig) {
 				preempt_cpu(entry, ce->linked);
 				start_crit(ce);
 			} else if (higher_prio)
 				preempt_crit(dom, ce);
+			update = was_inelig || higher_prio;
 
-			if (was_ghost || higher_prio) {
-				update_crit_levels(entry);
-				return;
-			}
+		} else {
+			TRACE_CRIT_ENTRY(ce, "Can't use!\n");
 		}
-		raw_spin_unlock(&entry->lock);
+		if (update)
+			update_crit_levels(entry);
+		else
+			raw_spin_unlock(&entry->lock);
 	}
 }
 
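The rewritten loop tail also changes who releases entry->lock: when linkage changed (update), update_crit_levels() is called in place of the unlock, which only balances if that function drops the lock itself; the pick_next_task() hunk further down follows the same convention. Reduced to a skeleton (an inference from the call sites, not patch code):

    raw_spin_lock(&entry->lock);
    /* ... possibly relink tasks at this criticality ... */
    if (update)
            update_crit_levels(entry);     /* assumed to unlock internally */
    else
            raw_spin_unlock(&entry->lock);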
@@ -695,22 +703,25 @@ static void job_completion(struct task_struct *task, int forced)
 	release_server = budget_exhausted(task);
 
 	if (release_server || forced) {
+		if (release_server)
+			sched_trace_server_completion(-task->pid,
+						      get_rt_job(task));
 		/* Only unlink (and release resources) if the current server job
 		 * must stop logically running
 		 */
 		remove_from_all(task);
 	}
 
+	BUG_ON(get_rt_flags(task) & RT_F_BLOCKED);
+
 	if (lt_before(get_user_release(task), litmus_clock())) {
 		TRACE_TASK(task, "Executable task going back to running\n");
-		set_rt_flags(task, RT_F_RUNNING);
+		clear_rt_flags(task, RT_F_SLEEP);
 	}
 
 	if (release_server || forced) {
 		/* TODO: Level A does this independently and should not */
 		if (release_server && CRIT_LEVEL_A != tsk_mc_crit(task)) {
-			sched_trace_server_completion(-task->pid,
-						      get_rt_job(task));
 			prepare_for_next_period(task);
 		}
 
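Besides the new BUG_ON documenting that a blocked task must never reach job_completion(), the server-completion trace point moves and widens (a condensed comparison, not verbatim patch code):

    /* old: remove_from_all(task);
     *      if (release_server && CRIT_LEVEL_A != tsk_mc_crit(task))
     *              sched_trace_server_completion(...);  // level A skipped
     *
     * new: if (release_server)
     *              sched_trace_server_completion(...);  // all levels
     *      remove_from_all(task);                       // after tracing
     */

Completions are now recorded before the task is unlinked, and level-A servers are traced too; only prepare_for_next_period() keeps the level-A exclusion flagged by the TODO.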
@@ -902,7 +913,7 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 		tsk_rt(t)->task_params.budget_policy = PRECISE_ENFORCEMENT;
 
 	/* Apply chunking */
-	if (level == CRIT_LEVEL_B && color_chunk
+	if (level == CRIT_LEVEL_B && color_chunk &&
 	    lt_after(get_exec_cost(t), color_chunk)) {
 		tsk_rt(t)->orig_cost = get_exec_cost(t);
 	}
@@ -923,6 +934,7 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 	release_at(t, litmus_clock());
 	if (running) {
 		BUG_ON(entry->scheduled);
+		TRACE_MC_TASK(t, "Was already running\n");
 		entry->scheduled = t;
 		tsk_rt(t)->scheduled_on = entry->cpu;
 		tsk_rt(t)->last_exec_time = litmus_clock();
@@ -951,7 +963,11 @@ static void mc_task_wake_up(struct task_struct *task)
 		release_at(task, now);
 		sched_trace_task_release(task);
 	}
-	if (!is_ghost(task))
+
+	if (budget_exhausted(task))
+		/* Rare, but possible, race condition */
+		job_completion(task, 1);
+	else
 		job_arrival(task);
 
 	local_irq_restore(flags);
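The wake-up path previously skipped re-arrival only for ghosts; it now also catches a task that wakes up with its budget already spent, which is pushed through a forced completion instead of being linked with nothing left to run on. One way the race can unfold (an illustrative timeline, not from the patch):

    /* t blocks on I/O                  -> task_block callback
     * budget enforcement fires while t sleeps: budget_exhausted(t)
     *   becomes true, but no completion is processed because t is
     *   not scheduled
     * I/O finishes, t wakes            -> mc_task_wake_up()
     *
     * Without the check, job_arrival(t) would queue t with zero
     * budget; job_completion(t, 1) first charges the overrun and
     * sets up the next job. */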
@@ -1064,7 +1080,8 @@ void pick_next_task(struct cpu_entry *entry)
 			update_crit_position(ce);
 		}
 	}
-	if (ready_task && !is_ghost(ready_task)) {
+	if (ready_task && !is_ghost(ready_task) &&
+	    !is_cblocked(ready_task)) {
 		link_task_to_cpu(entry, ready_task);
 		raw_spin_unlock(dom->lock);
 		update_crit_levels(entry);
@@ -1112,7 +1129,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	exists = entry->scheduled != NULL;
 	blocks = exists && !is_running(entry->scheduled);
 	out_of_time = exists && budget_exhausted(entry->scheduled);
-	sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
+	sleep = exists && get_rt_flags(entry->scheduled) & RT_F_SLEEP;
 	global = exists && is_global_task(entry->scheduled);
 	preempt = entry->scheduled != entry->linked;
 	lower = exists && preempt && entry->linked &&
@@ -1155,9 +1172,10 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	/* A remote processor unblocked one of our crit levels */
 	if (entry->crit_signal != NUM_CRIT_LEVELS) {
 		ce = &entry->crit_entries[entry->crit_signal];
-		raw_spin_lock(&ce->domain->lock);
+		TRACE_CRIT_ENTRY(ce, "Processing signal for %d\n", entry->crit_signal);
+		raw_spin_lock(ce->domain->lock);
 		check_for_preempt(ce->domain);
-		raw_spin_unlock(&ce->domain->lock);
+		raw_spin_unlock(ce->domain->lock);
 		entry->crit_signal = NUM_CRIT_LEVELS;
 	}
 
@@ -1414,15 +1432,14 @@ static void cpu_acquired(int cpu)
 	TRACE_CRIT_ENTRY(ce, "Acquired lock\n");
 
 	BUG_ON(!ce->linked);
-	BUG_ON(get_rt_flags(ce->linked) & RT_F_SLEEP);
 
-	set_rt_flags(ce->linked, RT_F_RUNNING);
+	clear_rt_flags(ce->linked, RT_F_BLOCKED);
 	sched_trace_task_resume(ce->linked);
 
 	if (ce->state == CS_BLOCKED) {
 		entry->crit_signal = CRIT_LEVEL_B;
-		/* Yes this is ok for race conditions, but only because no other
-		 * state will ever apply to a partitioned crit entry
+		/* Yes this is ok for race conditions, but only in the system
+		 * for the MC-COLOR paper
 		 */
 		ce->state = CS_ACTIVE;
 		litmus_reschedule(cpu);