author     Jonathan Herman <hermanjl@cs.unc.edu>   2011-04-17 19:18:30 -0400
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2011-04-17 19:18:30 -0400
commit     685f214b45fc0c53a6f722d3c8413f3a05d9db2c (patch)
tree       b68fc8d10c21572fb9a8b6b6459fd1aaaa8310a0 /litmus
parent     5846906431466b20b22d9063bf636f2cbb44dba0 (diff)
Prevent tasks from linking to one CPU when they are scheduled on another
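
Previously, link_to_cpu() would swap links with the CPU a task is still
scheduled on even when that CPU held a linked hard real-time (HRT) task,
which cannot migrate. With this change, both link_to_cpu() and
check_for_global_preempt() detect the scheduled-elsewhere case and requeue
the task instead of forcing the swap. A minimal stand-alone sketch of the
new rule (illustrative only, not part of the patch; struct cpu and
may_swap_links() are invented stand-ins for the plugin's cpu_entry_t
bookkeeping):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified model of the new linking rule: a task scheduled on CPU
     * `sched` may be linked to a different CPU `target` only by swapping
     * with sched's linked task, and the swap is refused when that task is
     * hard real-time, because HRT tasks are pinned and cannot migrate. */
    struct cpu {
            int id;
            bool has_linked;        /* does this CPU have a linked task? */
            bool linked_is_hrt;     /* is that task hard real-time? */
    };

    static bool may_swap_links(const struct cpu *target, const struct cpu *sched)
    {
            if (target == sched)
                    return true;    /* already linked where it is scheduled */
            if (sched->has_linked && sched->linked_is_hrt)
                    return false;   /* would displace a pinned HRT task */
            return true;            /* safe to swap links between the CPUs */
    }

    int main(void)
    {
            struct cpu p0 = { 0, true, true };      /* holds a linked HRT task */
            struct cpu p1 = { 1, false, false };

            /* The patch takes the requeue path in exactly this situation. */
            printf("link to P%d while scheduled on P%d: %s\n", p1.id, p0.id,
                   may_swap_links(&p1, &p0) ? "swap links" : "requeue task");
            return 0;
    }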
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/sched_edf_hsb.c  95
-rw-r--r--  litmus/servers.c        27
2 files changed, 84 insertions, 38 deletions
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c
index ec4d6b0b27c2..66e870fa7ad1 100644
--- a/litmus/sched_edf_hsb.c
+++ b/litmus/sched_edf_hsb.c
@@ -570,8 +570,8 @@ static void requeue_server(server_t *server, lt_t now)
         int added = 0;
         hrt_server_t *hrt_server;
 
-        BUG_ON(server->type == S_SRT ||
-               server->type == S_SLACK);
+        if (server->type == S_SRT || server->type == S_SLACK)
+                return;
 
         if (lt_before(now, server->release)) {
                 added = add_server_release(server, &server_domain);
@@ -694,6 +694,9 @@ static noinline void unlink_server(cpu_entry_t *entry, int requeue)
         BUG_ON(head_in_list(&server_slack(server)->list));
 }
 
+static void requeue(struct task_struct *task, rt_domain_t *domain);
+static inline rt_domain_t* get_rt_domain(cpu_entry_t *entry, struct task_struct *task);
+
 /* Update the link of a CPU.
  * Handles the case where the to-be-linked task is already
  * scheduled on a different CPU. The last argument is only needed
@@ -733,10 +736,25 @@ static noinline void link_to_cpu(cpu_entry_t *entry,
         /* This should only happen if not linked already */
         BUG_ON(sched->linked == linked);
 
-        /* Swap link with entry on which linked is scheduled */
-        if (entry != sched && !is_hrt(linked) &&
-            (!sched->linked || !is_hrt(sched->linked))) {
-                TRACE_TASK_SUB(linked, "already scheduled on P%d",
+        if (entry != sched &&
+            sched->linked && is_hrt(sched->linked)) {
+                /* We are already scheduled on a CPU with an HRT */
+                TRACE_TASK_SUB(linked,
+                               "cannot move to scheduled CPU P%d",
+                               sched->cpu);
+
+                requeue_server(next_server, litmus_clock());
+                requeue(linked, get_rt_domain(entry, linked));
+
+                linked = NULL;
+                next_server = NULL;
+        } else if (entry != sched) {
+                /* Link to the CPU we are scheduled on by swapping
+                 * with that CPU's linked task.
+                 */
+                BUG_ON(is_hrt(linked));
+
+                TRACE_TASK_SUB(linked, "already scheduled on P%d",
                                sched->cpu);
 
                 tmp_task = sched->linked;
@@ -1023,7 +1041,9 @@ static struct task_struct* next_eligible_hrt(hrt_server_t *hrt_server)
         if (!hrt_server->server.budget &&
             !head_in_list(&hrt_server->server.release_list)) {
                 TRACE_SERVER_SUB(&hrt_server->server, "requeing");
+                catchup_server(&hrt_server->server, now);
                 requeue_server(&hrt_server->server, now);
+                slack_timer_arm(hrt_server);
         }
 
         if (task) {
@@ -1108,7 +1128,6 @@ static server_t* next_eligible_be_server(void)
         }
 
         if (be_server && lt_before(now, be_server->release)) {
-                TRACE_SERVER_SUB(be_server, "SHOULD BUG");
                 be_server = NULL;
         }
 
@@ -1120,6 +1139,7 @@ static server_t* next_eligible_be_server(void)
  */
 static noinline void requeue(struct task_struct *task, rt_domain_t *domain)
 {
+        lt_t now = litmus_clock();
         int was_added;
 
         BUG_ON(!is_realtime(task));
@@ -1128,8 +1148,10 @@ static noinline void requeue(struct task_struct *task, rt_domain_t *domain)
         if (is_queued(task)) {
                 TRACE_TASK_SUB(task, "not requeueing, already queued");
                 check_slack_candidate(task);
-        } else if (is_released(task, litmus_clock())) {
-                TRACE_TASK_SUB(task, "requeuing on ready");
+        } else if (is_released(task, now)) {
+                TRACE_TASK_SUB(task, "requeuing on ready %llu %llu %llu %llu",
+                               get_release(task), get_deadline(task),
+                               get_rt_period(task), now);
                 __add_ready(domain, task);
         } else {
                 /* Task needs to wait until it is released */
@@ -1408,8 +1430,9 @@ static void check_for_slack_preempt(struct task_struct *task,
  */
 static void check_for_global_preempt(void)
 {
-        cpu_entry_t *entry, *pref;
+        cpu_entry_t *entry, *sched;
         server_t *next_server;
+        int on_cpu;
         struct task_struct *next_task = (struct task_struct*)1; /* Not NULL */
 
         for (entry = lowest_prio_cpu(); entry; entry = lowest_prio_cpu()) {
@@ -1421,17 +1444,6 @@ static void check_for_global_preempt(void)
                 if (!next_server)
                         break;
 
-                /* Prevent migrations when possible */
-                if (!entry->linked) {
-                        pref = &per_cpu(cpu_entries, task_cpu(next_task));
-#ifdef CONFIG_RELEASE_MASTER
-                        if (!pref->linked && pref->cpu != edf_hsb_release_master)
-#else
-                        if (!pref->linked)
-#endif
-                                entry = pref;
-                }
-
                 /* Preempt only if we have an earlier deadline */
                 if (entry->linked &&
                     !lt_before(next_server->deadline,
@@ -1439,6 +1451,35 @@ static void check_for_global_preempt(void)
                         break;
                 }
 
+                /* If we are scheduled on another CPU, the link code
+                 * will force us to link to that CPU and try and link
+                 * that CPU's task to this CPU. This is impossible
+                 * if that CPU has linked HRT tasks which cannot
+                 * migrate.
+                 */
+                on_cpu = next_task->rt_param.scheduled_on;
+                if (on_cpu != NO_CPU) {
+                        sched = &per_cpu(cpu_entries, on_cpu);
+
+                        if (sched != entry && sched->linked &&
+                            is_hrt(sched->linked)) {
+
+                                TRACE_TASK_SUB(next_task,
+                                               "Already on P%d",
+                                               sched->cpu);
+
+                                if (entry->linked) {
+                                        requeue(entry->linked,
+                                                get_rt_domain(entry,
+                                                              entry->linked));
+                                        unlink(entry->linked);
+                                }
+                                preempt_if_preemptable(entry->scheduled,
+                                                       entry->cpu);
+                                break;
+                        }
+                }
+
                 /* We do not reschedule if this causes a slack preemption
                  * because we will detect if we should reschedule on the
                  * next iteration of the loop.
@@ -1718,6 +1759,8 @@ static void server_completed(server_t *server, struct task_struct *task)
                 edf_hsb_pick_next(entry);
 
         BUG_ON(!is_hrt(task) && entry->linked && is_hrt(entry->linked));
+        /* if (!is_hrt(task) && entry->linked && is_hrt(entry->linked)) */
+        /*         check_for_global_preempt(); */
 
         /* Only cause a reschedule if something new was scheduled. A task
          * could merely have swapped servers.
@@ -2041,6 +2084,9 @@ static long edf_hsb_deactivate_plugin(void)
                 cpu_entry = &per_cpu(cpu_entries, cpu);
                 hrt_server = &cpu_entry->hrt_server;
                 slack_timer_cancel(hrt_server);
+
+                if (likely(bheap_node_in_heap(cpu_entry->hn)))
+                        bheap_delete(server_order, &cpu_heap, cpu_entry->hn);
         }
 
         local_irq_restore(flags);
@@ -2292,8 +2338,11 @@ static void edf_hsb_task_wake_up(struct task_struct *task)
         } else {
                 /* Re-release all BE tasks on wake-up */
                 now = litmus_clock();
-                release_at(task, now);
-                sched_trace_task_release(task);
+
+                if (is_tardy(task, now)) {
+                        release_at(task, now);
+                        sched_trace_task_release(task);
+                }
         }
 
         job_arrival(task, entry);
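
On the wake-up path above, best-effort jobs are no longer unconditionally
re-released: a job that wakes before its deadline keeps its current release
and deadline, and only a tardy job gets a fresh release at `now`. A small
stand-alone model of that guard (illustrative only; the lt_t typedef,
struct job, be_wake_up() and job_is_tardy() are simplified stand-ins, with
tardiness modeled as "the absolute deadline has already passed"):

    #include <stdio.h>

    typedef unsigned long long lt_t;    /* nanoseconds, as in LITMUS^RT */

    struct job { lt_t release; lt_t deadline; lt_t period; };

    /* Modeled as: the absolute deadline has already passed. */
    static int job_is_tardy(const struct job *j, lt_t now)
    {
            return j->deadline <= now;
    }

    /* Mirrors the patched wake-up path: re-release only tardy BE jobs. */
    static void be_wake_up(struct job *j, lt_t now)
    {
            if (job_is_tardy(j, now)) {
                    j->release = now;               /* release_at(task, now) */
                    j->deadline = now + j->period;
            }
    }

    int main(void)
    {
            struct job j = { 0, 100, 100 };

            be_wake_up(&j, 50);     /* early wake-up: parameters unchanged */
            printf("deadline after early wake: %llu\n", j.deadline);
            be_wake_up(&j, 150);    /* tardy wake-up: fresh release at 150 */
            printf("deadline after tardy wake: %llu\n", j.deadline);
            return 0;
    }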
diff --git a/litmus/servers.c b/litmus/servers.c
index 7c47e5fdc125..6e622f3bd5a3 100644
--- a/litmus/servers.c
+++ b/litmus/servers.c
@@ -81,17 +81,18 @@ static int completion_timer_arm(server_domain_t* domain, int cpu)
         lt_t now = domain->start_times[cpu];
         server_t *server = domain->linked_servers[cpu];
         lt_t budget_exhausted = now + server->budget;
+        completion_timer_t *timer = &domain->completion_timers[cpu];
 
         /* This happens when someone attempts to call server_run when
          * the server completes. When this happens, we can ignore the request
          * here because completion_timer_fire will re-arm the timer if
          * the server is still running / was run again.
          */
-        if (hrtimer_active(&domain->completion_timers[cpu].timer)) {
+        if (hrtimer_active(&timer->timer)) {
                 TRACE_SUB(server, "cannot arm completion, already active");
                 return 0;
         }
-        if (domain->completion_timers[cpu].armed) {
+        if (timer->armed) {
                 TRACE_SUB(server, "cannot arm completion, waiting for arm");
                 return 0;
         }
@@ -108,10 +109,9 @@ static int completion_timer_arm(server_domain_t* domain, int cpu)
 #endif
         on_cpu = cpu;
 
+        err = 1;
         if (cpu != smp_processor_id()) {
-                err = hrtimer_start_on(on_cpu,
-                                       &domain->completion_timers[cpu].info,
-                                       &domain->completion_timers[cpu].timer,
+                err = hrtimer_start_on(on_cpu, &timer->info, &timer->timer,
                                        ns_to_ktime(budget_exhausted),
                                        HRTIMER_MODE_ABS_PINNED);
                 if (err) {
@@ -119,23 +119,20 @@ static int completion_timer_arm(server_domain_t* domain, int cpu)
                 } else {
                         TRACE_SUB(server, "success on P%d!", on_cpu);
                 }
-        } else {
-                err = __hrtimer_start_range_ns(&domain->completion_timers[cpu].timer,
+        } else if (atomic_read(&timer->info.state) != HRTIMER_START_ON_INACTIVE) {
+                err = __hrtimer_start_range_ns(&timer->timer,
                                                ns_to_ktime(budget_exhausted),
                                                0 /* delta */,
                                                HRTIMER_MODE_ABS_PINNED,
                                                0 /* no wakeup */);
         }
 
-        if (!err)
-                domain->completion_timers[cpu].armed = 1;
-        else
-                domain->completion_timers[cpu].armed = 0;
+        timer->armed = (err) ? 0 : 1;
 
-        TRACE_SUB(server, "completion 0x%x and %p armed to fire at %llu",
-                  &domain->completion_timers[cpu].timer,
-                  &domain->completion_timers[cpu].timer,
-                  TIME(budget_exhausted));
+        TRACE_SUB(server, "completion 0x%x and %p armed to fire at %llu, err: %d",
+                  &timer->timer,
+                  &timer->timer,
+                  TIME(budget_exhausted), err);
 
         return !err;
 }
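
In servers.c, completion_timer_arm() now starts from err = 1, so skipping
the local arm when the start-on state test fails counts as a failure, and
the armed flag and return value are both derived from err in one place. A
stand-alone model of that control flow (illustrative only; `remote` and
`state_inactive` are invented parameters standing in for the
smp_processor_id() comparison and the atomic_read(&timer->info.state) !=
HRTIMER_START_ON_INACTIVE test, and the real hrtimer calls can of course
fail, which this model ignores):

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the reworked arming logic: err defaults to failure (1) and
     * is cleared only when an arming path actually runs; timer->armed and
     * the return value are then both derived from err. */
    static int arm_completion_timer(bool remote, bool state_inactive, bool *armed)
    {
            int err = 1;            /* assume failure until a timer is started */

            if (remote)
                    err = 0;        /* stands in for hrtimer_start_on() */
            else if (!state_inactive)
                    err = 0;        /* stands in for __hrtimer_start_range_ns() */
            /* else: start-on state is inactive, skip arming, err stays 1 */

            *armed = err ? false : true;    /* timer->armed = (err) ? 0 : 1 */
            return !err;                    /* as in the patch: 1 on success */
    }

    int main(void)
    {
            bool armed;

            printf("local, state inactive: success=%d\n",
                   arm_completion_timer(false, true, &armed));   /* skipped */
            printf("local, state active:   success=%d\n",
                   arm_completion_timer(false, false, &armed));  /* armed */
            return 0;
    }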