author    | Jonathan Herman <hermanjl@cs.unc.edu> | 2011-04-06 10:52:09 -0400
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2011-04-06 10:52:09 -0400
commit    | 2a604f8edec24b291ba2c5491108808176c9020f (patch)
tree      | a2954d7cbbb6127cd5168a039bf3d7de9776a719 /litmus/sched_edf_hsb.c
parent    | 7b335ec69a6905c80ff3180a6e4dfac937e7d7f5 (diff)
Better memory management, removed unnecessary preemptions, moved data out of
rt_params and into plugin data, better list management logic.
Diffstat (limited to 'litmus/sched_edf_hsb.c')
-rw-r--r-- | litmus/sched_edf_hsb.c | 269
1 file changed, 164 insertions(+), 105 deletions(-)
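The changes below move per-task state out of rt_params into a plugin-private task_data_t allocated from a dedicated kmem_cache, and switch list removal to list_del_init() so that the new head_in_list() helper can test membership by checking whether a node still points to itself. The sketch below is a minimal, stand-alone user-space illustration of those two idioms, not LITMUS^RT code; the names (demo_task_data, node_*) are invented for the example, and the in-kernel equivalents (kmem_cache_alloc/free, INIT_LIST_HEAD, list_del_init, head_in_list) are noted in comments.

```c
/*
 * Stand-alone sketch (not LITMUS^RT code) of the two idioms this commit
 * adopts: per-task plugin data owning an embedded list node, and list
 * membership tested by whether the node still points to itself.
 */
#include <stdio.h>
#include <stdlib.h>

struct list_node {
        struct list_node *next, *prev;
};

static void node_init(struct list_node *n)        /* INIT_LIST_HEAD */
{
        n->next = n;
        n->prev = n;
}

static void node_add(struct list_node *n, struct list_node *head)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void node_del_init(struct list_node *n)    /* list_del_init */
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        node_init(n);   /* unlinked node points to itself again */
}

static int node_in_list(struct list_node *n)      /* head_in_list */
{
        return !(n->next == n->prev && n->prev == n);
}

/* Per-task plugin data, analogous in shape to task_data_t in the patch. */
struct demo_task_data {
        int owner_pid;                  /* stands in for struct task_struct* */
        struct list_node slack_list;    /* embedded: no separate allocation */
};

int main(void)
{
        struct list_node candidates;
        /* in-kernel this would come from a kmem_cache */
        struct demo_task_data *data = malloc(sizeof(*data));

        node_init(&candidates);
        data->owner_pid = 42;
        node_init(&data->slack_list);

        printf("in list after init: %d\n", node_in_list(&data->slack_list)); /* 0 */
        node_add(&data->slack_list, &candidates);
        printf("in list after add:  %d\n", node_in_list(&data->slack_list)); /* 1 */
        node_del_init(&data->slack_list);
        printf("in list after del:  %d\n", node_in_list(&data->slack_list)); /* 0 */

        free(data);     /* kmem_cache_free in-kernel */
        return 0;
}
```

Because removal always reinitializes the node, "is this task already a slack candidate?" can be answered from the node itself, which is what lets the patch drop the separate slack_candidate flag.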
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c
index d75f8a04b362..e3cd78d29ce8 100644
--- a/litmus/sched_edf_hsb.c
+++ b/litmus/sched_edf_hsb.c
@@ -20,7 +20,7 @@
  * TODO move slack completion into release
  * TODO fix concurrent arms
  * TODO slack and BE servers
- *
+ * TODO start servers should no longer be cessary
  */
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -63,21 +63,22 @@
  * Useful debugging macros. Remove for actual use as they cause
  * a lot of lock contention.
  */
-//#ifdef DEBUG_EDF_HSB
-atomic_t servers_running = ATOMIC_INIT(0);
-/* #define TRACE_SUB(fmt, args...) \ */
-/*         sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt "\n", \ */
-/*                 TRACE_ARGS, ## args) */
-/* #define TRACE_TASK_SUB(t, fmt, args...) \ */
-/*         TRACE_SUB("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \ */
-/*                 (t)->rt_param.job_params.job_no, ##args) */
-/* #define TRACE_SERVER_SUB(s, fmt, args...) \ */
-/*         TRACE_SUB(SERVER_FMT " " fmt, SERVER_ARGS(s), ##args) */
-/* #else */
+#ifdef DEBUG_EDF_HSB
+atomic_t servers_running = ATOMIC_INIT(0); /* TODO should be unnecessary */
+#define TRACE_SUB(fmt, args...) \
+        sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt "\n", \
+                TRACE_ARGS, ## args)
+#define TRACE_TASK_SUB(t, fmt, args...) \
+        TRACE_SUB("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \
+                (t)->rt_param.job_params.job_no, ##args)
+#define TRACE_SERVER_SUB(s, fmt, args...) \
+        TRACE_SUB(SERVER_FMT " " fmt, SERVER_ARGS(s), ##args)
+#else
 #define TRACE_SUB(fmt, args...)
 #define TRACE_TASK_SUB(t, fmt, args...)
 #define TRACE_SERVER_SUB(s, fmt, args...)
-//#endif
+#endif
+
 typedef enum {
         S_HRT,
         S_SRT,
@@ -88,7 +89,7 @@ typedef enum {
 typedef struct {
         server_t server;
         rt_domain_t hrt_domain; /* EDF for HRT tasks assigned here */
-        int ready;
+        int ready; /* False if waiting for next release */
         int no_slack;
         struct hrtimer slack_timer; /* Server has no slack when:
                                      * (deadline - budget) <= current_time.
@@ -108,52 +109,74 @@ typedef struct {
         struct bheap_node* hn; /* For the cpu_heap */
 } cpu_entry_t;
 
+typedef struct task_data {
+        server_t *srt_server; /* If the task is SRT, its server */
+        struct list_head slack_list; /* List of slack canditates */
+        struct task_struct *owner;
+} task_data_t;
+
 /* CPU state */
 DEFINE_PER_CPU_SHARED_ALIGNED(cpu_entry_t, cpu_entries);
 static struct bheap cpu_heap;
 static struct bheap_node cpu_heap_node[NR_CPUS];
 
-static rt_domain_t srt_domain; /* SRT tasks (and servers) */
-static rt_domain_t be_domain; /* BE tasks */
+/* Task domains */
+static rt_domain_t srt_domain;
+static rt_domain_t be_domain;
 
-static server_domain_t server_domain; /* Useful tools for server scheduling */
+/* Useful tools for server scheduling */
+static server_domain_t server_domain;
 
-static struct list_head be_servers; /* All BE servers */
-static struct bheap be_ready_servers; /* Runnable BE servers */
+/* BE server support */
+static struct list_head be_servers;
+static struct bheap be_ready_servers;
 
+/* Slack support */
 static struct list_head slack_queue;
 static struct list_head slack_candidates;
 
-static int release_master; /* CPU which will release tasks and global servers */
+/* CPU which will release tasks and global servers */
+static int edf_hsb_release_master;
 
+static struct kmem_cache *task_data_cache;
 static struct proc_dir_entry *edf_hsb_proc_dir = NULL;
 static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp;
 
-#define local_cpu_entry (&__get_cpu_var(cpu_entries))
 #define task_sched_entry(task) (&per_cpu(cpu_entries, task_cpu(task)))
 #define task_linked_entry(task) (&per_cpu(cpu_entries, task->rt_param.linked_on))
 #define task_job_no(task) (tsk_rt(task)->job_params.job_no)
-#define task_srt_server(task) ((server_t*)tsk_rt(task)->plugin_data)
-#define task_slack_server(task) ((server_t*)tsk_rt(task)->slack_server)
+#define task_data(task) ((task_data_t*)tsk_rt(task)->plugin_data)
+#define task_srt_server(task) ((server_t*)task_data(task)->srt_server)
+#define local_cpu_entry (&__get_cpu_var(cpu_entries))
 #define global_lock (&srt_domain.ready_lock)
 #define is_active_plugin (litmus == &edf_hsb_plugin)
 
-static inline int head_in_list(struct list_head *list)
+static inline server_t* task_slack_server(struct task_struct *task)
 {
-        if ((list->next == list->prev && list->prev == list) ||
-            (list->next == LIST_POISON1 && list->prev == LIST_POISON2))
-                return 0;
-        else
-                return 1;
+        server_t *slack_server = NULL;
+        if (task->rt_param.linked_on != NO_CPU) {
+                slack_server = task_linked_entry(task)->linked_server;
+                if (slack_server->type != S_SLACK)
+                        slack_server = NULL;
+        }
+        return slack_server;
+}
+
+static inline int head_in_list(struct list_head *node)
+{
+        return !(node->next == node->prev && node->prev == node);
+}
+
+static task_data_t* task_data_alloc(int gfp_flags)
+{
+        return kmem_cache_alloc(task_data_cache, gfp_flags);
+}
+
+static void task_data_free(task_data_t* data)
+{
+        kmem_cache_free(task_data_cache, data);
 }
 
-/* In the next methods check to see if have donated. If so, do what?
- * Server_stop. in check for hrt, need to make sure a check for global is
- * called. in the other one, just need to make sure we unlink and crap and
- * then call for it again.
- * What happens if an srt is released that is scheduled?
- * Then the effective deadline will mark it.
- */
 static server_t* next_eligible_slack(void)
 {
         server_t *next_slack = NULL, *donator;
@@ -171,11 +194,11 @@ static server_t* next_eligible_slack(void)
                            SERVER_ARGS(next_slack));
                 sched_trace_action(NULL, 7);
 
-                list_del(&next_slack->list);
+                list_del_init(&next_slack->list);
                 donator = (server_t*)next_slack->data;
                 donator->data = NULL;
                 server_destroy(next_slack);
-                kfree(next_slack);
+                server_free(next_slack);
 
                 next_slack = NULL;
         }
@@ -197,8 +220,6 @@ static void add_slack(server_t *slack)
         server_t *queued;
         TRACE_SERVER_SUB(slack, "adding");
 
-        BUG_ON(head_in_list(&slack->list));
-
         list_for_each_prev(pos, &slack_queue) {
                 queued = list_entry(pos, server_t, list);
                 if (lt_before_eq(queued->deadline, slack->deadline)) {
@@ -209,26 +230,33 @@ static void add_slack(server_t *slack)
         list_add(&slack->list, &slack_queue);
 }
 
+static inline struct task_struct* get_candidate(struct list_head *pos)
+{
+        struct task_struct *task = NULL;
+        task_data_t *data;
+        if (!list_empty(pos)) {
+                data = list_entry(pos, task_data_t, slack_list);
+                task = data->owner;
+        }
+        return task;
+}
+
 static void add_slack_candidate(struct task_struct *task)
 {
         struct list_head *pos;
         struct task_struct *queued;
 
-        tsk_rt(task)->slack_candidate = 1;
-        INIT_LIST_HEAD(&tsk_rt(task)->slack_list);
-
         list_for_each_prev(pos, &slack_candidates) {
-                queued = list_entry(pos, struct task_struct,
-                                    rt_param.slack_list);
+                queued = get_candidate(pos);
                 if (lt_before_eq(get_deadline(queued), get_deadline(task))) {
                         TRACE_TASK_SUB(task, "adding after %d", queued->pid);
-                        __list_add(&tsk_rt(task)->slack_list,
+                        __list_add(&task_data(task)->slack_list,
                                    pos, pos->next);
                         return;
                 }
         }
         TRACE_TASK_SUB(task, "adding to end of list");
-        list_add(&tsk_rt(task)->slack_list, &slack_candidates);
+        list_add(&task_data(task)->slack_list, &slack_candidates);
 }
 
 static struct task_struct* next_eligible_hrt(hrt_server_t*);
@@ -264,7 +292,7 @@ static void donate_slack(server_t *donator, struct task_struct *was_scheduled)
 
         TRACE_SERVER_SUB(donator, "donated %llu slack", TIME(donator->budget));
         sched_trace_action(was_scheduled, 9);
-        slack = kmalloc(sizeof(server_t), GFP_ATOMIC);
+        slack = server_alloc(GFP_ATOMIC);
         server_init(slack, donator->id, donator->budget,
                     donator->period, 0);
         slack->type = S_SLACK;
@@ -286,8 +314,7 @@ static struct task_struct* pick_next_slack(server_t *slack, cpu_entry_t *entry)
         if (!slack)
                 goto out;
         if (!list_empty(&slack_candidates)) {
-                rv = list_entry(slack_candidates.next, struct task_struct,
-                                rt_param.slack_list);
+                rv = get_candidate(slack_candidates.next);
                 TRACE_TASK_SUB(rv, "is next slack");
         } else if (entry && entry->linked &&
                    entry->linked_server->type == S_SLACK) {
@@ -297,25 +324,29 @@ static struct task_struct* pick_next_slack(server_t *slack, cpu_entry_t *entry)
         return rv;
 }
 
-static void take_next_slack(struct task_struct *task)
+static void take_slack_candidate(struct task_struct *task)
 {
-        if (tsk_rt(task)->slack_candidate) {
-                TRACE_TASK_SUB(task, "deleting slack");
-                list_del(&tsk_rt(task)->slack_list);
-                tsk_rt(task)->slack_candidate = 0;
+        if (head_in_list(&task_data(task)->slack_list)) {
+                TRACE_TASK_SUB(task, "deleting candidate");
+                list_del_init(&task_data(task)->slack_list);
         } else {
-                TRACE_TASK_SUB(task, "can't delete slack");
+                TRACE_TASK_SUB(task, "can't delete candidate");
         }
 }
 
 static void check_slack_candidate(struct task_struct *task)
 {
         TRACE_TASK_SUB(task, "checking");
-        if (is_srt(task) && task_srt_server(task)->job_no >=
-            task_job_no(task) && lt_after(get_release(task), litmus_clock()) &&
+        if (is_srt(task) &&
+            /* The SRT task is not ahead of its server */
+            task_srt_server(task)->job_no >= task_job_no(task) &&
+            /* The task has yet to be released */
+            lt_after(get_release(task), litmus_clock()) &&
+            /* The task didn't just complete */
             get_rt_flags(task) != RT_F_SLEEP &&
-            !tsk_rt(task)->slack_candidate)
-        {
+            /* The task hasn't already been added to the list */
+            !head_in_list(&task_data(task)->slack_list)) {
+
                 add_slack_candidate(task);
                 sched_trace_action(task, 8);
         }
@@ -454,8 +485,8 @@ static void slack_timer_arm(hrt_server_t *hrt_server)
         entry = container_of(hrt_server, cpu_entry_t, hrt_server);
 
 #ifdef SLACK_ON_MASTER
-        if (release_master != NO_CPU)
-                cpu = release_master;
+        if (edf_hsb_release_master != NO_CPU)
+                cpu = edf_hsb_release_master;
         else
 #endif
                 cpu = entry->cpu;
@@ -548,12 +579,14 @@ static noinline void link_server(cpu_entry_t *entry,
         if (next_server->type == S_SRT) {
                 TRACE_TASK_SUB(entry->linked, "resuming SRT server,"
                                "budget*: %llu, exec_time: %llu, deadline: %llu,"
-                               "job_no: %d",
+                               "tjob_no: %d, sjob_no: %d sdead: %llu",
                                next_server->budget, get_exec_time(entry->linked),
-                               get_deadline(entry->linked), next_server->job_no);
+                               get_deadline(entry->linked),
+                               task_job_no(entry->linked),
+                               next_server->job_no,
+                               next_server->deadline);
                 BUG_ON(lt_after(next_server->budget,
                                 get_exec_cost(entry->linked)));
-
                 BUG_ON(next_server->job_no < task_job_no(entry->linked));
                 BUG_ON(lt_after(get_deadline(entry->linked),
                                 next_server->deadline));
@@ -563,10 +596,10 @@ static noinline void link_server(cpu_entry_t *entry,
                 slack_timer_cancel(&entry->hrt_server);
         } else if (next_server->type == S_SLACK) {
                 /* Should have already been removed */
-                BUG_ON(tsk_rt(entry->linked)->slack_candidate);
+                BUG_ON(head_in_list(&task_data(entry->linked)->slack_list));
+
                 TRACE_SERVER_SUB(next_server, "linking slack server");
                 sched_trace_action(entry->linked, 5);
-                tsk_rt(entry->linked)->slack_server = next_server;
         } else { /* BE */
                 BUG_ON(bheap_node_in_heap(next_server->hn));
                 sched_trace_action(entry->linked, 200 + next_server->id);
@@ -610,10 +643,6 @@ static noinline void unlink_server(cpu_entry_t *entry,
 
         }
 
-        if (entry->linked_server->type == S_SLACK) {
-                tsk_rt(entry->linked)->slack_server = NULL;
-        }
-
         /* Requeue SRT servers, they will be garbage collected later */
         if (entry->linked_server->type == S_SLACK &&
             next_server != entry->linked_server &&
@@ -864,17 +893,17 @@ static struct task_struct* next_eligible_srt(void)
          * to overutilization of the system.
          */
         if (next_srt) {
-                srt_server = tsk_rt(next_srt)->plugin_data;
+                srt_server = task_srt_server(next_srt);
                 if (srt_server->deadline < get_deadline(next_srt)) {
                         TRACE_SUB("catching up SRT to %llu",
                                   get_release(next_srt));
                         server_release_at(srt_server, get_release(next_srt));
                         srt_server->job_no = task_job_no(next_srt);
                 }
-        }
 
-        if (next_srt && tsk_rt(next_srt)->slack_candidate)
-                take_next_slack(next_srt);
+                /* Ensure that no slack server will try to schedule this */
+                take_slack_candidate(next_srt);
+        }
 
         return next_srt;
 }
@@ -1016,6 +1045,8 @@ static void preempt(cpu_entry_t *entry, struct task_struct *next,
 {
         rt_domain_t *domain;
 
+        BUG_ON(next == entry->linked);
+
         if (entry->linked) {
                 domain = get_rt_domain(entry, entry->linked);
                 requeue(entry->linked, domain, entry->linked_server);
@@ -1042,12 +1073,17 @@ static cpu_entry_t* check_for_slack_preempt(struct task_struct *task,
                                             cpu_entry_t *next_entry)
 {
         cpu_entry_t *preempted = NULL;
-        cpu_entry_t *entry = task_linked_entry(task);
-        server_t *slack_server = task_slack_server(task);
+        cpu_entry_t *entry = NULL;
+        server_t *slack_server = NULL;
 
         BUG_ON(!is_srt(task));
 
-        if (!slack_server || !slack_server->running)
+        if (tsk_rt(task)->linked_on != NO_CPU) {
+                entry = task_linked_entry(task);
+                if (entry->linked_server->type == S_SLACK)
+                        slack_server = entry->linked_server;
+        }
+        if (!slack_server)
                 goto out;
 
         TRACE_TASK_SUB(task, "slack preempt");
@@ -1056,7 +1092,7 @@ static cpu_entry_t* check_for_slack_preempt(struct task_struct *task,
                 preempted = entry;
 
         unlink(task);
-        take_next_slack(task);
+        take_slack_candidate(task);
 out:
         return preempted;
 }
@@ -1111,8 +1147,8 @@ static void edf_hsb_pick_next(cpu_entry_t *entry)
                 next_slack = pick_next_slack(slack_server, NULL);
 
                 if (next_slack) {
-                        take_next_slack(next_slack);
-                        list_del(&slack_server->list);
+                        take_slack_candidate(next_slack);
+                        list_del_init(&slack_server->list);
                         TRACE_TASK_SUB(next_slack, "taking");
                         link_task_to_cpu(entry, next_slack, slack_server);
                 }
@@ -1211,7 +1247,7 @@ static void check_for_global_preempt(void)
                 /* Swap two slack servers here */
                 if (slack_server && entry->linked_server->type == S_SLACK) {
                         TRACE_SUB("9.5");
-                        list_del(&slack_server->list);
+                        list_del_init(&slack_server->list);
                         unlink_server(entry, slack_server);
                         link_server(entry, slack_server);
                         update_cpu_position(entry);
@@ -1242,7 +1278,7 @@ static void check_for_global_preempt(void)
                 } else { /* Slack */
                         next_task = next_slack;
                         next_server = slack_server;
-                        list_del(&slack_server->list);
+                        list_del_init(&slack_server->list);
                 }
                 BUG_ON(!next_task);
                 TRACE_TASK_SUB(next_task, "preempting on P%d", entry->cpu);
@@ -1250,7 +1286,7 @@ static void check_for_global_preempt(void)
                 if (next_server != slack_server && is_queued(next_task))
                         remove(get_rt_domain(entry, next_task), next_task);
                 else if (next_slack)
-                        take_next_slack(next_slack);
+                        take_slack_candidate(next_slack);
                 preempt(entry, next_task, next_server);
 
 loop_end:
@@ -1522,8 +1558,8 @@ static void server_completed(server_t *server, struct task_struct *task)
         requeue(task, get_rt_domain(entry, task), server);
 
         /* Need to pick the next task to run */
-        check_for_global_preempt();
-        if (!entry->linked)
+        edf_hsb_pick_next(entry);
+        if (!entry->linked || entry->linked != task)
                 preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
@@ -1542,7 +1578,7 @@ static void hrt_server_released(server_t *server)
         /* Boot off an HRT task which has become uneligible */
         if (entry->linked && is_hrt(entry->linked) &&
             !is_eligible(entry->linked, hrt_server)) {
-
+                /* TODO: necessary?? */
                 requeue(entry->linked, &hrt_server->hrt_domain,
                         entry->linked_server);
                 unlink(entry->linked);
@@ -1622,7 +1658,7 @@ static int admit_be_server(unsigned long long wcet,
                 goto out;
         }
 
-        be_server = kmalloc(sizeof(server_t), GFP_ATOMIC);
+        be_server = server_alloc(GFP_ATOMIC);
         server_init(be_server, BE_SERVER_BASE + ++curr_be,
                     wcet, period, 1);
         be_server->type = S_BE;
@@ -1662,13 +1698,12 @@ static void stop_be_servers(void)
         list_for_each_safe(pos, safe, &be_servers) {
                 be_server = list_entry(pos, server_t, list);
 
-                list_del(pos);
+                list_del_init(pos);
                 if (bheap_node_in_heap(be_server->hn))
                         bheap_delete(server_order, &be_ready_servers,
                                      be_server->hn);
-
-                kfree(be_server->hn);
-                kfree(be_server);
+                server_destroy(be_server);
+                server_free(be_server);
         }
 }
 
@@ -1781,8 +1816,8 @@ static void start_servers(lt_t time)
                           cpu, TIME(slack_fire), &entry->hrt_server.slack_timer);
 
 #ifdef SLACK_ON_MASTER
-                if (release_master != NO_CPU)
-                        slack_cpu = release_master;
+                if (edf_hsb_release_master != NO_CPU)
+                        slack_cpu = edf_hsb_release_master;
                 else
 #endif
                         slack_cpu = cpu;
@@ -1827,11 +1862,11 @@ static long edf_hsb_activate_plugin(void)
 #endif
 
 #ifdef CONFIG_RELEASE_MASTER
-        release_master = atomic_read(&release_master_cpu);
+        edf_hsb_release_master = atomic_read(&release_master_cpu);
 #else
-        release_master = NO_CPU;
+        edf_hsb_release_master = NO_CPU;
 #endif
-        server_domain.release_master = release_master;
+        server_domain.release_master = edf_hsb_release_master;
 
         TRACE("activating EDF-HSB plugin.\n");
         return 0;
@@ -1915,8 +1950,11 @@ static void edf_hsb_task_exit(struct task_struct *task)
                 entry->scheduled = NULL;
                 tsk_rt(task)->scheduled_on = NO_CPU;
         }
-        if (is_srt(task))
-                kfree(tsk_rt(task)->plugin_data);
+        if (is_srt(task)) {
+                server_destroy(task_srt_server(task));
+                server_free(task_srt_server(task));
+                task_data_free(tsk_rt(task)->plugin_data);
+        }
 
         raw_spin_unlock_irqrestore(global_lock, flags);
 }
@@ -1996,7 +2034,8 @@ static struct task_struct* edf_hsb_schedule(struct task_struct *prev)
 static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running)
 {
         unsigned long flags;
-        server_t *srt_server = NULL;
+        server_t *srt_server;
+        task_data_t *data = NULL;
         cpu_entry_t *entry = task_sched_entry(task);
 
         TRACE_TASK(task, "edf_hsb: task new at %llu\n", TIME(litmus_clock()));
@@ -2006,17 +2045,21 @@ static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running)
         /* Setup job parameters */
         release_at(task, litmus_clock());
 
-        /* Create struct to store SRT server state on suspension */
         if (is_srt(task)) {
-                srt_server = kmalloc(sizeof(server_t), GFP_ATOMIC);
+                /* Create SRT server */
+                srt_server = server_alloc(GFP_ATOMIC);
                 server_init(srt_server, task->pid, get_exec_cost(task),
                             get_rt_period(task), 0);
                 srt_server->type = S_SRT;
                 srt_server->data = task;
-                srt_server->job_no = tsk_rt(task)->job_params.job_no;
-                server_release_at(srt_server, get_release(task));
+                srt_server->job_no = 1;
+
+                data = task_data_alloc(GFP_ATOMIC);
+                data->owner = task;
+                data->srt_server = srt_server;
+                INIT_LIST_HEAD(&data->slack_list);
         }
-        tsk_rt(task)->plugin_data = srt_server;
+        tsk_rt(task)->plugin_data = data;
 
         /* Already running, update the cpu entry.
          * This tends to happen when the first tasks enter the system.
@@ -2122,6 +2165,8 @@ static int __init init_edf_hsb(void)
                                 admit_hrt_server, list_hrt_servers,
                                 stop_hrt_servers);
 
+        task_data_cache = KMEM_CACHE(task_data, SLAB_PANIC);
+
         /* Global domains */
         edf_domain_init(&srt_domain, NULL, release_srt_jobs);
         rt_domain_init(&be_domain, be_ready_order,
@@ -2165,11 +2210,25 @@
 
 static void exit_edf_hsb(void)
 {
+        int cpu;
+        cpu_entry_t *entry;
+
+        stop_be_servers();
+        stop_hrt_servers();
+
         server_proc_exit(edf_hsb_proc_dir, BE_PROC_NAME);
         server_proc_exit(edf_hsb_proc_dir, HRT_PROC_NAME);
 
+        server_domain_destroy(&server_domain);
+
+        for_each_online_cpu(cpu) {
+                entry = &per_cpu(cpu_entries, cpu);
+                server_destroy(&entry->hrt_server.server);
+        }
+
         if (edf_hsb_proc_dir) {
                 remove_plugin_proc_dir(&edf_hsb_plugin);
+                /* TODO: is this wrong? */
                 edf_hsb_proc_dir = NULL;
         }
 }