 litmus/Makefile                |    3 ++-
 litmus/sched_edf_hsb.c         |   15 +++++++++------
 litmus/sched_edf_hsb_noslack.c | 2523 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 2534 insertions(+), 7 deletions(-)
diff --git a/litmus/Makefile b/litmus/Makefile
index 62c2bb064581..9468312b39e4 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -18,7 +18,8 @@ obj-y = sched_plugin.o litmus.o \
 	ctrldev.o \
 	servers.o \
 	sched_gsn_edf.o \
-	sched_edf_hsb.o
+	sched_edf_hsb.o \
+	sched_edf_hsb_noslack.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c
index d89364fdb582..d176efca648b 100644
--- a/litmus/sched_edf_hsb.c
+++ b/litmus/sched_edf_hsb.c
@@ -340,6 +340,9 @@ static noinline void check_donate_slack(server_t *donator, struct task_struct *w
 
 	TRACE_SERVER_SUB(donator, "checking donation");
 
+	if (!slack)
+		return;
+
 	/* Donating small amounts of slack will result in excess migrations */
 	if (donator->budget < SLACK_MIN)
 		return;
@@ -1330,8 +1333,7 @@ static void remove_from_ready(server_t *server, struct task_struct *task,
 		}
 	} else {
 		slack = server_slack(server);
-		BUG_ON(!slack);
-		if (head_in_list(&slack->list)) {
+		if (slack && head_in_list(&slack->list)) {
 			remove_slack(slack);
 		}
 		if (server->type == S_BE) {
@@ -1456,7 +1458,7 @@ static noinline void check_for_slack_preempt(struct task_struct *task,
 	}
 
 	/* The server's slack is currently being run */
-	if (is_server_linked(slack)) {
+	if (slack && is_server_linked(slack)) {
 		entry = &per_cpu(cpu_entries, slack->cpu);
 		slack_task = server_task(slack);
 
@@ -2466,11 +2468,12 @@ static int __init init_edf_hsb(void)
 		idle_slack = server_alloc(GFP_ATOMIC);
 		server_init(idle_slack, &server_domain,
 			    IDLE_SLACK_BASE + cpu,
-			    ULLONG_MAX, ULLONG_MAX, 1);
-		idle_slack->deadline = ULLONG_MAX;
-		idle_slack->budget = ULLONG_MAX;
+			    LLONG_MAX, LLONG_MAX, 1);
+		idle_slack->deadline = LLONG_MAX;
+		idle_slack->budget = LLONG_MAX;
 		idle_slack->job_no = 1;
 		idle_slack->release = 1;
+		idle_slack->type = S_SLACK;
 		add_slack(idle_slack);
 
 		entry->cpu = cpu;
diff --git a/litmus/sched_edf_hsb_noslack.c b/litmus/sched_edf_hsb_noslack.c
new file mode 100644
index 000000000000..e6e8d7dd0e8b
--- /dev/null
+++ b/litmus/sched_edf_hsb_noslack.c
@@ -0,0 +1,2523 @@
+/*
+ * litmus/sched_edf_hsb_noslack.c
+ *
+ * Implementation of the EDF-HSB scheduling algorithm (variant with
+ * slack donation compiled out).
+ *
+ * The following 6 events are fired by timers and not handled by
+ * the plugin infrastructure itself:
+ *
+ * release_[hrt|srt|be]_jobs
+ * [hrt|be]_server_released
+ * server_completed (for HRT, SRT, and BE)
+ *
+ * The following 4 events are caused by a write to the proc entry
+ * and should never be run when the plugin is already running:
+ * stop_[hrt|be]_servers
+ * admit_[hrt|be]_server
+ *
+ * TODO system for removing tasks from their release queues
+ * TODO clean up link_to_cpu and check_slack args
+ * TODO move slack completion into release
+ * TODO fix concurrent arms
+ * TODO slack and BE servers, include slack higher prio
+ * TODO start servers should no longer be necessary
+ * TODO harmonize order of method arguments
+ * TODO test crazy task_new hack
+ * TODO remove bheap_node_in_heap check in litmus_exit_task
+ */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <linux/sched.h>
+#include <linux/hrtimer.h>
+
+#include <litmus/litmus.h>
+#include <litmus/bheap.h>
+#include <litmus/jobs.h>
+#include <litmus/litmus_proc.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/edf_common.h>
+#include <litmus/sched_trace.h>
+#include <litmus/servers.h>
+#define DEBUG_EDF_HSB
+
+/* DOES NOT WORK */
+//#define SLACK_ON_MASTER
+
+#define BE_PROC_NAME	"be_servers"
+#define HRT_PROC_NAME	"hrt_servers"
+#define BE_SERVER_BASE	100
+#define IDLE_SLACK_BASE	1000
+#define SLACK_MIN	NSEC_PER_MSEC
+
+/* SCHED_TRACE action events */
+#define SERVER_COMPLETED_ACTION	1
+#define SERVER_RELEASED_ACTION	2
+#define NO_SLACK_ACTION		3
+#define SLACK_RUN_ACTION	4
+#define SLACK_STOP_ACTION	5
+#define SLACK_RECLAIM_ACTION	6
+#define SLACK_EXPIRED_ACTION	7
+#define SLACK_DONATED_ACTION	8
+#define CANDIDATE_ADDED_ACTION	9
+
+/* Uncomment for human readable time */
+#define TIME(x) \
+	(x)
+/* ({lt_t y = x;		   \ */
+/*   do_div(y, NSEC_PER_MSEC);	   \ */
+/*   y;})			     */
+#define TRACE_TIMER(fmt, args...) \
+	sched_trace_log_message("%d P%d*[%s@%s:%d]: " fmt " at %d\n", \
+				TRACE_ARGS, ## args, TIME(litmus_clock()))
+#define TRACE_TASK_TIMER(t, fmt, args...) \
+	TRACE_TIMER("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \
+		    (t)->rt_param.job_params.job_no, ## args)
+
+/*
+ * Useful debugging macros. Remove for actual use as they cause
+ * a lot of lock contention.
+ */
+#ifdef DEBUG_EDF_HSB
+
+#define TRACE_SUB(fmt, args...) \
+	sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt "\n", \
+				TRACE_ARGS, ## args)
+#define TRACE_TASK_SUB(t, fmt, args...) \
+	TRACE_SUB(TASK_FMT " " fmt, TASK_ARGS(t), ##args)
+#define TRACE_SERVER_SUB(s, fmt, args...) \
+	TRACE_SUB(SERVER_FMT " " fmt, SERVER_ARGS(s), ##args)
+#define TRACE_TASK_SERVER_SUB(t, s, fmt, args...) \
+	TRACE_TASK_SUB(t, SERVER_FMT " " fmt, SERVER_ARGS(s), ##args)
+#else
+#define TRACE_SUB(fmt, args...)
+#define TRACE_TASK_SUB(t, fmt, args...)
+#define TRACE_SERVER_SUB(s, fmt, args...)
+#define TRACE_TASK_SERVER_SUB(t, s, fmt, args...)
+#endif
+
+/*
+ * Different types of servers
+ */
+typedef enum {
+	S_HRT,
+	S_SRT,
+	S_BE,
+	S_SLACK
+} server_type_t;
+
+/*
+ * A server running HRT tasks
+ */
+typedef struct {
+	server_t	server;
+	rt_domain_t	hrt_domain;	/* EDF for HRT tasks assigned here */
+	int		ready;		/* False if waiting for next release */
+	int		no_slack;
+	struct hrtimer	slack_timer;	/* Server has no slack when:
+					 * (deadline - budget) <= current_time.
+					 */
+	struct hrtimer_start_on_info slack_timer_info;
+} hrt_server_t;
+
+/*
+ * State of a single CPU
+ */
+typedef struct {
+	int			cpu;
+	struct task_struct	*scheduled;	/* Task that should be running */
+	struct task_struct	*linked;	/* Task that actually is running */
+	server_t		*scheduled_server;
+	server_t		*linked_server;	/* The server running on this CPU.
+						 * Note that what it is 'running'
+						 * is linked, not scheduled.
+						 */
+	hrt_server_t		hrt_server;	/* One HRT server per CPU */
+	struct bheap_node	*hn;		/* For the cpu_heap */
+} cpu_entry_t;
+
+/*
+ * Data assigned to each task
+ */
+typedef struct task_data {
+	server_t		*srt_server;	/* If the task is SRT, its server */
+	struct list_head	candidate_list;	/* List of slack candidates */
+	struct task_struct	*owner;
+} task_data_t;
+
+/* CPU state */
+DEFINE_PER_CPU_SHARED_ALIGNED(cpu_entry_t, noslack_cpu_entries);
+static struct bheap cpu_heap;
+static struct bheap_node cpu_heap_node[NR_CPUS];
+/* Task domains */
+static rt_domain_t srt_domain;
+static rt_domain_t be_domain;
+/* Useful tools for server scheduling */
+static server_domain_t server_domain;
+/* BE server support */
+static struct list_head be_servers;
+static struct bheap be_ready_servers;
+/* Slack support */
+static struct list_head slack_queue;
+static struct list_head slack_candidates;
+/* CPU which will release tasks and global servers */
+static int edf_hsb_release_master;
+/* Cache to store task_data structs */
+static struct kmem_cache *task_data_cache;
+
+static struct proc_dir_entry *edf_hsb_proc_dir = NULL;
+static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp;
+
+#define task_sched_entry(task)	(&per_cpu(noslack_cpu_entries, task_cpu(task)))
+#define task_linked_entry(task)	(&per_cpu(noslack_cpu_entries, task->rt_param.linked_on))
+#define task_job_no(task)	(tsk_rt(task)->job_params.job_no)
+#define task_data(task)		((task_data_t*)tsk_rt(task)->plugin_data)
+#define task_srt_server(task)	((server_t*)task_data(task)->srt_server)
+#define server_slack(s)		((server_t*)(s)->data)
+#define server_has_slack(s)	(server_slack(s)->deadline != 0)
+#define local_cpu_entry		(&__get_cpu_var(noslack_cpu_entries))
+#define global_lock		(&srt_domain.ready_lock)
+#define is_active_plugin	(litmus == &edf_hsb_plugin)
+
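+/*
+ * Note on server_has_slack(): remove_slack() zeroes the slack server's
+ * deadline, so a deadline of 0 doubles as the "no donation outstanding"
+ * sentinel for a donating server.
+ */
+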
+/*
+ * This only works if items are deleted with list_del_init.
+ */
+static inline int head_in_list(struct list_head *head)
+{
+	BUG_ON(!head);
+	return !(head->next == head->prev && head->prev == head);
+}
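+
+/*
+ * Example: list_del_init() resets a deleted entry so that
+ * entry->next == entry->prev == entry, which is exactly what the check
+ * above detects; a plain list_del() would leave the pointers poisoned
+ * and defeat this test.
+ */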
+
+/*
+ * Returns slack server running the task or NULL if N/A.
+ */
+static inline server_t* task_slack_server(struct task_struct *task)
+{
+	server_t *slack_server = NULL;
+	if (task->rt_param.linked_on != NO_CPU) {
+		slack_server = task_linked_entry(task)->linked_server;
+		if (slack_server->type != S_SLACK)
+			slack_server = NULL;
+	}
+	return slack_server;
+}
+
+static task_data_t* task_data_alloc(int gfp_flags)
+{
+	return kmem_cache_alloc(task_data_cache, gfp_flags);
+}
+
+static void task_data_free(task_data_t* data)
+{
+	kmem_cache_free(task_data_cache, data);
+}
+
+/*
+ * Donating servers pre-allocate a server for slack to avoid runtime
+ * calls to kmalloc.
+ */
+static void server_slack_create(server_t *donator)
+{
+	server_t *slack = server_alloc(GFP_ATOMIC);
+
+	server_init(slack, &server_domain, -donator->id, 0, 0, 1);
+	slack->type = S_SLACK;
+	slack->data = donator;
+	donator->data = slack;
+}
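+
+/*
+ * Note the symmetric links above: donator->data points at the slack
+ * server and slack->data points back at the donator, which is why
+ * server_slack() can be applied to either half of the pair (e.g.
+ * reclaim_slack() uses server_slack(slack) to recover the donator).
+ */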
+
+
+static void server_slack_destroy(server_t *donator)
+{
+	server_t *slack = (server_t*)donator->data;
+
+	donator->data = NULL;
+	server_destroy(slack);
+	server_free(slack);
+}
+
+static void remove_slack(server_t *slack)
+{
+	if (!slack)
+		return;
+	TRACE_SERVER_SUB(slack, "slack removed");
+	//sched_trace_action(NULL, SLACK_EXPIRED_ACTION);
+
+	if (head_in_list(&slack->list))
+		list_del_init(&slack->list);
+	slack->deadline = 0;
+	slack->budget = 0;
+	slack->wcet = 0;
+}
+
+/*
+ * Slack queue is EDF.
+ */
+static void add_slack(server_t *slack)
+{
+	struct list_head *pos;
+	server_t *queued;
+
+	TRACE_SERVER_SUB(slack, "slack added");
+
+	if (head_in_list(&slack->list))
+		return;
+
+	list_for_each_prev(pos, &slack_queue) {
+		queued = list_entry(pos, server_t, list);
+		if (lt_before_eq(queued->deadline, slack->deadline)) {
+			__list_add(&slack->list, pos, pos->next);
+			return;
+		}
+	}
+	list_add(&slack->list, &slack_queue);
+}
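+
+/*
+ * Example: inserting slack with deadline 15 into a queue holding
+ * deadlines [10, 20] scans backwards from the tail, stops at the entry
+ * with deadline 10, and splices in after it, yielding [10, 15, 20].
+ */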
+
+static inline struct task_struct* get_candidate(struct list_head *pos)
+{
+	struct task_struct *task = NULL;
+	task_data_t *data;
+	if (!list_empty(pos)) {
+		data = list_entry(pos, task_data_t, candidate_list);
+		task = data->owner;
+	}
+	return task;
+}
+
+/*
+ * Candidate queue is EDF.
+ */
+static void add_slack_candidate(struct task_struct *task)
+{
+	struct list_head *pos;
+	struct task_struct *queued;
+
+	TRACE_TASK_SUB(task, "candidate added");
+
+	list_for_each_prev(pos, &slack_candidates) {
+		queued = get_candidate(pos);
+		if (lt_before_eq(get_deadline(queued), get_deadline(task))) {
+			__list_add(&task_data(task)->candidate_list,
+				   pos, pos->next);
+			return;
+		}
+	}
+	list_add(&task_data(task)->candidate_list, &slack_candidates);
+}
+
+static void donate_slack(server_t *donator)
+{
+	server_t *slack = (server_t*)donator->data;
+	hrt_server_t *hrt_server;
+
+	TRACE_SERVER_SUB(donator, "%llu slack donated", TIME(donator->budget));
+
+	if (donator->type == S_HRT) {
+		hrt_server = container_of(donator, hrt_server_t, server);
+		BUG_ON(!hrt_server->ready);
+	}
+
+	BUG_ON(head_in_list(&slack->list));
+
+	slack->wcet = donator->budget;
+	slack->budget = donator->budget;
+	slack->deadline = donator->deadline;
+
+	add_slack(slack);
+}
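+
+/*
+ * Example: a donator with 4ms of remaining budget and a deadline at
+ * t = 100 produces a slack server with wcet = budget = 4ms and
+ * deadline = 100, so the donated slack competes in the EDF slack queue
+ * at the donator's priority.
+ */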
+
+#if 0
+/*
+ * Donate any available slack from a server.
+ */
+static noinline void check_donate_slack(server_t *donator, struct task_struct *was_scheduled)
+{
+	server_t *slack = server_slack(donator);
+	hrt_server_t *hrt_server;
+	int donate = 0;
+
+	TRACE_SERVER_SUB(donator, "checking donation");
+
+	/* Donating small amounts of slack will result in excess migrations */
+	if (donator->budget < SLACK_MIN)
+		return;
+
+	if (server_has_slack(donator)) {
+		TRACE_SERVER_SUB(donator, "dead: %llu, rel: %llu, job: %d already donated",
+				 slack->deadline, slack->release, slack->job_no);
+		return;
+	}
+
+	if (donator->type == S_HRT)
+		hrt_server = container_of(donator, hrt_server_t, server);
+
+	/* Donate if the server is waiting for a task release */
+	if ((donator->type == S_SRT &&
+	     donator->job_no <= task_job_no(was_scheduled)) ||
+	    (donator->type == S_HRT &&
+	     hrt_server->no_slack && hrt_server->ready &&
+	     !__jobs_pending(&hrt_server->hrt_domain)) ||
+	    (donator->type == S_BE &&
+	     !__jobs_pending(&be_domain))) {
+		donate = 1;
+	}
+
+	if (!donate)
+		return;
+
+	sched_trace_action(was_scheduled, SLACK_DONATED_ACTION);
+
+	donate_slack(donator);
+}
+
+#else
+#define check_donate_slack(a, b)
+#endif
+
+/*
+ * Adds the task to the candidate queue if it is eligible for slack stealing.
+ */
+static void check_slack_candidate(struct task_struct *task)
+{
+	TRACE_TASK_SUB(task, "checking for candidate");
+	if (is_srt(task) &&
+	    /* The task has been synchronously released */
+	    task_job_no(task) > 2 &&
+	    /* The SRT task is behind its server */
+	    task_srt_server(task)->job_no > task_job_no(task) &&
+	    /* The task hasn't already been added to the list */
+	    !head_in_list(&task_data(task)->candidate_list)) {
+
+		add_slack_candidate(task);
+	}
+}
+
+/*
+ * Returns the next eligible slack server. This will remove any expired
+ * slack servers still present in the list.
+ */
+static noinline server_t* next_eligible_slack_server(void)
+{
+	server_t *next_slack = NULL;
+
+	while (!list_empty(&slack_queue)) {
+		next_slack = list_entry(slack_queue.next, server_t, list);
+		BUG_ON(!next_slack);
+
+		if (lt_after(next_slack->deadline, litmus_clock()) &&
+		    lt_after(next_slack->budget, SLACK_MIN) &&
+		    !is_server_linked(next_slack)) {
+			break;
+		} else {
+			/* Slack has expired or has too little time */
+			remove_slack(next_slack);
+			next_slack = NULL;
+		}
+	}
+
+	return next_slack;
+}
+
+/*
+ * Returns the next SRT task that is tardy or will be tardy. If none
+ * are available, will return a tardy BE task if present.
+ */
+static noinline struct task_struct* next_eligible_slack(void)
+{
+	struct task_struct *next = get_candidate(slack_candidates.next);
+
+	/* We couldn't find an SRT to schedule. Find a BE which is
+	 * either tardy or cannot run due to a lack of servers.
+	 */
+	if (!next) {
+		next = __peek_ready(&be_domain);
+	}
+	return next;
+}
+
+/*
+ * Order BE tasks FIFO.
+ */
+static inline int be_higher_prio(struct task_struct *first, struct task_struct *second)
+{
+	return lt_before(get_release(first), get_release(second)) ||
+
+		/* Break by PID */
+		(get_release(first) == get_release(second) &&
+		 (first->pid < second->pid));
+}
+
+static int be_ready_order(struct bheap_node *a, struct bheap_node *b)
+{
+	struct task_struct *first, *second;
+	first = bheap2task(a);
+	second = bheap2task(b);
+	if (!first || !second)
+		return first && !second;
+	return be_higher_prio(first, second);
+}
+
+/*
+ * Order servers by EDF.
+ */
+static inline int server_higher_prio(server_t *first, server_t *second)
+{
+	return lt_before(first->deadline, second->deadline) ||
+		/* Break by id */
+		(first->deadline == second->deadline &&
+		 first->id < second->id);
+}
+
+static int server_order(struct bheap_node *a, struct bheap_node *b)
+{
+	server_t *first, *second;
+	first = a->value;
+	second = b->value;
+	return server_higher_prio(first, second);
+}
+
+/*
+ * Order CPUs by the deadlines of their linked servers.
+ */
+static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
+{
+	cpu_entry_t *first, *second;
+	first = a->value;
+	second = b->value;
+	if (first->linked && second->linked) {
+		return !server_higher_prio(first->linked_server,
+					   second->linked_server);
+	}
+	return second->linked && !first->linked;
+}
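+
+/*
+ * Example orderings: a CPU with no linked task sorts ahead of any busy
+ * CPU, and of two busy CPUs the one whose linked server has the later
+ * deadline sorts first, so bheap_peek() always yields the best
+ * preemption candidate.
+ */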
+
+/*
+ * Move the CPU entry to the correct position in the queue.
+ */
+static inline void update_cpu_position(cpu_entry_t *entry)
+{
+	if (likely(bheap_node_in_heap(entry->hn)))
+		bheap_delete(cpu_lower_prio, &cpu_heap, entry->hn);
+	/* Don't leave HRT CPUs in the heap as heap order only matters
+	 * for global preempts.
+	 */
+	if (!entry->linked || !is_hrt(entry->linked))
+		bheap_insert(cpu_lower_prio, &cpu_heap, entry->hn);
+}
+
+static inline cpu_entry_t* lowest_prio_cpu(void)
+{
+	struct bheap_node *hn = bheap_peek(cpu_lower_prio, &cpu_heap);
+	return (hn) ? hn->value : NULL;
+}
+
+static inline int check_hrt_server_initialized(hrt_server_t *hrt_server)
+{
+	return hrt_server->server.wcet && hrt_server->server.period;
+}
+
+/*
+ * Arms the slack timer for the server, if necessary.
+ */
+static void slack_timer_arm(hrt_server_t *hrt_server)
+{
+	int cpu, err;
+	cpu_entry_t *entry;
+	struct hrtimer *timer;
+	lt_t now = litmus_clock(), when_to_fire;
+
+	if (!check_hrt_server_initialized(hrt_server)) {
+		TRACE_SERVER_SUB(&hrt_server->server, "not initialized");
+		return;
+	}
+
+	timer = &hrt_server->slack_timer;
+	entry = container_of(hrt_server, cpu_entry_t, hrt_server);
+
+#ifdef SLACK_ON_MASTER
+	if (edf_hsb_release_master != NO_CPU)
+		cpu = edf_hsb_release_master;
+	else
+#endif
+		cpu = entry->cpu;
+
+	when_to_fire = hrt_server->server.deadline - hrt_server->server.budget;
+
+	/* Ensure the timer is needed */
+	if (hrtimer_active(timer) || hrt_server->server.deadline == 0 ||
+	    hrt_server->no_slack || hrt_server->server.budget == 0 ||
+	    !hrt_server->ready) {
+		TRACE_SERVER_SUB(&hrt_server->server,
+				 "not arming slack timer on P%d, %d %d %d %d %d",
+				 entry->cpu,
+				 hrtimer_active(timer),
+				 hrt_server->server.deadline == 0,
+				 hrt_server->no_slack,
+				 hrt_server->server.budget == 0,
+				 !hrt_server->ready);
+		return;
+	}
+
+	if (when_to_fire >= hrt_server->server.deadline) {
+		TRACE_SUB("wtf: %llu, dead: %llu, bud: %llu",
+			  when_to_fire, hrt_server->server.deadline,
+			  hrt_server->server.budget);
+		BUG_ON(1);
+	}
+
+	/* Arm timer */
+	if (lt_after_eq(now, when_to_fire)) {
+		/* 'Fire' immediately */
+		TRACE_SERVER_SUB(&hrt_server->server,
+				 "immediate: %llu", when_to_fire);
+		hrt_server->no_slack = 1;
+	} else if (cpu != smp_processor_id()) {
+		err = hrtimer_start_on(cpu,
+				       &hrt_server->slack_timer_info,
+				       &hrt_server->slack_timer,
+				       ns_to_ktime(when_to_fire),
+				       HRTIMER_MODE_ABS_PINNED);
+		if (err)
+			TRACE_SERVER_SUB(&hrt_server->server, "failed to arm slack");
+	} else {
+		__hrtimer_start_range_ns(timer, ns_to_ktime(when_to_fire),
+					 0, HRTIMER_MODE_ABS_PINNED, 0);
+	}
+
+	TRACE_SUB("slack timer %p armed to fire at %llu on P%d",
+		  timer, TIME(when_to_fire), entry->cpu);
+}
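+
+/*
+ * Example: a server with deadline = 50ms and 20ms of budget left has
+ * when_to_fire = 30ms; if the server only starts consuming its budget
+ * at that point it finishes exactly at its deadline, so the timer fires
+ * at the last instant the server can still delay its pending work.
+ */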
+
+/*
+ * Does nothing if the slack timer is not armed.
+ */
+static inline void slack_timer_cancel(hrt_server_t *hrt_server)
+{
+	int ret;
+	if (hrtimer_active(&hrt_server->slack_timer)) {
+		ret = hrtimer_try_to_cancel(&hrt_server->slack_timer);
+		if (ret == -1) {
+			TRACE_SERVER_SUB(&hrt_server->server,
+					 "slack timer was running concurrently");
+		} else {
+			TRACE_SERVER_SUB(&hrt_server->server,
+					 "slack timer cancelled");
+		}
+	} else {
+		TRACE_SERVER_SUB(&hrt_server->server, "slack not active");
+	}
+}
+
+/*
+ * Handles subtraction of lt_t without underflows.
+ */
+static inline lt_t lt_subtract(lt_t a, lt_t b)
+{
+	long long sub = (long long)a - (long long)b;
+	if (sub >= 0)
+		return sub;
+	else
+		return 0;
+}
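+
+/*
+ * Example: since lt_t is an unsigned type, a bare 10 - 15 would wrap
+ * around to a huge value; lt_subtract(10, 15) clamps the result to 0
+ * instead.
+ */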
+
+static void requeue_server(server_t *server, lt_t now)
+{
+	int added = 0;
+	hrt_server_t *hrt_server;
+
+	if (server->type == S_SRT || server->type == S_SLACK)
+		return;
+
+	if (lt_before(now, server->release)) {
+		added = add_server_release(server, &server_domain);
+	}
+
+	if (!added) {
+		/* Mark servers as released */
+		if (server->type == S_HRT) {
+			TRACE_SERVER_SUB(server, "now ready at %llu", now);
+			hrt_server = container_of(server, hrt_server_t, server);
+			hrt_server->ready = 1;
+			remove_slack(server_slack(server));
+			hrt_server->no_slack = 0;
+			sched_trace_action(NULL, SERVER_RELEASED_ACTION);
+		} else if (server->type == S_BE) {
+			TRACE_SERVER_SUB(server, "BE added to ready");
+			bheap_insert(server_order, &be_ready_servers, server->hn);
+		}
+	} else {
+		BUG_ON(bheap_node_in_heap(server->hn));
+	}
+}
+
+/*
+ * Absorbs a task's execution time into its donator.
+ */
+static void reclaim_slack(server_t *slack)
+{
+	lt_t exec;
+	server_t *donator = server_slack(slack);
+
+	if (!donator)
+		return;
+
+	/* SRT servers do not ever reclaim slack */
+	sched_trace_action(NULL, SLACK_RECLAIM_ACTION);
+
+	exec = slack->wcet - slack->budget;
+	TRACE_SERVER_SUB(donator, "reclaiming %llu slack", TIME(exec));
+
+	BUG_ON(is_server_linked(donator));
+	BUG_ON(!slack->wcet);
+	BUG_ON(!donator->budget);
+
+	donator->budget = lt_subtract(donator->budget, exec);
+	slack->wcet = slack->budget;
+
+	/* If budget exhausted, server needs to wait for next release */
+	if (!donator->budget) {
+		TRACE_SERVER_SUB(donator, "exhausted by slack");
+	}
+}
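+
+/*
+ * Example: slack donated with wcet = 5ms that comes back with 2ms of
+ * budget left ran for exec = 3ms; those 3ms are deducted from the
+ * donator's remaining budget, since the slack execution consumed time
+ * the donator was provisioned for.
+ */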
+
+/*
+ * Begins server execution and arms any timers necessary.
+ */
+static noinline void link_server(cpu_entry_t *entry,
+				 server_t *next_server)
+{
+
+	if (entry->linked) {
+		/* Massive state check */
+		if (next_server->type == S_SRT) {
+			/* SRT task cannot get ahead of its server */
+			BUG_ON(next_server->job_no + 1 < task_job_no(entry->linked));
+			BUG_ON(lt_after(get_deadline(entry->linked),
+					next_server->deadline));
+			BUG_ON(head_in_list(&task_data(entry->linked)->candidate_list));
+		} else if (next_server->type == S_HRT) {
+			/* HRT servers should never, ever migrate */
+			BUG_ON(entry->cpu != task_cpu(entry->linked));
+			BUG_ON(!entry->hrt_server.ready);
+		} else if (next_server->type == S_SLACK) {
+			/* Should have already been removed from slack list */
+			BUG_ON(head_in_list(&task_data(entry->linked)->candidate_list));
+			BUG_ON(is_be(entry->linked) && is_queued(entry->linked));
+			sched_trace_action(entry->linked, SLACK_RUN_ACTION);
+		} else { /* BE */
+			/* Should have already been removed from ready heap */
+			BUG_ON(bheap_node_in_heap(next_server->hn));
+			BUG_ON(is_queued(entry->linked));
+			sched_trace_action(entry->linked, next_server->id);
+		}
+
+		if (next_server->type != S_SLACK &&
+		    (head_in_list(&server_slack(next_server)->list))) {
+			remove_slack(server_slack(next_server));
+		}
+
+		entry->linked_server = next_server;
+		server_run(entry->linked_server, entry->linked);
+	}
+
+	/* Timer necessary whenever an HRT is not running */
+	if (!entry->linked || !is_hrt(entry->linked))
+		slack_timer_arm(&entry->hrt_server);
+	else
+		slack_timer_cancel(&entry->hrt_server);
+}
+
+/*
+ * Stops server execution and timers. This will also re-add servers
+ * to any collections they should be members of.
+ */
+static noinline void unlink_server(cpu_entry_t *entry, int requeue)
+{
+	server_t *server = entry->linked_server;
+	hrt_server_t *hrt_server = &entry->hrt_server;
+
+	BUG_ON(!entry->linked_server);
+
+	server_stop(entry->linked_server);
+	server = entry->linked_server;
+	entry->linked_server = NULL;
+
+	if (!requeue)
+		return;
+
+	if (server->type == S_SLACK && server->deadline) {
+		add_slack(server);
+		sched_trace_action(entry->linked, SLACK_STOP_ACTION);
+
+		/* Donator needs to absorb slack execution time */
+		reclaim_slack(server);
+	} else if (server->type != S_SRT) {
+		requeue_server(server, litmus_clock());
+	}
+
+	if (server->type == S_HRT && hrt_server->ready)
+		BUG_ON(head_in_list(&server_slack(server)->list));
+}
+
+static void requeue(struct task_struct *task, rt_domain_t *domain);
+static inline rt_domain_t* get_rt_domain(cpu_entry_t *entry, struct task_struct *task);
+
+/* Update the link of a CPU.
+ * Handles the case where the to-be-linked task is already
+ * scheduled on a different CPU. The last argument is only needed
+ * for BE tasks as their servers can't be determined here.
+ */
+static noinline void link_to_cpu(cpu_entry_t *entry,
+				 struct task_struct* linked,
+				 server_t* next_server)
+{
+	cpu_entry_t *sched;
+	server_t *tmp_server;
+	struct task_struct *tmp_task;
+	int on_cpu;
+
+	BUG_ON(linked && !is_realtime(linked));
+	BUG_ON(linked && is_hrt(linked) && entry->cpu != task_cpu(linked));
+	BUG_ON(entry->cpu == edf_hsb_release_master);
+
+	if (linked)
+		TRACE_TASK_SERVER_SUB(linked, next_server, "linking to P%d",
+				      entry->cpu);
+
+	/* Currently linked task is set to be unlinked. */
+	if (entry->linked) {
+		unlink_server(entry, 1);
+		entry->linked->rt_param.linked_on = NO_CPU;
+		entry->linked = NULL;
+	}
+
+	/* Link new task to CPU. */
+	if (linked) {
+		set_rt_flags(linked, RT_F_RUNNING);
+		/* Handle a task that is already scheduled somewhere! */
+		on_cpu = linked->rt_param.scheduled_on;
+		if (on_cpu != NO_CPU) {
+			sched = &per_cpu(noslack_cpu_entries, on_cpu);
+			/* This should only happen if not linked already */
+			BUG_ON(sched->linked == linked);
+
+			if (entry != sched &&
+			    sched->linked && is_hrt(sched->linked)) {
+				/* We are already scheduled on a CPU with an HRT */
+				TRACE_TASK_SUB(linked,
+					       "cannot move to scheduled CPU P%d",
+					       sched->cpu);
+
+				requeue_server(next_server, litmus_clock());
+				requeue(linked, get_rt_domain(entry, linked));
+
+				linked = NULL;
+				next_server = NULL;
+			} else if (entry != sched) {
+				/* Link to the CPU we are scheduled on by swapping
+				 * with that CPU's linked task.
+				 */
+				BUG_ON(is_hrt(linked));
+
+				TRACE_TASK_SUB(linked, "already scheduled on P%d",
+					       sched->cpu);
+
+				tmp_task = sched->linked;
+				tmp_server = sched->linked_server;
+
+				if (tmp_task)
+					unlink_server(sched, 0);
+
+				linked->rt_param.linked_on = sched->cpu;
+				sched->linked = linked;
+				link_server(sched, next_server);
+
+				update_cpu_position(sched);
+
+				linked = tmp_task;
+				next_server = tmp_server;
+			}
+		}
+		if (linked) /* Might be NULL due to swap */
+			linked->rt_param.linked_on = entry->cpu;
+	}
+	entry->linked = linked;
+	link_server(entry, next_server);
+	update_cpu_position(entry);
+
+	BUG_ON(!entry->linked && entry->linked_server);
+
+	if (linked)
+		TRACE_TASK_SERVER_SUB(linked, next_server,
+				      "linked to %d", entry->cpu);
+	else
+		TRACE_SUB("NULL linked to %d", entry->cpu);
+}
+
+/*
+ * Grab the local HRT or global SRT or BE domain for the task.
+ */
+static inline rt_domain_t* get_rt_domain(cpu_entry_t *entry,
+					 struct task_struct *task)
+{
+	if (is_hrt(task))
+		return &entry->hrt_server.hrt_domain;
+	else if (is_srt(task))
+		return &srt_domain;
+	else /* BE */
+		return &be_domain;
+}
+
+/*
+ * Ensures the task is not linked anywhere nor present in any ready queues.
+ */
+static noinline void unlink(struct task_struct* t)
+{
+	cpu_entry_t *entry;
+
+	BUG_ON(!t);
+
+	if (t->rt_param.linked_on != NO_CPU) {
+		/* Unlink */
+		entry = task_linked_entry(t);
+		link_to_cpu(entry, NULL, NULL);
+	} else if (is_queued(t)) {
+
+		if (head_in_list(&task_data(t)->candidate_list)) {
+			list_del_init(&task_data(t)->candidate_list);
+		}
+
+		entry = task_sched_entry(t);
+
+		/* A task that is unlinked due to a slack server must be treated
+		 * differently. It is probably queued in a release_queue, but
+		 * a race condition could allow is_released() to return true
+		 * even when the task has not yet been released. Attempting
+		 * to remove the task in this case would be disastrous.
+		 */
+		if (entry->scheduled == t &&
+		    entry->scheduled_server && /* Can be NULL on task_new */
+		    entry->scheduled_server->type == S_SLACK) {
+
+			TRACE_TASK_SUB(t, "unlinked on slack server");
+
+		} else if (is_released(t, litmus_clock())) {
+			/* This is an interesting situation: t is scheduled,
+			 * but has already been unlinked. It was re-added to
+			 * a ready queue of some sort but now needs to
+			 * be removed. This usually happens when a job has
+			 * been preempted but completes before it is
+			 * descheduled.
+			 */
+			TRACE_TASK_SUB(t, "removing from domain");
+			remove(get_rt_domain(entry, t), t);
+			BUG_ON(is_queued(t));
+		}
+	}
+}
+
+/*
+ * A job generated by a HRT task is eligible if either the job's deadline
+ * is earlier than the server's next deadline, or the server has zero slack
+ * time in its current period.
+ */
+static inline int is_eligible(struct task_struct *task,
+			      hrt_server_t *hrt_server)
+{
+	TRACE_TASK_SUB(task, "%d %d %llu %llu",
+		       hrt_server->ready, hrt_server->no_slack,
+		       hrt_server->server.deadline,
+		       get_deadline(task));
+	return hrt_server->ready && !is_server_linked(&hrt_server->server) &&
+		(hrt_server->no_slack ||
+		 lt_after_eq(hrt_server->server.deadline, get_deadline(task)));
+}
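+
+/*
+ * Example: with a ready server whose deadline is 50, a job with
+ * deadline 45 is eligible immediately, while a job with deadline 60
+ * only becomes eligible once the slack timer fires and sets no_slack
+ * for the remainder of the period.
+ */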
+
+/*
+ * Set the server to release at the closest preceding deadline to time.
+ */
+static inline void catchup_server(server_t *server, lt_t time)
+{
+	lt_t diff, sub;
+
+	diff = time - server->deadline;
+	sub = diff % server->period;
+
+	server_release_at(server, time - sub);
+	TRACE_SERVER_SUB(server, "catching up to %llu", time);
+}
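+
+/*
+ * Example: a server with period 10 whose deadline passed at t = 40,
+ * caught up at time = 73: diff = 33, sub = 3, so the server is released
+ * at t = 70, the latest period boundary preceding the current time.
+ */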
+
+static noinline int catchup_srt_server(struct task_struct *task)
+{
+	int jobs, rv = 0;
+	lt_t release;
+	lt_t now = litmus_clock();
+	server_t *srt_server = task_srt_server(task);
+
+	if (lt_before(srt_server->deadline, now) &&
+	    srt_server->job_no > 1) {
+		/* Calculate the number of jobs the server is behind */
+		jobs = lt_subtract(now, srt_server->deadline) /
+			srt_server->period + 1;
+
+		/* Get the new release */
+		release = srt_server->release + jobs * srt_server->period;
+
+		TRACE_SERVER_SUB(srt_server, "catching up to %llu, job %d",
+				 release, srt_server->job_no + jobs);
+
+		BUG_ON(jobs < 1);
+
+		/* Update server state */
+		server_release_at(srt_server, release);
+		srt_server->job_no += jobs - 1;
+
+		/* Force task to take characteristics of server */
+		tsk_rt(task)->job_params.release = srt_server->release;
+		tsk_rt(task)->job_params.deadline = srt_server->deadline;
+
+		rv = 1;
+
+		sched_trace_action(task, SERVER_RELEASED_ACTION);
+
+	} else if (lt_before(srt_server->deadline, now) &&
+		   srt_server->job_no <= 1) {
+
+		server_release_at(srt_server, get_release(task));
+		srt_server->job_no = task_job_no(task);
+	}
+
+	BUG_ON(srt_server->job_no == 0);
+
+	return rv;
+}
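+
+/*
+ * Example (assuming server_release_at() sets the deadline one period
+ * after the release): release = 20, deadline = 30, period = 10 and
+ * now = 55 give jobs = 25 / 10 + 1 = 3, so the server is re-released
+ * at 20 + 3 * 10 = 50 and its new deadline, 60, again covers `now'.
+ */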
+
+/*
+ * If the server is eligible, return the next eligible job. If the server is
+ * ineligible or there are no eligible jobs, returns NULL. This will re-release
+ * any servers that are behind.
+ */
+static noinline struct task_struct* next_eligible_hrt(hrt_server_t *hrt_server)
+{
+	lt_t now = litmus_clock();
+	lt_t dead, slack, budget;
+	struct task_struct *task = __peek_ready(&hrt_server->hrt_domain);
+
+	/* Catch up server if it is initialized, not running, and late */
+	if (check_hrt_server_initialized(hrt_server) &&
+	    !is_server_linked(&hrt_server->server)) {
+
+		dead = hrt_server->server.deadline;
+		budget = hrt_server->server.budget;
+		slack = lt_subtract(dead, budget);
+
+		TRACE_SERVER_SUB(&hrt_server->server, "dead: %llu, budget: %llu, "
+				 "now: %llu, slack: %llu",
+				 TIME(dead), TIME(budget), TIME(now), TIME(slack));
+
+		if (!head_in_list(&hrt_server->server.release_list) &&
+		    lt_before_eq(dead, now)) {
+			/* The server missed a release */
+			catchup_server(&hrt_server->server, now);
+			TRACE_SERVER_SUB(&hrt_server->server, "now ready");
+			hrt_server->ready = 1;
+			remove_slack(server_slack(&hrt_server->server));
+			hrt_server->no_slack = 0;
+
+			slack = lt_subtract(hrt_server->server.deadline,
+					    hrt_server->server.budget);
+
+			sched_trace_action(task, SERVER_RELEASED_ACTION);
+		}
+
+		/* If the slack timer is active, this is not necessary */
+		if (!hrtimer_active(&hrt_server->slack_timer) && hrt_server->ready) {
+			if (lt_before_eq(slack, now) && !hrt_server->no_slack) {
+				/* The server missed the shift to no slack */
+				TRACE_SERVER_SUB(&hrt_server->server, "no slack: %llu",
+						 TIME(slack));
+				hrt_server->no_slack = 1;
+				sched_trace_action(task, NO_SLACK_ACTION);
+			} else {
+				slack_timer_arm(hrt_server);
+			}
+		}
+
+	} else {
+		TRACE_SERVER_SUB(&hrt_server->server, "%llu %d %llu %d %d",
+				 hrt_server->server.deadline,
+				 is_server_linked(&hrt_server->server),
+				 now, check_hrt_server_initialized(hrt_server),
+				 !is_server_linked(&hrt_server->server));
+	}
+
+	if (!hrt_server->server.budget ||
+	    (task && !is_eligible(task, hrt_server))) {
+
+		if (!hrt_server->server.budget &&
+		    !head_in_list(&hrt_server->server.release_list)) {
+			TRACE_SERVER_SUB(&hrt_server->server, "requeueing");
+			catchup_server(&hrt_server->server, now);
+			requeue_server(&hrt_server->server, now);
+			slack_timer_arm(hrt_server);
+		}
+
+		if (task) {
+			TRACE_TASK_SUB(task, "not eligible, budget: %llu",
+				       TIME(hrt_server->server.budget));
+		}
+		task = NULL;
+
+		/* Donate slack if we have nothing to schedule */
+		if (hrt_server->ready && hrt_server->no_slack) {
+			check_donate_slack(&hrt_server->server, NULL);
+		}
+	}
+
+	return task;
+}
+
+/*
+ * This will catch up the SRT's server if it is behind.
+ */
+static noinline struct task_struct* next_eligible_srt(void)
+{
+	int done = 0;
+	struct task_struct *next_srt;
+
+	while (!done) {
+		next_srt = __peek_ready(&srt_domain);
+
+		/* A blocking task might pollute the SRT domain if the
+		 * task blocked while it was being run by a slack server.
+		 * Remove and ignore this task.
+		 */
+		while (next_srt && (get_rt_flags(next_srt) == RT_F_BLOCK ||
+				    unlikely(!is_realtime(next_srt)))) {
+			TRACE_TASK_SUB(next_srt, "removing finished task");
+			remove(&srt_domain, next_srt);
+			next_srt = __peek_ready(&srt_domain);
+		}
+
+		/* If the task blocked for a while or has otherwise not been
+		 * accessed, its server could have fallen behind.
+		 */
+		if (next_srt) {
+			done = !catchup_srt_server(next_srt);
+
+			/* The parameters were modified. Re-insert the task. */
+			if (!done) {
+				remove(&srt_domain, next_srt);
+				__add_ready(&srt_domain, next_srt);
+			} else if (is_server_linked(task_srt_server(next_srt))) {
+				remove(&srt_domain, next_srt);
+				done = 0;
+			}
+		} else {
+			done = 1;
+		}
+	}
+
+	return next_srt;
+}
+
+static inline server_t* next_be_server(void)
+{
+	struct bheap_node *hn = bheap_peek(server_order, &be_ready_servers);
+	return (hn) ? hn->value : NULL;
+}
+
+static noinline server_t* next_eligible_be_server(void)
+{
+	server_t *be_server = next_be_server();
+	lt_t now = litmus_clock();
+
+	/* Catch up any late BE servers. This happens when the servers could
+	 * not find tasks to schedule or if the system is overutilized.
+	 */
+	while (be_server && (lt_before_eq(be_server->deadline, now) ||
+			     is_server_linked(be_server))) {
+		if (!be_server->deadline) {
+			TRACE_SERVER_SUB(be_server, "not initialized");
+			return NULL;
+		}
+		bheap_delete(server_order, &be_ready_servers,
+			     be_server->hn);
+
+		if (is_server_linked(be_server)) {
+			TRACE_SERVER_SUB(be_server, "linked");
+			be_server = next_be_server();
+			continue;
+		}
+
+		catchup_server(be_server, now);
+		check_donate_slack(be_server, NULL);
+		bheap_insert(server_order, &be_ready_servers,
+			     be_server->hn);
+		TRACE_SERVER_SUB(be_server, "caught up BE server");
+		be_server = next_be_server();
+		sched_trace_action(NULL, SERVER_RELEASED_ACTION); /* Release */
+	}
+
+	if (be_server && lt_before(now, be_server->release)) {
+		TRACE_SERVER_SUB(be_server, "not released");
+		be_server = NULL;
+	}
+
+	if (be_server) {
+		TRACE_SERVER_SUB(be_server, "dead: %llu, rel: %llu, budget: %llu",
+				 be_server->deadline, be_server->release,
+				 be_server->budget);
+	}
+
+	return be_server;
+}
1175 | |||
1176 | /* | ||
1177 | * Adds a task to the appropriate queue (ready / release) in a domain. | ||
1178 | */ | ||
1179 | static noinline void requeue(struct task_struct *task, rt_domain_t *domain) | ||
1180 | { | ||
1181 | lt_t now = litmus_clock(); | ||
1182 | int was_added; | ||
1183 | |||
1184 | BUG_ON(!is_realtime(task)); | ||
1185 | if (head_in_list(&task_data(task)->candidate_list)) { | ||
1186 | list_del_init(&task_data(task)->candidate_list); | ||
1187 | } | ||
1188 | |||
1189 | check_slack_candidate(task); | ||
1190 | |||
1191 | if (is_queued(task)) { | ||
1192 | TRACE_TASK_SUB(task, "not requeueing, already queued"); | ||
1193 | } else if (is_released(task, now)) { | ||
1194 | TRACE_TASK_SUB(task, "requeuing on ready %llu %llu %llu %llu", | ||
1195 | get_release(task), get_deadline(task), | ||
1196 | get_rt_period(task), now); | ||
1197 | __add_ready(domain, task); | ||
1198 | } else { | ||
1199 | /* Task needs to wait until it is released */ | ||
1200 | TRACE_TASK_SUB(task, "requeuing on release"); | ||
1201 | |||
1202 | was_added = add_release(domain, task); | ||
1203 | |||
1204 | /* The release time happened before we added ourselves | ||
1205 | * to the heap. We can now add to ready. | ||
1206 | */ | ||
1207 | if (!was_added) { | ||
1208 | TRACE_TASK_SUB(task, "missed release, going to ready"); | ||
1209 | __add_ready(domain, task); | ||
1210 | } | ||
1211 | } | ||
1212 | } | ||
1213 | |||
1214 | static inline void earlier_server_task(server_t *first, | ||
1215 | struct task_struct *first_task, | ||
1216 | server_t *second, | ||
1217 | struct task_struct *second_task, | ||
1218 | server_t **server, | ||
1219 | struct task_struct **task) | ||
1220 | { | ||
1221 | if (!first || | ||
1222 | (second && lt_before_eq(second->deadline, first->deadline))) { | ||
1223 | *server = second; | ||
1224 | *task = second_task; | ||
1225 | } else { | ||
1226 | *server = first; | ||
1227 | *task = first_task; | ||
1228 | } | ||
1229 | } | ||
1230 | |||
1231 | /* | ||
1232 | * Set server and task to the next server and task respectively. | ||
1233 | * If entry is not null, the next server will see if it can schedule | ||
1234 | * entry's linked task. | ||
1235 | */ | ||
1236 | static void next_global_task(cpu_entry_t *entry, | ||
1237 | server_t **next_server, | ||
1238 | struct task_struct **next_task) | ||
1239 | { | ||
1240 | struct task_struct *next_srt, *next_be, *next_slack; | ||
1241 | server_t *be_server, *slack_server, *srt_server; | ||
1242 | |||
1243 | *next_server = NULL; | ||
1244 | *next_task = NULL; | ||
1245 | |||
1246 | next_srt = next_eligible_srt(); | ||
1247 | srt_server = (next_srt) ? task_srt_server(next_srt) : NULL; | ||
1248 | |||
1249 | next_be = __peek_ready(&be_domain); | ||
1250 | be_server = next_eligible_be_server(); | ||
1251 | |||
1252 | next_slack = next_eligible_slack(); | ||
1253 | slack_server = next_eligible_slack_server(); | ||
1254 | |||
1255 | TRACE_SUB("be_server: %d, next_be: %d, next_srt: %d, slack_server: %d " | ||
1256 | "next_slack: %d", (be_server) ? be_server->id : -1, | ||
1257 | (next_be) ? next_be->pid : -1, | ||
1258 | (next_srt) ? next_srt->pid : -1, | ||
1259 | (slack_server) ? slack_server->id : -1, | ||
1260 | (next_slack) ? next_slack->pid : -1); | ||
1261 | |||
1262 | /* Check if the servers can schedule the task linked to entry */ | ||
1263 | if (entry && entry->linked) { | ||
1264 | |||
1265 | if (entry->linked_server->type == S_BE && | ||
1266 | (!next_be || | ||
1267 | lt_before(get_release(entry->linked), | ||
1268 | get_release(next_be)))) { | ||
1269 | |||
1270 | next_be = entry->linked; | ||
1271 | } else if (entry->linked_server->type == S_SLACK && | ||
1272 | (!next_slack || | ||
1273 | lt_before(get_deadline(entry->linked), | ||
1274 | get_deadline(next_slack)))) { | ||
1275 | |||
1276 | next_slack = entry->linked; | ||
1277 | } | ||
1278 | } | ||
1279 | |||
1280 | /* Remove tasks without servers and vice versa from contention */ | ||
1281 | if (!next_be || !be_server) { | ||
1282 | next_be = NULL; | ||
1283 | be_server = NULL; | ||
1284 | } | ||
1285 | if (!next_slack || !slack_server) { | ||
1286 | next_slack = NULL; | ||
1287 | slack_server = NULL; | ||
1288 | } | ||
1289 | |||
1290 | /* Favor BE servers. If we don't, then a BE server might lose | ||
1291 | * out to its own slack. | ||
1292 | */ | ||
1293 | if (slack_server && be_server && | ||
1294 | be_server->deadline == slack_server->deadline) { | ||
1295 | next_slack = NULL; | ||
1296 | slack_server = NULL; | ||
1297 | } | ||
1298 | |||
1299 | /* There is probably a better way to do this */ | ||
1300 | earlier_server_task(srt_server, next_srt, | ||
1301 | be_server, next_be, | ||
1302 | next_server, next_task); | ||
1303 | earlier_server_task(*next_server, *next_task, | ||
1304 | slack_server, next_slack, | ||
1305 | next_server, next_task); | ||
1306 | |||
1307 | //BUG_ON(*next_server && lt_before(litmus_clock(), *next_server->release)); | ||
1308 | } | ||
1309 | |||
1310 | /* | ||
1311 | * Remove the task and server from any ready queues. | ||
1312 | */ | ||
1313 | static void remove_from_ready(server_t *server, struct task_struct *task, | ||
1314 | cpu_entry_t *entry) | ||
1315 | { | ||
1316 | server_t *slack; | ||
1317 | rt_domain_t *domain; | ||
1318 | BUG_ON(!server); | ||
1319 | BUG_ON(!entry); | ||
1320 | BUG_ON(!task); | ||
1321 | |||
1322 | if (server->type == S_SLACK) { | ||
1323 | TRACE_SERVER_SUB(server, "removed from slack list"); | ||
1324 | list_del_init(&server->list); | ||
1325 | |||
1326 | /* Remove from consideration of BE servers */ | ||
1327 | if (is_be(task) && is_queued(task)) { | ||
1328 | TRACE_TASK_SUB(task, "BE removed from ready"); | ||
1329 | remove(&be_domain, task); | ||
1330 | } | ||
1331 | } else { | ||
1332 | slack = server_slack(server); | ||
1333 | if (slack && head_in_list(&slack->list)) { | ||
1334 | remove_slack(slack); | ||
1335 | } | ||
1336 | if (server->type == S_BE) { | ||
1337 | TRACE_SERVER_SUB(server, "server removed from ready"); | ||
1338 | BUG_ON(!server->hn); | ||
1339 | bheap_delete(server_order, &be_ready_servers, | ||
1340 | server->hn); | ||
1341 | } | ||
1342 | if (is_queued(task)) { | ||
1343 | domain = get_rt_domain(entry, task); | ||
1344 | BUG_ON(!domain); | ||
1345 | TRACE_TASK_SUB(task, "removed from ready"); | ||
1346 | remove(domain, task); | ||
1347 | } | ||
1348 | } | ||
1349 | |||
1350 | BUG_ON(!task_data(task)); | ||
1351 | |||
1352 | /* Remove from consideration of slack servers */ | ||
1353 | if (head_in_list(&task_data(task)->candidate_list)) { | ||
1354 | TRACE_TASK_SUB(task, "deleting candidate"); | ||
1355 | list_del_init(&task_data(task)->candidate_list); | ||
1356 | } | ||
1357 | |||
1358 | } | ||
1359 | |||
1360 | static void check_for_slack_preempt(struct task_struct*,server_t*,cpu_entry_t*, int); | ||
1361 | |||
1362 | /* | ||
1363 | * Finds and links the next server and task to an entry with no linked task. | ||
1364 | */ | ||
1365 | static void edf_hsb_pick_next(cpu_entry_t *entry) | ||
1366 | { | ||
1367 | struct task_struct *next_task, *linked; | ||
1368 | server_t *next_server; | ||
1369 | |||
1370 | BUG_ON(entry->linked); | ||
1371 | |||
1372 | next_task = next_eligible_hrt(&entry->hrt_server); | ||
1373 | if (next_task) | ||
1374 | next_server = &entry->hrt_server.server; | ||
1375 | else | ||
1376 | next_global_task(NULL, &next_server, &next_task); | ||
1377 | |||
1378 | |||
1379 | if (next_task) { | ||
1380 | remove_from_ready(next_server, next_task, entry); | ||
1381 | check_for_slack_preempt(next_task, next_server, entry, 1); | ||
1382 | TRACE_TASK_SERVER_SUB(next_task, next_server, | ||
1383 | "removing and picked"); | ||
1384 | |||
1385 | /* A slack preemption could cause something that was already | ||
1386 | * running to be 'swapped' to this CPU in link_to_cpu. | ||
1387 | */ | ||
1388 | if (entry->linked) { | ||
1389 | linked = entry->linked; | ||
1390 | unlink(entry->linked); | ||
1391 | requeue(linked, get_rt_domain(entry, linked)); | ||
1392 | TRACE_TASK_SUB(linked, "preempted next pick"); | ||
1393 | } | ||
1394 | link_to_cpu(entry, next_task, next_server); | ||
1395 | } | ||
1396 | } | ||
1397 | |||
1398 | /* | ||
1399 | * Preempt the currently running server and task with new ones. | ||
1400 | * It is possible that either only the server or the task is different here. | ||
1401 | */ | ||
1402 | static void preempt(cpu_entry_t *entry, struct task_struct *next, | ||
1403 | server_t *next_server, int slack_resched) | ||
1404 | { | ||
1405 | struct task_struct *linked; | ||
1406 | rt_domain_t *domain; | ||
1407 | |||
1408 | TRACE_TASK_SERVER_SUB(next, next_server, | ||
1409 | "preempting on P%d", entry->cpu); | ||
1410 | |||
1411 | remove_from_ready(next_server, next, entry); | ||
1412 | |||
1413 | check_for_slack_preempt(next, next_server, entry, slack_resched); | ||
1414 | linked = entry->linked; | ||
1415 | link_to_cpu(entry, next, next_server); | ||
1416 | |||
1417 | /* No need for this if only the server was preempted */ | ||
1418 | if (!linked || linked != entry->linked) { | ||
1419 | if (linked) { | ||
1420 | domain = get_rt_domain(entry, linked); | ||
1421 | requeue(linked, domain); | ||
1422 | } | ||
1423 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1424 | } | ||
1425 | } | ||
1426 | |||
1427 | /* | ||
1428 | * Causes a preemption if: | ||
1429 | * 1. task is being run by a slack server on a different CPU | ||
1430 | * 2. slack donated by server is running a task on a different CPU | ||
1431 | */ | ||
1432 | static noinline void check_for_slack_preempt(struct task_struct *task, | ||
1433 | server_t *server, | ||
1434 | cpu_entry_t *next_entry, | ||
1435 | int resched) | ||
1436 | { | ||
1437 | cpu_entry_t *entry = NULL; | ||
1438 | server_t *slack = server_slack(server); | ||
1439 | struct task_struct *slack_task; | ||
1440 | |||
1441 | /* The task is currently being run by another slack server */ | ||
1442 | if (tsk_rt(task)->linked_on != NO_CPU) { | ||
1443 | entry = task_linked_entry(task); | ||
1444 | |||
1445 | if (entry != next_entry) { | ||
1446 | TRACE_TASK_SUB(task, "was on P%d", entry->cpu); | ||
1447 | |||
1448 | unlink(task); | ||
1449 | |||
1450 | if (resched) { | ||
1451 | edf_hsb_pick_next(entry); | ||
1452 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1453 | } | ||
1454 | } | ||
1455 | } | ||
1456 | |||
1457 | /* The server's slack is currently being run */ | ||
1458 | if (slack && is_server_linked(slack)) { | ||
1459 | entry = &per_cpu(noslack_cpu_entries, slack->cpu); | ||
1460 | slack_task = server_task(slack); | ||
1461 | |||
1462 | unlink(slack_task); | ||
1463 | remove_slack(slack); | ||
1464 | requeue(slack_task, get_rt_domain(entry, slack_task)); | ||
1465 | |||
1466 | if (entry != next_entry && resched) { | ||
1467 | TRACE_SERVER_SUB(slack, "was on P%d", entry->cpu); | ||
1468 | /* Force a reschedule */ | ||
1469 | edf_hsb_pick_next(entry); | ||
1470 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1471 | } else { | ||
1472 | /* This can only happen on a preemption. If a preemption | ||
1473 | * happens, the task will be requeued elsewhere. | ||
1474 | * Obviously the next task has already been chosen. | ||
1475 | */ | ||
1476 | TRACE_SERVER_SUB(slack, "was on local P%d", entry->cpu); | ||
1477 | } | ||
1478 | } | ||
1479 | } | ||
1480 | |||
1481 | /* | ||
1482 | * Check for any necessary non-hrt preemptions. | ||
1483 | */ | ||
1484 | static void check_for_global_preempt(void) | ||
1485 | { | ||
1486 | cpu_entry_t *entry, *sched; | ||
1487 | server_t *next_server; | ||
1488 | int on_cpu; | ||
1489 | struct task_struct *next_task = (struct task_struct*)1; /* dummy non-NULL initializer */ | ||
1490 | |||
1491 | for (entry = lowest_prio_cpu(); entry; entry = lowest_prio_cpu()) { | ||
1492 | /* HRT cpus should not be in this heap */ | ||
1493 | BUG_ON(entry->linked && is_hrt(entry->linked)); | ||
1494 | |||
1495 | next_global_task(entry, &next_server, &next_task); | ||
1496 | |||
1497 | if (!next_server) | ||
1498 | break; | ||
1499 | |||
1500 | /* Preempt only if we have an earlier deadline */ | ||
1501 | if (entry->linked && | ||
1502 | !lt_before(next_server->deadline, | ||
1503 | entry->linked_server->deadline)) { | ||
1504 | break; | ||
1505 | } | ||
1506 | |||
1507 | /* If we are scheduled on another CPU, the link code | ||
1508 | * will force us to link to that CPU and try to link | ||
1509 | * that CPU's task to this CPU. This is impossible | ||
1510 | * if that CPU has linked HRT tasks which cannot | ||
1511 | * migrate. | ||
1512 | */ | ||
1513 | on_cpu = next_task->rt_param.scheduled_on; | ||
1514 | if (on_cpu != NO_CPU) { | ||
1515 | sched = &per_cpu(noslack_cpu_entries, on_cpu); | ||
1516 | |||
1517 | if (sched != entry && sched->linked && | ||
1518 | is_hrt(sched->linked)) { | ||
1519 | |||
1520 | TRACE_TASK_SUB(next_task, | ||
1521 | "Already on P%d", | ||
1522 | sched->cpu); | ||
1523 | break; | ||
1524 | } | ||
1525 | } | ||
1526 | |||
1527 | /* We do not reschedule if this causes a slack preemption | ||
1528 | * because we will detect if we should reschedule on the | ||
1529 | * next iteration of the loop. | ||
1530 | */ | ||
1531 | preempt(entry, next_task, next_server, | ||
1532 | 0 /* Don't reschedule on a slack preemption */); | ||
1533 | } | ||
1534 | } | ||
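
/*
 * A standalone userspace sketch of the deadline test applied above; the
 * demo_server type, should_preempt() and the numbers are hypothetical
 * stand-ins, not the plugin's server_t or its helpers. The lowest-priority
 * CPU is preempted only when the candidate's deadline is strictly earlier
 * than that of the server already linked there.
 */
#include <stdio.h>

typedef unsigned long long lt_t;

static int lt_before(lt_t a, lt_t b) { return a < b; } /* models lt_before() */

struct demo_server { lt_t deadline; };

static int should_preempt(const struct demo_server *linked,
                          const struct demo_server *candidate)
{
        if (!linked)
                return 1; /* idle CPU: always link the candidate */
        return lt_before(candidate->deadline, linked->deadline);
}

int main(void)
{
        struct demo_server linked = { .deadline = 100 };
        struct demo_server cand = { .deadline = 90 };

        printf("%d\n", should_preempt(&linked, &cand)); /* 1: earlier deadline */
        cand.deadline = 100;
        printf("%d\n", should_preempt(&linked, &cand)); /* 0: ties do not preempt */
        return 0;
}
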
1535 | |||
1536 | /* | ||
1537 | * Correct local link after a change to the local HRT domain. | ||
1538 | */ | ||
1539 | static void check_for_hrt_preempt(cpu_entry_t *entry) | ||
1540 | { | ||
1541 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
1542 | struct task_struct *next_hrt = next_eligible_hrt(hrt_server); | ||
1543 | |||
1544 | if (next_hrt && | ||
1545 | (!entry->linked || !is_hrt(entry->linked) || | ||
1546 | !is_eligible(entry->linked, hrt_server) || | ||
1547 | edf_preemption_needed(&hrt_server->hrt_domain, entry->linked))) { | ||
1548 | |||
1549 | preempt(entry, next_hrt, &hrt_server->server, 1); | ||
1550 | |||
1551 | } else { | ||
1552 | TRACE_SERVER_SUB(&hrt_server->server, "not HRT preempting"); | ||
1553 | } | ||
1554 | } | ||
1555 | |||
1556 | /* | ||
1557 | * Assumes called with local irqs disabled. | ||
1558 | */ | ||
1559 | static void job_arrival(struct task_struct *task, cpu_entry_t *entry) | ||
1560 | { | ||
1561 | int was_empty; | ||
1562 | |||
1563 | BUG_ON(task_cpu(task) == NO_CPU); | ||
1564 | |||
1565 | TRACE_TASK_SUB(task, "arriving on P%d", entry->cpu); | ||
1566 | |||
1567 | if (is_hrt(task)) { | ||
1568 | requeue(task, &entry->hrt_server.hrt_domain); | ||
1569 | check_for_hrt_preempt(entry); | ||
1570 | } else if (is_srt(task)) { | ||
1571 | requeue(task, &srt_domain); | ||
1572 | check_for_global_preempt(); | ||
1573 | } else /* BE */ { | ||
1574 | was_empty = !__jobs_pending(&be_domain); | ||
1575 | requeue(task, &be_domain); | ||
1576 | |||
1577 | /* Only way this could cause a preemption is if an eligible | ||
1578 | * BE server could not queue up a task. | ||
1579 | */ | ||
1580 | if (was_empty && __jobs_pending(&be_domain)) | ||
1581 | check_for_global_preempt(); | ||
1582 | } | ||
1583 | } | ||
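
/*
 * The routing in job_arrival() above, modeled as a self-contained userspace
 * sketch; the demo_* names are hypothetical, and the real plugin dispatches
 * on is_hrt()/is_srt() and requeues into rt_domain structures. The BE case
 * shows why only an empty-to-non-empty transition triggers a preemption
 * check: a non-empty queue means eligible BE servers already had work.
 */
#include <stdio.h>

enum demo_class { DEMO_HRT, DEMO_SRT, DEMO_BE };

struct demo_state { int be_pending; };

static void demo_arrival(struct demo_state *s, enum demo_class cls)
{
        int was_empty;

        switch (cls) {
        case DEMO_HRT: /* CPU-local domain, local preemption check */
                printf("HRT: local queue + HRT preempt check\n");
                break;
        case DEMO_SRT: /* global domain, global preemption check */
                printf("SRT: global queue + global preempt check\n");
                break;
        case DEMO_BE:
                was_empty = !s->be_pending;
                s->be_pending++;
                if (was_empty)
                        printf("BE: queue went non-empty, global preempt check\n");
                else
                        printf("BE: queued, no check needed\n");
                break;
        }
}

int main(void)
{
        struct demo_state s = { 0 };

        demo_arrival(&s, DEMO_BE);  /* triggers the check */
        demo_arrival(&s, DEMO_BE);  /* does not */
        demo_arrival(&s, DEMO_SRT);
        return 0;
}
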
1584 | |||
1585 | /****************************************************************************** | ||
1586 | * Timer methods | ||
1587 | ******************************************************************************/ | ||
1588 | |||
1589 | /* | ||
1590 | * Merges a group of released HRT tasks into a ready queue and checks | ||
1591 | * for preemptions. | ||
1592 | */ | ||
1593 | static void release_hrt_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1594 | { | ||
1595 | unsigned long flags; | ||
1596 | struct task_struct *first; | ||
1597 | cpu_entry_t *entry; | ||
1598 | |||
1599 | raw_spin_lock_irqsave(global_lock, flags); | ||
1600 | |||
1601 | first = (struct task_struct*)bheap_peek(edf_ready_order, tasks)->value; | ||
1602 | BUG_ON(!first || !is_hrt(first)); | ||
1603 | entry = task_sched_entry(first); | ||
1604 | |||
1605 | TRACE_TASK(first, "HRT tasks released at %llu on P%d\n", | ||
1606 | TIME(litmus_clock()), task_cpu(first)); | ||
1607 | |||
1608 | __merge_ready(domain, tasks); | ||
1609 | check_for_hrt_preempt(entry); | ||
1610 | |||
1611 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1612 | } | ||
1613 | |||
1614 | /* | ||
1615 | * Merges a group of released tasks into a ready queue and checks to see | ||
1616 | * if the scheduler needs to be called. | ||
1617 | */ | ||
1618 | static void release_srt_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1619 | { | ||
1620 | unsigned long flags; | ||
1621 | struct task_struct *first = (bheap_peek(edf_ready_order, tasks)->value); | ||
1622 | |||
1623 | raw_spin_lock_irqsave(global_lock, flags); | ||
1624 | |||
1625 | TRACE_TASK(first, "SRT tasks released at %llu\n", TIME(litmus_clock())); | ||
1626 | |||
1627 | __merge_ready(domain, tasks); | ||
1628 | check_for_global_preempt(); | ||
1629 | |||
1630 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1631 | } | ||
1632 | |||
1633 | /* | ||
1634 | * Merges a group of released tasks into a ready queue and checks to see | ||
1635 | * if the scheduler needs to be called. | ||
1636 | */ | ||
1637 | static void release_be_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1638 | { | ||
1639 | unsigned long flags; | ||
1640 | int was_empty; | ||
1641 | struct task_struct *first = (bheap_peek(edf_ready_order, tasks)->value); | ||
1642 | |||
1643 | TRACE_TASK(first, "BE tasks released at %llu\n", TIME(litmus_clock())); | ||
1644 | |||
1645 | raw_spin_lock_irqsave(global_lock, flags); | ||
1646 | |||
1647 | was_empty = !__jobs_pending(domain); | ||
1648 | __merge_ready(domain, tasks); | ||
1649 | if (was_empty) { | ||
1650 | /* Only way this could cause a preemption is if a BE server | ||
1651 | * could not find a task to run. | ||
1652 | */ | ||
1653 | check_for_global_preempt(); | ||
1654 | } | ||
1655 | |||
1656 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1657 | } | ||
1658 | |||
1659 | static enum hrtimer_restart slack_timer_fire(struct hrtimer *timer) | ||
1660 | { | ||
1661 | unsigned long flags; | ||
1662 | hrt_server_t *server = container_of(timer, hrt_server_t, slack_timer); | ||
1663 | cpu_entry_t *entry = container_of(server, cpu_entry_t, hrt_server); | ||
1664 | |||
1665 | raw_spin_lock_irqsave(global_lock, flags); | ||
1666 | |||
1667 | TRACE_TIMER("slack timer fired for P%d", entry->cpu); | ||
1668 | BUG_ON(!server->ready); | ||
1669 | sched_trace_action(entry->linked, NO_SLACK_ACTION); | ||
1670 | |||
1671 | /* Set new state of entry */ | ||
1672 | server->no_slack = 1; | ||
1673 | check_for_hrt_preempt(entry); | ||
1674 | |||
1675 | /* Donate slack if the HRT server cannot run anything */ | ||
1676 | if (!entry->linked || !is_hrt(entry->linked)) { | ||
1677 | check_donate_slack(&server->server, NULL); | ||
1678 | check_for_global_preempt(); | ||
1679 | } | ||
1680 | |||
1681 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1682 | |||
1683 | return HRTIMER_NORESTART; | ||
1684 | } | ||
1685 | |||
1686 | static void job_completion(cpu_entry_t *entry, struct task_struct* task) | ||
1687 | { | ||
1688 | server_t *server = entry->linked_server; | ||
1689 | set_rt_flags(task, RT_F_SLEEP); | ||
1690 | |||
1691 | TRACE_TASK_SUB(task, "completed"); | ||
1692 | |||
1693 | unlink(task); | ||
1694 | check_donate_slack(server, task); | ||
1695 | |||
1696 | /* If a slack server completed an SRT task, the work for the | ||
1697 | * next job arrival has already been done. | ||
1698 | */ | ||
1699 | if (server->type == S_SLACK && is_srt(task)) { | ||
1700 | tsk_rt(task)->job_params.job_no++; | ||
1701 | sched_trace_task_release(task); | ||
1702 | TRACE_TASK_SERVER_SUB(task, server, "catching up SRT, " | ||
1703 | "rel: %llu, dead: %llu", | ||
1704 | TIME(get_release(task)), | ||
1705 | TIME(get_deadline(task))); | ||
1706 | check_slack_candidate(task); | ||
1707 | sched_trace_task_completion(task, 1); | ||
1708 | |||
1709 | return; | ||
1710 | } | ||
1711 | |||
1712 | BUG_ON(is_queued(task)); | ||
1713 | |||
1714 | if (server->type == S_SRT) { | ||
1715 | /* If the task is behind the server it must release immediately, | ||
1716 | * leaving its release time and deadline unchanged. | ||
1717 | */ | ||
1718 | if (server->job_no > tsk_rt(task)->job_params.job_no) { | ||
1719 | TRACE_TASK_SUB(task, "catching up"); | ||
1720 | tsk_rt(task)->job_params.job_no++; | ||
1721 | } else { | ||
1722 | /* Otherwise release them both */ | ||
1723 | prepare_for_next_period(task); | ||
1724 | TRACE_TASK_SUB(task, "next release: %llu, dead: %llu", | ||
1725 | TIME(get_release(task)), | ||
1726 | TIME(get_deadline(task))); | ||
1727 | server_release(server); | ||
1728 | } | ||
1729 | } else { | ||
1730 | prepare_for_next_period(task); | ||
1731 | TRACE_TASK_SUB(task, "next release: %llu, dead: %llu", | ||
1732 | TIME(get_release(task)), | ||
1733 | TIME(get_deadline(task))); | ||
1734 | } | ||
1735 | |||
1736 | if (is_released(task, litmus_clock())) | ||
1737 | sched_trace_task_release(task); | ||
1738 | |||
1739 | /* Don't requeue a blocking task */ | ||
1740 | if (is_running(task)) | ||
1741 | job_arrival(task, entry); | ||
1742 | |||
1743 | sched_trace_task_completion(task, 1); | ||
1744 | } | ||
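
/*
 * The SRT catch-up rule from job_completion(), reduced to standalone
 * arithmetic; demo_job and its fields are hypothetical simplifications of
 * the plugin's job_params and server state. A task that fell behind its
 * server releases immediately with unchanged parameters; otherwise task
 * and server advance to the next period together.
 */
#include <stdio.h>

typedef unsigned long long lt_t;

struct demo_job { unsigned int job_no; lt_t release, deadline, period; };

static void demo_complete(struct demo_job *task, struct demo_job *server)
{
        if (server->job_no > task->job_no) {
                task->job_no++; /* catching up: keep release and deadline */
        } else {
                task->job_no++;
                task->release = task->deadline;
                task->deadline = task->release + task->period;
                server->job_no++;
                server->release = server->deadline;
                server->deadline = server->release + server->period;
        }
}

int main(void)
{
        struct demo_job task = { .job_no = 1, .deadline = 10, .period = 10 };
        struct demo_job server = { .job_no = 3, .deadline = 10, .period = 10 };

        demo_complete(&task, &server); /* task is behind: only job_no moves */
        printf("job %u, rel %llu, dead %llu\n",
               task.job_no, task.release, task.deadline);
        return 0;
}
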
1745 | |||
1746 | /* | ||
1747 | * Assumes called with local irqs disabled. | ||
1748 | */ | ||
1749 | static void server_completed(server_t *server, struct task_struct *task) | ||
1750 | { | ||
1751 | hrt_server_t *hrt_server; | ||
1752 | cpu_entry_t *entry = task_linked_entry(task); | ||
1753 | |||
1754 | BUG_ON(entry->linked != task); | ||
1755 | BUG_ON(entry->linked_server != server); | ||
1756 | |||
1757 | if (server->type == S_SRT) { | ||
1758 | TRACE_TASK_SUB(task, "must wait on server"); | ||
1759 | |||
1760 | /* The job must now take the priority and release time | ||
1761 | * of the next server. We do this so that we can still | ||
1762 | * use rt_domain and other handy methods to work | ||
1763 | * with SRT jobs. Because this can ONLY happen if the | ||
1764 | * task's job number gets behind the server's, we can | ||
1765 | * easily detect the job catching up later. | ||
1766 | */ | ||
1767 | tsk_rt(task)->job_params.release = server->deadline; | ||
1768 | tsk_rt(task)->job_params.deadline = server->deadline + | ||
1769 | get_rt_period(task); | ||
1770 | TRACE_TASK_SUB(task, "waiting, new dead: %llu, new rel: %llu", | ||
1771 | TIME(get_deadline(task)), | ||
1772 | TIME(get_release(task))); | ||
1773 | |||
1774 | } else if (server->type == S_HRT) { | ||
1775 | /* Update state of HRT server */ | ||
1776 | hrt_server = container_of(server, hrt_server_t, server); | ||
1777 | hrt_server->ready = 0; | ||
1778 | TRACE_SERVER_SUB(server, "P%d no longer ready", entry->cpu); | ||
1779 | |||
1780 | if (hrtimer_active(&hrt_server->slack_timer)) | ||
1781 | slack_timer_cancel(hrt_server); | ||
1782 | } | ||
1783 | |||
1784 | if (server->type != S_SLACK) { | ||
1785 | server_release(server); | ||
1786 | sched_trace_action(task, SERVER_COMPLETED_ACTION); | ||
1787 | } | ||
1788 | |||
1789 | unlink(task); | ||
1790 | requeue(task, get_rt_domain(entry, task)); | ||
1791 | |||
1792 | /* We know this CPU needs to pick its next task */ | ||
1793 | edf_hsb_pick_next(entry); | ||
1794 | |||
1795 | /* Only cause a reschedule if something new was scheduled. A task | ||
1796 | * could merely have swapped servers. | ||
1797 | */ | ||
1798 | if (entry->linked != task) | ||
1799 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1800 | else | ||
1801 | entry->scheduled_server = entry->linked_server; | ||
1802 | } | ||
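
/*
 * The postponement performed above when an SRT server exhausts its budget
 * before its task's job completes, as standalone arithmetic (all values
 * hypothetical): the job inherits the server's next window, so the later
 * job_no comparison in job_completion() can detect the catch-up.
 */
#include <stdio.h>

typedef unsigned long long lt_t;

int main(void)
{
        lt_t server_deadline = 40; /* deadline of the exhausted server job */
        lt_t task_period = 25;

        lt_t new_release = server_deadline;
        lt_t new_deadline = server_deadline + task_period;

        printf("waiting, new rel: %llu, new dead: %llu\n",
               new_release, new_deadline);
        return 0;
}
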
1803 | |||
1804 | static void hrt_server_released(server_t *server) | ||
1805 | { | ||
1806 | hrt_server_t *hrt_server = container_of(server, hrt_server_t, server); | ||
1807 | cpu_entry_t *entry = container_of(hrt_server, cpu_entry_t, hrt_server); | ||
1808 | |||
1809 | BUG_ON(hrtimer_active(&hrt_server->slack_timer)); | ||
1810 | TRACE_SERVER_SUB(server, "HRT server released on P%d", entry->cpu); | ||
1811 | |||
1812 | hrt_server->no_slack = 0; | ||
1813 | hrt_server->ready = 1; | ||
1814 | remove_slack(server_slack(&hrt_server->server)); | ||
1815 | |||
1816 | check_for_hrt_preempt(entry); | ||
1817 | |||
1818 | /* Ensure slack timer is only running if the current | ||
1819 | * job is not HRT. | ||
1820 | */ | ||
1821 | if (entry->linked && is_hrt(entry->linked)) | ||
1822 | slack_timer_cancel(hrt_server); | ||
1823 | else | ||
1824 | slack_timer_arm(hrt_server); | ||
1825 | } | ||
1826 | |||
1827 | static void servers_released(struct list_head *servers) | ||
1828 | { | ||
1829 | int was_be = 0; | ||
1830 | unsigned long flags; | ||
1831 | struct list_head *pos, *safe; | ||
1832 | server_t *server; | ||
1833 | |||
1834 | raw_spin_lock_irqsave(global_lock, flags); | ||
1835 | |||
1836 | sched_trace_action(NULL, SERVER_RELEASED_ACTION); | ||
1837 | TRACE_TIMER("Servers released"); | ||
1838 | |||
1839 | list_for_each_safe(pos, safe, servers) { | ||
1840 | server = list_entry(pos, server_t, release_list); | ||
1841 | |||
1842 | list_del_init(pos); | ||
1843 | |||
1844 | if (server->type == S_BE) { | ||
1846 | was_be = 1; | ||
1847 | BUG_ON(bheap_node_in_heap(server->hn)); | ||
1848 | TRACE_SERVER_SUB(server, "inserting BE server"); | ||
1849 | bheap_insert(server_order, &be_ready_servers, | ||
1850 | server->hn); | ||
1851 | check_donate_slack(server, NULL); | ||
1852 | } else { /* HRT server */ | ||
1853 | hrt_server_released(server); | ||
1854 | } | ||
1855 | } | ||
1856 | |||
1857 | if (was_be) | ||
1858 | check_for_global_preempt(); | ||
1859 | |||
1860 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1861 | } | ||
1862 | |||
1863 | /****************************************************************************** | ||
1864 | * Server management methods | ||
1865 | ******************************************************************************/ | ||
1866 | |||
1867 | static int curr_be = 0; | ||
1868 | |||
1869 | /* | ||
1870 | * A BE server has been added in a proc entry. | ||
1871 | */ | ||
1872 | static int admit_be_server(unsigned long long wcet, | ||
1873 | unsigned long long period, | ||
1874 | int cpu) | ||
1875 | { | ||
1876 | int rv = 0; | ||
1877 | server_t *be_server; | ||
1878 | |||
1879 | if (cpu != NO_CPU) { | ||
1880 | rv = -EINVAL; | ||
1881 | goto out; | ||
1882 | } | ||
1883 | |||
1884 | be_server = server_alloc(GFP_ATOMIC); | ||
1885 | server_init(be_server, &server_domain, | ||
1886 | BE_SERVER_BASE + ++curr_be, | ||
1887 | wcet, period, 1); | ||
1888 | be_server->type = S_BE; | ||
1889 | server_slack_create(be_server); | ||
1890 | |||
1891 | TRACE_SERVER_SUB(be_server, "admitted BE server"); | ||
1892 | |||
1893 | list_add(&be_server->list, &be_servers); | ||
1894 | bheap_insert(server_order, &be_ready_servers, be_server->hn); | ||
1895 | |||
1896 | out: | ||
1897 | return rv; | ||
1898 | } | ||
1899 | |||
1900 | /* | ||
1901 | * Output all BE servers to a proc entry. | ||
1902 | */ | ||
1903 | static void list_be_servers(server_proc_t *proc) | ||
1904 | { | ||
1905 | struct list_head *pos; | ||
1906 | server_t *be_server; | ||
1907 | |||
1908 | list_for_each(pos, &be_servers) { | ||
1909 | be_server = list_entry(pos, server_t, list); | ||
1910 | list_server(be_server, NO_CPU, proc); | ||
1911 | } | ||
1912 | } | ||
1913 | |||
1914 | /* | ||
1915 | * Halts and destroys all BE servers. | ||
1916 | */ | ||
1917 | static void stop_be_servers(void) | ||
1918 | { | ||
1919 | server_t *be_server; | ||
1920 | struct list_head *pos, *safe; | ||
1921 | |||
1922 | list_for_each_safe(pos, safe, &be_servers) { | ||
1923 | be_server = list_entry(pos, server_t, list); | ||
1924 | |||
1925 | list_del_init(pos); | ||
1926 | if (bheap_node_in_heap(be_server->hn)) | ||
1927 | bheap_delete(server_order, &be_ready_servers, | ||
1928 | be_server->hn); | ||
1929 | server_slack_destroy(be_server); | ||
1930 | server_destroy(be_server); | ||
1931 | server_free(be_server); | ||
1932 | } | ||
1933 | } | ||
1934 | |||
1935 | /* | ||
1936 | * An HRT server has been added in a proc entry. | ||
1937 | */ | ||
1938 | static int admit_hrt_server(unsigned long long wcet, | ||
1939 | unsigned long long period, | ||
1940 | int cpu) | ||
1941 | { | ||
1942 | cpu_entry_t *entry = &per_cpu(noslack_cpu_entries, cpu); | ||
1943 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
1944 | struct hrtimer *slack_timer = &hrt_server->slack_timer; | ||
1945 | |||
1946 | server_init(&hrt_server->server, &server_domain, | ||
1947 | cpu, wcet, period, 1); | ||
1948 | server_slack_create(&hrt_server->server); | ||
1949 | hrt_server->no_slack = 0; | ||
1950 | hrt_server->ready = 1; | ||
1951 | hrt_server->server.type = S_HRT; | ||
1952 | |||
1953 | edf_domain_init(&hrt_server->hrt_domain, NULL, | ||
1954 | release_hrt_jobs); | ||
1955 | |||
1956 | hrtimer_init(slack_timer, | ||
1957 | CLOCK_MONOTONIC, | ||
1958 | HRTIMER_MODE_ABS); | ||
1959 | slack_timer->function = slack_timer_fire; | ||
1960 | |||
1961 | return 0; | ||
1962 | } | ||
1963 | |||
1964 | /* | ||
1965 | * Print all HRT servers to a proc entry. | ||
1966 | */ | ||
1967 | static void list_hrt_servers(server_proc_t *proc) | ||
1968 | { | ||
1969 | cpu_entry_t *entry; | ||
1970 | hrt_server_t *hrt_server; | ||
1971 | int cpu; | ||
1972 | |||
1973 | for_each_online_cpu(cpu) { | ||
1974 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
1975 | hrt_server = &entry->hrt_server; | ||
1976 | list_server(&hrt_server->server, cpu, proc); | ||
1977 | } | ||
1978 | } | ||
1979 | |||
1980 | /* | ||
1981 | * Stops all HRT server timers and resets all fields to 0. | ||
1982 | */ | ||
1983 | static void stop_hrt_servers(void) | ||
1984 | { | ||
1985 | int cpu; | ||
1986 | cpu_entry_t *entry; | ||
1987 | hrt_server_t *hrt_server; | ||
1988 | |||
1989 | for_each_online_cpu(cpu) { | ||
1990 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
1991 | hrt_server = &entry->hrt_server; | ||
1992 | |||
1993 | if (hrt_server->server.data) | ||
1994 | server_slack_destroy(&hrt_server->server); | ||
1995 | slack_timer_cancel(hrt_server); | ||
1996 | |||
1997 | hrt_server->no_slack = 0; | ||
1998 | hrt_server->ready = 0; | ||
1999 | hrt_server->server.period = 0; | ||
2000 | hrt_server->server.wcet = 0; | ||
2001 | } | ||
2002 | } | ||
2003 | |||
2004 | /* | ||
2005 | * Starts timers used to manage servers. | ||
2006 | */ | ||
2007 | static void start_servers(lt_t time) | ||
2008 | { | ||
2009 | int cpu; | ||
2010 | cpu_entry_t *entry; | ||
2011 | server_t *server; | ||
2012 | server_t *be_server; | ||
2013 | struct list_head *pos; | ||
2014 | |||
2015 | TRACE_SUB("starting servers at %llu", time); | ||
2016 | |||
2017 | /* Start HRT servers */ | ||
2018 | for_each_online_cpu(cpu) { | ||
2019 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2020 | server = &entry->hrt_server.server; | ||
2021 | |||
2022 | if (!check_hrt_server_initialized(&entry->hrt_server)) | ||
2023 | goto loop_end; | ||
2024 | |||
2025 | /* Cause a catchup later */ | ||
2026 | server_release_at(server, time - server->period); | ||
2027 | entry->hrt_server.ready = 1; | ||
2028 | |||
2029 | TRACE("Setting up cpu %d to have timer deadline %llu\n", | ||
2030 | cpu, TIME(server->deadline)); | ||
2031 | loop_end: | ||
2032 | cpu = cpu; /* no-op: a label must precede a statement */ | ||
2033 | } | ||
2034 | |||
2035 | /* Start BE servers */ | ||
2036 | list_for_each(pos, &be_servers) { | ||
2037 | be_server = list_entry(pos, server_t, list); | ||
2038 | |||
2039 | if (!bheap_node_in_heap(be_server->hn)) | ||
2040 | bheap_insert(server_order, &be_ready_servers, be_server->hn); | ||
2041 | |||
2042 | /* Cause a catchup later */ | ||
2043 | server_release_at(be_server, time - be_server->period); | ||
2044 | |||
2045 | TRACE("Releasing BE server %d\n", be_server->id); | ||
2046 | TRACE_SERVER_SUB(be_server, "inserting be server"); | ||
2047 | } | ||
2048 | } | ||
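
/*
 * Why start_servers() releases each server at (time - period): a standalone
 * sketch with a hypothetical demo_release_at() standing in for the server
 * library's release logic. Releasing one full period in the past leaves the
 * next replenishment due exactly at `time`, so the later catch-up brings
 * every server in phase with the start of the schedule.
 */
#include <stdio.h>

typedef unsigned long long lt_t;

struct demo_server { lt_t release, deadline, period; };

static void demo_release_at(struct demo_server *s, lt_t t)
{
        s->release = t;
        s->deadline = t + s->period;
}

int main(void)
{
        struct demo_server s = { .period = 50 };
        lt_t now = 1000;

        demo_release_at(&s, now - s.period);
        /* the replenishment that follows is due exactly at `now` */
        printf("rel %llu, dead %llu, next rel %llu\n",
               s.release, s.deadline, s.release + s.period);
        return 0;
}
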
2049 | |||
2050 | /****************************************************************************** | ||
2051 | * Plugin methods | ||
2052 | ******************************************************************************/ | ||
2053 | |||
2054 | static long edf_hsb_activate_plugin(void) | ||
2055 | { | ||
2056 | int cpu; | ||
2057 | cpu_entry_t *entry; | ||
2058 | #ifdef CONFIG_RELEASE_MASTER | ||
2059 | edf_hsb_release_master = atomic_read(&release_master_cpu); | ||
2060 | #else | ||
2061 | edf_hsb_release_master = NO_CPU; | ||
2062 | #endif | ||
2063 | server_domain.release_master = edf_hsb_release_master; | ||
2064 | |||
2065 | for_each_online_cpu(cpu) { | ||
2066 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2067 | #ifdef CONFIG_RELEASE_MASTER | ||
2068 | if (cpu != edf_hsb_release_master) | ||
2069 | #endif | ||
2070 | update_cpu_position(entry); | ||
2071 | } | ||
2072 | |||
2073 | start_servers(litmus_clock()); | ||
2074 | |||
2075 | TRACE("activating EDF-HSB plugin.\n"); | ||
2076 | return 0; | ||
2077 | } | ||
2078 | |||
2079 | /* | ||
2080 | * Requires that a processor be specified for any HRT task; other tasks need not specify one. | ||
2081 | */ | ||
2082 | static long edf_hsb_admit_task(struct task_struct *task) | ||
2083 | { | ||
2084 | cpu_entry_t *entry = task_sched_entry(task); | ||
2085 | |||
2086 | TRACE_TASK(task, "Admitting\n"); | ||
2087 | |||
2088 | if (is_hrt(task)) { | ||
2089 | return check_hrt_server_initialized(&entry->hrt_server) && | ||
2090 | ((task_cpu(task) == task->rt_param.task_params.cpu) && | ||
2091 | (task_cpu(task) == entry->cpu)) ? 0 : -EINVAL; | ||
2092 | } else { | ||
2093 | /* If the task is not HRT, we don't want to force the user | ||
2094 | * to specify a CPU. | ||
2095 | */ | ||
2096 | return 0; | ||
2097 | } | ||
2098 | } | ||
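
/*
 * The admission rule above as a standalone predicate; demo_admit() and its
 * arguments are hypothetical stand-ins for check_hrt_server_initialized(),
 * task_cpu() and task_params.cpu. HRT tasks must name the CPU they already
 * run on and that CPU's server must be configured; SRT and BE tasks pass
 * unconditionally. -1 stands in for -EINVAL.
 */
#include <stdio.h>

enum demo_class { DEMO_HRT, DEMO_SRT, DEMO_BE };

static int demo_admit(enum demo_class cls, int server_ready,
                      int running_on, int requested_cpu, int entry_cpu)
{
        if (cls != DEMO_HRT)
                return 0; /* no CPU requirement for SRT/BE */
        return (server_ready &&
                running_on == requested_cpu &&
                running_on == entry_cpu) ? 0 : -1;
}

int main(void)
{
        printf("%d\n", demo_admit(DEMO_HRT, 1, 2, 2, 2)); /* 0: admitted */
        printf("%d\n", demo_admit(DEMO_HRT, 1, 2, 3, 2)); /* -1: wrong CPU */
        printf("%d\n", demo_admit(DEMO_BE, 0, 0, 0, 0));  /* 0: no check */
        return 0;
}
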
2099 | |||
2100 | /* | ||
2101 | * Stops all servers from running. | ||
2102 | */ | ||
2103 | static long edf_hsb_deactivate_plugin(void) | ||
2104 | { | ||
2105 | cpu_entry_t *cpu_entry; | ||
2106 | hrt_server_t *hrt_server; | ||
2107 | unsigned long flags; | ||
2108 | int cpu; | ||
2109 | |||
2110 | local_irq_save(flags); | ||
2111 | |||
2112 | for_each_online_cpu(cpu) { | ||
2113 | cpu_entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2114 | hrt_server = &cpu_entry->hrt_server; | ||
2115 | |||
2116 | slack_timer_cancel(hrt_server); | ||
2117 | |||
2118 | if (likely(bheap_node_in_heap(cpu_entry->hn))) | ||
2119 | bheap_delete(server_order, &cpu_heap, cpu_entry->hn); | ||
2120 | } | ||
2121 | |||
2122 | local_irq_restore(flags); | ||
2123 | |||
2124 | return 0; | ||
2125 | } | ||
2126 | |||
2127 | static void edf_hsb_task_block(struct task_struct *task) | ||
2128 | { | ||
2129 | unsigned long flags; | ||
2130 | cpu_entry_t *entry = task_sched_entry(task); | ||
2131 | struct task_struct *linked; | ||
2132 | server_t *linked_server; | ||
2133 | |||
2134 | TRACE_TASK(task, "block at %llu\n", litmus_clock()); | ||
2135 | set_rt_flags(task, RT_F_BLOCK); | ||
2136 | |||
2137 | raw_spin_lock_irqsave(global_lock, flags); | ||
2138 | |||
2139 | linked = entry->linked; | ||
2140 | linked_server = entry->linked_server; | ||
2141 | |||
2142 | unlink(task); | ||
2143 | |||
2144 | /* TODO: necessary? */ | ||
2145 | if (task == linked) { | ||
2146 | check_donate_slack(linked_server, task); | ||
2147 | } | ||
2148 | |||
2149 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2150 | } | ||
2151 | |||
2152 | /* | ||
2153 | * A task leaves the system. | ||
2154 | */ | ||
2155 | static void edf_hsb_task_exit(struct task_struct *task) | ||
2156 | { | ||
2157 | unsigned long flags; | ||
2158 | cpu_entry_t *entry = task_sched_entry(task); | ||
2159 | |||
2160 | BUG_ON(!is_realtime(task)); | ||
2161 | TRACE_TASK(task, "RIP at %llu on P%d\n", | ||
2162 | TIME(litmus_clock()), tsk_rt(task)->scheduled_on); | ||
2163 | |||
2164 | raw_spin_lock_irqsave(global_lock, flags); | ||
2165 | |||
2166 | unlink(task); | ||
2167 | if (tsk_rt(task)->scheduled_on != NO_CPU) { | ||
2168 | entry->scheduled = NULL; | ||
2169 | tsk_rt(task)->scheduled_on = NO_CPU; | ||
2170 | } | ||
2171 | if (is_srt(task)) { | ||
2172 | server_slack_destroy(task_srt_server(task)); | ||
2173 | server_destroy(task_srt_server(task)); | ||
2174 | server_free(task_srt_server(task)); | ||
2175 | task_data_free(tsk_rt(task)->plugin_data); | ||
2176 | } | ||
2177 | |||
2178 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2179 | } | ||
2180 | |||
2181 | /* | ||
2182 | * Attempts to determine the current scheduler state, then selects the | ||
2183 | * next task and updates the scheduler state. | ||
2184 | */ | ||
2185 | static struct task_struct* edf_hsb_schedule(struct task_struct *prev) | ||
2186 | { | ||
2187 | unsigned long flags; | ||
2188 | int blocks, preempted, sleep, was_slack, np, hrt_preempt, donated; | ||
2189 | struct task_struct *curr; | ||
2190 | cpu_entry_t *entry = local_cpu_entry; | ||
2191 | |||
2192 | #ifdef CONFIG_RELEASE_MASTER | ||
2193 | /* Bail out early if we are the release master. | ||
2194 | * The release master never schedules any real-time tasks. | ||
2195 | */ | ||
2196 | if (edf_hsb_release_master == entry->cpu) { | ||
2197 | sched_state_task_picked(); | ||
2198 | return NULL; | ||
2199 | } | ||
2200 | #endif | ||
2201 | |||
2202 | raw_spin_lock_irqsave(global_lock, flags); | ||
2203 | |||
2204 | curr = entry->scheduled; | ||
2205 | |||
2206 | TRACE("server_budget: %llu, server_deadline: %llu, " | ||
2207 | "curr_time: %llu, no_slack: %d, ready: %d\n", | ||
2208 | TIME(entry->hrt_server.server.budget), | ||
2209 | TIME(entry->hrt_server.server.deadline), | ||
2210 | TIME(litmus_clock()), entry->hrt_server.no_slack, | ||
2211 | entry->hrt_server.ready); | ||
2212 | |||
2213 | /* Determine state */ | ||
2214 | blocks = curr && !is_running(curr); | ||
2215 | preempted = entry->scheduled != entry->linked; | ||
2216 | sleep = curr && get_rt_flags(curr) == RT_F_SLEEP; | ||
2217 | was_slack = !list_empty(&slack_queue); | ||
2218 | np = curr && is_np(curr); | ||
2219 | |||
2220 | TRACE("blocks: %d, preempted: %d, sleep: %d, np: %d\n", | ||
2221 | blocks, preempted, sleep, np); | ||
2222 | if (blocks) | ||
2223 | unlink(entry->scheduled); | ||
2224 | |||
2225 | /* If the task has gone to sleep or exhausted its budget, it | ||
2226 | * must complete its current job. | ||
2227 | */ | ||
2228 | if (sleep && !blocks && !preempted) | ||
2229 | job_completion(entry, entry->scheduled); | ||
2230 | |||
2231 | /* Pick the next task if there isn't one currently */ | ||
2232 | if (!entry->linked) | ||
2233 | edf_hsb_pick_next(entry); | ||
2234 | |||
2235 | /* Set task states */ | ||
2236 | if (entry->linked != entry->scheduled) { | ||
2237 | if (entry->linked) | ||
2238 | entry->linked->rt_param.scheduled_on = entry->cpu; | ||
2239 | if (entry->scheduled) | ||
2240 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
2241 | } | ||
2242 | |||
2243 | entry->scheduled = entry->linked; | ||
2244 | entry->scheduled_server = entry->linked_server; | ||
2245 | sched_state_task_picked(); | ||
2246 | |||
2247 | /* A non-HRT task was preempted by an HRT task. Because of the way linking | ||
2248 | * works, it cannot link itself to anything else until the non-migratory | ||
2249 | * HRT task is scheduled. | ||
2250 | */ | ||
2251 | hrt_preempt = preempted && entry->linked && curr && | ||
2252 | is_hrt(entry->linked) && !is_hrt(curr); | ||
2253 | /* A server just donated slack */ | ||
2254 | donated = entry->linked && entry->linked_server->type != S_SLACK && | ||
2255 | head_in_list(&server_slack(entry->linked_server)->list); | ||
2256 | |||
2257 | if (hrt_preempt || donated) | ||
2258 | check_for_global_preempt(); | ||
2259 | |||
2260 | if (entry->scheduled) | ||
2261 | TRACE_TASK(entry->scheduled, "scheduled at %llu\n", | ||
2262 | TIME(litmus_clock())); | ||
2263 | else | ||
2264 | TRACE("NULL scheduled at %llu\n", TIME(litmus_clock())); | ||
2265 | |||
2266 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2267 | |||
2268 | return entry->scheduled; | ||
2269 | } | ||
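
/*
 * The job-completion decision inside edf_hsb_schedule(), as a standalone
 * truth table (the flag names mirror the locals above; everything else is
 * hypothetical): only a task that signalled completion, did not block, and
 * was not preempted completes its job in the scheduler.
 */
#include <stdio.h>

static int completes_job(int sleep, int blocks, int preempted)
{
        return sleep && !blocks && !preempted;
}

int main(void)
{
        printf("%d\n", completes_job(1, 0, 0)); /* 1: normal completion */
        printf("%d\n", completes_job(1, 1, 0)); /* 0: blocked instead */
        printf("%d\n", completes_job(1, 0, 1)); /* 0: scheduled != linked */
        return 0;
}
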
2270 | |||
2271 | /* | ||
2272 | * Prepare a task for running in RT mode | ||
2273 | */ | ||
2274 | static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running) | ||
2275 | { | ||
2276 | unsigned long flags; | ||
2277 | task_data_t *data; | ||
2278 | server_t *srt_server = NULL; | ||
2279 | cpu_entry_t *entry = task_sched_entry(task); | ||
2280 | |||
2281 | TRACE_TASK(task, "edf_hsb: task new at %llu\n", TIME(litmus_clock())); | ||
2282 | |||
2283 | raw_spin_lock_irqsave(global_lock, flags); | ||
2284 | |||
2285 | /* Setup job parameters */ | ||
2286 | release_at(task, litmus_clock()); | ||
2287 | |||
2288 | /* Create SRT server */ | ||
2289 | if (is_srt(task)) { | ||
2291 | srt_server = server_alloc(GFP_ATOMIC); | ||
2292 | server_init(srt_server, &server_domain, | ||
2293 | task->pid, get_exec_cost(task), | ||
2294 | get_rt_period(task), 0); | ||
2295 | srt_server->type = S_SRT; | ||
2296 | |||
2297 | server_slack_create(srt_server); | ||
2298 | |||
2299 | } | ||
2300 | |||
2301 | /* Create task plugin data */ | ||
2302 | data = task_data_alloc(GFP_ATOMIC); | ||
2303 | data->owner = task; | ||
2304 | data->srt_server = srt_server; | ||
2305 | INIT_LIST_HEAD(&data->candidate_list); | ||
2306 | tsk_rt(task)->plugin_data = data; | ||
2307 | |||
2308 | /* Already running, update the cpu entry. | ||
2309 | * This tends to happen when the first tasks enter the system. | ||
2310 | */ | ||
2311 | if (running) { | ||
2312 | //BUG_ON(entry->scheduled); | ||
2313 | |||
2314 | #ifdef CONFIG_RELEASE_MASTER | ||
2315 | if (entry->cpu != edf_hsb_release_master) { | ||
2316 | #endif | ||
2317 | entry->scheduled = task; | ||
2318 | tsk_rt(task)->scheduled_on = task_cpu(task); | ||
2319 | #ifdef CONFIG_RELEASE_MASTER | ||
2320 | } else { | ||
2321 | /* do not schedule on release master */ | ||
2322 | /* Cannot preempt! Causing a preemption with a BE task | ||
2323 | * somehow leads to that task never blocking during | ||
2324 | * a synchronous release. This is a bug! | ||
2325 | */ | ||
2326 | if (!is_be(task)) | ||
2327 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
2328 | tsk_rt(task)->scheduled_on = NO_CPU; | ||
2329 | } | ||
2330 | #endif | ||
2331 | } else { | ||
2332 | task->rt_param.scheduled_on = NO_CPU; | ||
2333 | } | ||
2334 | |||
2335 | task->rt_param.linked_on = NO_CPU; | ||
2336 | job_arrival(task, entry); | ||
2337 | |||
2338 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2339 | } | ||
2340 | |||
2341 | static void edf_hsb_task_wake_up(struct task_struct *task) | ||
2342 | { | ||
2343 | lt_t now; | ||
2344 | unsigned long flags; | ||
2345 | cpu_entry_t *entry = task_sched_entry(task); | ||
2346 | |||
2348 | TRACE_TASK(task, "wake_up at %llu on %d, %d\n", TIME(litmus_clock()), | ||
2349 | task_cpu(task), task->rt_param.task_params.cpu); | ||
2350 | |||
2351 | raw_spin_lock_irqsave(global_lock, flags); | ||
2352 | |||
2353 | if (!is_be(task)) { | ||
2354 | if (is_srt(task)) { | ||
2355 | catchup_srt_server(task); | ||
2356 | } | ||
2357 | |||
2358 | /* Non-BE tasks are not sporadic in this model */ | ||
2359 | set_rt_flags(task, RT_F_RUNNING); | ||
2360 | /* The job blocked while it was being run by a slack server */ | ||
2361 | if (is_queued(task)) { | ||
2362 | check_slack_candidate(task); | ||
2363 | goto out; | ||
2364 | } | ||
2365 | } else { | ||
2366 | /* Re-release all BE tasks on wake-up */ | ||
2367 | now = litmus_clock(); | ||
2368 | |||
2369 | if (is_tardy(task, now)) { | ||
2370 | release_at(task, now); | ||
2371 | sched_trace_task_release(task); | ||
2372 | } | ||
2373 | } | ||
2374 | |||
2375 | job_arrival(task, entry); | ||
2376 | |||
2377 | out: | ||
2378 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2379 | } | ||
2380 | |||
2381 | /* | ||
2382 | * Unused. | ||
2383 | */ | ||
2384 | static void edf_hsb_tick(struct task_struct *t) | ||
2385 | { | ||
2386 | } | ||
2387 | |||
2388 | |||
2389 | /****************************************************************************** | ||
2390 | * Plugin | ||
2391 | ******************************************************************************/ | ||
2392 | |||
2393 | static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp = { | ||
2394 | .plugin_name = "EDF-HSB-NOSLACK", | ||
2395 | |||
2396 | .activate_plugin = edf_hsb_activate_plugin, | ||
2397 | .deactivate_plugin = edf_hsb_deactivate_plugin, | ||
2398 | |||
2399 | .schedule = edf_hsb_schedule, | ||
2400 | .admit_task = edf_hsb_admit_task, | ||
2401 | .task_block = edf_hsb_task_block, | ||
2402 | .task_exit = edf_hsb_task_exit, | ||
2403 | .task_new = edf_hsb_task_new, | ||
2404 | .task_wake_up = edf_hsb_task_wake_up, | ||
2405 | .tick = edf_hsb_tick, | ||
2406 | |||
2407 | /* From jobs.h */ | ||
2408 | .complete_job = complete_job, | ||
2409 | .release_at = release_at, | ||
2410 | }; | ||
2411 | |||
2412 | static int __init init_edf_hsb(void) | ||
2413 | { | ||
2414 | cpu_entry_t *entry; | ||
2415 | hrt_server_t *hrt_server; | ||
2416 | server_t *idle_slack; | ||
2417 | int rv, cpu; | ||
2418 | |||
2419 | rv = register_sched_plugin(&edf_hsb_plugin); | ||
2420 | if (rv) { | ||
2421 | printk(KERN_ERR "Could not register plugin %s.\n", | ||
2422 | edf_hsb_plugin.plugin_name); | ||
2423 | goto out; | ||
2424 | } | ||
2425 | |||
2426 | rv = make_plugin_proc_dir(&edf_hsb_plugin, &edf_hsb_proc_dir); | ||
2427 | if (rv) { | ||
2428 | printk(KERN_ERR "Could not create %s procfs dir.\n", | ||
2429 | edf_hsb_plugin.plugin_name); | ||
2430 | goto out; | ||
2431 | } | ||
2432 | |||
2433 | |||
2434 | task_data_cache = KMEM_CACHE(task_data, SLAB_PANIC); | ||
2435 | |||
2436 | /* Global domains */ | ||
2437 | edf_domain_init(&srt_domain, NULL, release_srt_jobs); | ||
2438 | rt_domain_init(&be_domain, be_ready_order, | ||
2439 | NULL, release_be_jobs); | ||
2440 | server_domain_init(&server_domain, servers_released, | ||
2441 | server_completed, NO_CPU, global_lock); | ||
2442 | |||
2443 | /* Server proc interfaces */ | ||
2444 | server_proc_init(&server_domain, | ||
2445 | edf_hsb_proc_dir, BE_PROC_NAME, | ||
2446 | admit_be_server, list_be_servers, | ||
2447 | stop_be_servers); | ||
2448 | server_proc_init(&server_domain, | ||
2449 | edf_hsb_proc_dir, HRT_PROC_NAME, | ||
2450 | admit_hrt_server, list_hrt_servers, | ||
2451 | stop_hrt_servers); | ||
2452 | |||
2453 | |||
2454 | /* Global collections */ | ||
2455 | bheap_init(&cpu_heap); | ||
2456 | bheap_init(&be_ready_servers); | ||
2457 | INIT_LIST_HEAD(&be_servers); | ||
2458 | INIT_LIST_HEAD(&slack_queue); | ||
2459 | INIT_LIST_HEAD(&slack_candidates); | ||
2460 | |||
2461 | for_each_online_cpu(cpu) { | ||
2462 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2463 | hrt_server = &entry->hrt_server; | ||
2464 | |||
2465 | idle_slack = server_alloc(GFP_ATOMIC); | ||
2466 | server_init(idle_slack, &server_domain, | ||
2467 | IDLE_SLACK_BASE + cpu, | ||
2468 | LLONG_MAX, LLONG_MAX, 1); | ||
2469 | idle_slack->deadline = LLONG_MAX; | ||
2470 | idle_slack->budget = LLONG_MAX; | ||
2471 | idle_slack->job_no = 1; | ||
2472 | idle_slack->release = 1; | ||
2473 | idle_slack->type = S_SLACK; | ||
2474 | add_slack(idle_slack); | ||
2475 | |||
2476 | entry->cpu = cpu; | ||
2477 | entry->linked = NULL; | ||
2478 | entry->scheduled = NULL; | ||
2479 | entry->linked_server = NULL; | ||
2480 | |||
2481 | /* HRT server */ | ||
2482 | hrt_server->server.id = cpu; | ||
2483 | hrt_server->server.deadline = 0; | ||
2484 | hrt_server->server.period = 0; | ||
2485 | hrt_server->server.wcet = 0; | ||
2486 | hrt_server->ready = 0; | ||
2487 | |||
2488 | hrtimer_start_on_info_init(&hrt_server->slack_timer_info); | ||
2489 | |||
2490 | /* CPU entry bheap nodes */ | ||
2491 | entry->hn = &cpu_heap_node[cpu]; | ||
2492 | bheap_node_init(&entry->hn, entry); | ||
2493 | } | ||
2494 | |||
2495 | out: | ||
2496 | return rv; | ||
2497 | } | ||
2498 | |||
2499 | static void exit_edf_hsb(void) | ||
2500 | { | ||
2501 | int cpu; | ||
2502 | cpu_entry_t *entry; | ||
2503 | |||
2504 | stop_be_servers(); | ||
2505 | stop_hrt_servers(); | ||
2506 | |||
2507 | server_domain_destroy(&server_domain); | ||
2508 | |||
2509 | for_each_online_cpu(cpu) { | ||
2510 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2511 | server_slack_destroy(&entry->hrt_server.server); | ||
2512 | server_destroy(&entry->hrt_server.server); | ||
2513 | } | ||
2514 | |||
2515 | if (edf_hsb_proc_dir) { | ||
2516 | remove_plugin_proc_dir(&edf_hsb_plugin); | ||
2517 | /* TODO: is this wrong? */ | ||
2518 | edf_hsb_proc_dir = NULL; | ||
2519 | } | ||
2520 | } | ||
2521 | |||
2522 | module_init(init_edf_hsb); | ||
2523 | module_exit(exit_edf_hsb); | ||