diff options
author | Björn B. Brandenburg <bbb@cs.unc.edu> | 2009-12-05 17:21:52 -0500
committer | Björn B. Brandenburg <bbb@cs.unc.edu> | 2009-12-05 17:21:52 -0500
commit | da2f4f4af74a973979db895efbdb805bcd121bce (patch)
tree | 6f9fe46e4a27f65885acca18e6967a94f04d313b
parent | 069bf2e2ae96a03d3bdcaa1934b6fcd43d372850 (diff)
make RTSS09 code available
-rw-r--r-- | download/RTSS09/litmus-rt-RTSS09.patch | 2427
-rw-r--r-- | index.html | 21
2 files changed, 2447 insertions, 1 deletions
diff --git a/download/RTSS09/litmus-rt-RTSS09.patch b/download/RTSS09/litmus-rt-RTSS09.patch
new file mode 100644
index 0000000..d26a441
--- /dev/null
+++ b/download/RTSS09/litmus-rt-RTSS09.patch
@@ -0,0 +1,2427 @@
1 | diff --git a/include/litmus/cheap.h b/include/litmus/cheap.h | ||
2 | new file mode 100644 | ||
3 | index 0000000..39c29fb | ||
4 | --- /dev/null | ||
5 | +++ b/include/litmus/cheap.h | ||
6 | @@ -0,0 +1,62 @@ | ||
7 | +/* cheap.h -- A concurrent heap implementation. | ||
8 | + * | ||
9 | + * (c) 2009 Bjoern B. Brandenburg, <bbb@cs.unc.edu> | ||
10 | + * | ||
11 | + * Based on: | ||
12 | + * | ||
13 | + * G. Hunt, M. Michael, S. Parthasarathy, and M. Scott | ||
14 | + * "An efficient algorithm for concurrent priority queue heaps." | ||
15 | + * Information Processing Letters, 60(3): 151-157, November 1996 | ||
16 | + */ | ||
17 | + | ||
18 | +#ifndef CHEAP_H | ||
19 | + | ||
20 | +#include "linux/spinlock.h" | ||
21 | + | ||
22 | +#define CHEAP_EMPTY 0xffffffff | ||
23 | +#define CHEAP_READY 0xffffff00 | ||
24 | +#define CHEAP_ROOT 0 | ||
25 | + | ||
26 | +struct cheap_node { | ||
27 | + spinlock_t lock; | ||
28 | + | ||
29 | + unsigned int tag; | ||
30 | + void* value; | ||
31 | +}; | ||
32 | + | ||
33 | +struct cheap { | ||
34 | + spinlock_t lock; | ||
35 | + | ||
36 | + unsigned int next; | ||
37 | + unsigned int size; | ||
38 | + | ||
39 | + struct cheap_node* heap; | ||
40 | +}; | ||
41 | + | ||
42 | +typedef int (*cheap_prio_t)(void* a, void* b); | ||
43 | + | ||
44 | +void cheap_init(struct cheap* ch, | ||
45 | + unsigned int size, | ||
46 | + struct cheap_node* nodes); | ||
47 | + | ||
48 | +int cheap_insert(cheap_prio_t higher_prio, | ||
49 | + struct cheap* ch, | ||
50 | + void* item, | ||
51 | + int pid); | ||
52 | + | ||
53 | +void* cheap_peek(struct cheap* ch); | ||
54 | + | ||
55 | +typedef int (*cheap_take_predicate_t)(void* ctx, void* value); | ||
56 | + | ||
57 | +void* cheap_take_if(cheap_take_predicate_t pred, | ||
58 | + void* pred_ctx, | ||
59 | + cheap_prio_t higher_prio, | ||
60 | + struct cheap* ch); | ||
61 | + | ||
62 | +static inline void* cheap_take(cheap_prio_t higher_prio, | ||
63 | + struct cheap* ch) | ||
64 | +{ | ||
65 | + return cheap_take_if(NULL, NULL, higher_prio, ch); | ||
66 | +} | ||
67 | + | ||
68 | +#endif | ||
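For orientation, a minimal usage sketch of the cheap interface declared above (illustration only, not part of the patch: the priority callback, the heap size, and every demo_* name are invented, and locking/preemption-safety concerns are glossed over):

	/* sketch: exercising the concurrent heap ("cheap") API */
	#include <linux/smp.h>
	#include "litmus/cheap.h"

	#define DEMO_SIZE 16

	static struct cheap demo_heap;
	static struct cheap_node demo_nodes[DEMO_SIZE];

	/* cheap_prio_t callback: values are integers smuggled through void*,
	 * smaller value = higher priority */
	static int demo_higher_prio(void* a, void* b)
	{
		return (long) a < (long) b;
	}

	static void* demo(void)
	{
		void* top;

		cheap_init(&demo_heap, DEMO_SIZE, demo_nodes);

		/* insert two items; the pid argument tags the in-flight slot
		 * (assumes preemption is disabled around smp_processor_id()) */
		cheap_insert(demo_higher_prio, &demo_heap, (void*)(long) 3, smp_processor_id());
		cheap_insert(demo_higher_prio, &demo_heap, (void*)(long) 1, smp_processor_id());

		top = cheap_peek(&demo_heap);                   /* observes the minimum without removing it */
		top = cheap_take(demo_higher_prio, &demo_heap); /* removes and returns the minimum */
		return top;
	}

The pid tag is simply whatever identifier the inserting context wants stamped on the slot while the insert is still bubbling up; sched_gedf.c further down passes smp_processor_id() for this.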
69 | diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h | ||
70 | index 6c7a4c5..1b99049 100644 | ||
71 | --- a/include/litmus/litmus.h | ||
72 | +++ b/include/litmus/litmus.h | ||
73 | @@ -86,7 +86,6 @@ void litmus_exit_task(struct task_struct *tsk); | ||
74 | #define get_rt_phase(t) (tsk_rt(t)->task_params.phase) | ||
75 | #define get_partition(t) (tsk_rt(t)->task_params.cpu) | ||
76 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) | ||
77 | -#define get_release(t) (tsk_rt(t)->job_params.release) | ||
78 | #define get_class(t) (tsk_rt(t)->task_params.cls) | ||
79 | |||
80 | inline static int budget_exhausted(struct task_struct* t) | ||
81 | @@ -102,6 +101,8 @@ inline static int budget_exhausted(struct task_struct* t) | ||
82 | #define is_be(t) \ | ||
83 | (tsk_rt(t)->task_params.class == RT_CLASS_BEST_EFFORT) | ||
84 | |||
85 | +#define get_release(t) (tsk_rt(t)->job_params.release) | ||
86 | + | ||
87 | /* Our notion of time within LITMUS: kernel monotonic time. */ | ||
88 | static inline lt_t litmus_clock(void) | ||
89 | { | ||
90 | diff --git a/litmus/Kconfig b/litmus/Kconfig | ||
91 | index f73a454..c2a7756 100644 | ||
92 | --- a/litmus/Kconfig | ||
93 | +++ b/litmus/Kconfig | ||
94 | @@ -19,7 +19,7 @@ config SRP | ||
95 | bool "Stack Resource Policy (SRP)" | ||
96 | default n | ||
97 | help | ||
98 | - Include support for Baker's Stack Resource Policy. | ||
99 | + Include support for Baker's Stack Resource Policy. | ||
100 | |||
101 | Say Yes if you want FMLP local long critical section synchronization support. | ||
102 | |||
103 | @@ -43,7 +43,7 @@ config FEATHER_TRACE | ||
104 | help | ||
105 | Feather-Trace basic tracing infrastructure. Includes device file | ||
106 | driver and instrumentation point support. | ||
107 | - | ||
108 | + | ||
109 | |||
110 | config SCHED_TASK_TRACE | ||
111 | bool "Trace real-time tasks" | ||
112 | diff --git a/litmus/Makefile b/litmus/Makefile | ||
113 | index 837f697..d9e8dc0 100644 | ||
114 | --- a/litmus/Makefile | ||
115 | +++ b/litmus/Makefile | ||
116 | @@ -6,11 +6,14 @@ obj-y = sched_plugin.o litmus.o \ | ||
117 | edf_common.o jobs.o \ | ||
118 | rt_domain.o fdso.o sync.o \ | ||
119 | fmlp.o srp.o norqlock.o \ | ||
120 | - heap.o \ | ||
121 | + cheap.o heap.o \ | ||
122 | sched_gsn_edf.o \ | ||
123 | sched_psn_edf.o \ | ||
124 | sched_cedf.o \ | ||
125 | - sched_pfair.o | ||
126 | + sched_pfair.o \ | ||
127 | + sched_gq_edf.o \ | ||
128 | + sched_gedf.o \ | ||
129 | + sched_ghq_edf.o | ||
130 | |||
131 | obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o | ||
132 | obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o | ||
133 | diff --git a/litmus/cheap.c b/litmus/cheap.c | ||
134 | new file mode 100644 | ||
135 | index 0000000..9fc68fd | ||
136 | --- /dev/null | ||
137 | +++ b/litmus/cheap.c | ||
138 | @@ -0,0 +1,249 @@ | ||
139 | +#include "litmus/cheap.h" | ||
140 | + | ||
141 | +static unsigned int __cheap_parent(unsigned int child) | ||
142 | +{ | ||
143 | + return (child - 1) / 2; | ||
144 | +} | ||
145 | + | ||
146 | +static unsigned int __cheap_left_child(unsigned int parent) | ||
147 | +{ | ||
148 | + return parent * 2 + 1; | ||
149 | +} | ||
150 | + | ||
151 | +static unsigned int __cheap_right_child(unsigned int parent) | ||
152 | +{ | ||
153 | + return parent * 2 + 2; | ||
154 | +} | ||
155 | + | ||
156 | +static void __cheap_swap(struct cheap_node* a, struct cheap_node* b) | ||
157 | +{ | ||
158 | + unsigned int tag; | ||
159 | + void* val; | ||
160 | + tag = a->tag; | ||
161 | + val = a->value; | ||
162 | + a->tag = b->tag; | ||
163 | + a->value = b->value; | ||
164 | + b->tag = tag; | ||
165 | + b->value = val; | ||
166 | +} | ||
167 | + | ||
168 | +void cheap_init(struct cheap* ch, unsigned int size, | ||
169 | + struct cheap_node* nodes) | ||
170 | +{ | ||
171 | + unsigned int i; | ||
172 | + spin_lock_init(&ch->lock); | ||
173 | + ch->next = 0; | ||
174 | + ch->size = size; | ||
175 | + ch->heap = nodes; | ||
176 | + | ||
177 | + for (i = 0; i < size; i++) { | ||
178 | + spin_lock_init(&ch->heap[i].lock); | ||
179 | + ch->heap[i].tag = CHEAP_EMPTY; | ||
180 | + ch->heap[i].value = NULL; | ||
181 | + } | ||
182 | +} | ||
183 | + | ||
184 | +void* cheap_peek(struct cheap* ch) | ||
185 | +{ | ||
186 | + void* val; | ||
187 | + spin_lock(&ch->heap[CHEAP_ROOT].lock); | ||
188 | + val = ch->heap[CHEAP_ROOT].tag != CHEAP_EMPTY ? | ||
189 | + ch->heap[CHEAP_ROOT].value : NULL; | ||
190 | + spin_unlock(&ch->heap[CHEAP_ROOT].lock); | ||
191 | + return val; | ||
192 | +} | ||
193 | + | ||
194 | +int cheap_insert(cheap_prio_t higher_prio, | ||
195 | + struct cheap* ch, | ||
196 | + void* item, | ||
197 | + int pid) | ||
198 | +{ | ||
199 | + int stop = 0; | ||
200 | + unsigned int child, parent, locked; | ||
201 | + unsigned int wait_for_parent_state; | ||
202 | + | ||
203 | + lockdep_off(); /* generates false positives */ | ||
204 | + | ||
205 | + spin_lock(&ch->lock); | ||
206 | + if (ch->next < ch->size) { | ||
207 | + /* ok, node allocated */ | ||
208 | + child = ch->next++; | ||
209 | + spin_lock(&ch->heap[child].lock); | ||
210 | + ch->heap[child].tag = pid; | ||
211 | + ch->heap[child].value = item; | ||
212 | + spin_unlock(&ch->lock); | ||
213 | + } else { | ||
214 | + /* out of space! */ | ||
215 | + spin_unlock(&ch->lock); | ||
216 | + lockdep_on(); | ||
217 | + return -1; | ||
218 | + } | ||
219 | + | ||
220 | + spin_unlock(&ch->heap[child].lock); | ||
221 | + | ||
222 | + /* bubble up */ | ||
223 | + while (!stop && child > CHEAP_ROOT) { | ||
224 | + parent = __cheap_parent(child); | ||
225 | + spin_lock(&ch->heap[parent].lock); | ||
226 | + spin_lock(&ch->heap[child].lock); | ||
227 | + locked = child; | ||
228 | + wait_for_parent_state = CHEAP_EMPTY; | ||
229 | + if (ch->heap[parent].tag == CHEAP_READY && | ||
230 | + ch->heap[child].tag == pid) { | ||
231 | + /* no interference */ | ||
232 | + if (higher_prio(ch->heap[child].value, | ||
233 | + ch->heap[parent].value)) { | ||
234 | + /* out of order; swap and move up */ | ||
235 | + __cheap_swap(ch->heap + child, | ||
236 | + ch->heap + parent); | ||
237 | + child = parent; | ||
238 | + } else { | ||
239 | + /* In order; we are done. */ | ||
240 | + ch->heap[child].tag = CHEAP_READY; | ||
241 | + stop = 1; | ||
242 | + } | ||
243 | + } else if (ch->heap[parent].tag == CHEAP_EMPTY) { | ||
244 | + /* Concurrent extract moved child to root; | ||
245 | + * we are done. | ||
246 | + */ | ||
247 | + stop = 1; | ||
248 | + } else if (ch->heap[child].tag != pid) { | ||
249 | + /* Concurrent extract moved child up; | ||
250 | + * we go after it. | ||
251 | + */ | ||
252 | + child = parent; | ||
253 | + } else { | ||
254 | + /* Some other process needs to act first. | ||
255 | + * We need to back off a little in order | ||
256 | + * to give the others a chance to acquire the | ||
257 | + * parent's lock. | ||
258 | + */ | ||
259 | + wait_for_parent_state = ch->heap[parent].tag; | ||
260 | + } | ||
261 | + | ||
262 | + spin_unlock(&ch->heap[locked].lock); | ||
263 | + spin_unlock(&ch->heap[parent].lock); | ||
264 | + | ||
265 | + while (wait_for_parent_state != CHEAP_EMPTY && | ||
266 | + ((volatile unsigned int) ch->heap[parent].tag) == | ||
267 | + wait_for_parent_state) | ||
268 | + cpu_relax(); | ||
269 | + | ||
270 | + } | ||
271 | + if (!stop && child == CHEAP_ROOT) { | ||
272 | + spin_lock(&ch->heap[child].lock); | ||
273 | + if (ch->heap[child].tag == pid) | ||
274 | + ch->heap[child].tag = CHEAP_READY; | ||
275 | + spin_unlock(&ch->heap[child].lock); | ||
276 | + } | ||
277 | + | ||
278 | + lockdep_on(); | ||
279 | + return 0; | ||
280 | +} | ||
281 | + | ||
282 | +void* cheap_take_if(cheap_take_predicate_t pred, | ||
283 | + void* pred_ctx, | ||
284 | + cheap_prio_t higher_prio, | ||
285 | + struct cheap* ch) | ||
286 | +{ | ||
287 | + void *val, *cval; | ||
288 | + unsigned int ctag; | ||
289 | + unsigned int left, right, child, parent; | ||
290 | + | ||
291 | + lockdep_off(); | ||
292 | + spin_lock(&ch->lock); | ||
293 | + if (ch->next > CHEAP_ROOT) { | ||
294 | + child = ch->next - 1; | ||
295 | + spin_lock(&ch->heap[child].lock); | ||
296 | + /* see if callback wants this item | ||
297 | + */ | ||
298 | + if (!pred || pred(pred_ctx, ch->heap[child].value)) | ||
299 | + /* yes, proceed */ | ||
300 | + ch->next--; | ||
301 | + else { | ||
302 | + /* no, cleanup and return */ | ||
303 | + spin_unlock(&ch->heap[child].lock); | ||
304 | + child = ch->size; | ||
305 | + } | ||
306 | + } else | ||
307 | + child = ch->size; | ||
308 | + spin_unlock(&ch->lock); | ||
309 | + | ||
310 | + if (child == ch->size) { | ||
311 | + lockdep_on(); | ||
312 | + /* empty heap */ | ||
313 | + return NULL; | ||
314 | + } | ||
315 | + | ||
316 | + /* take value from last leaf */ | ||
317 | + cval = ch->heap[child].value; | ||
318 | + ctag = ch->heap[child].tag; | ||
319 | + /* free last leaf */ | ||
320 | + ch->heap[child].tag = CHEAP_EMPTY; | ||
321 | + ch->heap[child].value = NULL; | ||
322 | + | ||
323 | + /* unlock before locking root to maintain locking order */ | ||
324 | + spin_unlock(&ch->heap[child].lock); | ||
325 | + | ||
326 | + spin_lock(&ch->heap[CHEAP_ROOT].lock); | ||
327 | + if (ch->heap[CHEAP_ROOT].tag == CHEAP_EMPTY) { | ||
328 | + /* heap became empty, we got the last one */ | ||
329 | + spin_unlock(&ch->heap[CHEAP_ROOT].lock); | ||
330 | + lockdep_on(); | ||
331 | + return cval; | ||
332 | + } else { | ||
333 | + /* grab value of root (=min), replace with | ||
334 | + * what we got from the last leaf | ||
335 | + */ | ||
336 | + val = ch->heap[CHEAP_ROOT].value; | ||
337 | + ch->heap[CHEAP_ROOT].value = cval; | ||
338 | + ch->heap[CHEAP_ROOT].tag = CHEAP_READY; | ||
339 | + } | ||
340 | + | ||
341 | + /* Bubble down. We are still holding the ROOT (=parent) lock. */ | ||
342 | + child = 0; | ||
343 | + parent = CHEAP_ROOT; | ||
344 | + while (parent < __cheap_parent(ch->size)) { | ||
345 | + left = __cheap_left_child(parent); | ||
346 | + right = __cheap_right_child(parent); | ||
347 | + spin_lock(&ch->heap[left].lock); | ||
348 | + if (ch->heap[left].tag == CHEAP_EMPTY) { | ||
349 | + /* end of the heap, done */ | ||
350 | + spin_unlock(&ch->heap[left].lock); | ||
351 | + break; | ||
352 | + } else if (right < ch->size) { | ||
353 | + /* right child node exists */ | ||
354 | + spin_lock(&ch->heap[right].lock); | ||
355 | + if (ch->heap[right].tag == CHEAP_EMPTY || | ||
356 | + higher_prio(ch->heap[left].value, | ||
357 | + ch->heap[right].value)) { | ||
358 | + /* left child node has higher priority */ | ||
359 | + spin_unlock(&ch->heap[right].lock); | ||
360 | + child = left; | ||
361 | + } else { | ||
362 | + /* right child node has higher priority */ | ||
363 | + spin_unlock(&ch->heap[left].lock); | ||
364 | + child = right; | ||
365 | + } | ||
366 | + } else { | ||
367 | + /* right child node does not exist */ | ||
368 | + child = left; | ||
369 | + } | ||
370 | + if (higher_prio(ch->heap[child].value, | ||
371 | + ch->heap[parent].value)) { | ||
372 | + /* parent and child out of order */ | ||
373 | + __cheap_swap(ch->heap + child, | ||
374 | + ch->heap + parent); | ||
375 | + spin_unlock(&ch->heap[parent].lock); | ||
376 | + /* move down */ | ||
377 | + parent = child; | ||
378 | + } else { | ||
379 | + /* in order; we are done. */ | ||
380 | + spin_unlock(&ch->heap[child].lock); | ||
381 | + break; | ||
382 | + } | ||
383 | + } | ||
384 | + spin_unlock(&ch->heap[parent].lock); | ||
385 | + lockdep_on(); | ||
386 | + return val; | ||
387 | +} | ||
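To make the predicate hook concrete: cheap_take_if() above consults the caller-supplied callback before committing to a removal, and if the callback declines (or the heap is empty) it leaves the queue untouched and returns NULL. A small hypothetical caller might look as follows (illustration only, not part of the patch; the demo_* names and the threshold semantics are invented). sched_gedf.c below uses this hook to fold its preemption test into the dequeue itself.

	/* sketch: conditional removal via cheap_take_if() */
	#include "litmus/cheap.h"

	struct demo_ctx {
		long threshold;
	};

	/* cheap_take_predicate_t callback: accept the element only if its
	 * integer-encoded value is below the caller's threshold */
	static int demo_pred(void* ctx, void* value)
	{
		struct demo_ctx* c = ctx;
		return (long) value < c->threshold;
	}

	static int demo_higher_prio(void* a, void* b)
	{
		return (long) a < (long) b;
	}

	static void* demo_conditional_take(struct cheap* ch)
	{
		struct demo_ctx c = { .threshold = 10 };
		/* NULL if the heap is empty or the predicate declines */
		return cheap_take_if(demo_pred, &c, demo_higher_prio, ch);
	}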
388 | diff --git a/litmus/fdso.c b/litmus/fdso.c | ||
389 | index 7a16f64..bdc0466 100644 | ||
390 | --- a/litmus/fdso.c | ||
391 | +++ b/litmus/fdso.c | ||
392 | @@ -134,7 +134,7 @@ static struct od_table_entry* get_od_entry(struct task_struct* t) | ||
393 | |||
394 | table = t->od_table; | ||
395 | if (!table) { | ||
396 | - table = kzalloc(sizeof(*table) * MAX_OBJECT_DESCRIPTORS, | ||
397 | + table = kzalloc(sizeof(*table) * MAX_OBJECT_DESCRIPTORS, | ||
398 | GFP_KERNEL); | ||
399 | t->od_table = table; | ||
400 | } | ||
401 | diff --git a/litmus/litmus.c b/litmus/litmus.c | ||
402 | index 3562322..b286f8f 100644 | ||
403 | --- a/litmus/litmus.c | ||
404 | +++ b/litmus/litmus.c | ||
405 | @@ -490,11 +490,11 @@ asmlinkage long sys_exit_np(void) | ||
406 | |||
407 | /* sys_null_call() is only used for determining raw system call | ||
408 | * overheads (kernel entry, kernel exit). It has no useful side effects. | ||
409 | - * If ts is non-NULL, then the current Feather-Trace time is recorded. | ||
410 | + * If ts is non-NULL, then the current Feather-Trace time is recorded. | ||
411 | */ | ||
412 | asmlinkage long sys_null_call(cycles_t __user *ts) | ||
413 | { | ||
414 | - long ret = 0; | ||
415 | + long ret = 0; | ||
416 | cycles_t now; | ||
417 | |||
418 | if (ts) { | ||
419 | @@ -789,7 +789,7 @@ static int proc_write_release_master(struct file *file, | ||
420 | |||
421 | if (count > 63) | ||
422 | return -EINVAL; | ||
423 | - | ||
424 | + | ||
425 | if (copy_from_user(msg, buffer, count)) | ||
426 | return -EFAULT; | ||
427 | |||
428 | @@ -799,7 +799,7 @@ static int proc_write_release_master(struct file *file, | ||
429 | if (count > 1 && msg[count - 1] == '\n') | ||
430 | msg[count - 1] = '\0'; | ||
431 | |||
432 | - if (strcmp(msg, "NO_CPU") == 0) { | ||
433 | + if (strcmp(msg, "NO_CPU") == 0) { | ||
434 | atomic_set(&release_master_cpu, NO_CPU); | ||
435 | return count; | ||
436 | } else { | ||
437 | diff --git a/litmus/norqlock.c b/litmus/norqlock.c | ||
438 | index 1a17d6c..b812f34 100644 | ||
439 | --- a/litmus/norqlock.c | ||
440 | +++ b/litmus/norqlock.c | ||
441 | @@ -50,7 +50,7 @@ void tick_no_rqlock(void) | ||
442 | |||
443 | local_irq_save(flags); | ||
444 | |||
445 | - wl = &__get_cpu_var(norq_worklist); | ||
446 | + wl = &__get_cpu_var(norq_worklist); | ||
447 | |||
448 | if (wl->hrtimer_hack) { | ||
449 | /* bail out! */ | ||
450 | diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c | ||
451 | index 2d0920c..dabf196 100644 | ||
452 | --- a/litmus/rt_domain.c | ||
453 | +++ b/litmus/rt_domain.c | ||
454 | @@ -163,7 +163,7 @@ static void reinit_release_heap(struct task_struct* t) | ||
455 | |||
456 | /* initialize */ | ||
457 | heap_init(&rh->heap); | ||
458 | - | ||
459 | + | ||
460 | atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE); | ||
461 | } | ||
462 | |||
463 | diff --git a/litmus/sched_gedf.c b/litmus/sched_gedf.c | ||
464 | new file mode 100644 | ||
465 | index 0000000..9d07b1b | ||
466 | --- /dev/null | ||
467 | +++ b/litmus/sched_gedf.c | ||
468 | @@ -0,0 +1,621 @@ | ||
469 | + | ||
470 | +#include <linux/spinlock.h> | ||
471 | +#include <linux/percpu.h> | ||
472 | +#include <linux/sched.h> | ||
473 | + | ||
474 | +#include <litmus/litmus.h> | ||
475 | +#include <litmus/jobs.h> | ||
476 | +#include <litmus/sched_plugin.h> | ||
477 | +#include <litmus/edf_common.h> | ||
478 | +#include <litmus/sched_trace.h> | ||
479 | + | ||
480 | +#include <litmus/heap.h> | ||
481 | +#include <litmus/cheap.h> | ||
482 | + | ||
483 | +#include <linux/module.h> | ||
484 | + | ||
485 | +#define GEDF_MAX_TASKS 1000 | ||
486 | + | ||
487 | +/* cpu_entry_t - maintain the linked and scheduled state | ||
488 | + */ | ||
489 | +typedef struct { | ||
490 | + int cpu; | ||
491 | + struct task_struct* linked; /* only RT tasks */ | ||
492 | + int picked; /* linked was seen */ | ||
493 | + struct task_struct* scheduled; /* only RT tasks */ | ||
494 | + struct heap_node* hn; | ||
495 | +} cpu_entry_t; | ||
496 | +DEFINE_PER_CPU(cpu_entry_t, gedf_cpu_entries); | ||
497 | + | ||
498 | +cpu_entry_t* gedf_cpus[NR_CPUS]; | ||
499 | + | ||
500 | +/* the cpus queue themselves according to priority in here */ | ||
501 | +static struct heap_node gedf_heap_node[NR_CPUS]; | ||
502 | +static struct heap gedf_cpu_heap; | ||
503 | + | ||
504 | +DEFINE_SPINLOCK(gedf_cpu_lock); /* synchronize access to cpu heap */ | ||
505 | + | ||
506 | +static struct cheap_node gedf_cheap_nodes[GEDF_MAX_TASKS]; | ||
507 | +static struct cheap gedf_ready_queue; | ||
508 | + | ||
509 | +static rt_domain_t gedf; /* used only for the release queue */ | ||
510 | + | ||
511 | +static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b) | ||
512 | +{ | ||
513 | + cpu_entry_t *a, *b; | ||
514 | + a = _a->value; | ||
515 | + b = _b->value; | ||
516 | + /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
517 | + * the top of the heap. | ||
518 | + */ | ||
519 | + return edf_higher_prio(b->linked, a->linked); | ||
520 | +} | ||
521 | + | ||
522 | +static void remove_from_cpu_heap(cpu_entry_t* entry) | ||
523 | +{ | ||
524 | + if (likely(heap_node_in_heap(entry->hn))) | ||
525 | + heap_delete(cpu_lower_prio, &gedf_cpu_heap, entry->hn); | ||
526 | +} | ||
527 | + | ||
528 | +/* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
529 | + * order in the cpu queue. Caller must hold gedf lock. | ||
530 | + */ | ||
531 | +static void update_cpu_position(cpu_entry_t *entry) | ||
532 | +{ | ||
533 | + remove_from_cpu_heap(entry); | ||
534 | + heap_insert(cpu_lower_prio, &gedf_cpu_heap, entry->hn); | ||
535 | +} | ||
536 | + | ||
537 | +/* caller must hold gedf lock */ | ||
538 | +static cpu_entry_t* lowest_prio_cpu(int take) | ||
539 | +{ | ||
540 | + struct heap_node* hn; | ||
541 | + if (take) | ||
542 | + hn = heap_take(cpu_lower_prio, &gedf_cpu_heap); | ||
543 | + else | ||
544 | + hn = heap_peek(cpu_lower_prio, &gedf_cpu_heap); | ||
545 | + return hn ? hn->value : NULL; | ||
546 | +} | ||
547 | + | ||
548 | + | ||
549 | +/* link_task_to_cpu - Update the link of a CPU. | ||
550 | + * Handles the case where the to-be-linked task is already | ||
551 | + * scheduled on a different CPU. | ||
552 | + */ | ||
553 | +static noinline void link_task_to_cpu(struct task_struct* linked, | ||
554 | + cpu_entry_t *entry) | ||
555 | +{ | ||
556 | + cpu_entry_t *sched = NULL; | ||
557 | + struct task_struct* tmp; | ||
558 | + int on_cpu; | ||
559 | + | ||
560 | + BUG_ON(linked && !is_realtime(linked)); | ||
561 | + | ||
562 | + /* Currently linked task is set to be unlinked. */ | ||
563 | + if (entry->linked) { | ||
564 | + entry->linked->rt_param.linked_on = NO_CPU; | ||
565 | + } | ||
566 | + | ||
567 | + /* Link new task to CPU. */ | ||
568 | + if (linked) { | ||
569 | + set_rt_flags(linked, RT_F_RUNNING); | ||
570 | + /* handle the case that the task is already scheduled somewhere! */ | ||
571 | + on_cpu = linked->rt_param.scheduled_on; | ||
572 | + if (on_cpu != NO_CPU) { | ||
573 | + sched = &per_cpu(gedf_cpu_entries, on_cpu); | ||
574 | + /* this should only happen if not linked already */ | ||
575 | + BUG_ON(sched->linked == linked); | ||
576 | + | ||
577 | + /* If we are already scheduled on the CPU to which we | ||
578 | + * wanted to link, we don't need to do the swap -- | ||
579 | + * we just link ourselves to the CPU and depend on | ||
580 | + * the caller to get things right. | ||
581 | + * | ||
582 | + * But only swap if the other node is in the queue. | ||
583 | + * If it is not, then it is being updated | ||
584 | + * concurrently and some other task was already | ||
585 | + * picked for it. | ||
586 | + */ | ||
587 | + if (entry != sched && heap_node_in_heap(sched->hn)) { | ||
588 | + TRACE_TASK(linked, | ||
589 | + "already scheduled on %d, " | ||
590 | + "updating link.\n", | ||
591 | + sched->cpu); | ||
592 | + tmp = sched->linked; | ||
593 | + linked->rt_param.linked_on = sched->cpu; | ||
594 | + sched->linked = linked; | ||
595 | + sched->picked = 1; | ||
596 | + update_cpu_position(sched); | ||
597 | + linked = tmp; | ||
598 | + } | ||
599 | + } | ||
600 | + if (linked) /* might be NULL due to swap */ | ||
601 | + linked->rt_param.linked_on = entry->cpu; | ||
602 | + } | ||
603 | + entry->linked = linked; | ||
604 | + entry->picked = entry == sched; /* set to one if we linked to | ||
605 | + * the CPU that the task is | ||
606 | + * executing on | ||
607 | + */ | ||
608 | + if (linked) | ||
609 | + TRACE_TASK(linked, "linked to %d.\n", entry->cpu); | ||
610 | + else | ||
611 | + TRACE("NULL linked to %d.\n", entry->cpu); | ||
612 | + update_cpu_position(entry); | ||
613 | +} | ||
614 | + | ||
615 | +/* unlink - Make sure a task is not linked any longer to an entry | ||
616 | + * where it was linked before. Must hold gedf_lock. | ||
617 | + */ | ||
618 | +static noinline void unlink(struct task_struct* t) | ||
619 | +{ | ||
620 | + cpu_entry_t *entry; | ||
621 | + | ||
622 | + if (t->rt_param.linked_on != NO_CPU) { | ||
623 | + /* unlink */ | ||
624 | + entry = &per_cpu(gedf_cpu_entries, t->rt_param.linked_on); | ||
625 | + t->rt_param.linked_on = NO_CPU; | ||
626 | + link_task_to_cpu(NULL, entry); | ||
627 | + } | ||
628 | +} | ||
629 | + | ||
630 | + | ||
631 | +/* preempt - force a CPU to reschedule | ||
632 | + */ | ||
633 | +static noinline void preempt(cpu_entry_t *entry) | ||
634 | +{ | ||
635 | + if (smp_processor_id() == entry->cpu) | ||
636 | + set_tsk_need_resched(current); | ||
637 | + else | ||
638 | + smp_send_reschedule(entry->cpu); | ||
639 | +} | ||
640 | + | ||
641 | + | ||
642 | +static void add_to_ready_queue(struct task_struct* task) | ||
643 | +{ | ||
644 | + TRACE_TASK(task, "adding to ready queue\n"); | ||
645 | + cheap_insert((cheap_prio_t) edf_higher_prio, | ||
646 | + &gedf_ready_queue, | ||
647 | + task, | ||
648 | + smp_processor_id()); | ||
649 | +} | ||
650 | + | ||
651 | +/* requeue - Put an unlinked task into the gedf domain. | ||
652 | + * Caller must hold gedf_lock. | ||
653 | + * | ||
654 | + * call unlocked, but with preemptions disabled! | ||
655 | + */ | ||
656 | +static noinline void requeue(struct task_struct* task) | ||
657 | +{ | ||
658 | + if (is_released(task, litmus_clock())) | ||
659 | + add_to_ready_queue(task); | ||
660 | + else | ||
661 | + /* it has got to wait */ | ||
662 | + add_release(&gedf, task); | ||
663 | +} | ||
664 | + | ||
665 | +static int preemption_required(cpu_entry_t* last, | ||
666 | + struct task_struct* task) | ||
667 | +{ | ||
668 | + if (edf_higher_prio(task, last->linked)) { | ||
669 | + /* yes, drop lock before dequeuing task | ||
670 | + * and dequeue cpu state | ||
671 | + */ | ||
672 | + last = lowest_prio_cpu(1); | ||
673 | + lockdep_on(); /* let lockdep see we actually released it */ | ||
674 | + spin_unlock(&gedf_cpu_lock); | ||
675 | + lockdep_off(); | ||
676 | + return 1; | ||
677 | + } else | ||
678 | + return 0; | ||
679 | +} | ||
680 | + | ||
681 | +/* check for any necessary preemptions */ | ||
682 | +static void check_for_preemptions(void) | ||
683 | +{ | ||
684 | + int done = 0; | ||
685 | + unsigned long flags; | ||
686 | + struct task_struct *task, *unlinked; | ||
687 | + cpu_entry_t* last; | ||
688 | + | ||
689 | + | ||
690 | + local_irq_save(flags); | ||
691 | + while (!done) { | ||
692 | + unlinked = NULL; | ||
693 | + spin_lock(&gedf_cpu_lock); | ||
694 | + last = lowest_prio_cpu(0); | ||
695 | + if (likely(last)) { | ||
696 | + task = cheap_take_if( | ||
697 | + (cheap_take_predicate_t) preemption_required, | ||
698 | + last, | ||
699 | + (cheap_prio_t) edf_higher_prio, | ||
700 | + &gedf_ready_queue); | ||
701 | + if (task) { | ||
702 | + TRACE_TASK(task, "removed from ready Q\n"); | ||
703 | + /* cpu lock was dropped, reacquire */ | ||
704 | + spin_lock(&gedf_cpu_lock); | ||
705 | + if (last->linked && !last->picked) | ||
706 | + /* can be requeued by us */ | ||
707 | + unlinked = last->linked; | ||
708 | + TRACE("check_for_preemptions: " | ||
709 | + "attempting to link task %d to %d\n", | ||
710 | + task->pid, last->cpu); | ||
711 | + link_task_to_cpu(task, last); | ||
712 | + update_cpu_position(last); | ||
713 | + } else | ||
714 | + /* no preemption required */ | ||
715 | + done = 1; | ||
716 | + } else | ||
717 | + /* all gone, being checked elsewhere? */ | ||
718 | + done = 1; | ||
719 | + spin_unlock(&gedf_cpu_lock); | ||
720 | + if (unlinked) | ||
721 | + /* stick it back into the queue */ | ||
722 | + requeue(unlinked); | ||
723 | + if (last && !done) | ||
724 | + /* we have a preemption, send IPI */ | ||
725 | + preempt(last); | ||
726 | + } | ||
727 | + local_irq_restore(flags); | ||
728 | +} | ||
729 | + | ||
730 | +/* gedf_job_arrival: task is either resumed or released | ||
731 | + * call only unlocked! | ||
732 | + */ | ||
733 | +static noinline void gedf_job_arrival(struct task_struct* task) | ||
734 | +{ | ||
735 | + requeue(task); | ||
736 | + check_for_preemptions(); | ||
737 | +} | ||
738 | + | ||
739 | +static void gedf_release_jobs(rt_domain_t* rt, struct heap* tasks) | ||
740 | +{ | ||
741 | + struct heap_node* hn; | ||
742 | + struct task_struct* t; | ||
743 | + unsigned long flags; | ||
744 | + | ||
745 | + | ||
746 | + local_irq_save(flags); | ||
747 | + /* insert unlocked */ | ||
748 | + while ((hn = heap_take(edf_ready_order, tasks))) { | ||
749 | + t = (struct task_struct*) hn->value; | ||
750 | + TRACE_TASK(t, "to be merged into ready queue " | ||
751 | + "(is_released:%d, is_running:%d)\n", | ||
752 | + is_released(t, litmus_clock()), | ||
753 | + is_running(t)); | ||
754 | + add_to_ready_queue(t); | ||
755 | + } | ||
756 | + | ||
757 | + local_irq_restore(flags); | ||
758 | + check_for_preemptions(); | ||
759 | +} | ||
760 | + | ||
761 | +/* caller holds gedf_lock */ | ||
762 | +static noinline int job_completion(cpu_entry_t* entry, int forced) | ||
763 | +{ | ||
764 | + | ||
765 | + struct task_struct *t = entry->scheduled; | ||
766 | + | ||
767 | + sched_trace_task_completion(t, forced); | ||
768 | + | ||
769 | + TRACE_TASK(t, "job_completion().\n"); | ||
770 | + | ||
771 | + /* set flags */ | ||
772 | + set_rt_flags(t, RT_F_SLEEP); | ||
773 | + /* prepare for next period */ | ||
774 | + prepare_for_next_period(t); | ||
775 | + if (is_released(t, litmus_clock())) | ||
776 | + sched_trace_task_release(t); | ||
777 | + | ||
778 | + | ||
779 | + if (is_released(t, litmus_clock())){ | ||
780 | + /* we changed the priority, see if we need to preempt */ | ||
781 | + set_rt_flags(t, RT_F_RUNNING); | ||
782 | + update_cpu_position(entry); | ||
783 | + return 1; | ||
784 | + } | ||
785 | + else { | ||
786 | + /* it has got to wait */ | ||
787 | + unlink(t); | ||
788 | + add_release(&gedf, t); | ||
789 | + return 0; | ||
790 | + } | ||
791 | +} | ||
792 | + | ||
793 | +/* gedf_tick - this function is called for every local timer | ||
794 | + * interrupt. | ||
795 | + * | ||
796 | + * checks whether the current task has expired and checks | ||
797 | + * whether we need to preempt it if it has not expired | ||
798 | + */ | ||
799 | +static void gedf_tick(struct task_struct* t) | ||
800 | +{ | ||
801 | + if (is_realtime(t) && budget_exhausted(t)) | ||
802 | + set_tsk_need_resched(t); | ||
803 | +} | ||
804 | + | ||
805 | +static struct task_struct* gedf_schedule(struct task_struct * prev) | ||
806 | +{ | ||
807 | + cpu_entry_t* entry = &__get_cpu_var(gedf_cpu_entries); | ||
808 | + int out_of_time, sleep, preempt, exists, blocks; | ||
809 | + struct task_struct* next = NULL; | ||
810 | + | ||
811 | + /* Bail out early if we are the release master. | ||
812 | + * The release master never schedules any real-time tasks. | ||
813 | + */ | ||
814 | + if (gedf.release_master == entry->cpu) | ||
815 | + return NULL; | ||
816 | + | ||
817 | + TRACE_TASK(prev, "invoked gedf_schedule.\n"); | ||
818 | + | ||
819 | + /* sanity checking */ | ||
820 | + BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
821 | + BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
822 | + BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
823 | + | ||
824 | + /* (0) Determine state */ | ||
825 | + exists = entry->scheduled != NULL; | ||
826 | + blocks = exists && !is_running(entry->scheduled); | ||
827 | + out_of_time = exists && budget_exhausted(entry->scheduled); | ||
828 | + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
829 | + | ||
830 | + spin_lock(&gedf_cpu_lock); | ||
831 | + | ||
832 | + preempt = entry->scheduled != entry->linked; | ||
833 | + | ||
834 | + if (exists) | ||
835 | + TRACE_TASK(prev, | ||
836 | + "blocks:%d out_of_time:%d sleep:%d preempt:%d " | ||
837 | + "state:%d sig:%d\n", | ||
838 | + blocks, out_of_time, sleep, preempt, | ||
839 | + prev->state, signal_pending(prev)); | ||
840 | + if (preempt && entry->linked) | ||
841 | + TRACE_TASK(prev, "will be preempted by %s/%d\n", | ||
842 | + entry->linked->comm, entry->linked->pid); | ||
843 | + | ||
844 | + /* If a task blocks we have no choice but to reschedule. | ||
845 | + */ | ||
846 | + if (blocks) | ||
847 | + unlink(entry->scheduled); | ||
848 | + | ||
849 | + | ||
850 | + /* Any task that is preemptable and either exhausts its execution | ||
851 | + * budget or wants to sleep completes. We may have to reschedule after | ||
852 | + * this. Don't do a job completion if we block (can't have timers | ||
853 | + * running for blocked jobs). Preemptions go first for the same reason. | ||
854 | + */ | ||
855 | + if ((out_of_time || sleep) && !blocks && !preempt) { | ||
856 | + if (job_completion(entry, !sleep)) { | ||
857 | + /* Task might stay with us. | ||
858 | + * Drop locks and check for preemptions. | ||
859 | + */ | ||
860 | + spin_unlock(&gedf_cpu_lock); | ||
861 | + /* anything to update ? */ | ||
862 | + check_for_preemptions(); | ||
863 | + spin_lock(&gedf_cpu_lock); | ||
864 | + /* if something higher priority got linked, | ||
865 | + * then we need to add the task into the | ||
866 | + * ready queue (since it wasn't added by | ||
867 | + * check_for_preemptions b/c picked==1). | ||
868 | + */ | ||
869 | + if (entry->linked != prev) | ||
870 | + add_to_ready_queue(prev); | ||
871 | + } | ||
872 | + } | ||
873 | + | ||
874 | + /* Link pending task if we became unlinked. | ||
875 | + * NOTE: Do not hold locks while performing ready queue updates | ||
876 | + * since we want concurrent access to the queue. | ||
877 | + */ | ||
878 | + if (!entry->linked) { | ||
879 | + if (exists) | ||
880 | + /* We are committed to descheduling; erase marker | ||
881 | + * before we drop the lock. | ||
882 | + */ | ||
883 | + tsk_rt(prev)->scheduled_on = NO_CPU; | ||
884 | + spin_unlock(&gedf_cpu_lock); | ||
885 | + check_for_preemptions(); /* update links */ | ||
886 | + spin_lock(&gedf_cpu_lock); | ||
887 | + } | ||
888 | + | ||
889 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
890 | + * If linked is different from scheduled, then select linked as next. | ||
891 | + */ | ||
892 | + if (entry->linked != entry->scheduled) { | ||
893 | + /* Schedule a linked job? */ | ||
894 | + if (entry->linked) { | ||
895 | + entry->linked->rt_param.scheduled_on = entry->cpu; | ||
896 | + next = entry->linked; | ||
897 | + } | ||
898 | + if (entry->scheduled) | ||
899 | + entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
900 | + } else | ||
901 | + /* Only override Linux scheduler if we have a real-time task | ||
902 | + * scheduled that needs to continue. | ||
903 | + */ | ||
904 | + if (exists) | ||
905 | + next = prev; | ||
906 | + | ||
907 | + /* Mark entry->linked as being ours. Do this unconditionally since | ||
908 | + * entry->linked might have become reassigned to us while we dropped | ||
909 | + * entry->linked might have been reassigned to us while we dropped | ||
910 | + * entry->picked became reset. | ||
911 | + * entry->picked was reset. | ||
912 | + entry->picked = 1; | ||
913 | + if (next) | ||
914 | + tsk_rt(next)->scheduled_on = entry->cpu; | ||
915 | + spin_unlock(&gedf_cpu_lock); | ||
916 | + if (exists && preempt && !blocks) | ||
917 | + /* stick preempted task back into the ready queue */ | ||
918 | + gedf_job_arrival(prev); | ||
919 | + | ||
920 | + if (next) | ||
921 | + TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
922 | + else if (exists && !next) | ||
923 | + TRACE("becomes idle at %llu.\n", litmus_clock()); | ||
924 | + | ||
925 | + return next; | ||
926 | +} | ||
927 | + | ||
928 | + | ||
929 | +/* _finish_switch - we just finished the switch away from prev | ||
930 | + */ | ||
931 | +static void gedf_finish_switch(struct task_struct *prev) | ||
932 | +{ | ||
933 | + cpu_entry_t* entry = &__get_cpu_var(gedf_cpu_entries); | ||
934 | + | ||
935 | + entry->scheduled = is_realtime(current) ? current : NULL; | ||
936 | + TRACE_TASK(prev, "switched away from\n"); | ||
937 | +} | ||
938 | + | ||
939 | + | ||
940 | +/* Prepare a task for running in RT mode | ||
941 | + */ | ||
942 | +static void gedf_task_new(struct task_struct * t, int on_rq, int running) | ||
943 | +{ | ||
944 | + unsigned long flags; | ||
945 | + cpu_entry_t* entry; | ||
946 | + | ||
947 | + TRACE("gedf: task new %d\n", t->pid); | ||
948 | + | ||
949 | + spin_lock_irqsave(&gedf_cpu_lock, flags); | ||
950 | + | ||
951 | + /* setup job params */ | ||
952 | + release_at(t, litmus_clock()); | ||
953 | + | ||
954 | + if (running) { | ||
955 | + entry = &per_cpu(gedf_cpu_entries, task_cpu(t)); | ||
956 | + BUG_ON(entry->scheduled); | ||
957 | + if (entry->cpu != gedf.release_master) { | ||
958 | + entry->scheduled = t; | ||
959 | + t->rt_param.scheduled_on = task_cpu(t); | ||
960 | + } else { | ||
961 | + preempt(entry); | ||
962 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
963 | + } | ||
964 | + } else { | ||
965 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
966 | + } | ||
967 | + tsk_rt(t)->linked_on = NO_CPU; | ||
968 | + | ||
969 | + spin_unlock_irqrestore(&gedf_cpu_lock, flags); | ||
970 | + | ||
971 | + if (!running || entry->cpu == gedf.release_master) | ||
972 | + /* schedule() will not insert task into ready_queue */ | ||
973 | + gedf_job_arrival(t); | ||
974 | +} | ||
975 | + | ||
976 | +static void gedf_task_wake_up(struct task_struct *task) | ||
977 | +{ | ||
978 | + unsigned long flags; | ||
979 | + lt_t now; | ||
980 | + | ||
981 | + TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
982 | + | ||
983 | + spin_lock_irqsave(&gedf_cpu_lock, flags); | ||
984 | + now = litmus_clock(); | ||
985 | + if (is_tardy(task, now)) { | ||
986 | + /* new sporadic release */ | ||
987 | + release_at(task, now); | ||
988 | + sched_trace_task_release(task); | ||
989 | + } | ||
990 | + spin_unlock_irqrestore(&gedf_cpu_lock, flags); | ||
991 | + gedf_job_arrival(task); | ||
992 | +} | ||
993 | + | ||
994 | +static void gedf_task_block(struct task_struct *t) | ||
995 | +{ | ||
996 | + TRACE_TASK(t, "block at %llu\n", litmus_clock()); | ||
997 | +} | ||
998 | + | ||
999 | +static void gedf_task_exit(struct task_struct * t) | ||
1000 | +{ | ||
1001 | + unsigned long flags; | ||
1002 | + | ||
1003 | + /* unlink if necessary */ | ||
1004 | + spin_lock_irqsave(&gedf_cpu_lock, flags); | ||
1005 | + /* remove from CPU state, if necessary */ | ||
1006 | + unlink(t); | ||
1007 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
1008 | + gedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | ||
1009 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
1010 | + } else { | ||
1011 | + /* FIXME: If t is currently queued, then we need to | ||
1012 | + * dequeue it now; otherwise it will probably | ||
1013 | + * cause a crash once it is dequeued. | ||
1014 | + */ | ||
1015 | + TRACE_TASK(t, "called gedf_task_exit(), " | ||
1016 | + "but is not scheduled!\n"); | ||
1017 | + } | ||
1018 | + spin_unlock_irqrestore(&gedf_cpu_lock, flags); | ||
1019 | + | ||
1020 | + TRACE_TASK(t, "RIP\n"); | ||
1021 | +} | ||
1022 | + | ||
1023 | +static long gedf_admit_task(struct task_struct* tsk) | ||
1024 | +{ | ||
1025 | + return 0; | ||
1026 | +} | ||
1027 | + | ||
1028 | + | ||
1029 | +static long gedf_activate_plugin(void) | ||
1030 | +{ | ||
1031 | + int cpu; | ||
1032 | + cpu_entry_t *entry; | ||
1033 | + | ||
1034 | + heap_init(&gedf_cpu_heap); | ||
1035 | + gedf.release_master = atomic_read(&release_master_cpu); | ||
1036 | + | ||
1037 | + for_each_online_cpu(cpu) { | ||
1038 | + entry = &per_cpu(gedf_cpu_entries, cpu); | ||
1039 | + heap_node_init(&entry->hn, entry); | ||
1040 | + entry->linked = NULL; | ||
1041 | + entry->scheduled = NULL; | ||
1042 | + entry->picked = 0; | ||
1043 | + if (cpu != gedf.release_master) { | ||
1044 | + TRACE("G-EDF: Initializing CPU #%d.\n", cpu); | ||
1045 | + update_cpu_position(entry); | ||
1046 | + } else { | ||
1047 | + TRACE("G-EDF: CPU %d is release master.\n", cpu); | ||
1048 | + } | ||
1049 | + } | ||
1050 | + return 0; | ||
1051 | +} | ||
1052 | + | ||
1053 | + | ||
1054 | +/* Plugin object */ | ||
1055 | +static struct sched_plugin gedf_plugin __cacheline_aligned_in_smp = { | ||
1056 | + .plugin_name = "G-EDF", | ||
1057 | + .finish_switch = gedf_finish_switch, | ||
1058 | + .tick = gedf_tick, | ||
1059 | + .task_new = gedf_task_new, | ||
1060 | + .complete_job = complete_job, | ||
1061 | + .task_exit = gedf_task_exit, | ||
1062 | + .schedule = gedf_schedule, | ||
1063 | + .task_wake_up = gedf_task_wake_up, | ||
1064 | + .task_block = gedf_task_block, | ||
1065 | + .admit_task = gedf_admit_task, | ||
1066 | + .activate_plugin = gedf_activate_plugin, | ||
1067 | +}; | ||
1068 | + | ||
1069 | + | ||
1070 | +static int __init init_gedf(void) | ||
1071 | +{ | ||
1072 | + int cpu; | ||
1073 | + cpu_entry_t *entry; | ||
1074 | + | ||
1075 | + cheap_init(&gedf_ready_queue, GEDF_MAX_TASKS, gedf_cheap_nodes); | ||
1076 | + /* initialize CPU state */ | ||
1077 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
1078 | + entry = &per_cpu(gedf_cpu_entries, cpu); | ||
1079 | + gedf_cpus[cpu] = entry; | ||
1080 | + entry->cpu = cpu; | ||
1081 | + entry->hn = &gedf_heap_node[cpu]; | ||
1082 | + heap_node_init(&entry->hn, entry); | ||
1083 | + } | ||
1084 | + edf_domain_init(&gedf, NULL, gedf_release_jobs); | ||
1085 | + return register_sched_plugin(&gedf_plugin); | ||
1086 | +} | ||
1087 | + | ||
1088 | + | ||
1089 | +module_init(init_gedf); | ||
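One idiom in the plugin above is worth spelling out: cpu_lower_prio() deliberately inverts its arguments, so the CPU heap keeps the CPU whose linked task has the lowest priority at the top, and check_for_preemptions() only ever compares a candidate against that single entry. A detached sketch of the same idiom, written against the litmus/heap.h calls already used above (illustration only, not part of the patch; all demo_* names and the integer priorities are invented):

	/* sketch: tracking the "worst" CPU via an inverted heap comparator */
	#include <litmus/heap.h>

	struct demo_cpu {
		int cpu;
		int linked_prio;        /* stand-in: smaller = higher task priority */
		struct heap_node* hn;
	};

	static struct heap demo_cpu_heap;
	static struct heap_node demo_heap_nodes[2];
	static struct demo_cpu demo_cpus[2];

	/* inverted comparison: node a sits above node b exactly when a's
	 * linked task has *lower* priority, so the heap top is always the
	 * preferred preemption target */
	static int demo_cpu_lower_prio(struct heap_node* _a, struct heap_node* _b)
	{
		struct demo_cpu *a = _a->value, *b = _b->value;
		return b->linked_prio < a->linked_prio;
	}

	static struct demo_cpu* demo_lowest_prio_cpu(void)
	{
		int i;
		struct heap_node* hn;

		heap_init(&demo_cpu_heap);
		for (i = 0; i < 2; i++) {
			demo_cpus[i].cpu = i;
			demo_cpus[i].linked_prio = 100 - i;  /* arbitrary demo values */
			demo_cpus[i].hn = &demo_heap_nodes[i];
			heap_node_init(&demo_cpus[i].hn, &demo_cpus[i]);
			heap_insert(demo_cpu_lower_prio, &demo_cpu_heap, demo_cpus[i].hn);
		}
		/* demo_cpus[0] in this toy setup: its linked task has the
		 * lowest priority, so it is the preferred preemption target */
		hn = heap_peek(demo_cpu_lower_prio, &demo_cpu_heap);
		return hn ? hn->value : NULL;
	}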
1090 | diff --git a/litmus/sched_ghq_edf.c b/litmus/sched_ghq_edf.c | ||
1091 | new file mode 100644 | ||
1092 | index 0000000..a9b1d06 | ||
1093 | --- /dev/null | ||
1094 | +++ b/litmus/sched_ghq_edf.c | ||
1095 | @@ -0,0 +1,720 @@ | ||
1096 | +#include <linux/spinlock.h> | ||
1097 | +#include <linux/percpu.h> | ||
1098 | +#include <linux/sched.h> | ||
1099 | + | ||
1100 | +#include <litmus/litmus.h> | ||
1101 | +#include <litmus/jobs.h> | ||
1102 | +#include <litmus/sched_plugin.h> | ||
1103 | +#include <litmus/edf_common.h> | ||
1104 | +#include <litmus/sched_trace.h> | ||
1105 | + | ||
1106 | +#include <litmus/heap.h> | ||
1107 | + | ||
1108 | +#include <linux/module.h> | ||
1109 | + | ||
1110 | +/* cpu_entry_t - maintain the linked and scheduled state | ||
1111 | + */ | ||
1112 | +typedef struct { | ||
1113 | + int cpu; | ||
1114 | + struct task_struct* linked; /* only RT tasks */ | ||
1115 | + int picked; /* linked was seen */ | ||
1116 | + struct task_struct* scheduled; /* only RT tasks */ | ||
1117 | + struct heap_node* hn; | ||
1118 | +} cpu_entry_t; | ||
1119 | +DEFINE_PER_CPU(cpu_entry_t, ghqedf_cpu_entries); | ||
1120 | + | ||
1121 | +DEFINE_SPINLOCK(ghqedf_cpu_lock); /* synchronize access to cpu heap */ | ||
1122 | + | ||
1123 | +cpu_entry_t* ghqedf_cpus[NR_CPUS]; | ||
1124 | + | ||
1125 | +/* the cpus queue themselves according to priority in here */ | ||
1126 | +static struct heap_node ghqedf_heap_node[NR_CPUS]; | ||
1127 | +static struct heap ghqedf_cpu_heap; | ||
1128 | + | ||
1129 | +static rt_domain_t ghqedf; /* used only for the release queue */ | ||
1130 | + | ||
1131 | +struct subqueue { | ||
1132 | + struct heap queue; | ||
1133 | + struct task_struct* top; | ||
1134 | + struct heap_node* hn; | ||
1135 | + spinlock_t lock; | ||
1136 | +}; | ||
1137 | + | ||
1138 | +/* per-cpu sub queue */ | ||
1139 | +//DEFINE_PER_CPU(struct subqueue, ghqedf_subqueue); | ||
1140 | + | ||
1141 | +struct subqueue ghqedf_subqueue[NR_CPUS]; | ||
1142 | + | ||
1143 | +/* heap nodes for subqueue::hn field */ | ||
1144 | +static struct heap_node ghqedf_subqueue_heap_node[NR_CPUS]; | ||
1145 | + | ||
1146 | +/* queue of sub queues */ | ||
1147 | +struct heap master_queue; | ||
1148 | + | ||
1149 | +/* re-use ready queue lock */ | ||
1150 | +#define master_lock (ghqedf.ready_lock) | ||
1151 | + | ||
1152 | +static int subqueue_higher_prio(struct heap_node *_a, struct heap_node *_b) | ||
1153 | +{ | ||
1154 | + struct subqueue *a, *b; | ||
1155 | + a = _a->value; | ||
1156 | + b = _b->value; | ||
1157 | + return edf_higher_prio(a->top, b->top); | ||
1158 | +} | ||
1159 | + | ||
1160 | +static void subqueues_init(void) | ||
1161 | +{ | ||
1162 | + int cpu; | ||
1163 | + struct subqueue *q; | ||
1164 | + | ||
1165 | + heap_init(&master_queue); | ||
1166 | + | ||
1167 | + for_each_online_cpu(cpu) { | ||
1168 | +// q = &per_cpu(ghqedf_subqueue, cpu); | ||
1169 | + q = ghqedf_subqueue + cpu; | ||
1170 | + heap_init(&q->queue); | ||
1171 | + q->top = NULL; | ||
1172 | + q->hn = ghqedf_subqueue_heap_node + cpu; | ||
1173 | + heap_node_init(&q->hn, q); | ||
1174 | + spin_lock_init(&q->lock); | ||
1175 | + heap_insert(subqueue_higher_prio, &master_queue, q->hn); | ||
1176 | + } | ||
1177 | +} | ||
1178 | + | ||
1179 | +static void __update_top(struct subqueue* q) | ||
1180 | +{ | ||
1181 | + struct heap_node *tmp; | ||
1182 | + | ||
1183 | + tmp = heap_peek(edf_ready_order, &q->queue); | ||
1184 | + q->top = tmp ? tmp->value : NULL; | ||
1185 | +} | ||
1186 | + | ||
1187 | +static void update_top(struct subqueue* q) | ||
1188 | +{ | ||
1189 | + spin_lock(&q->lock); | ||
1190 | + __update_top(q); | ||
1191 | + spin_unlock(&q->lock); | ||
1192 | +} | ||
1193 | + | ||
1194 | +static void merge_into_ready_queue(struct heap *h) | ||
1195 | +{ | ||
1196 | +// struct subqueue *q = &__get_cpu_var(ghqedf_subqueue); | ||
1197 | + struct subqueue *q = ghqedf_subqueue + smp_processor_id(); | ||
1198 | + struct heap_node *tmp; | ||
1199 | + void *old_top; | ||
1200 | + int changed_top = 0; | ||
1201 | + | ||
1202 | + spin_lock(&q->lock); | ||
1203 | + tmp = heap_peek(edf_ready_order, &q->queue); | ||
1204 | + old_top = tmp ? tmp->value : NULL; | ||
1205 | + heap_union(edf_ready_order, &q->queue, h); | ||
1206 | + /* is the new min the task that we just inserted? */ | ||
1207 | + changed_top = !old_top || | ||
1208 | + heap_peek(edf_ready_order, &q->queue)->value != old_top; | ||
1209 | + spin_unlock(&q->lock); | ||
1210 | + if (changed_top) { | ||
1211 | + /* need to update master queue */ | ||
1212 | + spin_lock(&master_lock); | ||
1213 | + /* If it is not in the heap then it is already | ||
1214 | + * being updated concurrently, so we skip it. | ||
1215 | + */ | ||
1216 | + if (likely(heap_node_in_heap(q->hn))) { | ||
1217 | + heap_delete(subqueue_higher_prio, &master_queue, q->hn); | ||
1218 | + update_top(q); | ||
1219 | + heap_insert(subqueue_higher_prio, &master_queue, q->hn); | ||
1220 | + } else | ||
1221 | + TRACE("not updating subqueue top\n"); | ||
1222 | + spin_unlock(&master_lock); | ||
1223 | + } | ||
1224 | +} | ||
1225 | + | ||
1226 | +static void add_to_ready_queue(struct task_struct *t) | ||
1227 | +{ | ||
1228 | + struct heap tmp; | ||
1229 | + | ||
1230 | + TRACE_TASK(t, "adding to ready queue\n"); | ||
1231 | + heap_init(&tmp); | ||
1232 | + heap_insert(edf_ready_order, &tmp, tsk_rt(t)->heap_node); | ||
1233 | + merge_into_ready_queue(&tmp); | ||
1234 | +} | ||
1235 | + | ||
1236 | + | ||
1237 | +static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b) | ||
1238 | +{ | ||
1239 | + cpu_entry_t *a, *b; | ||
1240 | + a = _a->value; | ||
1241 | + b = _b->value; | ||
1242 | + /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
1243 | + * the top of the heap. | ||
1244 | + */ | ||
1245 | + return edf_higher_prio(b->linked, a->linked); | ||
1246 | +} | ||
1247 | + | ||
1248 | +static void remove_from_cpu_heap(cpu_entry_t* entry) | ||
1249 | +{ | ||
1250 | + if (likely(heap_node_in_heap(entry->hn))) | ||
1251 | + heap_delete(cpu_lower_prio, &ghqedf_cpu_heap, entry->hn); | ||
1252 | +} | ||
1253 | + | ||
1254 | +/* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
1255 | + * order in the cpu queue. Caller must hold ghqedf lock. | ||
1256 | + */ | ||
1257 | +static void update_cpu_position(cpu_entry_t *entry) | ||
1258 | +{ | ||
1259 | + remove_from_cpu_heap(entry); | ||
1260 | + heap_insert(cpu_lower_prio, &ghqedf_cpu_heap, entry->hn); | ||
1261 | +} | ||
1262 | + | ||
1263 | +/* caller must hold ghqedf lock */ | ||
1264 | +static cpu_entry_t* lowest_prio_cpu(int take) | ||
1265 | +{ | ||
1266 | + struct heap_node* hn; | ||
1267 | + if (take) | ||
1268 | + hn = heap_take(cpu_lower_prio, &ghqedf_cpu_heap); | ||
1269 | + else | ||
1270 | + hn = heap_peek(cpu_lower_prio, &ghqedf_cpu_heap); | ||
1271 | + return hn ? hn->value : NULL; | ||
1272 | +} | ||
1273 | + | ||
1274 | + | ||
1275 | +/* link_task_to_cpu - Update the link of a CPU. | ||
1276 | + * Handles the case where the to-be-linked task is already | ||
1277 | + * scheduled on a different CPU. | ||
1278 | + */ | ||
1279 | +static noinline void link_task_to_cpu(struct task_struct* linked, | ||
1280 | + cpu_entry_t *entry) | ||
1281 | +{ | ||
1282 | + cpu_entry_t *sched = NULL; | ||
1283 | + struct task_struct* tmp; | ||
1284 | + int on_cpu; | ||
1285 | + | ||
1286 | + BUG_ON(linked && !is_realtime(linked)); | ||
1287 | + | ||
1288 | + /* Currently linked task is set to be unlinked. */ | ||
1289 | + if (entry->linked) { | ||
1290 | + entry->linked->rt_param.linked_on = NO_CPU; | ||
1291 | + } | ||
1292 | + | ||
1293 | + /* Link new task to CPU. */ | ||
1294 | + if (linked) { | ||
1295 | + set_rt_flags(linked, RT_F_RUNNING); | ||
1296 | + /* handle the case that the task is already scheduled somewhere! */ | ||
1297 | + on_cpu = linked->rt_param.scheduled_on; | ||
1298 | + if (on_cpu != NO_CPU) { | ||
1299 | + sched = &per_cpu(ghqedf_cpu_entries, on_cpu); | ||
1300 | + /* this should only happen if not linked already */ | ||
1301 | + BUG_ON(sched->linked == linked); | ||
1302 | + | ||
1303 | + /* If we are already scheduled on the CPU to which we | ||
1304 | + * wanted to link, we don't need to do the swap -- | ||
1305 | + * we just link ourselves to the CPU and depend on | ||
1306 | + * the caller to get things right. | ||
1307 | + * | ||
1308 | + * But only swap if the other node is in the queue. | ||
1309 | + * If it is not, then it is being updated | ||
1310 | + * concurrently and some other task was already | ||
1311 | + * picked for it. | ||
1312 | + */ | ||
1313 | + if (entry != sched && heap_node_in_heap(sched->hn)) { | ||
1314 | + TRACE_TASK(linked, | ||
1315 | + "already scheduled on %d, " | ||
1316 | + "updating link.\n", | ||
1317 | + sched->cpu); | ||
1318 | + tmp = sched->linked; | ||
1319 | + linked->rt_param.linked_on = sched->cpu; | ||
1320 | + sched->linked = linked; | ||
1321 | + sched->picked = 1; | ||
1322 | + update_cpu_position(sched); | ||
1323 | + linked = tmp; | ||
1324 | + } | ||
1325 | + } | ||
1326 | + if (linked) /* might be NULL due to swap */ | ||
1327 | + linked->rt_param.linked_on = entry->cpu; | ||
1328 | + } | ||
1329 | + entry->linked = linked; | ||
1330 | + entry->picked = entry == sched; /* set to one if we linked to | ||
1331 | + * the CPU that the task is | ||
1332 | + * executing on | ||
1333 | + */ | ||
1334 | + if (linked) | ||
1335 | + TRACE_TASK(linked, "linked to %d.\n", entry->cpu); | ||
1336 | + else | ||
1337 | + TRACE("NULL linked to %d.\n", entry->cpu); | ||
1338 | + update_cpu_position(entry); | ||
1339 | +} | ||
1340 | + | ||
1341 | +/* unlink - Make sure a task is not linked any longer to an entry | ||
1342 | + * where it was linked before. Must hold ghqedf_lock. | ||
1343 | + */ | ||
1344 | +static noinline void unlink(struct task_struct* t) | ||
1345 | +{ | ||
1346 | + cpu_entry_t *entry; | ||
1347 | + | ||
1348 | + if (t->rt_param.linked_on != NO_CPU) { | ||
1349 | + /* unlink */ | ||
1350 | + entry = &per_cpu(ghqedf_cpu_entries, t->rt_param.linked_on); | ||
1351 | + t->rt_param.linked_on = NO_CPU; | ||
1352 | + link_task_to_cpu(NULL, entry); | ||
1353 | + } | ||
1354 | +} | ||
1355 | + | ||
1356 | + | ||
1357 | +/* preempt - force a CPU to reschedule | ||
1358 | + */ | ||
1359 | +static noinline void preempt(cpu_entry_t *entry) | ||
1360 | +{ | ||
1361 | + if (smp_processor_id() == entry->cpu) | ||
1362 | + set_tsk_need_resched(current); | ||
1363 | + else | ||
1364 | + smp_send_reschedule(entry->cpu); | ||
1365 | +} | ||
1366 | + | ||
1367 | +/* requeue - Put an unlinked task into the ghqedf domain. | ||
1368 | + * Caller must hold ghqedf_lock. | ||
1369 | + * | ||
1370 | + * call unlocked, but with preemptions disabled! | ||
1371 | + */ | ||
1372 | +static noinline void requeue(struct task_struct* task) | ||
1373 | +{ | ||
1374 | + if (is_released(task, litmus_clock())) | ||
1375 | + add_to_ready_queue(task); | ||
1376 | + else | ||
1377 | + /* it has got to wait */ | ||
1378 | + add_release(&ghqedf, task); | ||
1379 | +} | ||
1380 | + | ||
1381 | + | ||
1382 | +static struct task_struct* take_if_preempt_required(cpu_entry_t* last) | ||
1383 | +{ | ||
1384 | + struct heap_node* tmp; | ||
1385 | + struct subqueue* q; | ||
1386 | + struct task_struct* t; | ||
1387 | + int preempt_required = 0; | ||
1388 | + | ||
1389 | + spin_lock(&master_lock); | ||
1390 | + tmp = heap_peek(subqueue_higher_prio, &master_queue); | ||
1391 | + BUG_ON(!tmp); /* should be there */ | ||
1392 | + q = tmp->value; | ||
1393 | + | ||
1394 | + spin_lock(&q->lock); | ||
1395 | + tmp = heap_peek(edf_ready_order, &q->queue); | ||
1396 | + t = tmp ? tmp->value : NULL; | ||
1397 | + preempt_required = edf_higher_prio(t, last->linked); | ||
1398 | + if (preempt_required) { | ||
1399 | + /* take it out */ | ||
1400 | + last = lowest_prio_cpu(1); | ||
1401 | + spin_unlock(&ghqedf_cpu_lock); | ||
1402 | + heap_delete(subqueue_higher_prio, &master_queue, q->hn); | ||
1403 | + } | ||
1404 | + /* drop the master lock while we update the subqueue */ | ||
1405 | + spin_unlock(&master_lock); | ||
1406 | + | ||
1407 | + if (preempt_required) { | ||
1408 | + heap_delete(edf_ready_order, &q->queue, tmp); | ||
1409 | + /* precompute, so that next lookup is O(1) */ | ||
1410 | + __update_top(q); | ||
1411 | + spin_unlock(&q->lock); | ||
1412 | + | ||
1413 | + /* re-insert with new priority */ | ||
1414 | + spin_lock(&master_lock); | ||
1415 | + /* update, with right locking order */ | ||
1416 | + update_top(q); | ||
1417 | + heap_insert(subqueue_higher_prio, &master_queue, q->hn); | ||
1418 | + spin_unlock(&master_lock); | ||
1419 | + | ||
1420 | + return t; | ||
1421 | + } else { | ||
1422 | + spin_unlock(&q->lock); | ||
1423 | + return NULL; | ||
1424 | + } | ||
1425 | +} | ||
1426 | + | ||
1427 | + | ||
1428 | +/* check for any necessary preemptions */ | ||
1429 | +static void check_for_preemptions(void) | ||
1430 | +{ | ||
1431 | + int done = 0; | ||
1432 | + unsigned long flags; | ||
1433 | + struct task_struct *task, *unlinked; | ||
1434 | + cpu_entry_t* last; | ||
1435 | + | ||
1436 | + | ||
1437 | + local_irq_save(flags); | ||
1438 | + while (!done) { | ||
1439 | + unlinked = NULL; | ||
1440 | + spin_lock(&ghqedf_cpu_lock); | ||
1441 | + last = lowest_prio_cpu(0); | ||
1442 | + if (likely(last)) { | ||
1443 | + task = take_if_preempt_required(last); | ||
1444 | + if (task) { | ||
1445 | + TRACE_TASK(task, "removed from ready Q\n"); | ||
1446 | + /* cpu lock was dropped, reacquire */ | ||
1447 | + spin_lock(&ghqedf_cpu_lock); | ||
1448 | + if (last->linked && !last->picked) | ||
1449 | + /* can be requeued by us */ | ||
1450 | + unlinked = last->linked; | ||
1451 | + TRACE("check_for_preemptions: " | ||
1452 | + "attempting to link task %d to %d\n", | ||
1453 | + task->pid, last->cpu); | ||
1454 | + link_task_to_cpu(task, last); | ||
1455 | + update_cpu_position(last); | ||
1456 | + } else | ||
1457 | + /* no preemption required */ | ||
1458 | + done = 1; | ||
1459 | + } else | ||
1460 | + /* all gone, being checked elsewhere? */ | ||
1461 | + done = 1; | ||
1462 | + spin_unlock(&ghqedf_cpu_lock); | ||
1463 | + if (unlinked) | ||
1464 | + /* stick it back into the queue */ | ||
1465 | + requeue(unlinked); | ||
1466 | + if (last && !done) | ||
1467 | + /* we have a preemption, send IPI */ | ||
1468 | + preempt(last); | ||
1469 | + } | ||
1470 | + TRACE("done with preemption checking\n"); | ||
1471 | + local_irq_restore(flags); | ||
1472 | +} | ||
1473 | + | ||
1474 | +/* ghqedf_job_arrival: task is either resumed or released | ||
1475 | + * call only unlocked! | ||
1476 | + */ | ||
1477 | +static noinline void ghqedf_job_arrival(struct task_struct* task) | ||
1478 | +{ | ||
1479 | + requeue(task); | ||
1480 | + check_for_preemptions(); | ||
1481 | +} | ||
1482 | + | ||
1483 | +static void ghqedf_release_jobs(rt_domain_t* rt, struct heap* tasks) | ||
1484 | +{ | ||
1485 | + unsigned long flags; | ||
1486 | + | ||
1487 | + TRACE("release_jobs() invoked\n"); | ||
1488 | + local_irq_save(flags); | ||
1489 | + /* insert unlocked */ | ||
1490 | + merge_into_ready_queue(tasks); | ||
1491 | + local_irq_restore(flags); | ||
1492 | + check_for_preemptions(); | ||
1493 | +} | ||
1494 | + | ||
1495 | +/* caller holds ghqedf_lock */ | ||
1496 | +static noinline int job_completion(cpu_entry_t* entry, int forced) | ||
1497 | +{ | ||
1498 | + | ||
1499 | + struct task_struct *t = entry->scheduled; | ||
1500 | + | ||
1501 | + sched_trace_task_completion(t, forced); | ||
1502 | + | ||
1503 | + TRACE_TASK(t, "job_completion().\n"); | ||
1504 | + | ||
1505 | + /* set flags */ | ||
1506 | + set_rt_flags(t, RT_F_SLEEP); | ||
1507 | + /* prepare for next period */ | ||
1508 | + prepare_for_next_period(t); | ||
1509 | + if (is_released(t, litmus_clock())) | ||
1510 | + sched_trace_task_release(t); | ||
1511 | + | ||
1512 | + | ||
1513 | + if (is_released(t, litmus_clock())){ | ||
1514 | + /* we changed the priority, see if we need to preempt */ | ||
1515 | + set_rt_flags(t, RT_F_RUNNING); | ||
1516 | + update_cpu_position(entry); | ||
1517 | + return 1; | ||
1518 | + } | ||
1519 | + else { | ||
1520 | + /* it has got to wait */ | ||
1521 | + unlink(t); | ||
1522 | + add_release(&ghqedf, t); | ||
1523 | + return 0; | ||
1524 | + } | ||
1525 | +} | ||
1526 | + | ||
1527 | +/* ghqedf_tick - this function is called for every local timer | ||
1528 | + * interrupt. | ||
1529 | + * | ||
1530 | + * checks whether the current task has exhausted its budget and, | ||
1531 | + * if so, triggers a reschedule | ||
1532 | + */ | ||
1533 | +static void ghqedf_tick(struct task_struct* t) | ||
1534 | +{ | ||
1535 | + if (is_realtime(t) && budget_exhausted(t)) | ||
1536 | + set_tsk_need_resched(t); | ||
1537 | +} | ||
1538 | + | ||
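| +/* ghqedf_schedule() - main scheduling function. Determines the state of | ||
| + * the currently scheduled job (blocked, out of budget, sleeping, or | ||
| + * preempted), completes it if necessary, and returns entry->linked as the | ||
| + * next job whenever it differs from entry->scheduled. A preempted job is | ||
| + * put back into the ready queue after the lock has been dropped. | ||
| + */ | ||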
1539 | +static struct task_struct* ghqedf_schedule(struct task_struct * prev) | ||
1540 | +{ | ||
1541 | + cpu_entry_t* entry = &__get_cpu_var(ghqedf_cpu_entries); | ||
1542 | + int out_of_time, sleep, preempt, exists, blocks; | ||
1543 | + struct task_struct* next = NULL; | ||
1544 | + | ||
1545 | + /* Bail out early if we are the release master. | ||
1546 | + * The release master never schedules any real-time tasks. | ||
1547 | + */ | ||
1548 | + if (ghqedf.release_master == entry->cpu) | ||
1549 | + return NULL; | ||
1550 | + | ||
1551 | +// TRACE_TASK(prev, "invoked ghqedf_schedule.\n"); | ||
1552 | + | ||
1553 | + /* sanity checking */ | ||
1554 | + BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
1555 | + BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
1556 | + BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
1557 | + | ||
1558 | + /* (0) Determine state */ | ||
1559 | + exists = entry->scheduled != NULL; | ||
1560 | + blocks = exists && !is_running(entry->scheduled); | ||
1561 | + out_of_time = exists && budget_exhausted(entry->scheduled); | ||
1562 | + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
1563 | + | ||
1564 | + spin_lock(&ghqedf_cpu_lock); | ||
1565 | + | ||
1566 | + preempt = entry->scheduled != entry->linked; | ||
1567 | + | ||
1568 | + if (exists) | ||
1569 | + TRACE_TASK(prev, | ||
1570 | + "blocks:%d out_of_time:%d sleep:%d preempt:%d " | ||
1571 | + "state:%d sig:%d\n", | ||
1572 | + blocks, out_of_time, sleep, preempt, | ||
1573 | + prev->state, signal_pending(prev)); | ||
1574 | + if (preempt && entry->linked) | ||
1575 | + TRACE_TASK(prev, "will be preempted by %s/%d\n", | ||
1576 | + entry->linked->comm, entry->linked->pid); | ||
1577 | + | ||
1578 | + /* If a task blocks we have no choice but to reschedule. | ||
1579 | + */ | ||
1580 | + if (blocks) | ||
1581 | + unlink(entry->scheduled); | ||
1582 | + | ||
1583 | + | ||
1584 | + /* Any task that is preemptable and either exhausts its execution | ||
1585 | + * budget or wants to sleep completes. We may have to reschedule after | ||
1586 | + * this. Don't do a job completion if we block (can't have timers | ||
1587 | + * running for blocked jobs). Preemptions go first for the same reason. | ||
1588 | + */ | ||
1589 | + if ((out_of_time || sleep) && !blocks && !preempt) { | ||
1590 | + if (job_completion(entry, !sleep)) { | ||
1591 | + /* Task might stay with us. | ||
1592 | + * Drop locks and check for preemptions. | ||
1593 | + */ | ||
1594 | + spin_unlock(&ghqedf_cpu_lock); | ||
1595 | + /* anything to update ? */ | ||
1596 | + check_for_preemptions(); | ||
1597 | + spin_lock(&ghqedf_cpu_lock); | ||
1598 | + /* if something higher priority got linked, | ||
1599 | + * then we need to add the task into the | ||
1600 | + * ready queue (since it wasn't added by | ||
1601 | + * check_for_preemptions b/c picked==1. | ||
1602 | + * check_for_preemptions() because picked == 1). | ||
1603 | + if (entry->linked != prev) | ||
1604 | + add_to_ready_queue(prev); | ||
1605 | + } | ||
1606 | + } | ||
1607 | + | ||
1608 | + /* Link pending task if we became unlinked. | ||
1609 | + * NOTE: Do not hold locks while performing ready queue updates | ||
1610 | + * since we want concurrent access to the queue. | ||
1611 | + */ | ||
1612 | + if (!entry->linked) { | ||
1613 | + if (exists) | ||
1614 | + /* We are committed to descheduling; erase marker | ||
1615 | + * before we drop the lock. | ||
1616 | + */ | ||
1617 | + tsk_rt(prev)->scheduled_on = NO_CPU; | ||
1618 | + spin_unlock(&ghqedf_cpu_lock); | ||
1619 | + check_for_preemptions(); /* update links */ | ||
1620 | + spin_lock(&ghqedf_cpu_lock); | ||
1621 | + } | ||
1622 | + | ||
1623 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
1624 | + * If linked is different from scheduled, then select linked as next. | ||
1625 | + */ | ||
1626 | + if (entry->linked != entry->scheduled) { | ||
1627 | + /* Schedule a linked job? */ | ||
1628 | + if (entry->linked) { | ||
1629 | + entry->linked->rt_param.scheduled_on = entry->cpu; | ||
1630 | + entry->picked = 1; | ||
1631 | + next = entry->linked; | ||
1632 | + } | ||
1633 | + if (entry->scheduled) | ||
1634 | + entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
1635 | + } else | ||
1636 | + /* Only override Linux scheduler if we have a real-time task | ||
1637 | + * scheduled that needs to continue. | ||
1638 | + */ | ||
1639 | + if (exists) | ||
1640 | + next = prev; | ||
1641 | + | ||
1642 | + spin_unlock(&ghqedf_cpu_lock); | ||
1643 | + if (exists && preempt && !blocks) | ||
1644 | + /* stick preempted task back into the ready queue */ | ||
1645 | + ghqedf_job_arrival(prev); | ||
1646 | + | ||
1647 | + if (next) | ||
1648 | + TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
1649 | + else if (exists && !next) | ||
1650 | + TRACE("becomes idle at %llu.\n", litmus_clock()); | ||
1651 | + | ||
1652 | + return next; | ||
1653 | +} | ||
1654 | + | ||
1655 | + | ||
1656 | +/* _finish_switch - we just finished the switch away from prev | ||
1657 | + */ | ||
1658 | +static void ghqedf_finish_switch(struct task_struct *prev) | ||
1659 | +{ | ||
1660 | + cpu_entry_t* entry = &__get_cpu_var(ghqedf_cpu_entries); | ||
1661 | + | ||
1662 | + entry->scheduled = is_realtime(current) ? current : NULL; | ||
1663 | + TRACE_TASK(prev, "switched away from\n"); | ||
1664 | +} | ||
1665 | + | ||
1666 | + | ||
1667 | +/* Prepare a task for running in RT mode | ||
1668 | + */ | ||
1669 | +static void ghqedf_task_new(struct task_struct * t, int on_rq, int running) | ||
1670 | +{ | ||
1671 | + unsigned long flags; | ||
1672 | + cpu_entry_t* entry; | ||
1673 | + | ||
1674 | + TRACE("ghqedf: task new %d\n", t->pid); | ||
1675 | + | ||
1676 | + spin_lock_irqsave(&ghqedf_cpu_lock, flags); | ||
1677 | + | ||
1678 | + /* setup job params */ | ||
1679 | + release_at(t, litmus_clock()); | ||
1680 | + | ||
1681 | + if (running) { | ||
1682 | + entry = &per_cpu(ghqedf_cpu_entries, task_cpu(t)); | ||
1683 | + BUG_ON(entry->scheduled); | ||
1684 | + if (entry->cpu != ghqedf.release_master) { | ||
1685 | + entry->scheduled = t; | ||
1686 | + t->rt_param.scheduled_on = task_cpu(t); | ||
1687 | + } else { | ||
1688 | + preempt(entry); | ||
1689 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
1690 | + } | ||
1691 | + } else { | ||
1692 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
1693 | + } | ||
1694 | + tsk_rt(t)->linked_on = NO_CPU; | ||
1695 | + | ||
1696 | + spin_unlock_irqrestore(&ghqedf_cpu_lock, flags); | ||
1697 | + | ||
1698 | + if (!running || entry->cpu == ghqedf.release_master) | ||
1699 | + ghqedf_job_arrival(t); | ||
1700 | +} | ||
1701 | + | ||
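| +/* Handle a resuming job: a tardy resumption is treated as a new sporadic | ||
| + * release; in either case the job re-enters the ready queue via | ||
| + * ghqedf_job_arrival(), which also checks for preemptions. | ||
| + */ | ||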
1702 | +static void ghqedf_task_wake_up(struct task_struct *task) | ||
1703 | +{ | ||
1704 | + unsigned long flags; | ||
1705 | + lt_t now; | ||
1706 | + | ||
1707 | + TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
1708 | + | ||
1709 | + spin_lock_irqsave(&ghqedf_cpu_lock, flags); | ||
1710 | + now = litmus_clock(); | ||
1711 | + if (is_tardy(task, now)) { | ||
1712 | + /* new sporadic release */ | ||
1713 | + release_at(task, now); | ||
1714 | + sched_trace_task_release(task); | ||
1715 | + } | ||
1716 | + spin_unlock_irqrestore(&ghqedf_cpu_lock, flags); | ||
1717 | + ghqedf_job_arrival(task); | ||
1718 | +} | ||
1719 | + | ||
1720 | +static void ghqedf_task_block(struct task_struct *t) | ||
1721 | +{ | ||
1722 | + TRACE_TASK(t, "block at %llu\n", litmus_clock()); | ||
1723 | +} | ||
1724 | + | ||
1725 | +static void ghqedf_task_exit(struct task_struct * t) | ||
1726 | +{ | ||
1727 | + unsigned long flags; | ||
1728 | + | ||
1729 | + /* unlink if necessary */ | ||
1730 | + spin_lock_irqsave(&ghqedf_cpu_lock, flags); | ||
1731 | + /* remove from CPU state, if necessary */ | ||
1732 | + unlink(t); | ||
1733 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
1734 | + ghqedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | ||
1735 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
1736 | + } else { | ||
1737 | + /* FIXME: If t is currently queued, then we need to | ||
1738 | + * dequeue it now; otherwise it will probably | ||
1739 | + * cause a crash once it is dequeued. | ||
1740 | + */ | ||
1741 | + TRACE_TASK(t, "called ghqedf_task_exit(), " | ||
1742 | + "but is not scheduled!\n"); | ||
1743 | + } | ||
1744 | + spin_unlock_irqrestore(&ghqedf_cpu_lock, flags); | ||
1745 | + | ||
1746 | + TRACE_TASK(t, "RIP\n"); | ||
1747 | +} | ||
1748 | + | ||
1749 | +static long ghqedf_admit_task(struct task_struct* tsk) | ||
1750 | +{ | ||
1751 | + return 0; | ||
1752 | +} | ||
1753 | + | ||
1754 | + | ||
1755 | +static long ghqedf_activate_plugin(void) | ||
1756 | +{ | ||
1757 | + int cpu; | ||
1758 | + cpu_entry_t *entry; | ||
1759 | + | ||
1760 | + heap_init(&ghqedf_cpu_heap); | ||
1761 | + subqueues_init(); | ||
1762 | + ghqedf.release_master = atomic_read(&release_master_cpu); | ||
1763 | + | ||
1764 | + for_each_online_cpu(cpu) { | ||
1765 | + entry = &per_cpu(ghqedf_cpu_entries, cpu); | ||
1766 | + heap_node_init(&entry->hn, entry); | ||
1767 | + entry->linked = NULL; | ||
1768 | + entry->scheduled = NULL; | ||
1769 | + entry->picked = 0; | ||
1770 | + if (cpu != ghqedf.release_master) { | ||
1771 | + TRACE("GHQ-EDF: Initializing CPU #%d.\n", cpu); | ||
1772 | + update_cpu_position(entry); | ||
1773 | + } else { | ||
1774 | + TRACE("GHQ-EDF: CPU %d is release master.\n", cpu); | ||
1775 | + } | ||
1776 | + } | ||
1777 | + return 0; | ||
1778 | +} | ||
1779 | + | ||
1780 | + | ||
1781 | +/* Plugin object */ | ||
1782 | +static struct sched_plugin ghqedf_plugin __cacheline_aligned_in_smp = { | ||
1783 | + .plugin_name = "GHQ-EDF", | ||
1784 | + .finish_switch = ghqedf_finish_switch, | ||
1785 | + .tick = ghqedf_tick, | ||
1786 | + .task_new = ghqedf_task_new, | ||
1787 | + .complete_job = complete_job, | ||
1788 | + .task_exit = ghqedf_task_exit, | ||
1789 | + .schedule = ghqedf_schedule, | ||
1790 | + .task_wake_up = ghqedf_task_wake_up, | ||
1791 | + .task_block = ghqedf_task_block, | ||
1792 | + .admit_task = ghqedf_admit_task, | ||
1793 | + .activate_plugin = ghqedf_activate_plugin, | ||
1794 | +}; | ||
1795 | + | ||
1796 | + | ||
1797 | +static int __init init_ghqedf(void) | ||
1798 | +{ | ||
1799 | + int cpu; | ||
1800 | + cpu_entry_t *entry; | ||
1801 | + | ||
1802 | + /* initialize CPU state */ | ||
1803 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
1804 | + entry = &per_cpu(ghqedf_cpu_entries, cpu); | ||
1805 | + ghqedf_cpus[cpu] = entry; | ||
1806 | + entry->cpu = cpu; | ||
1807 | + entry->hn = &ghqedf_heap_node[cpu]; | ||
1808 | + heap_node_init(&entry->hn, entry); | ||
1809 | + } | ||
1810 | + edf_domain_init(&ghqedf, NULL, ghqedf_release_jobs); | ||
1811 | + return register_sched_plugin(&ghqedf_plugin); | ||
1812 | +} | ||
1813 | + | ||
1814 | + | ||
1815 | +module_init(init_ghqedf); | ||
1816 | diff --git a/litmus/sched_gq_edf.c b/litmus/sched_gq_edf.c | ||
1817 | new file mode 100644 | ||
1818 | index 0000000..7b6e8dd | ||
1819 | --- /dev/null | ||
1820 | +++ b/litmus/sched_gq_edf.c | ||
1821 | @@ -0,0 +1,606 @@ | ||
1822 | +/* A quantum-based implementation of G-EDF. | ||
1823 | + * | ||
1824 | + * Based on GSN-EDF. | ||
1825 | + */ | ||
1826 | + | ||
1827 | +#include <linux/spinlock.h> | ||
1828 | +#include <linux/percpu.h> | ||
1829 | +#include <linux/sched.h> | ||
1830 | + | ||
1831 | +#include <litmus/litmus.h> | ||
1832 | +#include <litmus/jobs.h> | ||
1833 | +#include <litmus/sched_plugin.h> | ||
1834 | +#include <litmus/edf_common.h> | ||
1835 | +#include <litmus/sched_trace.h> | ||
1836 | + | ||
1837 | +#include <litmus/heap.h> | ||
1838 | + | ||
1839 | +#include <linux/module.h> | ||
1840 | + | ||
1841 | +/* cpu_state_t - maintain the linked and scheduled state | ||
1842 | + */ | ||
1843 | +typedef struct { | ||
1844 | + int cpu; | ||
1845 | + struct task_struct* linked; /* only RT tasks */ | ||
1846 | + struct task_struct* scheduled; /* only RT tasks */ | ||
1847 | + struct task_struct* absentee; /* blocked quantum owner */ | ||
1848 | + struct heap_node* hn; | ||
1849 | +} cpu_state_t; | ||
1850 | +DEFINE_PER_CPU(cpu_state_t, gq_cpu_entries); | ||
1851 | + | ||
1852 | +cpu_state_t* gq_cpus[NR_CPUS]; | ||
1853 | + | ||
1854 | +/* the cpus queue themselves according to priority in here */ | ||
1855 | +static struct heap_node gq_heap_node[NR_CPUS]; | ||
1856 | +static struct heap gq_cpu_heap; | ||
1857 | +/* jobs to be merged at the beginning of the next quantum */ | ||
1858 | +static struct heap gq_released_heap; | ||
1859 | + | ||
1860 | + | ||
1861 | +static rt_domain_t gqedf; | ||
1862 | +#define gq_lock (gqedf.ready_lock) | ||
1863 | + | ||
1864 | +DEFINE_SPINLOCK(gq_release_lock); | ||
1865 | + | ||
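| +/* Trigger a reschedule on the given CPU: locally via the need_resched | ||
| + * flag, remotely via an IPI. | ||
| + */ | ||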
1866 | +static void preempt(cpu_state_t *entry) | ||
1867 | +{ | ||
1868 | + if (smp_processor_id() == entry->cpu) | ||
1869 | + set_tsk_need_resched(current); | ||
1870 | + else | ||
1871 | + smp_send_reschedule(entry->cpu); | ||
1872 | +} | ||
1873 | + | ||
1874 | +static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b) | ||
1875 | +{ | ||
1876 | + cpu_state_t *a, *b; | ||
1877 | + a = _a->value; | ||
1878 | + b = _b->value; | ||
1879 | + /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
1880 | + * the top of the heap. | ||
1881 | + */ | ||
1882 | + return edf_higher_prio(b->linked, a->linked); | ||
1883 | +} | ||
1884 | + | ||
1885 | +/* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
1886 | + * order in the cpu queue. Caller must hold gqedf lock. | ||
1887 | + */ | ||
1888 | +static void update_cpu_position(cpu_state_t *entry) | ||
1889 | +{ | ||
1890 | + if (likely(heap_node_in_heap(entry->hn))) | ||
1891 | + heap_delete(cpu_lower_prio, &gq_cpu_heap, entry->hn); | ||
1892 | + heap_insert(cpu_lower_prio, &gq_cpu_heap, entry->hn); | ||
1893 | +} | ||
1894 | + | ||
1895 | +/* caller must hold gqedf lock */ | ||
1896 | +static cpu_state_t* lowest_prio_cpu(void) | ||
1897 | +{ | ||
1898 | + struct heap_node* hn; | ||
1899 | + hn = heap_peek(cpu_lower_prio, &gq_cpu_heap); | ||
1900 | + return hn->value; //hn ? hn->value : NULL; | ||
1901 | +} | ||
1902 | + | ||
1903 | +/* link_task_to_cpu - Update the link of a CPU. | ||
1904 | + * Handles the case where the to-be-linked task is already | ||
1905 | + * scheduled on a different CPU. | ||
1906 | + */ | ||
1907 | +static noinline void link_task_to_cpu(struct task_struct* linked, | ||
1908 | + cpu_state_t *entry) | ||
1909 | +{ | ||
1910 | + cpu_state_t *sched; | ||
1911 | + struct task_struct* tmp; | ||
1912 | + int on_cpu; | ||
1913 | + | ||
1914 | + BUG_ON(linked && !is_realtime(linked)); | ||
1915 | + /* don't relink tasks that are already linked */ | ||
1916 | + BUG_ON(linked && tsk_rt(linked)->linked_on != NO_CPU); | ||
1917 | + | ||
1918 | + /* Currently linked task is set to be unlinked. */ | ||
1919 | + if (entry->linked) { | ||
1920 | + entry->linked->rt_param.linked_on = NO_CPU; | ||
1921 | + } | ||
1922 | + | ||
1923 | + /* Link new task to CPU. */ | ||
1924 | + if (linked) { | ||
1925 | + set_rt_flags(linked, RT_F_RUNNING); | ||
1926 | + /* handle the case where the task is already scheduled somewhere */ | ||
1927 | + on_cpu = linked->rt_param.scheduled_on; | ||
1928 | + if (on_cpu != NO_CPU) { | ||
1929 | + sched = &per_cpu(gq_cpu_entries, on_cpu); | ||
1930 | + /* this should only happen if not linked already */ | ||
1931 | + BUG_ON(sched->linked == linked); | ||
1932 | + | ||
1933 | + /* If we are already scheduled on the CPU to which we | ||
1934 | + * wanted to link, we don't need to do the swap -- | ||
1935 | + * we just link ourselves to the CPU and depend on | ||
1936 | + * the caller to get things right. | ||
1937 | + */ | ||
1938 | + if (entry != sched) { | ||
1939 | + TRACE_TASK(linked, | ||
1940 | + "already scheduled on %d, updating link.\n", | ||
1941 | + sched->cpu); | ||
1942 | + tmp = sched->linked; | ||
1943 | + linked->rt_param.linked_on = sched->cpu; | ||
1944 | + sched->linked = linked; | ||
1945 | + update_cpu_position(sched); | ||
1946 | + linked = tmp; | ||
1947 | + } | ||
1948 | + } | ||
1949 | + if (linked) /* might be NULL due to swap */ | ||
1950 | + linked->rt_param.linked_on = entry->cpu; | ||
1951 | + } | ||
1952 | + entry->linked = linked; | ||
1953 | + if (linked) | ||
1954 | + TRACE_TASK(linked, "linked to %d.\n", entry->cpu); | ||
1955 | + else | ||
1956 | + TRACE("NULL linked to %d.\n", entry->cpu); | ||
1957 | + update_cpu_position(entry); | ||
1958 | +} | ||
1959 | + | ||
1960 | +/* unlink - Make sure a task is not linked any longer to an entry | ||
1961 | + * where it was linked before. Must hold gq_lock. | ||
1962 | + */ | ||
1963 | +static noinline void unlink(struct task_struct* t) | ||
1964 | +{ | ||
1965 | + cpu_state_t *entry; | ||
1966 | + | ||
1967 | + if (unlikely(!t)) { | ||
1968 | + TRACE_BUG_ON(!t); | ||
1969 | + return; | ||
1970 | + } | ||
1971 | + | ||
1972 | + if (t->rt_param.linked_on != NO_CPU) { | ||
1973 | + /* unlink */ | ||
1974 | + entry = &per_cpu(gq_cpu_entries, t->rt_param.linked_on); | ||
1975 | + t->rt_param.linked_on = NO_CPU; | ||
1976 | + link_task_to_cpu(NULL, entry); | ||
1977 | + } else if (is_queued(t)) { | ||
1978 | + /* This is an interesting situation: t is scheduled, | ||
1979 | + * but was just recently unlinked. It cannot be | ||
1980 | + * linked anywhere else (because then it would have | ||
1981 | + * been relinked to this CPU), thus it must be in some | ||
1982 | + * queue. We must remove it from the list in this | ||
1983 | + * case. | ||
1984 | + */ | ||
1985 | + TRACE_TASK(t, "unlink() -> remove()\n"); | ||
1986 | + remove(&gqedf, t); | ||
1987 | + } | ||
1988 | +} | ||
1989 | + | ||
1990 | + | ||
1991 | +/* requeue - Put an unlinked task into the GQ-EDF domain. | ||
1992 | + * Caller must hold gq_lock. | ||
1993 | + */ | ||
1994 | +static noinline void requeue(struct task_struct* task) | ||
1995 | +{ | ||
1996 | + BUG_ON(!task); | ||
1997 | + /* sanity check before insertion */ | ||
1998 | + BUG_ON(is_queued(task)); | ||
1999 | + | ||
2000 | + if (is_released(task, litmus_clock())) | ||
2001 | + __add_ready(&gqedf, task); | ||
2002 | + else | ||
2003 | + /* it has got to wait */ | ||
2004 | + add_release(&gqedf, task); | ||
2005 | +} | ||
2006 | + | ||
2007 | +/* link the highest-priority ready jobs to CPUs until no preemption is needed */ | ||
2008 | +static void link_highest_priority_jobs(void) | ||
2009 | +{ | ||
2010 | + struct task_struct *task; | ||
2011 | + cpu_state_t* last; | ||
2012 | + | ||
2013 | + for(last = lowest_prio_cpu(); | ||
2014 | +// last && | ||
2015 | + edf_preemption_needed(&gqedf, last->linked); | ||
2016 | + last = lowest_prio_cpu()) { | ||
2017 | + TRACE("last cpu:%d linked:%s/%d preempt:%d\n", | ||
2018 | + last->cpu, | ||
2019 | + last->linked ? last->linked->comm : "---", | ||
2020 | + last->linked ? last->linked->pid : 0, | ||
2021 | + edf_preemption_needed(&gqedf, last->linked)); | ||
2022 | + /* preemption necessary */ | ||
2023 | + task = __take_ready(&gqedf); | ||
2024 | + TRACE("attempting to link task %d to %d at %llu\n", | ||
2025 | + task->pid, last->cpu, litmus_clock()); | ||
2026 | + if (last->linked) { | ||
2027 | + TRACE_TASK(last->linked, "requeued.\n"); | ||
2028 | + requeue(last->linked); | ||
2029 | + } | ||
2030 | + link_task_to_cpu(task, last); | ||
2031 | + } | ||
2032 | +} | ||
2033 | + | ||
2034 | +/* caller holds gq_lock */ | ||
2035 | +static void job_completion(struct task_struct *t, int forced) | ||
2036 | +{ | ||
2037 | + | ||
2038 | + sched_trace_task_completion(t, forced); | ||
2039 | + | ||
2040 | + TRACE_TASK(t, "job_completion(forced=%d), state:%d\n", forced, | ||
2041 | + t->state); | ||
2042 | + | ||
2043 | + /* prepare for next period */ | ||
2044 | + prepare_for_next_period(t); | ||
2045 | + if (is_released(t, litmus_clock())) | ||
2046 | + sched_trace_task_release(t); | ||
2047 | + /* unlink */ | ||
2048 | + unlink(t); | ||
2049 | + /* requeue the job, | ||
2050 | + * but don't requeue a blocking task */ | ||
2051 | + if (is_running(t)) | ||
2052 | + requeue(t); | ||
2053 | + else | ||
2054 | + TRACE_TASK(t, "job_completion(): not requeued, is not running. " | ||
2055 | + "state:%d\n", t->state); | ||
2056 | +} | ||
2057 | + | ||
2058 | + | ||
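| +/* Stage a newly released job; it is merged into the ready queue at the | ||
| + * next quantum boundary by merge_released_queue(). | ||
| + */ | ||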
2059 | +static void gq_add_released_queue(struct task_struct *t) | ||
2060 | +{ | ||
2061 | + spin_lock(&gq_release_lock); | ||
2062 | + heap_insert(edf_ready_order, &gq_released_heap, tsk_rt(t)->heap_node); | ||
2063 | + spin_unlock(&gq_release_lock); | ||
2064 | +} | ||
2065 | + | ||
2066 | +/* caller holds gq_lock, irqs are disabled */ | ||
2067 | +static void merge_released_queue(void) | ||
2068 | +{ | ||
2069 | +#ifdef CONFIG_SCHED_DEBUG_TRACE | ||
2070 | + struct heap_node* hn; | ||
2071 | + struct task_struct* t; | ||
2072 | +#endif | ||
2073 | + | ||
2074 | + spin_lock(&gq_release_lock); | ||
2075 | + | ||
2076 | +#ifdef CONFIG_SCHED_DEBUG_TRACE | ||
2077 | + /* do it individually (= slooow) | ||
2078 | + * so that we can trace each merge | ||
2079 | + */ | ||
2080 | + | ||
2081 | + | ||
2082 | + while ((hn = heap_take(edf_ready_order, &gq_released_heap))) { | ||
2083 | + t = (struct task_struct*) hn->value; | ||
2084 | + TRACE_TASK(t, "merged into ready queue (is_released:%d)\n", | ||
2085 | + is_released(t, litmus_clock())); | ||
2086 | + __add_ready(&gqedf, t); | ||
2087 | + } | ||
2088 | +#else | ||
2089 | + __merge_ready(&gqedf, &gq_released_heap); | ||
2090 | +#endif | ||
2091 | + | ||
2092 | + spin_unlock(&gq_release_lock); | ||
2093 | +} | ||
2094 | + | ||
2095 | +/* gq_tick - this function is called for every local timer | ||
2096 | + * interrupt. | ||
2097 | + * | ||
2098 | + * merges newly released jobs, updates the CPU links, and triggers | ||
2099 | + * a reschedule if a preemption is required or the budget is exhausted | ||
2100 | + */ | ||
2101 | +static void gq_tick(struct task_struct* t) | ||
2102 | +{ | ||
2103 | + unsigned long flags; | ||
2104 | + cpu_state_t* entry; | ||
2105 | + | ||
2106 | + spin_lock_irqsave(&gq_lock, flags); | ||
2107 | + entry = &__get_cpu_var(gq_cpu_entries); | ||
2108 | + entry->absentee = NULL; | ||
2109 | + | ||
2110 | + merge_released_queue(); | ||
2111 | + /* update linked if required */ | ||
2112 | + link_highest_priority_jobs(); | ||
2113 | + | ||
2114 | + if (entry->linked != entry->scheduled || | ||
2115 | + (is_realtime(t) && budget_exhausted(t))) | ||
2116 | + /* let's reschedule */ | ||
2117 | + set_tsk_need_resched(t); | ||
2118 | + | ||
2119 | + spin_unlock_irqrestore(&gq_lock, flags); | ||
2120 | +} | ||
2121 | + | ||
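| +/* gq_schedule() - pick the next job to run on this CPU. Links are updated | ||
| + * at quantum boundaries in gq_tick(), so this mainly handles blocking and | ||
| + * job completions and then switches to entry->linked whenever it differs | ||
| + * from entry->scheduled. | ||
| + */ | ||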
2122 | +static struct task_struct* gq_schedule(struct task_struct * prev) | ||
2123 | +{ | ||
2124 | + cpu_state_t* entry = &__get_cpu_var(gq_cpu_entries); | ||
2125 | + int sleep, preempt, exists, blocks, out_of_time; | ||
2126 | + struct task_struct* next = NULL; | ||
2127 | + | ||
2128 | + /* Bail out early if we are the release master. | ||
2129 | + * The release master never schedules any real-time tasks. | ||
2130 | + */ | ||
2131 | + if (gqedf.release_master == entry->cpu) | ||
2132 | + return NULL; | ||
2133 | + | ||
2134 | + spin_lock(&gq_lock); | ||
2135 | + | ||
2136 | + /* sanity checking */ | ||
2137 | + BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
2138 | + BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
2139 | + BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
2140 | + BUG_ON(entry->scheduled && tsk_rt(entry->scheduled)->scheduled_on != entry->cpu); | ||
2142 | + | ||
2143 | + /* Determine state */ | ||
2144 | + exists = entry->scheduled != NULL; | ||
2145 | + blocks = exists && !is_running(entry->scheduled); | ||
2146 | + out_of_time = exists && budget_exhausted(entry->scheduled); | ||
2147 | + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
2148 | + preempt = entry->scheduled != entry->linked; | ||
2149 | + | ||
2150 | + BUG_ON(blocks && sleep); | ||
2151 | + | ||
2152 | + TRACE_TASK(prev, "invoked gq_schedule.\n"); | ||
2153 | + | ||
2154 | + if (exists) | ||
2155 | + TRACE_TASK(prev, | ||
2156 | + "blocks:%d sleep:%d preempt:%d " | ||
2157 | + "state:%d sig:%d out_of_time:%d\n", | ||
2158 | + blocks, sleep, preempt, | ||
2159 | + prev->state, signal_pending(prev), | ||
2160 | + out_of_time); | ||
2161 | + if (entry->linked && preempt) | ||
2162 | + TRACE_TASK(prev, "will be preempted by %s/%d\n", | ||
2163 | + entry->linked->comm, entry->linked->pid); | ||
2164 | + | ||
2165 | + | ||
2166 | + if (blocks) { | ||
2167 | + /* Record task identity so that the task | ||
2168 | + * can be rescheduled when it resumes, | ||
2169 | + * but only do so when prev has not been | ||
2170 | + * preempted anyway. | ||
2171 | + */ | ||
2172 | + if (!preempt) { | ||
2173 | + entry->absentee = prev; | ||
2174 | + tsk_rt(prev)->last_cpu = entry->cpu; | ||
2175 | + } | ||
2176 | + unlink(entry->scheduled); | ||
2177 | + } | ||
2178 | + | ||
2179 | + if (sleep || out_of_time) | ||
2180 | + job_completion(entry->scheduled, !sleep); | ||
2181 | + | ||
2182 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
2183 | + * If linked is different from scheduled, then select linked as next. | ||
2184 | + */ | ||
2185 | + TRACE("gq: linked=%p scheduled=%p\n", entry->linked, entry->scheduled); | ||
2186 | + if (entry->linked != entry->scheduled) { | ||
2187 | + /* Schedule a linked job? */ | ||
2188 | + if (entry->linked) { | ||
2189 | + entry->linked->rt_param.scheduled_on = entry->cpu; | ||
2190 | + next = entry->linked; | ||
2191 | + } | ||
2192 | + if (entry->scheduled) { | ||
2193 | + /* kick this task off the CPU */ | ||
2194 | + entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
2195 | + TRACE_TASK(entry->scheduled, "scheduled_on <- NO_CPU\n"); | ||
2196 | + } | ||
2197 | + } else | ||
2198 | + /* Only override Linux scheduler if we have a real-time task | ||
2199 | + * scheduled that needs to continue. | ||
2200 | + */ | ||
2201 | + if (exists) | ||
2202 | + next = prev; | ||
2203 | + | ||
2204 | + spin_unlock(&gq_lock); | ||
2205 | + | ||
2206 | + TRACE("gq_lock released, next=0x%p\n", next); | ||
2207 | + | ||
2208 | + | ||
2209 | + if (next) | ||
2210 | + TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
2211 | + else if (exists && !next) | ||
2212 | + TRACE("becomes idle at %llu.\n", litmus_clock()); | ||
2213 | + | ||
2214 | + | ||
2215 | + return next; | ||
2216 | +} | ||
2217 | + | ||
2218 | + | ||
2219 | +/* _finish_switch - we just finished the switch away from prev | ||
2220 | + */ | ||
2221 | +static void gq_finish_switch(struct task_struct *prev) | ||
2222 | +{ | ||
2223 | + cpu_state_t* entry = &__get_cpu_var(gq_cpu_entries); | ||
2224 | + | ||
2225 | + entry->scheduled = is_realtime(current) ? current : NULL; | ||
2226 | + TRACE_TASK(prev, "switched away from\n"); | ||
2227 | +} | ||
2228 | + | ||
2229 | + | ||
2230 | +/* Prepare a task for running in RT mode | ||
2231 | + */ | ||
2232 | +static void gq_task_new(struct task_struct * t, int on_rq, int running) | ||
2233 | +{ | ||
2234 | + unsigned long flags; | ||
2235 | + cpu_state_t* entry = NULL; | ||
2236 | + int on_rm = 0; | ||
2237 | + | ||
2238 | + spin_lock_irqsave(&gq_lock, flags); | ||
2239 | + | ||
2240 | + /* setup job params */ | ||
2241 | + release_at(t, litmus_clock()); | ||
2242 | + | ||
2243 | + if (running) { | ||
2244 | + entry = &per_cpu(gq_cpu_entries, task_cpu(t)); | ||
2245 | + BUG_ON(entry->scheduled); | ||
2246 | + on_rm = entry->cpu == gqedf.release_master; | ||
2247 | + } | ||
2248 | + | ||
2249 | + TRACE_TASK(t, "gq edf: task new (running:%d on_rm:%d)\n", | ||
2250 | + running, on_rm); | ||
2251 | + | ||
2252 | + if (running && on_rm) | ||
2253 | + preempt(entry); | ||
2254 | + | ||
2255 | + if (running && !on_rm) { | ||
2256 | + /* just leave it where it is, CPU was real-time idle */ | ||
2257 | + tsk_rt(t)->scheduled_on = task_cpu(t); | ||
2258 | + tsk_rt(t)->linked_on = task_cpu(t); | ||
2259 | + entry->scheduled = t; | ||
2260 | + if (entry->linked != NULL) { | ||
2261 | + /* Something raced and got assigned here. | ||
2262 | + * Kick it back into the queue, since t is | ||
2263 | + * already executing. | ||
2264 | + */ | ||
2265 | + tsk_rt(entry->linked)->linked_on = NO_CPU; | ||
2266 | + __add_ready(&gqedf, entry->linked); | ||
2267 | + } | ||
2268 | + entry->linked = t; | ||
2269 | + } | ||
2270 | + | ||
2271 | + if (!running || on_rm) { | ||
2272 | + /* should be released properly */ | ||
2273 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
2274 | + tsk_rt(t)->linked_on = NO_CPU; | ||
2275 | + gq_add_released_queue(t); | ||
2276 | + } | ||
2277 | + | ||
2278 | + spin_unlock_irqrestore(&gq_lock, flags); | ||
2279 | +} | ||
2280 | + | ||
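| +/* Handle a resuming job: a tardy resumption is treated as a new sporadic | ||
| + * release; otherwise the job is relinked immediately if it is still the | ||
| + * quantum owner on its last CPU, or staged for the next quantum. | ||
| + */ | ||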
2281 | +static void gq_task_wake_up(struct task_struct *task) | ||
2282 | +{ | ||
2283 | + unsigned long flags; | ||
2284 | + cpu_state_t* entry; | ||
2285 | + lt_t now; | ||
2286 | + | ||
2287 | + spin_lock_irqsave(&gq_lock, flags); | ||
2288 | + | ||
2289 | + now = litmus_clock(); | ||
2290 | + if (is_tardy(task, now)) { | ||
2291 | + TRACE_TASK(task, "wake_up => new release\n"); | ||
2292 | + /* Since task came back after its deadline, we | ||
2293 | + * assume that resuming indicates a new job release. | ||
2294 | + */ | ||
2295 | + release_at(task, now); | ||
2296 | + sched_trace_task_release(task); | ||
2297 | + gq_add_released_queue(task); | ||
2298 | + } else if (is_released(task, now)) { | ||
2299 | + set_rt_flags(task, RT_F_RUNNING); | ||
2300 | + entry = &per_cpu(gq_cpu_entries, tsk_rt(task)->last_cpu); | ||
2301 | + /* check if task is still the quantum owner on its CPU */ | ||
2302 | + if (entry->linked == NULL && entry->absentee == task) { | ||
2303 | + TRACE_TASK(task, "wake_up => is quantum owner\n"); | ||
2304 | + /* can resume right away */ | ||
2305 | + link_task_to_cpu(task, entry); | ||
2306 | + if (entry->scheduled != task) | ||
2307 | + preempt(entry); | ||
2308 | + } else { | ||
2309 | + /* becomes eligible at next quantum */ | ||
2310 | + TRACE_TASK(task, "wake_up => released_queue\n"); | ||
2311 | + gq_add_released_queue(task); | ||
2312 | + } | ||
2313 | + } else { | ||
2314 | + /* last suspension occurred together with a | ||
2315 | + * job completion | ||
2316 | + */ | ||
2317 | + TRACE_TASK(task, "wake_up => not yet released!\n"); | ||
2318 | + requeue(task); | ||
2319 | + } | ||
2320 | + spin_unlock_irqrestore(&gq_lock, flags); | ||
2321 | +} | ||
2322 | + | ||
2323 | +static void gq_task_block(struct task_struct *t) | ||
2324 | +{ | ||
2325 | + TRACE_TASK(t, "block at %llu\n", litmus_clock()); | ||
2326 | +} | ||
2327 | + | ||
2328 | + | ||
2329 | +static void gq_task_exit(struct task_struct * t) | ||
2330 | +{ | ||
2331 | + unsigned long flags; | ||
2332 | + | ||
2333 | + /* unlink if necessary */ | ||
2334 | + spin_lock_irqsave(&gq_lock, flags); | ||
2335 | + unlink(t); | ||
2336 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
2337 | + gq_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | ||
2338 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
2339 | + } | ||
2340 | + spin_unlock_irqrestore(&gq_lock, flags); | ||
2341 | + | ||
2342 | + BUG_ON(!is_realtime(t)); | ||
2343 | + TRACE_TASK(t, "RIP\n"); | ||
2344 | +} | ||
2345 | + | ||
2346 | + | ||
2347 | + | ||
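| +/* Release callback of the rt domain: newly released jobs are not made | ||
| + * eligible immediately but are parked in gq_released_heap until the next | ||
| + * quantum boundary (see merge_released_queue()). | ||
| + */ | ||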
2348 | +static void gq_release_jobs(rt_domain_t* rt, struct heap* tasks) | ||
2349 | +{ | ||
2350 | + unsigned long flags; | ||
2351 | + | ||
2352 | + spin_lock_irqsave(&gq_release_lock, flags); | ||
2353 | + TRACE("gq_release_jobs() at %llu\n", litmus_clock()); | ||
2354 | + | ||
2355 | + /* save tasks to queue so that they can be merged on next quantum */ | ||
2356 | + heap_union(edf_ready_order, &gq_released_heap, tasks); | ||
2357 | + | ||
2358 | + spin_unlock_irqrestore(&gq_release_lock, flags); | ||
2359 | +} | ||
2360 | + | ||
2361 | +static long gq_admit_task(struct task_struct* tsk) | ||
2362 | +{ | ||
2363 | + return 0; | ||
2364 | +} | ||
2365 | + | ||
2366 | + | ||
2367 | +static long gq_activate_plugin(void) | ||
2368 | +{ | ||
2369 | + int cpu; | ||
2370 | + cpu_state_t *entry; | ||
2371 | + | ||
2372 | + heap_init(&gq_cpu_heap); | ||
2373 | + heap_init(&gq_released_heap); | ||
2374 | + gqedf.release_master = atomic_read(&release_master_cpu); | ||
2375 | + | ||
2376 | + | ||
2377 | + for_each_online_cpu(cpu) { | ||
2378 | + entry = &per_cpu(gq_cpu_entries, cpu); | ||
2379 | + heap_node_init(&entry->hn, entry); | ||
2380 | + entry->linked = NULL; | ||
2381 | + entry->scheduled = NULL; | ||
2382 | + entry->absentee = NULL; | ||
2383 | + if (cpu != gqedf.release_master) { | ||
2384 | + TRACE("GQ-EDF: Initializing CPU #%d.\n", cpu); | ||
2385 | + update_cpu_position(entry); | ||
2386 | + } else { | ||
2387 | + TRACE("GQ-EDF: CPU %d is release master.\n", cpu); | ||
2388 | + } | ||
2389 | + } | ||
2390 | + return 0; | ||
2391 | +} | ||
2392 | + | ||
2393 | +/* Plugin object */ | ||
2394 | +static struct sched_plugin gq_edf_plugin __cacheline_aligned_in_smp = { | ||
2395 | + .plugin_name = "GQ-EDF", | ||
2396 | + .finish_switch = gq_finish_switch, | ||
2397 | + .tick = gq_tick, | ||
2398 | + .task_new = gq_task_new, | ||
2399 | + .complete_job = complete_job, | ||
2400 | + .task_exit = gq_task_exit, | ||
2401 | + .schedule = gq_schedule, | ||
2402 | + .task_wake_up = gq_task_wake_up, | ||
2403 | + .task_block = gq_task_block, | ||
2404 | + .admit_task = gq_admit_task, | ||
2405 | + .activate_plugin = gq_activate_plugin, | ||
2406 | +}; | ||
2407 | + | ||
2408 | + | ||
2409 | +static int __init init_gq_edf(void) | ||
2410 | +{ | ||
2411 | + int cpu; | ||
2412 | + cpu_state_t *entry; | ||
2413 | + | ||
2414 | + /* initialize CPU state */ | ||
2415 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
2416 | + entry = &per_cpu(gq_cpu_entries, cpu); | ||
2417 | + gq_cpus[cpu] = entry; | ||
2418 | + entry->cpu = cpu; | ||
2419 | + entry->hn = &gq_heap_node[cpu]; | ||
2420 | + heap_node_init(&entry->hn, entry); | ||
2421 | + } | ||
2422 | + edf_domain_init(&gqedf, NULL, gq_release_jobs); | ||
2423 | + return register_sched_plugin(&gq_edf_plugin); | ||
2424 | +} | ||
2425 | + | ||
2426 | + | ||
2427 | +module_init(init_gq_edf); | ||
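For readers who want to experiment with the linking rule shared by the plugins above (a newly released job is linked to the lowest-priority CPU whenever it has higher priority than that CPU's linked job), the following stand-alone user-space sketch illustrates the idea. It is not part of the patch; all names in it (toy_cpu, lower_prio, lowest_prio_cpu) are hypothetical, and a simple linear scan stands in for the CPU heap that the real plugins maintain.

/* Illustrative sketch only -- not part of litmus-rt-RTSS09.patch. */
#include <stdio.h>
#include <stdbool.h>

#define NCPUS 4

struct toy_cpu {
	int cpu;
	long long linked_deadline;	/* 0 == idle (no linked job) */
};

/* EDF: an earlier deadline means higher priority; an idle CPU has the
 * lowest possible priority and is therefore always a preferred target. */
static bool lower_prio(const struct toy_cpu *a, const struct toy_cpu *b)
{
	if (a->linked_deadline == 0)
		return true;
	if (b->linked_deadline == 0)
		return false;
	return a->linked_deadline > b->linked_deadline;
}

/* Linear-scan stand-in for peeking at the top of the CPU heap. */
static struct toy_cpu* lowest_prio_cpu(struct toy_cpu cpus[])
{
	struct toy_cpu *last = &cpus[0];
	int i;
	for (i = 1; i < NCPUS; i++)
		if (lower_prio(&cpus[i], last))
			last = &cpus[i];
	return last;
}

int main(void)
{
	struct toy_cpu cpus[NCPUS] = { {0, 100}, {1, 0}, {2, 250}, {3, 180} };
	long long new_job_deadline = 150;
	struct toy_cpu *last = lowest_prio_cpu(cpus);

	/* preemption check: does the new job beat the weakest linked job? */
	if (last->linked_deadline == 0 ||
	    new_job_deadline < last->linked_deadline) {
		printf("link job (deadline %lld) to CPU %d\n",
		       new_job_deadline, last->cpu);
		last->linked_deadline = new_job_deadline;
	} else {
		printf("no preemption required\n");
	}
	return 0;
}

Running the sketch links the job with deadline 150 to the idle CPU 1, mirroring the fact that an idle CPU always has the lowest priority and is therefore the first to be preempted.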
@@ -73,7 +73,7 @@ | |||
73 | </p> | 73 | </p> |
74 | <h3>Development Plans</h3> | 74 | <h3>Development Plans</h3> |
75 | <p class="nomargin"> | 75 | <p class="nomargin"> |
76 | Re-basing to the then-current Linux kernel version is scheduled for Fall 2009. There are plans to port LITMUS<sup>RT</sup> to PowerPC and ARM platforms. Please contact us for details. | 76 | Re-basing to the then-current Linux kernel version is scheduled for Spring 2010. There are plans to port LITMUS<sup>RT</sup> to PowerPC and ARM platforms. Please contact us for details. |
77 | </p> | 77 | </p> |
78 | </div> | 78 | </div> |
79 | 79 | ||
@@ -114,6 +114,25 @@ | |||
114 | <div class="box"> | 114 | <div class="box"> |
115 | 115 | ||
116 | <ol class="nomargin"> | 116 | <ol class="nomargin"> |
117 | <li><p> | ||
118 | B. Brandenburg and J. Anderson, | ||
119 | “On the Implementation of Global Real-Time | ||
120 | Schedulers”, <cite>Proceedings of the 30th IEEE Real-Time Systems Symposium</cite>, pp. 214-224, December 2009. | ||
121 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a.ps">Postscript</a>. | ||
122 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a.pdf">PDF</a>. | ||
123 | Longer version with all graphs: | ||
124 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a_long.ps">Postscript</a>. | ||
125 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a_long.pdf">PDF</a>. | ||
126 | </p> | ||
127 | <p> For reference, all evaluated plugins are provided as part of the following patch (against LITMUS<sup>RT</sup> version 2008.3). | ||
128 | </p> | ||
129 | <ul> | ||
130 | <li> | ||
131 | <a href="download/RTSS09/litmus-rt-RTSS09.patch">litmus-rt-RTSS09.patch</a> | ||
132 | </li> | ||
133 | </ul> | ||
134 | |||
135 | </li> | ||
117 | <li> | 136 | <li> |
118 | <p> | 137 | <p> |
119 | B. Brandenburg and J. Anderson | 138 | B. Brandenburg and J. Anderson |