author		Jeremy Erickson <jerickso@cs.unc.edu>	2012-05-26 16:14:12 -0400
committer	Jeremy Erickson <jerickso@cs.unc.edu>	2012-05-26 16:16:08 -0400
commit		5649fefe911fdadaefa579eaa672e7de583f113f (patch)
tree		e2c88955d9c2f2611c1a7890f66676e026528647 /litmus
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Initial GSN-EDF-split plugin

Diffstat (limited to 'litmus'):
-rw-r--r--	litmus/Makefile			|   1
-rw-r--r--	litmus/sched_gsn_edf_split.c	| 965
2 files changed, 966 insertions, 0 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index 7338180f196f..8efcb699935b 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -17,6 +17,7 @@ obj-y = sched_plugin.o litmus.o \
 	bheap.o \
 	ctrldev.o \
 	sched_gsn_edf.o \
+	sched_gsn_edf_split.o \
 	sched_psn_edf.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
diff --git a/litmus/sched_gsn_edf_split.c b/litmus/sched_gsn_edf_split.c
new file mode 100644
index 000000000000..1a4b06391d12
--- /dev/null
+++ b/litmus/sched_gsn_edf_split.c
@@ -0,0 +1,965 @@
/*
 * litmus/sched_gsn_edf_split.c
 *
 * Implementation of the GSN-EDF scheduling algorithm with job splitting,
 * i.e., GSN-EDF-split.
 *
 * This plugin is a modified version of the prior GSN-EDF plugin in
 * litmus/sched_gsn_edf.c.
 */
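/* Background (a sketch of the idea, not yet wired up in this initial
 * version): under job splitting, each job of a task with cost C and
 * period T is treated as s subjobs of cost roughly C/s with
 * correspondingly closer deadlines, which shrinks the per-job terms in
 * global-EDF tardiness bounds. The split_timer/timer_armed fields in
 * cpu_entry_t below are placeholders for enforcing such subjob
 * boundaries; the scheduling logic itself is still identical to GSN-EDF.
 */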

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>
#include <litmus/trace.h>

#include <litmus/preempt.h>

#include <litmus/bheap.h>

#ifdef CONFIG_SCHED_CPU_AFFINITY
#include <litmus/affinity.h>
#endif

#include <linux/module.h>

/* cpu_entry_t - maintain the linked and scheduled state
 */
typedef struct {
	int			cpu;
	struct task_struct*	linked;		/* only RT tasks */
	struct task_struct*	scheduled;	/* only RT tasks */
	struct bheap_node*	hn;
	struct hrtimer		split_timer;
	int			timer_armed;
} cpu_entry_t;
DEFINE_PER_CPU(cpu_entry_t, gsnedfsplit_cpu_entries);

cpu_entry_t* gsnedfsplit_cpus[NR_CPUS];

/* the cpus queue themselves according to priority in here */
static struct bheap_node gsnedfsplit_heap_node[NR_CPUS];
static struct bheap      gsnedfsplit_cpu_heap;

static rt_domain_t gsnedfsplit;
#define gsnedfsplit_lock (gsnedfsplit.ready_lock)


/* Uncomment this if you want to see all scheduling decisions in the
 * TRACE() log.
#define WANT_ALL_SCHED_EVENTS
 */

static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
	cpu_entry_t *a, *b;
	a = _a->value;
	b = _b->value;
	/* Note that a and b are inverted: we want the lowest-priority CPU at
	 * the top of the heap.
	 */
	return edf_higher_prio(b->linked, a->linked);
}
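
/* Example: with CPUs linked to jobs with deadlines 10, 20, and 30, the
 * inverted comparison puts the CPU holding the deadline-30 job at the
 * root, so lowest_prio_cpu() below can find the preemption candidate
 * with a single bheap_peek().
 */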

/* update_cpu_position - Move the cpu entry to the correct place to maintain
 *                       order in the cpu queue. Caller must hold gsnedfsplit
 *                       lock.
 */
static void update_cpu_position(cpu_entry_t *entry)
{
	if (likely(bheap_node_in_heap(entry->hn)))
		bheap_delete(cpu_lower_prio, &gsnedfsplit_cpu_heap, entry->hn);
	bheap_insert(cpu_lower_prio, &gsnedfsplit_cpu_heap, entry->hn);
}

/* caller must hold gsnedfsplit lock */
static cpu_entry_t* lowest_prio_cpu(void)
{
	struct bheap_node* hn;
	hn = bheap_peek(cpu_lower_prio, &gsnedfsplit_cpu_heap);
	return hn->value;
}


/* link_task_to_cpu - Update the link of a CPU.
 *                    Handles the case where the to-be-linked task is already
 *                    scheduled on a different CPU.
 */
static noinline void link_task_to_cpu(struct task_struct* linked,
				      cpu_entry_t *entry)
{
	cpu_entry_t *sched;
	struct task_struct* tmp;
	int on_cpu;

	BUG_ON(linked && !is_realtime(linked));

	/* Currently linked task is set to be unlinked. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
	}

	/* Link new task to CPU. */
	if (linked) {
		set_rt_flags(linked, RT_F_RUNNING);
		/* handle the case that the task is already scheduled somewhere */
		on_cpu = linked->rt_param.scheduled_on;
		if (on_cpu != NO_CPU) {
			sched = &per_cpu(gsnedfsplit_cpu_entries, on_cpu);
			/* this should only happen if not linked already */
			BUG_ON(sched->linked == linked);

			/* If we are already scheduled on the CPU to which we
			 * wanted to link, we don't need to do the swap --
			 * we just link ourselves to the CPU and depend on
			 * the caller to get things right.
			 */
			if (entry != sched) {
				TRACE_TASK(linked,
					   "already scheduled on %d, updating link.\n",
					   sched->cpu);
				tmp = sched->linked;
				linked->rt_param.linked_on = sched->cpu;
				sched->linked = linked;
				update_cpu_position(sched);
				linked = tmp;
			}
		}
		if (linked) /* might be NULL due to swap */
			linked->rt_param.linked_on = entry->cpu;
	}
	entry->linked = linked;
#ifdef WANT_ALL_SCHED_EVENTS
	if (linked)
		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
	else
		TRACE("NULL linked to %d.\n", entry->cpu);
#endif
	update_cpu_position(entry);
}
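
/* Example of the swap above: suppose T1 is still scheduled on CPU0 while
 * we try to link it to CPU1. Instead of forcing a migration, T1 is
 * linked to CPU0 (where it already runs), and CPU0's previously linked
 * task is handed back so that it gets linked to CPU1 in T1's place.
 */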

/* unlink - Make sure a task is not linked any longer to an entry
 *          where it was linked before. Must hold gsnedfsplit_lock.
 */
static noinline void unlink(struct task_struct* t)
{
	cpu_entry_t *entry;

	if (t->rt_param.linked_on != NO_CPU) {
		/* unlink */
		entry = &per_cpu(gsnedfsplit_cpu_entries, t->rt_param.linked_on);
		t->rt_param.linked_on = NO_CPU;
		link_task_to_cpu(NULL, entry);
	} else if (is_queued(t)) {
		/* This is an interesting situation: t is scheduled,
		 * but was just recently unlinked. It cannot be
		 * linked anywhere else (because then it would have
		 * been relinked to this CPU), thus it must be in some
		 * queue. We must remove it from the list in this
		 * case.
		 */
		remove(&gsnedfsplit, t);
	}
}


/* preempt - force a CPU to reschedule
 */
static void preempt(cpu_entry_t *entry)
{
	preempt_if_preemptable(entry->scheduled, entry->cpu);
}

/* requeue - Put an unlinked task into gsn-edf domain.
 *           Caller must hold gsnedfsplit_lock.
 */
static noinline void requeue(struct task_struct* task)
{
	BUG_ON(!task);
	/* sanity check before insertion */
	BUG_ON(is_queued(task));

	if (is_released(task, litmus_clock()))
		__add_ready(&gsnedfsplit, task);
	else {
		/* it has got to wait */
		add_release(&gsnedfsplit, task);
	}
}

#ifdef CONFIG_SCHED_CPU_AFFINITY
static cpu_entry_t* gsnedfsplit_get_nearest_available_cpu(cpu_entry_t *start)
{
	cpu_entry_t *affinity;

	get_nearest_available_cpu(affinity, start, gsnedfsplit_cpu_entries,
#ifdef CONFIG_RELEASE_MASTER
			gsnedfsplit.release_master
#else
			NO_CPU
#endif
			);

	return affinity;
}
#endif

/* check for any necessary preemptions */
static void check_for_preemptions(void)
{
	struct task_struct *task;
	cpu_entry_t *last;

	for (last = lowest_prio_cpu();
	     edf_preemption_needed(&gsnedfsplit, last->linked);
	     last = lowest_prio_cpu()) {
		/* preemption necessary */
		task = __take_ready(&gsnedfsplit);
		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
		      task->pid, last->cpu);

#ifdef CONFIG_SCHED_CPU_AFFINITY
		{
			cpu_entry_t *affinity =
				gsnedfsplit_get_nearest_available_cpu(
					&per_cpu(gsnedfsplit_cpu_entries, task_cpu(task)));
			if (affinity)
				last = affinity;
			else if (last->linked)
				requeue(last->linked);
		}
#else
		if (last->linked)
			requeue(last->linked);
#endif

		link_task_to_cpu(task, last);
		preempt(last);
	}
}
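
/* Worked example: suppose the ready queue holds jobs with deadlines
 * {5, 12} and the CPUs are linked to deadlines {8, 20}. Iteration 1:
 * the lowest-priority CPU is the one linked to 20; since 5 < 20, the
 * deadline-20 job is requeued and the deadline-5 job is linked there.
 * Iteration 2: the lowest-priority CPU is now the one linked to 8;
 * since 12 > 8, no further preemption is needed and the loop stops.
 */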

/* gsnedfsplit_job_arrival: task is either resumed or released */
static noinline void gsnedfsplit_job_arrival(struct task_struct* task)
{
	BUG_ON(!task);

	requeue(task);
	check_for_preemptions();
}

static void gsnedfsplit_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&gsnedfsplit_lock, flags);

	__merge_ready(rt, tasks);
	check_for_preemptions();

	raw_spin_unlock_irqrestore(&gsnedfsplit_lock, flags);
}

/* caller holds gsnedfsplit_lock */
static noinline void job_completion(struct task_struct *t, int forced)
{
	BUG_ON(!t);

	sched_trace_task_completion(t, forced);

	TRACE_TASK(t, "job_completion().\n");

	/* set flags */
	set_rt_flags(t, RT_F_SLEEP);
	/* prepare for next period */
	prepare_for_next_period(t);
	if (is_released(t, litmus_clock()))
		sched_trace_task_release(t);
	/* requeue, but don't requeue a blocking task */
	unlink(t);
	if (is_running(t))
		gsnedfsplit_job_arrival(t);
}

/* gsnedfsplit_tick - this function is called for every local timer
 *                    interrupt.
 *
 * Checks whether the current task has expired and whether
 * we need to preempt it if it has not expired.
 */
static void gsnedfsplit_tick(struct task_struct* t)
{
	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
		if (!is_np(t)) {
			/* np tasks will be preempted when they become
			 * preemptable again
			 */
			litmus_reschedule_local();
			TRACE("gsnedfsplit_scheduler_tick: "
			      "%d is preemptable "
			      "=> FORCE_RESCHED\n", t->pid);
		} else if (is_user_np(t)) {
			TRACE("gsnedfsplit_scheduler_tick: "
			      "%d is non-preemptable, "
			      "preemption delayed.\n", t->pid);
			request_exit_np(t);
		}
	}
}
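
/* Example of the budget-enforcement path: a job that is still running
 * when the tick finds its budget exhausted triggers a local reschedule;
 * gsnedfsplit_schedule() then detects out_of_time and forces a job
 * completion (forced = 1). If the job is in a non-preemptive section,
 * the preemption is deferred until it calls sys_exit_np().
 */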

/* Getting schedule() right is a bit tricky. schedule() may not make any
 * assumptions on the state of the current task since it may be called for a
 * number of reasons. The reasons include that a scheduler_tick() determined
 * it was necessary, that sys_exit_np() was called, that some Linux subsystem
 * determined so, or even (in the worst case) that there is a bug hidden
 * somewhere. Thus, we must take extreme care to determine what the current
 * state is.
 *
 * The CPU could currently be scheduling a task (or not), be linked (or not).
 *
 * The following assertions for the scheduled task could hold:
 *
 *	- !is_running(scheduled)	// the job blocks
 *	- scheduled->timeslice == 0	// the job completed (forcefully)
 *	- get_rt_flag() == RT_F_SLEEP	// the job completed (by syscall)
 *	- linked != scheduled		// we need to reschedule (for any reason)
 *	- is_np(scheduled)		// rescheduling must be delayed,
 *					// sys_exit_np must be requested
 *
 * Any of these can occur together.
 */
static struct task_struct* gsnedfsplit_schedule(struct task_struct * prev)
{
	cpu_entry_t* entry = &__get_cpu_var(gsnedfsplit_cpu_entries);
	int out_of_time, sleep, preempt, np, exists, blocks;
	struct task_struct* next = NULL;

#ifdef CONFIG_RELEASE_MASTER
	/* Bail out early if we are the release master.
	 * The release master never schedules any real-time tasks.
	 */
	if (unlikely(gsnedfsplit.release_master == entry->cpu)) {
		sched_state_task_picked();
		return NULL;
	}
#endif

	raw_spin_lock(&gsnedfsplit_lock);

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev);
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	/* (0) Determine state */
	exists      = entry->scheduled != NULL;
	blocks      = exists && !is_running(entry->scheduled);
	out_of_time = exists &&
		      budget_enforced(entry->scheduled) &&
		      budget_exhausted(entry->scheduled);
	np          = exists && is_np(entry->scheduled);
	sleep       = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
	preempt     = entry->scheduled != entry->linked;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "invoked gsnedfsplit_schedule.\n");
#endif

	if (exists)
		TRACE_TASK(prev,
			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
			   "state:%d sig:%d\n",
			   blocks, out_of_time, np, sleep, preempt,
			   prev->state, signal_pending(prev));
	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
			   entry->linked->comm, entry->linked->pid);


	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		unlink(entry->scheduled);

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * We need to make sure to update the link structure anyway in case
	 * that we are still linked. Multiple calls to request_exit_np() don't
	 * hurt.
	 */
	if (np && (out_of_time || preempt || sleep)) {
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. Don't do a job completion if we block (can't have timers running
	 * for blocked jobs). Preemptions go first for the same reason.
	 */
	if (!np && (out_of_time || sleep) && !blocks && !preempt)
		job_completion(entry->scheduled, !sleep);

	/* Link pending task if we became unlinked.
	 */
	if (!entry->linked)
		link_task_to_cpu(__take_ready(&gsnedfsplit), entry);

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if ((!np || blocks) &&
	    entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
			TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	sched_state_task_picked();

	raw_spin_unlock(&gsnedfsplit_lock);

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE("gsnedfsplit_lock released, next=0x%p\n", next);

	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());
#endif


	return next;
}
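
/* In short: gsnedfsplit_schedule() returns entry->linked whenever it
 * differs from entry->scheduled (unless prev is in a non-preemptive
 * section and did not block), returns prev when a real-time task should
 * simply keep running, and returns NULL to hand the CPU back to Linux.
 */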


/* _finish_switch - we just finished the switch away from prev
 */
static void gsnedfsplit_finish_switch(struct task_struct *prev)
{
	cpu_entry_t* entry = &__get_cpu_var(gsnedfsplit_cpu_entries);

	entry->scheduled = is_realtime(current) ? current : NULL;
#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "switched away from\n");
#endif
}


/* Prepare a task for running in RT mode
 */
static void gsnedfsplit_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long flags;
	cpu_entry_t* entry;

	TRACE("gsn edf split: task new %d\n", t->pid);

	raw_spin_lock_irqsave(&gsnedfsplit_lock, flags);

	/* setup job params */
	release_at(t, litmus_clock());

	if (running) {
		entry = &per_cpu(gsnedfsplit_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);

#ifdef CONFIG_RELEASE_MASTER
		if (entry->cpu != gsnedfsplit.release_master) {
#endif
			entry->scheduled = t;
			tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			/* do not schedule on release master */
			preempt(entry); /* force resched */
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
#endif
	} else {
		t->rt_param.scheduled_on = NO_CPU;
	}
	t->rt_param.linked_on = NO_CPU;

	gsnedfsplit_job_arrival(t);
	raw_spin_unlock_irqrestore(&gsnedfsplit_lock, flags);
}

static void gsnedfsplit_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	lt_t now;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

	raw_spin_lock_irqsave(&gsnedfsplit_lock, flags);
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
		set_rt_flags(task, RT_F_RUNNING);
	} else {
		now = litmus_clock();
		if (is_tardy(task, now)) {
			/* new sporadic release */
			release_at(task, now);
			sched_trace_task_release(task);
		} else {
			if (task->rt.time_slice) {
				/* came back in time before deadline */
				set_rt_flags(task, RT_F_RUNNING);
			}
		}
	}
	gsnedfsplit_job_arrival(task);
	raw_spin_unlock_irqrestore(&gsnedfsplit_lock, flags);
}

static void gsnedfsplit_task_block(struct task_struct *t)
{
	unsigned long flags;

	TRACE_TASK(t, "block at %llu\n", litmus_clock());

	/* unlink if necessary */
	raw_spin_lock_irqsave(&gsnedfsplit_lock, flags);
	unlink(t);
	raw_spin_unlock_irqrestore(&gsnedfsplit_lock, flags);

	BUG_ON(!is_realtime(t));
}


static void gsnedfsplit_task_exit(struct task_struct * t)
{
	unsigned long flags;

	/* unlink if necessary */
	raw_spin_lock_irqsave(&gsnedfsplit_lock, flags);
	unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		gsnedfsplit_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
	raw_spin_unlock_irqrestore(&gsnedfsplit_lock, flags);

	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "RIP\n");
}


static long gsnedfsplit_admit_task(struct task_struct* tsk)
{
	return 0;
}
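
/* Every task is admitted: like GSN-EDF, this plugin performs no
 * plugin-specific admission test; the generic parameter checks in the
 * LITMUS^RT core are the only gatekeeper.
 */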

#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/fdso.h>

/* called with IRQs off */
static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
{
	int linked_on;
	int check_preempt = 0;

	raw_spin_lock(&gsnedfsplit_lock);

	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
	tsk_rt(t)->inh_task = prio_inh;

	linked_on = tsk_rt(t)->linked_on;

	/* If it is scheduled, then we need to reorder the CPU heap. */
	if (linked_on != NO_CPU) {
		TRACE_TASK(t, "%s: linked on %d\n",
			   __FUNCTION__, linked_on);
		/* Holder is scheduled; need to re-order CPUs.
		 * We can't use heap_decrease() here since
		 * the cpu_heap is ordered in reverse direction, so
		 * it is actually an increase. */
		bheap_delete(cpu_lower_prio, &gsnedfsplit_cpu_heap,
			     gsnedfsplit_cpus[linked_on]->hn);
		bheap_insert(cpu_lower_prio, &gsnedfsplit_cpu_heap,
			     gsnedfsplit_cpus[linked_on]->hn);
	} else {
		/* holder may be queued: first stop queue changes */
		raw_spin_lock(&gsnedfsplit.release_lock);
		if (is_queued(t)) {
			TRACE_TASK(t, "%s: is queued\n",
				   __FUNCTION__);
			/* We need to update the position of the holder in some
			 * heap. Note that this could be a release heap if
			 * budget enforcement is used and this job overran. */
			check_preempt =
				!bheap_decrease(edf_ready_order,
						tsk_rt(t)->heap_node);
		} else {
			/* Nothing to do: if it is not queued and not linked
			 * then it is either sleeping or currently being moved
			 * by other code (e.g., a timer interrupt handler) that
			 * will use the correct priority when enqueuing the
			 * task. */
			TRACE_TASK(t, "%s: is NOT queued => Done.\n",
				   __FUNCTION__);
		}
		raw_spin_unlock(&gsnedfsplit.release_lock);

		/* If the holder was enqueued in a release heap, then the
		 * following preemption check is pointless, but we can't easily
		 * detect that case. If you want to fix this, then consider
		 * that simply adding a state flag requires O(n) time to update
		 * when releasing n tasks, which conflicts with the goal to
		 * have O(log n) merges. */
		if (check_preempt) {
			/* heap_decrease() hit the top level of the heap: make
			 * sure preemption checks get the right task, not the
			 * potentially stale cache. */
			bheap_uncache_min(edf_ready_order,
					  &gsnedfsplit.ready_queue);
			check_for_preemptions();
		}
	}

	raw_spin_unlock(&gsnedfsplit_lock);
}
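
/* Example: a job with deadline 50 holds an FMLP semaphore when a job
 * with deadline 10 blocks on it. set_priority_inheritance() makes the
 * holder compete with deadline 10 (via inh_task), so jobs with deadlines
 * in (10, 50) cannot delay it; once it unlocks,
 * clear_priority_inheritance() below restores its own deadline.
 */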

/* called with IRQs off */
static void clear_priority_inheritance(struct task_struct* t)
{
	raw_spin_lock(&gsnedfsplit_lock);

	/* A job only stops inheriting a priority when it releases a
	 * resource. Thus we can make the following assumption. */
	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);

	TRACE_TASK(t, "priority restored\n");
	tsk_rt(t)->inh_task = NULL;

	/* Check if rescheduling is necessary. We can't use heap_decrease()
	 * since the priority was effectively lowered. */
	unlink(t);
	gsnedfsplit_job_arrival(t);

	raw_spin_unlock(&gsnedfsplit_lock);
}


/* ******************** FMLP support ********************** */

/* struct for semaphore with priority inheritance */
struct fmlp_semaphore {
	struct litmus_lock litmus_lock;

	/* current resource holder */
	struct task_struct *owner;

	/* highest-priority waiter */
	struct task_struct *hp_waiter;

	/* FIFO queue of waiting tasks */
	wait_queue_head_t wait;
};

static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct fmlp_semaphore, litmus_lock);
}

/* caller is responsible for locking */
static struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem,
					  struct task_struct* skip)
{
	struct list_head *pos;
	struct task_struct *queued, *found = NULL;

	list_for_each(pos, &sem->wait.task_list) {
		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
							  task_list)->private;

		/* Compare task prios, find high prio task. */
		if (queued != skip && edf_higher_prio(queued, found))
			found = queued;
	}
	return found;
}

int gsnedfsplit_fmlp_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	wait_queue_t wait;
	unsigned long flags;

	if (!is_realtime(t))
		return -EPERM;

	spin_lock_irqsave(&sem->wait.lock, flags);

	if (sem->owner) {
		/* resource is not free => must suspend and wait */

		init_waitqueue_entry(&wait, t);

		/* FIXME: interruptible would be nice some day */
		set_task_state(t, TASK_UNINTERRUPTIBLE);

		__add_wait_queue_tail_exclusive(&sem->wait, &wait);

		/* check if we need to activate priority inheritance */
		if (edf_higher_prio(t, sem->hp_waiter)) {
			sem->hp_waiter = t;
			if (edf_higher_prio(t, sem->owner))
				set_priority_inheritance(sem->owner, sem->hp_waiter);
		}

		TS_LOCK_SUSPEND;

		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		/* We depend on the FIFO order. Thus, we don't need to recheck
		 * when we wake up; we are guaranteed to have the lock since
		 * there is only one wake up per release.
		 */

		schedule();

		TS_LOCK_RESUME;

		/* Since we hold the lock, no other task will change
		 * ->owner. We can thus check it without acquiring the spin
		 * lock. */
		BUG_ON(sem->owner != t);
	} else {
		/* it's ours now */
		sem->owner = t;

		spin_unlock_irqrestore(&sem->wait.lock, flags);
	}

	return 0;
}

int gsnedfsplit_fmlp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current, *next;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&sem->wait.lock, flags);

	if (sem->owner != t) {
		err = -EINVAL;
		goto out;
	}

	/* check if there are jobs waiting for this resource */
	next = __waitqueue_remove_first(&sem->wait);
	if (next) {
		/* next becomes the resource holder */
		sem->owner = next;
		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);

		/* determine new hp_waiter if necessary */
		if (next == sem->hp_waiter) {
			TRACE_TASK(next, "was highest-prio waiter\n");
			/* next has the highest priority --- it doesn't need to
			 * inherit. However, we need to make sure that the
			 * next-highest priority in the queue is reflected in
			 * hp_waiter. */
			sem->hp_waiter = find_hp_waiter(sem, next);
			if (sem->hp_waiter)
				TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
			else
				TRACE("no further waiters\n");
		} else {
			/* Well, if next is not the highest-priority waiter,
			 * then it ought to inherit the highest-priority
			 * waiter's priority. */
			set_priority_inheritance(next, sem->hp_waiter);
		}

		/* wake up next */
		wake_up_process(next);
	} else
		/* becomes available */
		sem->owner = NULL;

	/* we lose the benefit of priority inheritance (if any) */
	if (tsk_rt(t)->inh_task)
		clear_priority_inheritance(t);

out:
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	return err;
}

int gsnedfsplit_fmlp_close(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	unsigned long flags;

	int owner;

	spin_lock_irqsave(&sem->wait.lock, flags);

	owner = sem->owner == t;

	spin_unlock_irqrestore(&sem->wait.lock, flags);

	if (owner)
		gsnedfsplit_fmlp_unlock(l);

	return 0;
}

void gsnedfsplit_fmlp_free(struct litmus_lock* lock)
{
	kfree(fmlp_from_lock(lock));
}

static struct litmus_lock_ops gsnedfsplit_fmlp_lock_ops = {
	.close      = gsnedfsplit_fmlp_close,
	.lock       = gsnedfsplit_fmlp_lock,
	.unlock     = gsnedfsplit_fmlp_unlock,
	.deallocate = gsnedfsplit_fmlp_free,
};

static struct litmus_lock* gsnedfsplit_new_fmlp(void)
{
	struct fmlp_semaphore* sem;

	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;

	sem->owner     = NULL;
	sem->hp_waiter = NULL;
	init_waitqueue_head(&sem->wait);
	sem->litmus_lock.ops = &gsnedfsplit_fmlp_lock_ops;

	return &sem->litmus_lock;
}

/* **** lock constructor **** */


static long gsnedfsplit_allocate_lock(struct litmus_lock **lock, int type,
				      void* __user unused)
{
	int err = -ENXIO;

	/* GSN-EDF-split currently only supports the FMLP for global resources. */
	switch (type) {

	case FMLP_SEM:
		/* Flexible Multiprocessor Locking Protocol */
		*lock = gsnedfsplit_new_fmlp();
		if (*lock)
			err = 0;
		else
			err = -ENOMEM;
		break;

	}

	return err;
}
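
/* Userspace view (a sketch, assuming the usual liblitmus helpers):
 *
 *	int od = open_fmlp_sem(fd, 0);	// FMLP_SEM -> gsnedfsplit_allocate_lock()
 *	litmus_lock(od);		// -> gsnedfsplit_fmlp_lock()
 *	// ... critical section ...
 *	litmus_unlock(od);		// -> gsnedfsplit_fmlp_unlock()
 */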

#endif


static long gsnedfsplit_activate_plugin(void)
{
	int cpu;
	cpu_entry_t *entry;

	bheap_init(&gsnedfsplit_cpu_heap);
#ifdef CONFIG_RELEASE_MASTER
	gsnedfsplit.release_master = atomic_read(&release_master_cpu);
#endif

	for_each_online_cpu(cpu) {
		entry = &per_cpu(gsnedfsplit_cpu_entries, cpu);
		bheap_node_init(&entry->hn, entry);
		entry->linked    = NULL;
		entry->scheduled = NULL;
#ifdef CONFIG_RELEASE_MASTER
		if (cpu != gsnedfsplit.release_master) {
#endif
			TRACE("GSN-EDF-split: Initializing CPU #%d.\n", cpu);
			update_cpu_position(entry);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			TRACE("GSN-EDF-split: CPU %d is release master.\n", cpu);
		}
#endif
	}
	return 0;
}

/* Plugin object */
static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "GSN-EDF-split",
	.finish_switch		= gsnedfsplit_finish_switch,
	.tick			= gsnedfsplit_tick,
	.task_new		= gsnedfsplit_task_new,
	.complete_job		= complete_job,
	.task_exit		= gsnedfsplit_task_exit,
	.schedule		= gsnedfsplit_schedule,
	.task_wake_up		= gsnedfsplit_task_wake_up,
	.task_block		= gsnedfsplit_task_block,
	.admit_task		= gsnedfsplit_admit_task,
	.activate_plugin	= gsnedfsplit_activate_plugin,
#ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock		= gsnedfsplit_allocate_lock,
#endif
};


static int __init init_gsn_edf(void)
{
	int cpu;
	cpu_entry_t *entry;

	bheap_init(&gsnedfsplit_cpu_heap);
	/* initialize CPU state */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		entry = &per_cpu(gsnedfsplit_cpu_entries, cpu);
		gsnedfsplit_cpus[cpu] = entry;
		entry->cpu = cpu;
		entry->hn = &gsnedfsplit_heap_node[cpu];
		bheap_node_init(&entry->hn, entry);
	}
	edf_domain_init(&gsnedfsplit, NULL, gsnedfsplit_release_jobs);
	return register_sched_plugin(&gsn_edf_plugin);
}


module_init(init_gsn_edf);