diff options
Diffstat (limited to 'litmus/sched_gsn_edf_split_namechange.c')
-rw-r--r-- | litmus/sched_gsn_edf_split_namechange.c | 1165 |
1 files changed, 1165 insertions, 0 deletions
diff --git a/litmus/sched_gsn_edf_split_namechange.c b/litmus/sched_gsn_edf_split_namechange.c new file mode 100644 index 000000000000..6839ae642b3a --- /dev/null +++ b/litmus/sched_gsn_edf_split_namechange.c | |||
@@ -0,0 +1,1165 @@ | |||
1 | /* | ||
2 | * litmus/sched_gsn_edf.c | ||
3 | * | ||
 * Implementation of the GSN-EDF scheduling algorithm with job splitting,
 * i.e., GSN-EDF in which each job is divided into equal-length subjobs.
6 | * | ||
7 | * This plugin is a modified version of the prior GSN-EDF plugin in | ||
8 | * litmus/sched_gsn_edf.c | ||
9 | * | ||
10 | * Splitting an implicit-deadline job simply means splitting each job into an | ||
11 | * integral number of subjobs. For example, a task with a period of 10 ms and | ||
12 | * a runtime of 4 ms could be re-organized as a task with a period of 5 ms and | ||
13 | * a runtime of 2 ms, with analytical benefit for bounded tardiness (ignoring | ||
14 | * overheads and assuming no critical sections). This would have a "splitting | ||
15 | * factor" of 2. | ||
16 | * | ||
17 | * Because our analysis works with early releasing, we actually only release | ||
18 | * each job once, but move the subjob deadline back when the appropriate amount | ||
19 | * of execution has been completed. (In the example above, a job released at | ||
20 | * time 0 would intially have a subjob deadline at time 5, but this deadline | ||
21 | * would be moved to time 10 as soon as 2 ms of execution had completed.) | ||
22 | */ | ||
23 | |||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/percpu.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/slab.h> | ||
28 | |||
29 | #include <litmus/litmus.h> | ||
30 | #include <litmus/jobs.h> | ||
31 | #include <litmus/sched_plugin.h> | ||
32 | #include <litmus/edf_common.h> | ||
33 | #include <litmus/sched_trace.h> | ||
34 | #include <litmus/trace.h> | ||
35 | |||
36 | #include <litmus/preempt.h> | ||
37 | |||
38 | #include <litmus/bheap.h> | ||
39 | |||
40 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
41 | #include <litmus/affinity.h> | ||
42 | #endif | ||
43 | |||
44 | #include <linux/module.h> | ||
45 | |||
/* cpu_entry_t - maintain the linked and scheduled state of one CPU.
 * "linked" is the scheduler's decision (what *should* run here);
 * "scheduled" is reality (what *is* running here); the two are
 * reconciled in gsnedf_schedule().
 */
typedef struct {
	int cpu;
	struct task_struct* linked; /* only RT tasks */
	struct task_struct* scheduled; /* only RT tasks */
	struct bheap_node* hn; /* this CPU's node in gsnedf_cpu_heap */
	struct hrtimer split_timer; /* fires when a subjob deadline must move */
	int timer_armed; /* non-zero iff split_timer is armed */
} cpu_entry_t;
DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);

/* direct index from CPU id to its entry, filled at plugin init */
cpu_entry_t* gsnedf_cpus[NR_CPUS];

/* the cpus queue themselves according to priority in here */
static struct bheap_node gsnedf_heap_node[NR_CPUS];
static struct bheap gsnedf_cpu_heap;

/* single global EDF domain; its ready_lock doubles as the plugin lock */
static rt_domain_t gsnedf;
#define gsnedf_lock (gsnedf.ready_lock)
66 | |||
67 | inline static int get_slice_num(struct task_struct* t) | ||
68 | { | ||
69 | int basic = ((t->rt_param.job_params.exec_time * | ||
70 | t->rt_param.task_params.split) / | ||
71 | t->rt_param.task_params.exec_cost) + 1; | ||
72 | if (basic <= t->rt_param.task_params.split){ | ||
73 | return basic; | ||
74 | } | ||
75 | else{ | ||
76 | /*Since we don't police budget, just leave where it's at.*/ | ||
77 | return t->rt_param.task_params.split; | ||
78 | } | ||
79 | } | ||
80 | |||
81 | /* Returns the appropriate subjob deadline.*/ | ||
82 | inline static lt_t get_proper_deadline(struct task_struct* t) | ||
83 | { | ||
84 | return t->rt_param.job_params.release + | ||
85 | ((t->rt_param.task_params.period * get_slice_num(t)) | ||
86 | / t->rt_param.task_params.split); | ||
87 | } | ||
88 | |||
/* Tells us if the current deadline is too small, i.e. the recorded
 * subjob deadline lags behind the proper one for the current slice.
 * The proper deadline can only move later, never earlier -- hence the
 * BUG_ON below.
 */
inline static int needs_deadline_move(struct task_struct* t)
{
	BUG_ON(get_proper_deadline(t) < t->rt_param.job_params.subjob_deadline);
#ifdef CONFIG_LITMUS_LOCKING
	/* Do not move the deadline while the task is inside a critical
	 * section; that would lower its effective priority while it
	 * holds a resource.
	 */
	return !is_in_crit_section(t) &&
		(get_proper_deadline(t) !=
			tsk_rt(t)->job_params.subjob_deadline);
#else
	return get_proper_deadline(t) != tsk_rt(t)->job_params.subjob_deadline;
#endif
}
101 | |||
102 | /*Returns execution time until the next deadline move. | ||
103 | * 0 means the task has no more deadline moves | ||
104 | */ | ||
105 | inline static lt_t time_to_next_move(struct task_struct* t) | ||
106 | { | ||
107 | if (get_slice_num(t) == t->rt_param.task_params.split){ | ||
108 | return 0; | ||
109 | } | ||
110 | /* +1 upper bounds ceiling, since integer division is floor*/ | ||
111 | return ((get_slice_num(t) * t->rt_param.task_params.exec_cost) | ||
112 | / t->rt_param.task_params.split) + 1 | ||
113 | - t->rt_param.job_params.exec_time; | ||
114 | } | ||
115 | |||
116 | /* Timer stuff - similar to budget.c. */ | ||
117 | static enum hrtimer_restart on_split_timeout(struct hrtimer *timer) | ||
118 | { | ||
119 | cpu_entry_t* st = container_of(timer, | ||
120 | cpu_entry_t, | ||
121 | split_timer); | ||
122 | |||
123 | unsigned long flags; | ||
124 | |||
125 | local_irq_save(flags); | ||
126 | TRACE("split timer fired.\n"); | ||
127 | st->timer_armed = 0; | ||
128 | /* Activate scheduler */ | ||
129 | litmus_reschedule_local(); | ||
130 | local_irq_restore(flags); | ||
131 | |||
132 | return HRTIMER_NORESTART; | ||
133 | } | ||
134 | |||
135 | static void cancel_split_timer(cpu_entry_t* ce) | ||
136 | { | ||
137 | int ret; | ||
138 | |||
139 | TRACE("cancelling split time.\n"); | ||
140 | |||
141 | /* Since interrupts are disabled and et->timer_armed is only | ||
142 | * modified locally, we do not need any locks. | ||
143 | */ | ||
144 | |||
145 | if (ce->timer_armed) { | ||
146 | ret = hrtimer_try_to_cancel(&ce->split_timer); | ||
147 | /* Should never be inactive. */ | ||
148 | BUG_ON(ret == 0); | ||
149 | /* Should never be running concurrently.*/ | ||
150 | BUG_ON(ret == -1); | ||
151 | |||
152 | ce->timer_armed = 0; | ||
153 | } | ||
154 | } | ||
155 | |||
/* Arm the per-CPU split timer so it fires when task t has executed
 * enough to warrant its next subjob-deadline move.
 * assumes called with IRQs off
 */
static void arm_split_timer(cpu_entry_t *ce,
			    struct task_struct* t)
{
	lt_t when_to_fire;
	lt_t time_to_move;
	TRACE_TASK(t, "arming split timer.\n");

	/* __hrtimer_start_range_ns() cancels the timer
	 * anyway, so we don't have to check whether it is still armed */

	/* We won't do any new deadline moves if the budget has been
	 * exhausted (time_to_next_move() returns 0 then), nor while the
	 * task is non-preemptive.  Note the intentional assignment inside
	 * the condition.
	 */
	if (likely(!is_np(t) && (time_to_move = time_to_next_move(t)))) {
		when_to_fire = litmus_clock() + time_to_move;
		TRACE_TASK(t, "actually arming for %llu into the future\n",
			   time_to_move);
		__hrtimer_start_range_ns(&ce->split_timer,
					 ns_to_ktime(when_to_fire),
					 0 /* delta */,
					 HRTIMER_MODE_ABS_PINNED,
					 0 /* no wakeup */);
		ce->timer_armed = 1;
	}
}
180 | |||
181 | /* Uncomment this if you want to see all scheduling decisions in the | ||
182 | * TRACE() log. | ||
183 | #define WANT_ALL_SCHED_EVENTS | ||
184 | */ | ||
185 | |||
186 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | ||
187 | { | ||
188 | cpu_entry_t *a, *b; | ||
189 | a = _a->value; | ||
190 | b = _b->value; | ||
191 | /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
192 | * the top of the heap. | ||
193 | */ | ||
194 | return edf_higher_prio(b->linked, a->linked); | ||
195 | } | ||
196 | |||
197 | /* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
198 | * order in the cpu queue. Caller must hold gsnedf lock. | ||
199 | */ | ||
200 | static void update_cpu_position(cpu_entry_t *entry) | ||
201 | { | ||
202 | if (likely(bheap_node_in_heap(entry->hn))) | ||
203 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
204 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
205 | } | ||
206 | |||
207 | /* caller must hold gsnedf lock */ | ||
208 | static cpu_entry_t* lowest_prio_cpu(void) | ||
209 | { | ||
210 | struct bheap_node* hn; | ||
211 | hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap); | ||
212 | return hn->value; | ||
213 | } | ||
214 | |||
215 | |||
/* link_task_to_cpu - Update the link of a CPU.
 * Handles the case where the to-be-linked task is already
 * scheduled on a different CPU.  Caller must hold gsnedf_lock.
 * linked may be NULL to clear the CPU's link.
 */
static noinline void link_task_to_cpu(struct task_struct* linked,
				      cpu_entry_t *entry)
{
	cpu_entry_t *sched;
	struct task_struct* tmp;
	int on_cpu;

	BUG_ON(linked && !is_realtime(linked));

	/* Currently linked task is set to be unlinked. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
	}

	/* Link new task to CPU. */
	if (linked) {
		set_rt_flags(linked, RT_F_RUNNING);
		/* handle task is already scheduled somewhere! */
		on_cpu = linked->rt_param.scheduled_on;
		if (on_cpu != NO_CPU) {
			sched = &per_cpu(gsnedf_cpu_entries, on_cpu);
			/* this should only happen if not linked already */
			BUG_ON(sched->linked == linked);

			/* If we are already scheduled on the CPU to which we
			 * wanted to link, we don't need to do the swap --
			 * we just link ourselves to the CPU and depend on
			 * the caller to get things right.
			 */
			if (entry != sched) {
				TRACE_TASK(linked,
					   "already scheduled on %d, updating link.\n",
					   sched->cpu);
				/* Swap: link 'linked' where it is already
				 * running, and fall through to link that
				 * CPU's old task to 'entry' instead.  This
				 * avoids an unnecessary migration.
				 */
				tmp = sched->linked;
				linked->rt_param.linked_on = sched->cpu;
				sched->linked = linked;
				update_cpu_position(sched);
				linked = tmp;
			}
		}
		if (linked) /* might be NULL due to swap */
			linked->rt_param.linked_on = entry->cpu;
	}
	entry->linked = linked;
#ifdef WANT_ALL_SCHED_EVENTS
	if (linked)
		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
	else
		TRACE("NULL linked to %d.\n", entry->cpu);
#endif
	update_cpu_position(entry);
}
272 | |||
/* unlink - Make sure a task is not linked any longer to an entry
 * where it was linked before. Must hold gsnedf_lock.
 * Clears either the per-CPU link or the task's queue membership,
 * whichever currently holds the task.
 */
static noinline void unlink(struct task_struct* t)
{
	cpu_entry_t *entry;

	if (t->rt_param.linked_on != NO_CPU) {
		/* unlink */
		entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on);
		t->rt_param.linked_on = NO_CPU;
		link_task_to_cpu(NULL, entry);
	} else if (is_queued(t)) {
		/* This is an interesting situation: t is scheduled,
		 * but was just recently unlinked. It cannot be
		 * linked anywhere else (because then it would have
		 * been relinked to this CPU), thus it must be in some
		 * queue. We must remove it from the list in this
		 * case.
		 */
		remove(&gsnedf, t);
	}
}
296 | |||
297 | |||
/* preempt - force a CPU to reschedule.
 * NOTE(review): preempt_if_preemptable() presumably skips the IPI for
 * non-preemptable tasks -- confirm against litmus/preempt.h.
 */
static void preempt(cpu_entry_t *entry)
{
	preempt_if_preemptable(entry->scheduled, entry->cpu);
}
304 | |||
305 | /* requeue - Put an unlinked task into gsn-edf domain. | ||
306 | * Caller must hold gsnedf_lock. | ||
307 | */ | ||
308 | static noinline void requeue(struct task_struct* task) | ||
309 | { | ||
310 | BUG_ON(!task); | ||
311 | /* sanity check before insertion */ | ||
312 | BUG_ON(is_queued(task)); | ||
313 | |||
314 | if (is_released(task, litmus_clock())) | ||
315 | __add_ready(&gsnedf, task); | ||
316 | else { | ||
317 | /* it has got to wait */ | ||
318 | add_release(&gsnedf, task); | ||
319 | } | ||
320 | } | ||
321 | |||
#ifdef CONFIG_SCHED_CPU_AFFINITY
/* Find an available CPU near 'start' for cache-affinity-aware placement.
 * NOTE(review): 'affinity' is written by the get_nearest_available_cpu()
 * macro (litmus/affinity.h); it appears to yield NULL when no nearby CPU
 * is available -- confirm the contract there.  The release master (if
 * configured) is excluded from consideration.
 */
static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t *start)
{
	cpu_entry_t *affinity;

	get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
#ifdef CONFIG_RELEASE_MASTER
			gsnedf.release_master
#else
			NO_CPU
#endif
		);

	return(affinity);
}
#endif
338 | |||
/* check for any necessary preemptions: repeatedly link the
 * highest-priority ready task onto the lowest-priority CPU while a
 * preemption is warranted.  Caller must hold gsnedf_lock.
 */
static void check_for_preemptions(void)
{
	struct task_struct *task;
	cpu_entry_t *last;

	for (last = lowest_prio_cpu();
	     edf_preemption_needed(&gsnedf, last->linked);
	     last = lowest_prio_cpu()) {
		/* preemption necessary */
		task = __take_ready(&gsnedf);
		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
		      task->pid, last->cpu);

#ifdef CONFIG_SCHED_CPU_AFFINITY
		{
			/* Prefer a CPU near where the task last ran, if
			 * one is available; otherwise displace the task
			 * linked to the lowest-priority CPU.
			 */
			cpu_entry_t *affinity =
				gsnedf_get_nearest_available_cpu(
					&per_cpu(gsnedf_cpu_entries,
						 task_cpu(task)));
			if (affinity)
				last = affinity;
			else if (last->linked)
				requeue(last->linked);
		}
#else
		if (last->linked)
			requeue(last->linked);
#endif

		link_task_to_cpu(task, last);
		preempt(last);
	}
}
373 | |||
/* gsnedf_job_arrival: task is either resumed or released.
 * Makes the task available for scheduling and preempts a CPU if the
 * task now has sufficient priority.  Caller must hold gsnedf_lock
 * (required by requeue()/check_for_preemptions()).
 */
static noinline void gsnedf_job_arrival(struct task_struct* task)
{
	BUG_ON(!task);

	requeue(task);
	check_for_preemptions();
}
382 | |||
/* rt_domain release callback: a batch of jobs reached their release
 * time.  Merge them into the ready queue and check whether any CPU
 * must be preempted.
 */
static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&gsnedf_lock, flags);

	__merge_ready(rt, tasks);
	check_for_preemptions();

	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}
394 | |||
/* caller holds gsnedf_lock.
 * Complete the current job of t and set up the next one.
 * @forced: non-zero if the completion was forced (budget exhaustion)
 *          rather than signalled by the task itself.
 */
static noinline void job_completion(struct task_struct *t, int forced)
{
	BUG_ON(!t);

	sched_trace_task_completion(t, forced);

	TRACE_TASK(t, "job_completion().\n");

	/* set flags */
	set_rt_flags(t, RT_F_SLEEP);
	/* prepare for next period */
	/* prepare_for_next_period assumes implicit deadlines and no splitting,
	 * so we call it with the job deadline it expects.
	 */
	t->rt_param.job_params.deadline = t->rt_param.job_params.release +
		t->rt_param.task_params.period;
	prepare_for_next_period(t);
	/* We now set the subjob deadline to what it should be for scheduling
	 * priority.
	 */
	t->rt_param.job_params.subjob_deadline = get_proper_deadline(t);
	if (is_released(t, litmus_clock()))
		sched_trace_task_release(t);
	/* unlink */
	unlink(t);
	/* requeue
	 * But don't requeue a blocking task. */
	if (is_running(t))
		gsnedf_job_arrival(t);
}
426 | |||
/* Push t's subjob deadline back to the proper value for its current
 * slice, then re-evaluate scheduling since t's priority just dropped.
 * Caller must hold gsnedf_lock (required by unlink()).
 */
static void move_deadline(struct task_struct *t)
{
	tsk_rt(t)->job_params.subjob_deadline = get_proper_deadline(t);
	TRACE_TASK(t, "move_deadline called\nRelease: %llu\nPeriod: %llu"
		   "\nRelease + Period: %llu\nDeadline: %llu"
		   "\nDeadline - Release: %llu\n",
		   t->rt_param.job_params.release,
		   t->rt_param.task_params.period,
		   t->rt_param.job_params.release
		   + t->rt_param.task_params.period,
		   t->rt_param.job_params.subjob_deadline,
		   t->rt_param.job_params.subjob_deadline
		   - t->rt_param.job_params.release);
	/* Check if rescheduling needed with lower priority. */
	unlink(t);
	gsnedf_job_arrival(t);
}
444 | |||
445 | /* gsnedf_tick - this function is called for every local timer | ||
446 | * interrupt. | ||
447 | * | ||
448 | * checks whether the current task has expired and checks | ||
449 | * whether we need to preempt it if it has not expired | ||
450 | */ | ||
451 | static void gsnedf_tick(struct task_struct* t) | ||
452 | { | ||
453 | if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { | ||
454 | if (!is_np(t)) { | ||
455 | /* np tasks will be preempted when they become | ||
456 | * preemptable again | ||
457 | */ | ||
458 | litmus_reschedule_local(); | ||
459 | TRACE("gsnedf_scheduler_tick: " | ||
460 | "%d is preemptable " | ||
461 | " => FORCE_RESCHED\n", t->pid); | ||
462 | } else if (is_user_np(t)) { | ||
463 | TRACE("gsnedf_scheduler_tick: " | ||
464 | "%d is non-preemptable, " | ||
465 | "preemption delayed.\n", t->pid); | ||
466 | request_exit_np(t); | ||
467 | } | ||
468 | } | ||
469 | } | ||
470 | |||
/* Getting schedule() right is a bit tricky. schedule() may not make any
 * assumptions on the state of the current task since it may be called for a
 * number of reasons. The reasons include a scheduler_tick() determined that it
 * was necessary, because sys_exit_np() was called, because some Linux
 * subsystem determined so, or even (in the worst case) because there is a bug
 * hidden somewhere. Thus, we must take extreme care to determine what the
 * current state is.
 *
 * The CPU could currently be scheduling a task (or not), be linked (or not).
 *
 * The following assertions for the scheduled task could hold:
 *
 * - !is_running(scheduled)        // the job blocks
 * - scheduled->timeslice == 0     // the job completed (forcefully)
 * - get_rt_flag() == RT_F_SLEEP   // the job completed (by syscall)
 * - linked != scheduled           // we need to reschedule (for any reason)
 * - is_np(scheduled)              // rescheduling must be delayed,
 *                                    sys_exit_np must be requested
 *
 * Any of these can occur together.
 */
static struct task_struct* gsnedf_schedule(struct task_struct * prev)
{
	cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
	int out_of_time, sleep, preempt, np, exists, blocks, needs_move;
	struct task_struct* next = NULL;

#ifdef CONFIG_RELEASE_MASTER
	/* Bail out early if we are the release master.
	 * The release master never schedules any real-time tasks.
	 */
	if (unlikely(gsnedf.release_master == entry->cpu)) {
		sched_state_task_picked();
		return NULL;
	}
#endif

	raw_spin_lock(&gsnedf_lock);

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev);
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	/* (0) Determine state */
	exists = entry->scheduled != NULL;
	blocks = exists && !is_running(entry->scheduled);
	out_of_time = exists &&
		budget_enforced(entry->scheduled) &&
		budget_exhausted(entry->scheduled);
	/* needs_move: the subjob deadline must be pushed back (splitting) */
	needs_move = exists && needs_deadline_move(entry->scheduled);
	np = exists && is_np(entry->scheduled);
	sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
	preempt = entry->scheduled != entry->linked;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "invoked gsnedf_schedule.\n");
#endif

	if (exists)
		TRACE_TASK(prev,
			   "blocks:%d out_of_time:%d needs_move:%d np:%d"
			   " sleep:%d preempt:%d state:%d sig:%d\n",
			   blocks, out_of_time, needs_move, np, sleep, preempt,
			   prev->state, signal_pending(prev));
	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
			   entry->linked->comm, entry->linked->pid);


	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		unlink(entry->scheduled);

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * We need to make sure to update the link structure anyway in case
	 * that we are still linked. Multiple calls to request_exit_np() don't
	 * hurt.
	 *
	 * Job deadline moves handled similarly
	 */
	if (np && (out_of_time || preempt || sleep)) {
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}
	else if (np && needs_move) {
		move_deadline(entry->scheduled);
	}

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. Don't do a job completion if we block (can't have timers running
	 * for blocked jobs). Preemption go first for the same reason.
	 */
	if (!np && (out_of_time || sleep) && !blocks && !preempt)
		job_completion(entry->scheduled, !sleep);
	else if (!np && needs_move && !blocks && !preempt) {
		move_deadline(entry->scheduled);
	}

	/* Link pending task if we became unlinked.
	 */
	if (!entry->linked)
		link_task_to_cpu(__take_ready(&gsnedf), entry);

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if ((!np || blocks) &&
	    entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
			TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	sched_state_task_picked();

	raw_spin_unlock(&gsnedf_lock);

	/* (Re)arm the split timer for whatever runs next, or cancel it if
	 * this CPU goes idle / non-real-time.  Done after dropping the
	 * lock; timer state is only touched locally with IRQs off.
	 */
	if (next) {
		arm_split_timer(entry, next);
	}
	else if (entry->timer_armed) {
		cancel_split_timer(entry);
	}

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE("gsnedf_lock released, next=0x%p\n", next);

	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());
#endif


	return next;
}
623 | |||
624 | |||
625 | /* _finish_switch - we just finished the switch away from prev | ||
626 | */ | ||
627 | static void gsnedf_finish_switch(struct task_struct *prev) | ||
628 | { | ||
629 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | ||
630 | |||
631 | entry->scheduled = is_realtime(current) ? current : NULL; | ||
632 | #ifdef WANT_ALL_SCHED_EVENTS | ||
633 | TRACE_TASK(prev, "switched away from\n"); | ||
634 | #endif | ||
635 | } | ||
636 | |||
/* Set up a (re-)release of task t at time 'start'.
 * NOTE(review): seeding job_params.deadline with 'start' before calling
 * prepare_for_next_period() presumably makes the first job's parameters
 * relative to 'start' -- confirm against litmus/jobs.c.
 */
static void gsnedf_release_at(struct task_struct *t, lt_t start)
{
	t->rt_param.job_params.deadline = start;
	prepare_for_next_period(t);
	/* Derive the first subjob (split) deadline for the new job. */
	t->rt_param.job_params.subjob_deadline = get_proper_deadline(t);
	set_rt_flags(t, RT_F_RUNNING);
}
644 | |||
/* Prepare a task for running in RT mode.
 * @on_rq:   whether the task is currently on a Linux run queue (unused)
 * @running: whether the task is currently running on some CPU
 */
static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long flags;
	cpu_entry_t* entry;

	TRACE("gsn edf: task new %d\n", t->pid);

	raw_spin_lock_irqsave(&gsnedf_lock, flags);

	/* setup job params */
	gsnedf_release_at(t, litmus_clock());

	if (running) {
		entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);

#ifdef CONFIG_RELEASE_MASTER
		if (entry->cpu != gsnedf.release_master) {
#endif
			/* Mark the task as already scheduled where it runs. */
			entry->scheduled = t;
			tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			/* do not schedule on release master */
			preempt(entry); /* force resched */
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
#endif
	} else {
		t->rt_param.scheduled_on = NO_CPU;
	}
	t->rt_param.linked_on = NO_CPU;

	gsnedf_job_arrival(t);
	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}
683 | |||
/* A real-time task resumed after a suspension.  Distinguish semaphore
 * suspensions (not a new release) from genuine sporadic arrivals.
 */
static void gsnedf_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	lt_t now;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

	raw_spin_lock_irqsave(&gsnedf_lock, flags);
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
		set_rt_flags(task, RT_F_RUNNING);
	} else {
		now = litmus_clock();
		if (is_tardy(task, now)) {
			/* new sporadic release */
			gsnedf_release_at(task, now);
			sched_trace_task_release(task);
		}
		else {
			if (task->rt.time_slice) {
				/* came back in time before deadline
				 */
				set_rt_flags(task, RT_F_RUNNING);
			}
		}
	}
	gsnedf_job_arrival(task);
	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}
716 | |||
/* A real-time task is about to block: remove it from scheduling
 * consideration (it will re-enter via gsnedf_task_wake_up()).
 */
static void gsnedf_task_block(struct task_struct *t)
{
	unsigned long flags;

	TRACE_TASK(t, "block at %llu\n", litmus_clock());

	/* unlink if necessary */
	raw_spin_lock_irqsave(&gsnedf_lock, flags);
	unlink(t);
	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

	BUG_ON(!is_realtime(t));
}
730 | |||
731 | |||
/* A real-time task is leaving the system: clear every reference the
 * plugin holds to it (link, queue membership, per-CPU scheduled slot).
 */
static void gsnedf_task_exit(struct task_struct * t)
{
	unsigned long flags;

	/* unlink if necessary */
	raw_spin_lock_irqsave(&gsnedf_lock, flags);
	unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		/* Forget the stale 'scheduled' pointer for that CPU. */
		gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "RIP\n");
}
748 | |||
749 | |||
/* Admission test: this plugin imposes no admission constraints and
 * accepts every task.
 */
static long gsnedf_admit_task(struct task_struct* tsk)
{
	return 0; /* always admit */
}
754 | |||
755 | #ifdef CONFIG_LITMUS_LOCKING | ||
756 | |||
757 | #include <litmus/fdso.h> | ||
758 | |||
759 | /* called with IRQs off */ | ||
760 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
761 | { | ||
762 | int linked_on; | ||
763 | int check_preempt = 0; | ||
764 | |||
765 | raw_spin_lock(&gsnedf_lock); | ||
766 | |||
767 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | ||
768 | tsk_rt(t)->inh_task = prio_inh; | ||
769 | |||
770 | linked_on = tsk_rt(t)->linked_on; | ||
771 | |||
772 | /* If it is scheduled, then we need to reorder the CPU heap. */ | ||
773 | if (linked_on != NO_CPU) { | ||
774 | TRACE_TASK(t, "%s: linked on %d\n", | ||
775 | __FUNCTION__, linked_on); | ||
776 | /* Holder is scheduled; need to re-order CPUs. | ||
777 | * We can't use heap_decrease() here since | ||
778 | * the cpu_heap is ordered in reverse direction, so | ||
779 | * it is actually an increase. */ | ||
780 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | ||
781 | gsnedf_cpus[linked_on]->hn); | ||
782 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | ||
783 | gsnedf_cpus[linked_on]->hn); | ||
784 | } else { | ||
785 | /* holder may be queued: first stop queue changes */ | ||
786 | raw_spin_lock(&gsnedf.release_lock); | ||
787 | if (is_queued(t)) { | ||
788 | TRACE_TASK(t, "%s: is queued\n", | ||
789 | __FUNCTION__); | ||
790 | /* We need to update the position of holder in some | ||
791 | * heap. Note that this could be a release heap if we | ||
792 | * budget enforcement is used and this job overran. */ | ||
793 | check_preempt = | ||
794 | !bheap_decrease(edf_ready_order, | ||
795 | tsk_rt(t)->heap_node); | ||
796 | } else { | ||
797 | /* Nothing to do: if it is not queued and not linked | ||
798 | * then it is either sleeping or currently being moved | ||
799 | * by other code (e.g., a timer interrupt handler) that | ||
800 | * will use the correct priority when enqueuing the | ||
801 | * task. */ | ||
802 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | ||
803 | __FUNCTION__); | ||
804 | } | ||
805 | raw_spin_unlock(&gsnedf.release_lock); | ||
806 | |||
807 | /* If holder was enqueued in a release heap, then the following | ||
808 | * preemption check is pointless, but we can't easily detect | ||
809 | * that case. If you want to fix this, then consider that | ||
810 | * simply adding a state flag requires O(n) time to update when | ||
811 | * releasing n tasks, which conflicts with the goal to have | ||
812 | * O(log n) merges. */ | ||
813 | if (check_preempt) { | ||
814 | /* heap_decrease() hit the top level of the heap: make | ||
815 | * sure preemption checks get the right task, not the | ||
816 | * potentially stale cache. */ | ||
817 | bheap_uncache_min(edf_ready_order, | ||
818 | &gsnedf.ready_queue); | ||
819 | check_for_preemptions(); | ||
820 | } | ||
821 | } | ||
822 | |||
823 | raw_spin_unlock(&gsnedf_lock); | ||
824 | } | ||
825 | |||
/* Restore a task's base priority and advance its subjob deadline after
 * it leaves a critical section.  Called with IRQs off.
 *
 * The task is requeued via unlink()/gsnedf_job_arrival() rather than
 * heap_decrease() because its effective priority just dropped, and a
 * decrease-key operation cannot express a priority *decrease*. */
static void update_unlocked_priority(struct task_struct* t)
{
	raw_spin_lock(&gsnedf_lock);

	/* A job only stops inheriting a priority when it releases a
	 * resource.  It must therefore still be scheduled on some CPU,
	 * hence the following assumption. */
	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);

	/* Clear priority inheritance */
	TRACE_TASK(t, "priority restored\n");
	tsk_rt(t)->inh_task = NULL;

	/* Update splitting deadline: deadline moves were deferred while
	 * the task was inside its critical section. */
	tsk_rt(t)->job_params.subjob_deadline = get_proper_deadline(t);

	/* Check if rescheduling is necessary.  We can't use heap_decrease()
	 * since the priority was effectively lowered. */
	unlink(t);
	gsnedf_job_arrival(t);

	raw_spin_unlock(&gsnedf_lock);
}
849 | |||
850 | |||
851 | /* ******************** FMLP support ********************** */ | ||
852 | |||
/* Semaphore with priority inheritance, implementing the Flexible
 * Multiprocessor Locking Protocol (FMLP) for global resources.
 * All fields below are protected by wait.lock. */
struct fmlp_semaphore {
	struct litmus_lock litmus_lock;

	/* current resource holder (NULL when the semaphore is free) */
	struct task_struct *owner;

	/* highest-priority waiter, cached so contention handling does
	 * not have to rescan the wait queue on every lock attempt */
	struct task_struct *hp_waiter;

	/* FIFO queue of waiting tasks */
	wait_queue_head_t wait;
};
866 | |||
/* Map a generic litmus_lock back to its enclosing FMLP semaphore. */
static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct fmlp_semaphore, litmus_lock);
}
871 | |||
872 | /* caller is responsible for locking */ | ||
873 | static struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem, | ||
874 | struct task_struct* skip) | ||
875 | { | ||
876 | struct list_head *pos; | ||
877 | struct task_struct *queued, *found = NULL; | ||
878 | |||
879 | list_for_each(pos, &sem->wait.task_list) { | ||
880 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
881 | task_list)->private; | ||
882 | |||
883 | /* Compare task prios, find high prio task. */ | ||
884 | if (queued != skip && edf_higher_prio(queued, found)) | ||
885 | found = queued; | ||
886 | } | ||
887 | return found; | ||
888 | } | ||
889 | |||
/* Acquire the FMLP semaphore, suspending in FIFO order if it is held.
 * Marks the caller as being in a critical section (which suppresses
 * subjob deadline moves) and propagates priority inheritance to the
 * current owner when the caller out-prioritizes it.
 * Returns 0 on success, -EPERM if the caller is not a real-time task.
 * May block; must be called from task context. */
int gsnedf_fmlp_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	cpu_entry_t* entry;
	wait_queue_t wait;
	unsigned long flags;

	if (!is_realtime(t))
		return -EPERM;

	spin_lock_irqsave(&sem->wait.lock, flags);
	entry = &__get_cpu_var(gsnedf_cpu_entries);

	/* Entering a critical section: stop any pending split timer so
	 * the subjob deadline is not moved while we hold (or wait for)
	 * the lock. */
	tsk_rt(t)->in_crit_section = 1;
	if (entry->timer_armed) {
		cancel_split_timer(entry);
	}

	if (sem->owner) {
		/* resource is not free => must suspend and wait */

		init_waitqueue_entry(&wait, t);

		/* FIXME: interruptible would be nice some day */
		set_task_state(t, TASK_UNINTERRUPTIBLE);

		__add_wait_queue_tail_exclusive(&sem->wait, &wait);

		/* check if we need to activate priority inheritance */
		if (edf_higher_prio(t, sem->hp_waiter)) {
			sem->hp_waiter = t;
			if (edf_higher_prio(t, sem->owner))
				set_priority_inheritance(sem->owner, sem->hp_waiter);
		}

		TS_LOCK_SUSPEND;

		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		/* We depend on the FIFO order.  Thus, we don't need to recheck
		 * when we wake up; we are guaranteed to have the lock since
		 * there is only one wake up per release.
		 */

		schedule();

		TS_LOCK_RESUME;

		/* Since we hold the lock, no other task will change
		 * ->owner.  We can thus check it without acquiring the spin
		 * lock. */
		BUG_ON(sem->owner != t);
	} else {
		/* it's ours now */
		sem->owner = t;

		spin_unlock_irqrestore(&sem->wait.lock, flags);
	}

	return 0;
}
953 | |||
/* Release the FMLP semaphore held by the calling task.
 * Hands ownership (and, if needed, priority inheritance) to the next
 * FIFO waiter, then clears the caller's critical-section flag and
 * drops its inherited priority / moves its subjob deadline.
 * Returns 0 on success, -EINVAL if the caller does not own the lock. */
int gsnedf_fmlp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current, *next;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&sem->wait.lock, flags);

	if (sem->owner != t) {
		err = -EINVAL;
		goto out;
	}

	/* check if there are jobs waiting for this resource */
	next = __waitqueue_remove_first(&sem->wait);
	if (next) {
		/* next becomes the resource holder */
		sem->owner = next;
		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);

		/* determine new hp_waiter if necessary */
		if (next == sem->hp_waiter) {
			TRACE_TASK(next, "was highest-prio waiter\n");
			/* next has the highest priority --- it doesn't need to
			 * inherit.  However, we need to make sure that the
			 * next-highest priority in the queue is reflected in
			 * hp_waiter. */
			sem->hp_waiter = find_hp_waiter(sem, next);
			if (sem->hp_waiter)
				TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
			else
				TRACE("no further waiters\n");
		} else {
			/* Well, if next is not the highest-priority waiter,
			 * then it ought to inherit the highest-priority
			 * waiter's priority. */
			set_priority_inheritance(next, sem->hp_waiter);
		}

		/* wake up next */
		wake_up_process(next);
	} else
		/* becomes available */
		sem->owner = NULL;

	/* We are no longer in a critical section */
	tsk_rt(t)->in_crit_section = 0;

	/* we lose the benefit of priority inheritance (if any) and may need
	 * to move the deadline.  In either case, may need to reschedule
	 * due to reduced priority. */
	if (tsk_rt(t)->inh_task || needs_deadline_move(t))
		update_unlocked_priority(t);
	/* TODO: Check that schedule() gets called - it needs to arm the
	 * enforcement timer.  Otherwise we should do it here or in
	 * update_unlocked_priority. */

out:
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	return err;
}
1017 | |||
1018 | int gsnedf_fmlp_close(struct litmus_lock* l) | ||
1019 | { | ||
1020 | struct task_struct *t = current; | ||
1021 | struct fmlp_semaphore *sem = fmlp_from_lock(l); | ||
1022 | unsigned long flags; | ||
1023 | |||
1024 | int owner; | ||
1025 | |||
1026 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
1027 | |||
1028 | owner = sem->owner == t; | ||
1029 | |||
1030 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
1031 | |||
1032 | if (owner) | ||
1033 | gsnedf_fmlp_unlock(l); | ||
1034 | |||
1035 | return 0; | ||
1036 | } | ||
1037 | |||
/* Deallocate callback: free the memory backing an FMLP semaphore that
 * is no longer referenced by any task. */
void gsnedf_fmlp_free(struct litmus_lock* lock)
{
	struct fmlp_semaphore *sem = fmlp_from_lock(lock);

	kfree(sem);
}
1042 | |||
/* Operations table wiring the FMLP semaphore implementation into the
 * generic litmus_lock interface. */
static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
	.close = gsnedf_fmlp_close,
	.lock = gsnedf_fmlp_lock,
	.unlock = gsnedf_fmlp_unlock,
	.deallocate = gsnedf_fmlp_free,
};
1049 | |||
1050 | static struct litmus_lock* gsnedf_new_fmlp(void) | ||
1051 | { | ||
1052 | struct fmlp_semaphore* sem; | ||
1053 | |||
1054 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1055 | if (!sem) | ||
1056 | return NULL; | ||
1057 | |||
1058 | sem->owner = NULL; | ||
1059 | sem->hp_waiter = NULL; | ||
1060 | init_waitqueue_head(&sem->wait); | ||
1061 | sem->litmus_lock.ops = &gsnedf_fmlp_lock_ops; | ||
1062 | |||
1063 | return &sem->litmus_lock; | ||
1064 | } | ||
1065 | |||
1066 | /* **** lock constructor **** */ | ||
1067 | |||
1068 | |||
1069 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | ||
1070 | void* __user unused) | ||
1071 | { | ||
1072 | int err = -ENXIO; | ||
1073 | |||
1074 | /* GSN-EDF currently only supports the FMLP for global resources. */ | ||
1075 | switch (type) { | ||
1076 | |||
1077 | case FMLP_SEM: | ||
1078 | /* Flexible Multiprocessor Locking Protocol */ | ||
1079 | *lock = gsnedf_new_fmlp(); | ||
1080 | if (*lock) | ||
1081 | err = 0; | ||
1082 | else | ||
1083 | err = -ENOMEM; | ||
1084 | break; | ||
1085 | |||
1086 | }; | ||
1087 | |||
1088 | return err; | ||
1089 | } | ||
1090 | |||
1091 | #endif | ||
1092 | |||
1093 | |||
/* Plugin activation hook: runs when the user switches to GSN-EDF.
 * Re-initializes the CPU priority heap and links every online CPU into
 * it.  When CONFIG_RELEASE_MASTER is set, the designated release-master
 * CPU is left out of the heap so it never runs real-time work and only
 * services release timer interrupts.  Always returns 0. */
static long gsnedf_activate_plugin(void)
{
	int cpu;
	cpu_entry_t *entry;

	bheap_init(&gsnedf_cpu_heap);
#ifdef CONFIG_RELEASE_MASTER
	gsnedf.release_master = atomic_read(&release_master_cpu);
#endif

	for_each_online_cpu(cpu) {
		entry = &per_cpu(gsnedf_cpu_entries, cpu);
		bheap_node_init(&entry->hn, entry);
		entry->linked = NULL;
		entry->scheduled = NULL;
#ifdef CONFIG_RELEASE_MASTER
		if (cpu != gsnedf.release_master) {
#endif
			TRACE("GSN-EDF: Initializing CPU #%d.\n", cpu);
			update_cpu_position(entry);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			TRACE("GSN-EDF: CPU %d is release master.\n", cpu);
		}
#endif
	}
	return 0;
}
1122 | |||
/* Plugin object: the callback table registered with the LITMUS^RT
 * core.  The locking callback is only compiled in when
 * CONFIG_LITMUS_LOCKING is enabled. */
static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
	.plugin_name = "GSN-EDF",
	.finish_switch = gsnedf_finish_switch,
	.tick = gsnedf_tick,
	.task_new = gsnedf_task_new,
	.complete_job = complete_job,
	.task_exit = gsnedf_task_exit,
	.schedule = gsnedf_schedule,
	.release_at = gsnedf_release_at,
	.task_wake_up = gsnedf_task_wake_up,
	.task_block = gsnedf_task_block,
	.admit_task = gsnedf_admit_task,
	.activate_plugin = gsnedf_activate_plugin,
#ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock = gsnedf_allocate_lock,
#endif
};
1141 | |||
1142 | |||
1143 | static int __init init_gsn_edf(void) | ||
1144 | { | ||
1145 | int cpu; | ||
1146 | cpu_entry_t *entry; | ||
1147 | |||
1148 | bheap_init(&gsnedf_cpu_heap); | ||
1149 | /* initialize CPU state */ | ||
1150 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
1151 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | ||
1152 | gsnedf_cpus[cpu] = entry; | ||
1153 | entry->cpu = cpu; | ||
1154 | entry->hn = &gsnedf_heap_node[cpu]; | ||
1155 | hrtimer_init(&entry->split_timer, | ||
1156 | CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
1157 | entry->split_timer.function = on_split_timeout; | ||
1158 | bheap_node_init(&entry->hn, entry); | ||
1159 | } | ||
1160 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | ||
1161 | return register_sched_plugin(&gsn_edf_plugin); | ||
1162 | } | ||
1163 | |||
1164 | |||
1165 | module_init(init_gsn_edf); | ||