| author | Felipe Cerqueira <felipec@mpi-sws.org> | 2013-02-12 13:16:03 -0500 |
|---|---|---|
| committer | Bjoern Brandenburg <bbb@mpi-sws.org> | 2013-08-07 03:47:05 -0400 |
| commit | 9a169caf2081e6281e3f60ff86e1ea4af6faf78e (patch) | |
| tree | 922d91d9af5317d03e922f623039966b451177a3 | |
| parent | 9c664c8b006327927693cd2a6e151b4cbb55a291 (diff) | |
Add GSN-EDF scheduler plugin
| -rw-r--r-- | litmus/Makefile | 1 |
| -rw-r--r-- | litmus/sched_gsn_edf.c | 1023 |
2 files changed, 1024 insertions, 0 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index 5b38295b992d..8428360eca09 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
| @@ -18,6 +18,7 @@ obj-y = sched_plugin.o litmus.o \ | |||
| 18 | bheap.o \ | 18 | bheap.o \ |
| 19 | binheap.o \ | 19 | binheap.o \ |
| 20 | ctrldev.o \ | 20 | ctrldev.o \ |
| 21 | sched_gsn_edf.o \ | ||
| 21 | sched_psn_edf.o | 22 | sched_psn_edf.o |
| 22 | 23 | ||
| 23 | obj-$(CONFIG_SCHED_CPU_AFFINITY) += affinity.o | 24 | obj-$(CONFIG_SCHED_CPU_AFFINITY) += affinity.o |
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
new file mode 100644
index 000000000000..90ff6f639461
--- /dev/null
+++ b/litmus/sched_gsn_edf.c
| @@ -0,0 +1,1023 @@ | |||
| 1 | /* | ||
| 2 | * litmus/sched_gsn_edf.c | ||
| 3 | * | ||
| 4 | * Implementation of the GSN-EDF scheduling algorithm. | ||
| 5 | * | ||
| 6 | * This version uses the simple approach and serializes all scheduling | ||
| 7 | * decisions by the use of a queue lock. This is probably not the | ||
| 8 | * best way to do it, but it should suffice for now. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/spinlock.h> | ||
| 12 | #include <linux/percpu.h> | ||
| 13 | #include <linux/sched.h> | ||
| 14 | #include <linux/slab.h> | ||
| 15 | |||
| 16 | #include <litmus/litmus.h> | ||
| 17 | #include <litmus/jobs.h> | ||
| 18 | #include <litmus/sched_plugin.h> | ||
| 19 | #include <litmus/edf_common.h> | ||
| 20 | #include <litmus/sched_trace.h> | ||
| 21 | #include <litmus/trace.h> | ||
| 22 | |||
| 23 | #include <litmus/preempt.h> | ||
| 24 | #include <litmus/budget.h> | ||
| 25 | |||
| 26 | #include <litmus/bheap.h> | ||
| 27 | |||
| 28 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
| 29 | #include <litmus/affinity.h> | ||
| 30 | #endif | ||
| 31 | |||
| 32 | #include <linux/module.h> | ||
| 33 | |||
| 34 | /* Overview of GSN-EDF operations. | ||
| 35 | * | ||
| 36 | * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This | ||
| 37 | * description only covers how the individual operations are implemented in | ||
| 38 | * LITMUS. | ||
| 39 | * | ||
| 40 | * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage | ||
| 41 | * structure (NOT the actually scheduled | ||
| 42 | * task). If there is another linked task To | ||
| 43 | * already it will set To->linked_on = NO_CPU | ||
| 44 | * (thereby removing its association with this | ||
| 45 | * CPU). However, it will not requeue the | ||
| 46 | * previously linked task (if any). It will set | ||
| 47 | * T's state to not completed and check whether | ||
| 48 | * it is already running somewhere else. If T | ||
| 49 | * is scheduled somewhere else it will link | ||
| 50 | * it to that CPU instead (and pull the linked | ||
| 51 | * task to cpu). T may be NULL. | ||
| 52 | * | ||
| 53 | * unlink(T) - Unlink removes T from all scheduler data | ||
| 54 | * structures. If it is linked to some CPU it | ||
| 55 | * will link NULL to that CPU. If it is | ||
| 56 | * currently queued in the gsnedf queue it will | ||
| 57 | * be removed from the rt_domain. It is safe to | ||
| 58 | * call unlink(T) if T is not linked. T may not | ||
| 59 | * be NULL. | ||
| 60 | * | ||
| 61 | * requeue(T) - Requeue will insert T into the appropriate | ||
| 62 | * queue. If the system is in real-time mode and | ||
| 63 | * T is already released, it will go into the | ||
| 64 | * ready queue. If the system is not in | ||
| 65 | * real-time mode, then T will go into the | ||
| 66 | * release queue. If T's release time is in the | ||
| 67 | * future, it will go into the release | ||
| 68 | * queue. That means that T's release time/job | ||
| 69 | * no/etc. has to be updated before requeue(T) is | ||
| 70 | * called. It is not safe to call requeue(T) | ||
| 71 | * when T is already queued. T may not be NULL. | ||
| 72 | * | ||
| 73 | * gsnedf_job_arrival(T) - This is the catch-all function when T enters | ||
| 74 | * the system after either a suspension or at a | ||
| 75 | * job release. It will queue T (which means it | ||
| 76 | * is not safe to call gsnedf_job_arrival(T) if | ||
| 77 | * T is already queued) and then check whether a | ||
| 78 | * preemption is necessary. If a preemption is | ||
| 79 | * necessary it will update the linkage | ||
| 80 | * accordingly and cause schedule() to be called | ||
| 81 | * (either with an IPI or need_resched). It is | ||
| 82 | * safe to call gsnedf_job_arrival(T) if T's | ||
| 83 | * next job has not been actually released yet | ||
| 84 | * (release time in the future). T will be put | ||
| 85 | * on the release queue in that case. | ||
| 86 | * | ||
| 87 | * job_completion(T) - Take care of everything that needs to be done | ||
| 88 | * to prepare T for its next release and place | ||
| 89 | * it in the right queue with | ||
| 90 | * gsnedf_job_arrival(). | ||
| 91 | * | ||
| 92 | * | ||
| 93 | * When we know that T is linked to a CPU, then link_task_to_cpu(NULL, CPU) is | ||
| 94 | * equivalent to unlink(T). Note that if you unlink a task from a CPU, none of | ||
| 95 | * the functions will automatically propagate a pending task from the ready queue | ||
| 96 | * into the now-empty link. This is the job of the calling function (by means of | ||
| 97 | * __take_ready). | ||
| 98 | */ | ||
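Reviewer note (not part of the commit): as the overview above stresses, neither unlink() nor link_task_to_cpu(NULL, cpu) refills an empty link from the ready queue; that is the caller's job. The fragment below is excerpted from gsnedf_schedule() further down in this file and shows the idiom the plugin uses for exactly that.

```c
/* Caller-side idiom (excerpt from gsnedf_schedule() below): once this CPU
 * has become unlinked, pull the highest-priority ready job -- which may be
 * NULL if nothing is ready -- and link it explicitly. */
if (!entry->linked)
	link_task_to_cpu(__take_ready(&gsnedf), entry);
```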
| 99 | |||
| 100 | |||
| 101 | /* cpu_entry_t - maintain the linked and scheduled state | ||
| 102 | */ | ||
| 103 | typedef struct { | ||
| 104 | int cpu; | ||
| 105 | struct task_struct* linked; /* only RT tasks */ | ||
| 106 | struct task_struct* scheduled; /* only RT tasks */ | ||
| 107 | struct bheap_node* hn; | ||
| 108 | } cpu_entry_t; | ||
| 109 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); | ||
| 110 | |||
| 111 | cpu_entry_t* gsnedf_cpus[NR_CPUS]; | ||
| 112 | |||
| 113 | /* the cpus queue themselves according to priority in here */ | ||
| 114 | static struct bheap_node gsnedf_heap_node[NR_CPUS]; | ||
| 115 | static struct bheap gsnedf_cpu_heap; | ||
| 116 | |||
| 117 | static rt_domain_t gsnedf; | ||
| 118 | #define gsnedf_lock (gsnedf.ready_lock) | ||
| 119 | |||
| 120 | |||
| 121 | /* Uncomment this if you want to see all scheduling decisions in the | ||
| 122 | * TRACE() log. | ||
| 123 | #define WANT_ALL_SCHED_EVENTS | ||
| 124 | */ | ||
| 125 | |||
| 126 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | ||
| 127 | { | ||
| 128 | cpu_entry_t *a, *b; | ||
| 129 | a = _a->value; | ||
| 130 | b = _b->value; | ||
| 131 | /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
| 132 | * the top of the heap. | ||
| 133 | */ | ||
| 134 | return edf_higher_prio(b->linked, a->linked); | ||
| 135 | } | ||
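Reviewer illustration (not part of the commit), assuming the usual convention that edf_higher_prio() ranks any real-time task above NULL: with the arguments swapped as above, the CPU heap orders the most easily preempted CPU first, so an idle CPU floats to the top.

```c
/* Illustration only: suppose cpu0 is idle (linked == NULL) and cpu1 is
 * linked to some real-time job J.  Then
 *     cpu_lower_prio(cpu0->hn, cpu1->hn) == edf_higher_prio(J, NULL) == 1,
 * so cpu0 is ordered above cpu1, and lowest_prio_cpu() returns the idle
 * CPU -- exactly the CPU that check_for_preemptions() should fill first. */
```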
| 136 | |||
| 137 | /* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
| 138 | * order in the cpu queue. Caller must hold gsnedf lock. | ||
| 139 | */ | ||
| 140 | static void update_cpu_position(cpu_entry_t *entry) | ||
| 141 | { | ||
| 142 | if (likely(bheap_node_in_heap(entry->hn))) | ||
| 143 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
| 144 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
| 145 | } | ||
| 146 | |||
| 147 | /* caller must hold gsnedf lock */ | ||
| 148 | static cpu_entry_t* lowest_prio_cpu(void) | ||
| 149 | { | ||
| 150 | struct bheap_node* hn; | ||
| 151 | hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap); | ||
| 152 | return hn->value; | ||
| 153 | } | ||
| 154 | |||
| 155 | |||
| 156 | /* link_task_to_cpu - Update the link of a CPU. | ||
| 157 | * Handles the case where the to-be-linked task is already | ||
| 158 | * scheduled on a different CPU. | ||
| 159 | */ | ||
| 160 | static noinline void link_task_to_cpu(struct task_struct* linked, | ||
| 161 | cpu_entry_t *entry) | ||
| 162 | { | ||
| 163 | cpu_entry_t *sched; | ||
| 164 | struct task_struct* tmp; | ||
| 165 | int on_cpu; | ||
| 166 | |||
| 167 | BUG_ON(linked && !is_realtime(linked)); | ||
| 168 | |||
| 169 | /* Currently linked task is set to be unlinked. */ | ||
| 170 | if (entry->linked) { | ||
| 171 | entry->linked->rt_param.linked_on = NO_CPU; | ||
| 172 | } | ||
| 173 | |||
| 174 | /* Link new task to CPU. */ | ||
| 175 | if (linked) { | ||
| 176 | /* handle the case where the task is already scheduled somewhere! */ | ||
| 177 | on_cpu = linked->rt_param.scheduled_on; | ||
| 178 | if (on_cpu != NO_CPU) { | ||
| 179 | sched = &per_cpu(gsnedf_cpu_entries, on_cpu); | ||
| 180 | /* this should only happen if not linked already */ | ||
| 181 | BUG_ON(sched->linked == linked); | ||
| 182 | |||
| 183 | /* If we are already scheduled on the CPU to which we | ||
| 184 | * wanted to link, we don't need to do the swap -- | ||
| 185 | * we just link ourselves to the CPU and depend on | ||
| 186 | * the caller to get things right. | ||
| 187 | */ | ||
| 188 | if (entry != sched) { | ||
| 189 | TRACE_TASK(linked, | ||
| 190 | "already scheduled on %d, updating link.\n", | ||
| 191 | sched->cpu); | ||
| 192 | tmp = sched->linked; | ||
| 193 | linked->rt_param.linked_on = sched->cpu; | ||
| 194 | sched->linked = linked; | ||
| 195 | update_cpu_position(sched); | ||
| 196 | linked = tmp; | ||
| 197 | } | ||
| 198 | } | ||
| 199 | if (linked) /* might be NULL due to swap */ | ||
| 200 | linked->rt_param.linked_on = entry->cpu; | ||
| 201 | } | ||
| 202 | entry->linked = linked; | ||
| 203 | #ifdef WANT_ALL_SCHED_EVENTS | ||
| 204 | if (linked) | ||
| 205 | TRACE_TASK(linked, "linked to %d.\n", entry->cpu); | ||
| 206 | else | ||
| 207 | TRACE("NULL linked to %d.\n", entry->cpu); | ||
| 208 | #endif | ||
| 209 | update_cpu_position(entry); | ||
| 210 | } | ||
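A worked example of the swap branch above (reviewer illustration with assumed state, not part of the commit):

```c
/* Hypothetical starting state:
 *   cpu1->linked = A,  cpu2->linked = B,  T->rt_param.scheduled_on = 2
 * The caller (e.g. check_for_preemptions()) picked T from the ready queue
 * and calls link_task_to_cpu(T, cpu1).  Because T is still scheduled on
 * CPU 2, the swap branch links T to CPU 2 and re-links B to CPU 1 instead:
 *   cpu1->linked = B,  cpu2->linked = T
 * so the already-running T is not migrated needlessly.  A's linked_on was
 * cleared at the top of the function; requeueing A is the caller's job. */
```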
| 211 | |||
| 212 | /* unlink - Make sure a task is not linked any longer to an entry | ||
| 213 | * where it was linked before. Must hold gsnedf_lock. | ||
| 214 | */ | ||
| 215 | static noinline void unlink(struct task_struct* t) | ||
| 216 | { | ||
| 217 | cpu_entry_t *entry; | ||
| 218 | |||
| 219 | if (t->rt_param.linked_on != NO_CPU) { | ||
| 220 | /* unlink */ | ||
| 221 | entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on); | ||
| 222 | t->rt_param.linked_on = NO_CPU; | ||
| 223 | link_task_to_cpu(NULL, entry); | ||
| 224 | } else if (is_queued(t)) { | ||
| 225 | /* This is an interesting situation: t is scheduled, | ||
| 226 | * but was just recently unlinked. It cannot be | ||
| 227 | * linked anywhere else (because then it would have | ||
| 228 | * been relinked to this CPU), thus it must be in some | ||
| 229 | * queue. We must remove it from the list in this | ||
| 230 | * case. | ||
| 231 | */ | ||
| 232 | remove(&gsnedf, t); | ||
| 233 | } | ||
| 234 | } | ||
| 235 | |||
| 236 | |||
| 237 | /* preempt - force a CPU to reschedule | ||
| 238 | */ | ||
| 239 | static void preempt(cpu_entry_t *entry) | ||
| 240 | { | ||
| 241 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
| 242 | } | ||
| 243 | |||
| 244 | /* requeue - Put an unlinked task into gsn-edf domain. | ||
| 245 | * Caller must hold gsnedf_lock. | ||
| 246 | */ | ||
| 247 | static noinline void requeue(struct task_struct* task) | ||
| 248 | { | ||
| 249 | BUG_ON(!task); | ||
| 250 | /* sanity check before insertion */ | ||
| 251 | BUG_ON(is_queued(task)); | ||
| 252 | |||
| 253 | if (is_early_releasing(task) || is_released(task, litmus_clock())) | ||
| 254 | __add_ready(&gsnedf, task); | ||
| 255 | else { | ||
| 256 | /* it has got to wait */ | ||
| 257 | add_release(&gsnedf, task); | ||
| 258 | } | ||
| 259 | } | ||
| 260 | |||
| 261 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
| 262 | static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t *start) | ||
| 263 | { | ||
| 264 | cpu_entry_t *affinity; | ||
| 265 | |||
| 266 | get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries, | ||
| 267 | #ifdef CONFIG_RELEASE_MASTER | ||
| 268 | gsnedf.release_master | ||
| 269 | #else | ||
| 270 | NO_CPU | ||
| 271 | #endif | ||
| 272 | ); | ||
| 273 | |||
| 274 | return(affinity); | ||
| 275 | } | ||
| 276 | #endif | ||
| 277 | |||
| 278 | /* check for any necessary preemptions */ | ||
| 279 | static void check_for_preemptions(void) | ||
| 280 | { | ||
| 281 | struct task_struct *task; | ||
| 282 | cpu_entry_t *last; | ||
| 283 | |||
| 284 | for (last = lowest_prio_cpu(); | ||
| 285 | edf_preemption_needed(&gsnedf, last->linked); | ||
| 286 | last = lowest_prio_cpu()) { | ||
| 287 | /* preemption necessary */ | ||
| 288 | task = __take_ready(&gsnedf); | ||
| 289 | TRACE("check_for_preemptions: attempting to link task %d to %d\n", | ||
| 290 | task->pid, last->cpu); | ||
| 291 | |||
| 292 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
| 293 | { | ||
| 294 | cpu_entry_t *affinity = | ||
| 295 | gsnedf_get_nearest_available_cpu( | ||
| 296 | &per_cpu(gsnedf_cpu_entries, task_cpu(task))); | ||
| 297 | if (affinity) | ||
| 298 | last = affinity; | ||
| 299 | else if (requeue_preempted_job(last->linked)) | ||
| 300 | requeue(last->linked); | ||
| 301 | } | ||
| 302 | #else | ||
| 303 | if (requeue_preempted_job(last->linked)) | ||
| 304 | requeue(last->linked); | ||
| 305 | #endif | ||
| 306 | |||
| 307 | link_task_to_cpu(task, last); | ||
| 308 | preempt(last); | ||
| 309 | } | ||
| 310 | } | ||
| 311 | |||
| 312 | /* gsnedf_job_arrival: task is either resumed or released */ | ||
| 313 | static noinline void gsnedf_job_arrival(struct task_struct* task) | ||
| 314 | { | ||
| 315 | BUG_ON(!task); | ||
| 316 | |||
| 317 | requeue(task); | ||
| 318 | check_for_preemptions(); | ||
| 319 | } | ||
| 320 | |||
| 321 | static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | ||
| 322 | { | ||
| 323 | unsigned long flags; | ||
| 324 | |||
| 325 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
| 326 | |||
| 327 | __merge_ready(rt, tasks); | ||
| 328 | check_for_preemptions(); | ||
| 329 | |||
| 330 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
| 331 | } | ||
| 332 | |||
| 333 | /* caller holds gsnedf_lock */ | ||
| 334 | static noinline void job_completion(struct task_struct *t, int forced) | ||
| 335 | { | ||
| 336 | BUG_ON(!t); | ||
| 337 | |||
| 338 | sched_trace_task_completion(t, forced); | ||
| 339 | |||
| 340 | TRACE_TASK(t, "job_completion().\n"); | ||
| 341 | |||
| 342 | /* set flags */ | ||
| 343 | tsk_rt(t)->completed = 0; | ||
| 344 | /* prepare for next period */ | ||
| 345 | prepare_for_next_period(t); | ||
| 346 | if (is_early_releasing(t) || is_released(t, litmus_clock())) | ||
| 347 | sched_trace_task_release(t); | ||
| 348 | /* unlink */ | ||
| 349 | unlink(t); | ||
| 350 | /* requeue | ||
| 351 | * But don't requeue a blocking task. */ | ||
| 352 | if (is_running(t)) | ||
| 353 | gsnedf_job_arrival(t); | ||
| 354 | } | ||
| 355 | |||
| 356 | /* gsnedf_tick - this function is called for every local timer | ||
| 357 | * interrupt. | ||
| 358 | * | ||
| 359 | * checks whether the current task has exhausted its budget and, | ||
| 360 | * if so, triggers a preemption (or requests one if the task is non-preemptive) | ||
| 361 | */ | ||
| 362 | static void gsnedf_tick(struct task_struct* t) | ||
| 363 | { | ||
| 364 | if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { | ||
| 365 | if (!is_np(t)) { | ||
| 366 | /* np tasks will be preempted when they become | ||
| 367 | * preemptable again | ||
| 368 | */ | ||
| 369 | litmus_reschedule_local(); | ||
| 370 | TRACE("gsnedf_scheduler_tick: " | ||
| 371 | "%d is preemptable " | ||
| 372 | " => FORCE_RESCHED\n", t->pid); | ||
| 373 | } else if (is_user_np(t)) { | ||
| 374 | TRACE("gsnedf_scheduler_tick: " | ||
| 375 | "%d is non-preemptable, " | ||
| 376 | "preemption delayed.\n", t->pid); | ||
| 377 | request_exit_np(t); | ||
| 378 | } | ||
| 379 | } | ||
| 380 | } | ||
| 381 | |||
| 382 | /* Getting schedule() right is a bit tricky. schedule() may not make any | ||
| 383 | * assumptions on the state of the current task since it may be called for a | ||
| 384 | * number of reasons. The reasons include that scheduler_tick() determined it | ||
| 385 | * was necessary, that sys_exit_np() was called, that some Linux | ||
| 386 | * subsystem determined so, or even (in the worst case) that there is a bug | ||
| 387 | * hidden somewhere. Thus, we must take extreme care to determine what the | ||
| 388 | * current state is. | ||
| 389 | * | ||
| 390 | * The CPU could currently be scheduling a task (or not), be linked (or not). | ||
| 391 | * | ||
| 392 | * The following assertions for the scheduled task could hold: | ||
| 393 | * | ||
| 394 | * - !is_running(scheduled) // the job blocks | ||
| 395 | * - scheduled->timeslice == 0 // the job completed (forcefully) | ||
| 396 | * - is_completed() // the job completed (by syscall) | ||
| 397 | * - linked != scheduled // we need to reschedule (for any reason) | ||
| 398 | * - is_np(scheduled) // rescheduling must be delayed, | ||
| 399 | * sys_exit_np must be requested | ||
| 400 | * | ||
| 401 | * Any of these can occur together. | ||
| 402 | */ | ||
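Reviewer illustration (not part of the commit): a few concrete combinations of the state flags computed in step (0) of gsnedf_schedule() below, and the action each one triggers.

```c
/* Illustration only -- typical flag combinations and their effect:
 *   job calls sys_complete_job()           -> sleep=1, np=0, blocks=0
 *                                             => job_completion(), relink
 *   budget exhausted, task preemptable     -> out_of_time=1, np=0, blocks=0
 *                                             => forced job_completion()
 *   task suspends inside the kernel        -> blocks=1
 *                                             => unlink() only, no completion
 *   higher-prio job linked to this CPU     -> preempt=1 (linked != scheduled)
 *                                             => switch to entry->linked */
```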
| 403 | static struct task_struct* gsnedf_schedule(struct task_struct * prev) | ||
| 404 | { | ||
| 405 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | ||
| 406 | int out_of_time, sleep, preempt, np, exists, blocks; | ||
| 407 | struct task_struct* next = NULL; | ||
| 408 | |||
| 409 | #ifdef CONFIG_RELEASE_MASTER | ||
| 410 | /* Bail out early if we are the release master. | ||
| 411 | * The release master never schedules any real-time tasks. | ||
| 412 | */ | ||
| 413 | if (unlikely(gsnedf.release_master == entry->cpu)) { | ||
| 414 | sched_state_task_picked(); | ||
| 415 | return NULL; | ||
| 416 | } | ||
| 417 | #endif | ||
| 418 | |||
| 419 | raw_spin_lock(&gsnedf_lock); | ||
| 420 | |||
| 421 | /* sanity checking */ | ||
| 422 | BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
| 423 | BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
| 424 | BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
| 425 | |||
| 426 | /* (0) Determine state */ | ||
| 427 | exists = entry->scheduled != NULL; | ||
| 428 | blocks = exists && !is_running(entry->scheduled); | ||
| 429 | out_of_time = exists && budget_enforced(entry->scheduled) | ||
| 430 | && budget_exhausted(entry->scheduled); | ||
| 431 | np = exists && is_np(entry->scheduled); | ||
| 432 | sleep = exists && is_completed(entry->scheduled); | ||
| 433 | preempt = entry->scheduled != entry->linked; | ||
| 434 | |||
| 435 | #ifdef WANT_ALL_SCHED_EVENTS | ||
| 436 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | ||
| 437 | #endif | ||
| 438 | |||
| 439 | if (exists) | ||
| 440 | TRACE_TASK(prev, | ||
| 441 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | ||
| 442 | "state:%d sig:%d\n", | ||
| 443 | blocks, out_of_time, np, sleep, preempt, | ||
| 444 | prev->state, signal_pending(prev)); | ||
| 445 | if (entry->linked && preempt) | ||
| 446 | TRACE_TASK(prev, "will be preempted by %s/%d\n", | ||
| 447 | entry->linked->comm, entry->linked->pid); | ||
| 448 | |||
| 449 | |||
| 450 | /* If a task blocks we have no choice but to reschedule. | ||
| 451 | */ | ||
| 452 | if (blocks) | ||
| 453 | unlink(entry->scheduled); | ||
| 454 | |||
| 455 | /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
| 456 | * We need to make sure to update the link structure anyway in case | ||
| 457 | * that we are still linked. Multiple calls to request_exit_np() don't | ||
| 458 | * hurt. | ||
| 459 | */ | ||
| 460 | if (np && (out_of_time || preempt || sleep)) { | ||
| 461 | unlink(entry->scheduled); | ||
| 462 | request_exit_np(entry->scheduled); | ||
| 463 | } | ||
| 464 | |||
| 465 | /* Any task that is preemptable and either exhausts its execution | ||
| 466 | * budget or wants to sleep completes. We may have to reschedule after | ||
| 467 | * this. Don't do a job completion if we block (can't have timers running | ||
| 468 | * for blocked jobs). | ||
| 469 | */ | ||
| 470 | if (!np && (out_of_time || sleep) && !blocks) | ||
| 471 | job_completion(entry->scheduled, !sleep); | ||
| 472 | |||
| 473 | /* Link pending task if we became unlinked. | ||
| 474 | */ | ||
| 475 | if (!entry->linked) | ||
| 476 | link_task_to_cpu(__take_ready(&gsnedf), entry); | ||
| 477 | |||
| 478 | /* The final scheduling decision. Do we need to switch for some reason? | ||
| 479 | * If linked is different from scheduled, then select linked as next. | ||
| 480 | */ | ||
| 481 | if ((!np || blocks) && | ||
| 482 | entry->linked != entry->scheduled) { | ||
| 483 | /* Schedule a linked job? */ | ||
| 484 | if (entry->linked) { | ||
| 485 | entry->linked->rt_param.scheduled_on = entry->cpu; | ||
| 486 | next = entry->linked; | ||
| 487 | TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id()); | ||
| 488 | } | ||
| 489 | if (entry->scheduled) { | ||
| 490 | /* not gonna be scheduled soon */ | ||
| 491 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
| 492 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | ||
| 493 | } | ||
| 494 | } else | ||
| 495 | /* Only override Linux scheduler if we have a real-time task | ||
| 496 | * scheduled that needs to continue. | ||
| 497 | */ | ||
| 498 | if (exists) | ||
| 499 | next = prev; | ||
| 500 | |||
| 501 | sched_state_task_picked(); | ||
| 502 | |||
| 503 | raw_spin_unlock(&gsnedf_lock); | ||
| 504 | |||
| 505 | #ifdef WANT_ALL_SCHED_EVENTS | ||
| 506 | TRACE("gsnedf_lock released, next=0x%p\n", next); | ||
| 507 | |||
| 508 | if (next) | ||
| 509 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
| 510 | else if (exists && !next) | ||
| 511 | TRACE("becomes idle at %llu.\n", litmus_clock()); | ||
| 512 | #endif | ||
| 513 | |||
| 514 | |||
| 515 | return next; | ||
| 516 | } | ||
| 517 | |||
| 518 | |||
| 519 | /* _finish_switch - we just finished the switch away from prev | ||
| 520 | */ | ||
| 521 | static void gsnedf_finish_switch(struct task_struct *prev) | ||
| 522 | { | ||
| 523 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | ||
| 524 | |||
| 525 | entry->scheduled = is_realtime(current) ? current : NULL; | ||
| 526 | #ifdef WANT_ALL_SCHED_EVENTS | ||
| 527 | TRACE_TASK(prev, "switched away from\n"); | ||
| 528 | #endif | ||
| 529 | } | ||
| 530 | |||
| 531 | |||
| 532 | /* Prepare a task for running in RT mode | ||
| 533 | */ | ||
| 534 | static void gsnedf_task_new(struct task_struct * t, int on_rq, int is_scheduled) | ||
| 535 | { | ||
| 536 | unsigned long flags; | ||
| 537 | cpu_entry_t* entry; | ||
| 538 | |||
| 539 | TRACE("gsn edf: task new %d\n", t->pid); | ||
| 540 | |||
| 541 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
| 542 | |||
| 543 | /* setup job params */ | ||
| 544 | release_at(t, litmus_clock()); | ||
| 545 | |||
| 546 | if (is_scheduled) { | ||
| 547 | entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t)); | ||
| 548 | BUG_ON(entry->scheduled); | ||
| 549 | |||
| 550 | #ifdef CONFIG_RELEASE_MASTER | ||
| 551 | if (entry->cpu != gsnedf.release_master) { | ||
| 552 | #endif | ||
| 553 | entry->scheduled = t; | ||
| 554 | tsk_rt(t)->scheduled_on = task_cpu(t); | ||
| 555 | #ifdef CONFIG_RELEASE_MASTER | ||
| 556 | } else { | ||
| 557 | /* do not schedule on release master */ | ||
| 558 | preempt(entry); /* force resched */ | ||
| 559 | tsk_rt(t)->scheduled_on = NO_CPU; | ||
| 560 | } | ||
| 561 | #endif | ||
| 562 | } else { | ||
| 563 | t->rt_param.scheduled_on = NO_CPU; | ||
| 564 | } | ||
| 565 | t->rt_param.linked_on = NO_CPU; | ||
| 566 | |||
| 567 | if (is_running(t)) | ||
| 568 | gsnedf_job_arrival(t); | ||
| 569 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
| 570 | } | ||
| 571 | |||
| 572 | static void gsnedf_task_wake_up(struct task_struct *task) | ||
| 573 | { | ||
| 574 | unsigned long flags; | ||
| 575 | lt_t now; | ||
| 576 | |||
| 577 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
| 578 | |||
| 579 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
| 580 | now = litmus_clock(); | ||
| 581 | if (is_sporadic(task) && is_tardy(task, now)) { | ||
| 582 | /* new sporadic release */ | ||
| 583 | release_at(task, now); | ||
| 584 | sched_trace_task_release(task); | ||
| 585 | } | ||
| 586 | gsnedf_job_arrival(task); | ||
| 587 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
| 588 | } | ||
| 589 | |||
| 590 | static void gsnedf_task_block(struct task_struct *t) | ||
| 591 | { | ||
| 592 | unsigned long flags; | ||
| 593 | |||
| 594 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); | ||
| 595 | |||
| 596 | /* unlink if necessary */ | ||
| 597 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
| 598 | unlink(t); | ||
| 599 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
| 600 | |||
| 601 | BUG_ON(!is_realtime(t)); | ||
| 602 | } | ||
| 603 | |||
| 604 | |||
| 605 | static void gsnedf_task_exit(struct task_struct * t) | ||
| 606 | { | ||
| 607 | unsigned long flags; | ||
| 608 | |||
| 609 | /* unlink if necessary */ | ||
| 610 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
| 611 | unlink(t); | ||
| 612 | if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
| 613 | gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | ||
| 614 | tsk_rt(t)->scheduled_on = NO_CPU; | ||
| 615 | } | ||
| 616 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
| 617 | |||
| 618 | BUG_ON(!is_realtime(t)); | ||
| 619 | TRACE_TASK(t, "RIP\n"); | ||
| 620 | } | ||
| 621 | |||
| 622 | |||
| 623 | static long gsnedf_admit_task(struct task_struct* tsk) | ||
| 624 | { | ||
| 625 | return 0; | ||
| 626 | } | ||
| 627 | |||
| 628 | #ifdef CONFIG_LITMUS_LOCKING | ||
| 629 | |||
| 630 | #include <litmus/fdso.h> | ||
| 631 | |||
| 632 | /* called with IRQs off */ | ||
| 633 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
| 634 | { | ||
| 635 | int linked_on; | ||
| 636 | int check_preempt = 0; | ||
| 637 | |||
| 638 | raw_spin_lock(&gsnedf_lock); | ||
| 639 | |||
| 640 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | ||
| 641 | tsk_rt(t)->inh_task = prio_inh; | ||
| 642 | |||
| 643 | linked_on = tsk_rt(t)->linked_on; | ||
| 644 | |||
| 645 | /* If it is scheduled, then we need to reorder the CPU heap. */ | ||
| 646 | if (linked_on != NO_CPU) { | ||
| 647 | TRACE_TASK(t, "%s: linked on %d\n", | ||
| 648 | __FUNCTION__, linked_on); | ||
| 649 | /* Holder is scheduled; need to re-order CPUs. | ||
| 650 | * We can't use heap_decrease() here since | ||
| 651 | * the cpu_heap is ordered in reverse direction, so | ||
| 652 | * it is actually an increase. */ | ||
| 653 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | ||
| 654 | gsnedf_cpus[linked_on]->hn); | ||
| 655 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | ||
| 656 | gsnedf_cpus[linked_on]->hn); | ||
| 657 | } else { | ||
| 658 | /* holder may be queued: first stop queue changes */ | ||
| 659 | raw_spin_lock(&gsnedf.release_lock); | ||
| 660 | if (is_queued(t)) { | ||
| 661 | TRACE_TASK(t, "%s: is queued\n", | ||
| 662 | __FUNCTION__); | ||
| 663 | /* We need to update the position of holder in some | ||
| 664 | * heap. Note that this could be a release heap if | ||
| 665 | * budget enforcement is used and this job overran. */ | ||
| 666 | check_preempt = | ||
| 667 | !bheap_decrease(edf_ready_order, | ||
| 668 | tsk_rt(t)->heap_node); | ||
| 669 | } else { | ||
| 670 | /* Nothing to do: if it is not queued and not linked | ||
| 671 | * then it is either sleeping or currently being moved | ||
| 672 | * by other code (e.g., a timer interrupt handler) that | ||
| 673 | * will use the correct priority when enqueuing the | ||
| 674 | * task. */ | ||
| 675 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | ||
| 676 | __FUNCTION__); | ||
| 677 | } | ||
| 678 | raw_spin_unlock(&gsnedf.release_lock); | ||
| 679 | |||
| 680 | /* If holder was enqueued in a release heap, then the following | ||
| 681 | * preemption check is pointless, but we can't easily detect | ||
| 682 | * that case. If you want to fix this, then consider that | ||
| 683 | * simply adding a state flag requires O(n) time to update when | ||
| 684 | * releasing n tasks, which conflicts with the goal to have | ||
| 685 | * O(log n) merges. */ | ||
| 686 | if (check_preempt) { | ||
| 687 | /* heap_decrease() hit the top level of the heap: make | ||
| 688 | * sure preemption checks get the right task, not the | ||
| 689 | * potentially stale cache. */ | ||
| 690 | bheap_uncache_min(edf_ready_order, | ||
| 691 | &gsnedf.ready_queue); | ||
| 692 | check_for_preemptions(); | ||
| 693 | } | ||
| 694 | } | ||
| 695 | |||
| 696 | raw_spin_unlock(&gsnedf_lock); | ||
| 697 | } | ||
| 698 | |||
| 699 | /* called with IRQs off */ | ||
| 700 | static void clear_priority_inheritance(struct task_struct* t) | ||
| 701 | { | ||
| 702 | raw_spin_lock(&gsnedf_lock); | ||
| 703 | |||
| 704 | /* A job only stops inheriting a priority when it releases a | ||
| 705 | * resource. Thus we can make the following assumption.*/ | ||
| 706 | BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU); | ||
| 707 | |||
| 708 | TRACE_TASK(t, "priority restored\n"); | ||
| 709 | tsk_rt(t)->inh_task = NULL; | ||
| 710 | |||
| 711 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
| 712 | * since the priority was effectively lowered. */ | ||
| 713 | unlink(t); | ||
| 714 | gsnedf_job_arrival(t); | ||
| 715 | |||
| 716 | raw_spin_unlock(&gsnedf_lock); | ||
| 717 | } | ||
| 718 | |||
| 719 | |||
| 720 | /* ******************** FMLP support ********************** */ | ||
| 721 | |||
| 722 | /* struct for semaphore with priority inheritance */ | ||
| 723 | struct fmlp_semaphore { | ||
| 724 | struct litmus_lock litmus_lock; | ||
| 725 | |||
| 726 | /* current resource holder */ | ||
| 727 | struct task_struct *owner; | ||
| 728 | |||
| 729 | /* highest-priority waiter */ | ||
| 730 | struct task_struct *hp_waiter; | ||
| 731 | |||
| 732 | /* FIFO queue of waiting tasks */ | ||
| 733 | wait_queue_head_t wait; | ||
| 734 | }; | ||
| 735 | |||
| 736 | static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock) | ||
| 737 | { | ||
| 738 | return container_of(lock, struct fmlp_semaphore, litmus_lock); | ||
| 739 | } | ||
| 740 | |||
| 741 | /* caller is responsible for locking */ | ||
| 742 | struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem, | ||
| 743 | struct task_struct* skip) | ||
| 744 | { | ||
| 745 | struct list_head *pos; | ||
| 746 | struct task_struct *queued, *found = NULL; | ||
| 747 | |||
| 748 | list_for_each(pos, &sem->wait.task_list) { | ||
| 749 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
| 750 | task_list)->private; | ||
| 751 | |||
| 752 | /* Compare task prios, find high prio task. */ | ||
| 753 | if (queued != skip && edf_higher_prio(queued, found)) | ||
| 754 | found = queued; | ||
| 755 | } | ||
| 756 | return found; | ||
| 757 | } | ||
| 758 | |||
| 759 | int gsnedf_fmlp_lock(struct litmus_lock* l) | ||
| 760 | { | ||
| 761 | struct task_struct* t = current; | ||
| 762 | struct fmlp_semaphore *sem = fmlp_from_lock(l); | ||
| 763 | wait_queue_t wait; | ||
| 764 | unsigned long flags; | ||
| 765 | |||
| 766 | if (!is_realtime(t)) | ||
| 767 | return -EPERM; | ||
| 768 | |||
| 769 | /* prevent nested lock acquisition --- not supported by FMLP */ | ||
| 770 | if (tsk_rt(t)->num_locks_held) | ||
| 771 | return -EBUSY; | ||
| 772 | |||
| 773 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
| 774 | |||
| 775 | if (sem->owner) { | ||
| 776 | /* resource is not free => must suspend and wait */ | ||
| 777 | |||
| 778 | init_waitqueue_entry(&wait, t); | ||
| 779 | |||
| 780 | /* FIXME: interruptible would be nice some day */ | ||
| 781 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
| 782 | |||
| 783 | __add_wait_queue_tail_exclusive(&sem->wait, &wait); | ||
| 784 | |||
| 785 | /* check if we need to activate priority inheritance */ | ||
| 786 | if (edf_higher_prio(t, sem->hp_waiter)) { | ||
| 787 | sem->hp_waiter = t; | ||
| 788 | if (edf_higher_prio(t, sem->owner)) | ||
| 789 | set_priority_inheritance(sem->owner, sem->hp_waiter); | ||
| 790 | } | ||
| 791 | |||
| 792 | TS_LOCK_SUSPEND; | ||
| 793 | |||
| 794 | /* release lock before sleeping */ | ||
| 795 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
| 796 | |||
| 797 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
| 798 | * when we wake up; we are guaranteed to have the lock since | ||
| 799 | * there is only one wake up per release. | ||
| 800 | */ | ||
| 801 | |||
| 802 | schedule(); | ||
| 803 | |||
| 804 | TS_LOCK_RESUME; | ||
| 805 | |||
| 806 | /* Since we hold the lock, no other task will change | ||
| 807 | * ->owner. We can thus check it without acquiring the spin | ||
| 808 | * lock. */ | ||
| 809 | BUG_ON(sem->owner != t); | ||
| 810 | } else { | ||
| 811 | /* it's ours now */ | ||
| 812 | sem->owner = t; | ||
| 813 | |||
| 814 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
| 815 | } | ||
| 816 | |||
| 817 | tsk_rt(t)->num_locks_held++; | ||
| 818 | |||
| 819 | return 0; | ||
| 820 | } | ||
| 821 | |||
| 822 | int gsnedf_fmlp_unlock(struct litmus_lock* l) | ||
| 823 | { | ||
| 824 | struct task_struct *t = current, *next; | ||
| 825 | struct fmlp_semaphore *sem = fmlp_from_lock(l); | ||
| 826 | unsigned long flags; | ||
| 827 | int err = 0; | ||
| 828 | |||
| 829 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
| 830 | |||
| 831 | if (sem->owner != t) { | ||
| 832 | err = -EINVAL; | ||
| 833 | goto out; | ||
| 834 | } | ||
| 835 | |||
| 836 | tsk_rt(t)->num_locks_held--; | ||
| 837 | |||
| 838 | /* check if there are jobs waiting for this resource */ | ||
| 839 | next = __waitqueue_remove_first(&sem->wait); | ||
| 840 | if (next) { | ||
| 841 | /* next becomes the resource holder */ | ||
| 842 | sem->owner = next; | ||
| 843 | TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); | ||
| 844 | |||
| 845 | /* determine new hp_waiter if necessary */ | ||
| 846 | if (next == sem->hp_waiter) { | ||
| 847 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
| 848 | /* next has the highest priority --- it doesn't need to | ||
| 849 | * inherit. However, we need to make sure that the | ||
| 850 | * next-highest priority in the queue is reflected in | ||
| 851 | * hp_waiter. */ | ||
| 852 | sem->hp_waiter = find_hp_waiter(sem, next); | ||
| 853 | if (sem->hp_waiter) | ||
| 854 | TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n"); | ||
| 855 | else | ||
| 856 | TRACE("no further waiters\n"); | ||
| 857 | } else { | ||
| 858 | /* Well, if next is not the highest-priority waiter, | ||
| 859 | * then it ought to inherit the highest-priority | ||
| 860 | * waiter's priority. */ | ||
| 861 | set_priority_inheritance(next, sem->hp_waiter); | ||
| 862 | } | ||
| 863 | |||
| 864 | /* wake up next */ | ||
| 865 | wake_up_process(next); | ||
| 866 | } else | ||
| 867 | /* becomes available */ | ||
| 868 | sem->owner = NULL; | ||
| 869 | |||
| 870 | /* we lose the benefit of priority inheritance (if any) */ | ||
| 871 | if (tsk_rt(t)->inh_task) | ||
| 872 | clear_priority_inheritance(t); | ||
| 873 | |||
| 874 | out: | ||
| 875 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
| 876 | |||
| 877 | return err; | ||
| 878 | } | ||
| 879 | |||
| 880 | int gsnedf_fmlp_close(struct litmus_lock* l) | ||
| 881 | { | ||
| 882 | struct task_struct *t = current; | ||
| 883 | struct fmlp_semaphore *sem = fmlp_from_lock(l); | ||
| 884 | unsigned long flags; | ||
| 885 | |||
| 886 | int owner; | ||
| 887 | |||
| 888 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
| 889 | |||
| 890 | owner = sem->owner == t; | ||
| 891 | |||
| 892 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
| 893 | |||
| 894 | if (owner) | ||
| 895 | gsnedf_fmlp_unlock(l); | ||
| 896 | |||
| 897 | return 0; | ||
| 898 | } | ||
| 899 | |||
| 900 | void gsnedf_fmlp_free(struct litmus_lock* lock) | ||
| 901 | { | ||
| 902 | kfree(fmlp_from_lock(lock)); | ||
| 903 | } | ||
| 904 | |||
| 905 | static struct litmus_lock_ops gsnedf_fmlp_lock_ops = { | ||
| 906 | .close = gsnedf_fmlp_close, | ||
| 907 | .lock = gsnedf_fmlp_lock, | ||
| 908 | .unlock = gsnedf_fmlp_unlock, | ||
| 909 | .deallocate = gsnedf_fmlp_free, | ||
| 910 | }; | ||
| 911 | |||
| 912 | static struct litmus_lock* gsnedf_new_fmlp(void) | ||
| 913 | { | ||
| 914 | struct fmlp_semaphore* sem; | ||
| 915 | |||
| 916 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
| 917 | if (!sem) | ||
| 918 | return NULL; | ||
| 919 | |||
| 920 | sem->owner = NULL; | ||
| 921 | sem->hp_waiter = NULL; | ||
| 922 | init_waitqueue_head(&sem->wait); | ||
| 923 | sem->litmus_lock.ops = &gsnedf_fmlp_lock_ops; | ||
| 924 | |||
| 925 | return &sem->litmus_lock; | ||
| 926 | } | ||
| 927 | |||
| 928 | /* **** lock constructor **** */ | ||
| 929 | |||
| 930 | |||
| 931 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | ||
| 932 | void* __user unused) | ||
| 933 | { | ||
| 934 | int err = -ENXIO; | ||
| 935 | |||
| 936 | /* GSN-EDF currently only supports the FMLP for global resources. */ | ||
| 937 | switch (type) { | ||
| 938 | |||
| 939 | case FMLP_SEM: | ||
| 940 | /* Flexible Multiprocessor Locking Protocol */ | ||
| 941 | *lock = gsnedf_new_fmlp(); | ||
| 942 | if (*lock) | ||
| 943 | err = 0; | ||
| 944 | else | ||
| 945 | err = -ENOMEM; | ||
| 946 | break; | ||
| 947 | |||
| 948 | }; | ||
| 949 | |||
| 950 | return err; | ||
| 951 | } | ||
| 952 | |||
| 953 | #endif | ||
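For context (reviewer addition, not part of the commit): user space reaches gsnedf_allocate_lock() through the FDSO layer. Below is a minimal, hypothetical sketch of how a task might share and use an FMLP semaphore via liblitmus; the helper names (open_fmlp_sem, litmus_lock, litmus_unlock, od_close) are recalled from liblitmus and should be checked against the matching liblitmus release.

```c
#include <fcntl.h>
#include <unistd.h>
#include <litmus.h>	/* liblitmus user-space API (assumed available) */

void fmlp_critical_section(void)
{
	/* Tasks that open the same file and pass the same resource id share
	 * one semaphore; the kernel allocates it via gsnedf_allocate_lock()
	 * with type FMLP_SEM. */
	int fd = open("/tmp/fmlp-ns", O_RDONLY | O_CREAT, 0666);
	int od = open_fmlp_sem(fd, 0 /* resource id */);

	litmus_lock(od);	/* may suspend; enters gsnedf_fmlp_lock() */
	/* ... critical section ... */
	litmus_unlock(od);	/* passes ownership to the next FIFO waiter */

	od_close(od);
	close(fd);
}
```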
| 954 | |||
| 955 | |||
| 956 | static long gsnedf_activate_plugin(void) | ||
| 957 | { | ||
| 958 | int cpu; | ||
| 959 | cpu_entry_t *entry; | ||
| 960 | |||
| 961 | bheap_init(&gsnedf_cpu_heap); | ||
| 962 | #ifdef CONFIG_RELEASE_MASTER | ||
| 963 | gsnedf.release_master = atomic_read(&release_master_cpu); | ||
| 964 | #endif | ||
| 965 | |||
| 966 | for_each_online_cpu(cpu) { | ||
| 967 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | ||
| 968 | bheap_node_init(&entry->hn, entry); | ||
| 969 | entry->linked = NULL; | ||
| 970 | entry->scheduled = NULL; | ||
| 971 | #ifdef CONFIG_RELEASE_MASTER | ||
| 972 | if (cpu != gsnedf.release_master) { | ||
| 973 | #endif | ||
| 974 | TRACE("GSN-EDF: Initializing CPU #%d.\n", cpu); | ||
| 975 | update_cpu_position(entry); | ||
| 976 | #ifdef CONFIG_RELEASE_MASTER | ||
| 977 | } else { | ||
| 978 | TRACE("GSN-EDF: CPU %d is release master.\n", cpu); | ||
| 979 | } | ||
| 980 | #endif | ||
| 981 | } | ||
| 982 | return 0; | ||
| 983 | } | ||
| 984 | |||
| 985 | /* Plugin object */ | ||
| 986 | static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | ||
| 987 | .plugin_name = "GSN-EDF", | ||
| 988 | .finish_switch = gsnedf_finish_switch, | ||
| 989 | .tick = gsnedf_tick, | ||
| 990 | .task_new = gsnedf_task_new, | ||
| 991 | .complete_job = complete_job, | ||
| 992 | .task_exit = gsnedf_task_exit, | ||
| 993 | .schedule = gsnedf_schedule, | ||
| 994 | .task_wake_up = gsnedf_task_wake_up, | ||
| 995 | .task_block = gsnedf_task_block, | ||
| 996 | .admit_task = gsnedf_admit_task, | ||
| 997 | .activate_plugin = gsnedf_activate_plugin, | ||
| 998 | #ifdef CONFIG_LITMUS_LOCKING | ||
| 999 | .allocate_lock = gsnedf_allocate_lock, | ||
| 1000 | #endif | ||
| 1001 | }; | ||
| 1002 | |||
| 1003 | |||
| 1004 | static int __init init_gsn_edf(void) | ||
| 1005 | { | ||
| 1006 | int cpu; | ||
| 1007 | cpu_entry_t *entry; | ||
| 1008 | |||
| 1009 | bheap_init(&gsnedf_cpu_heap); | ||
| 1010 | /* initialize CPU state */ | ||
| 1011 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
| 1012 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | ||
| 1013 | gsnedf_cpus[cpu] = entry; | ||
| 1014 | entry->cpu = cpu; | ||
| 1015 | entry->hn = &gsnedf_heap_node[cpu]; | ||
| 1016 | bheap_node_init(&entry->hn, entry); | ||
| 1017 | } | ||
| 1018 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | ||
| 1019 | return register_sched_plugin(&gsn_edf_plugin); | ||
| 1020 | } | ||
| 1021 | |||
| 1022 | |||
| 1023 | module_init(init_gsn_edf); | ||
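For completeness (reviewer addition, not part of the commit): once registered, the plugin is typically activated by writing "GSN-EDF" to /proc/litmus/active_plugin (which is what liblitmus's setsched helper does), and tasks then enter it through the usual liblitmus skeleton. The sketch below is loosely based on liblitmus's base_task.c; field and function names are from memory and may differ between liblitmus versions.

```c
#include <litmus.h>	/* liblitmus user-space API (assumed available) */

int main(void)
{
	struct rt_task param;
	int i;

	init_rt_task_param(&param);
	param.exec_cost = ms2ns(10);	/* worst-case execution time per job */
	param.period    = ms2ns(100);	/* period (implicit relative deadline) */
	param.cls       = RT_CLASS_SOFT;

	init_litmus();
	set_rt_task_param(gettid(), &param);
	task_mode(LITMUS_RT_TASK);	/* becomes a LITMUS^RT task; the active
					 * plugin's admit_task() is consulted */

	for (i = 0; i < 100; i++) {
		/* ... one job's worth of work ... */
		sleep_next_period();	/* job boundary; maps to complete_job() */
	}

	task_mode(BACKGROUND_TASK);
	return 0;
}
```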
