/* NOTE: the original #include targets were lost (angle brackets stripped);
 * the headers below are the kernel and LITMUS^RT headers that the APIs used
 * in this file come from. */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/sort.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/rt_domain.h>
#include <litmus/sched_trace.h>
#include <litmus/trace.h>
#include <litmus/preempt.h>
#include <litmus/budget.h>
#include <litmus/bheap.h>

typedef struct {
	int cpu;
	struct task_struct* linked;
	struct task_struct* scheduled; // container or migrating task
	atomic_t will_schedule;
	struct bheap_node* hn;
} cpu_entry_t;

/* Per-container state. rt_domain_t must remain the first member so that a
 * cont_domain_t* can be passed where an rt_domain_t* is expected. */
typedef struct {
	rt_domain_t domain;
	struct task_struct* container;
	struct task_struct* scheduled; // fixed task
	lt_t scheduled_last_exec_time; // exec_time of the scheduled task when it was last scheduled
	lt_t changed_budget; // change to scheduled task's exec time due to container budget constraints
	u64 f_util, future_f_util;
	struct bheap_node* hn;
#define c_lock domain.ready_lock
} cont_domain_t;

static LIST_HEAD(pending_adds);
static LIST_HEAD(migrating_tasks);

static struct hrtimer container_release_timer;

DEFINE_PER_CPU(cpu_entry_t, edfsc_cpu_entries);

static cpu_entry_t* edfsc_cpus[NR_CPUS];
static struct task_struct* container_tasks[NR_CPUS];
static cont_domain_t container_domains[NR_CPUS];
static cont_domain_t* container_list[NR_CPUS];
static rt_domain_t gsched_domain;
#define g_lock (gsched_domain.ready_lock)

static u64 m_util, future_m_util;
static u64 sys_util, future_sys_util;

/* Task classification. edfsc_params.container_domain is assumed to be a
 * cont_domain_t*: a container server points at the cont_domain_t it owns, a
 * fixed task points at its container's domain (and at the container via
 * container_task), and a migrating task stores &gsched_domain (cast, relying
 * on rt_domain_t being the first member of cont_domain_t). */
#define is_container(task) \
	((task) && tsk_rt(task)->edfsc_params.container_task == NULL \
	 && tsk_rt(task)->edfsc_params.container_domain != NULL \
	 && tsk_rt(task)->edfsc_params.container_domain != (cont_domain_t*)&gsched_domain)
#define is_fixed(task) \
	((task) && tsk_rt(task)->edfsc_params.container_task != NULL)
#define is_migrating(task) \
	((task) && tsk_rt(task)->edfsc_params.container_task == NULL \
	 && tsk_rt(task)->edfsc_params.container_domain == (cont_domain_t*)&gsched_domain)

/* 20-bit fixed-point utilization helpers */
#define FP_SHIFT 20
#define fp_div(a, b) (((a) * (1 << FP_SHIFT)) / (b))
#define to_fp(a) ((a) << FP_SHIFT)
#define from_fp(a) ((a) >> FP_SHIFT)

static noinline void requeue(struct task_struct* task);
static void g_preempt_check(void);

// sort() comparator: orders containers by increasing future fixed utilization
static int container_lower_prio(const void* _a, const void* _b)
{
	const cont_domain_t *a, *b;
	a = *(const cont_domain_t* const*)_a;
	b = *(const cont_domain_t* const*)_b;
	if (a->future_f_util < b->future_f_util)
		return -1;
	if (a->future_f_util > b->future_f_util)
		return 1;
	return 0;
}

// recovers the task_struct that owns an edfsc_params.qnode list node
static struct task_struct* task_of_list_node(struct list_head* node)
{
	struct edfsc_params* a = container_of(node, struct edfsc_params, qnode);
	struct rt_param* b = container_of(a, struct rt_param, edfsc_params);
	return container_of(b, struct task_struct, rt_param);
}

static enum hrtimer_restart container_boundary(struct hrtimer* timer)
{
	int i;
	struct list_head *it;
	struct list_head *temp;
	u64 u_extra;
	int need_reweight = 0;

	raw_spin_lock(&g_lock);

	/* try to admit the tasks that arrived since the last boundary */
	list_for_each_safe(it, temp, &pending_adds) {
		cont_domain_t* container = NULL;
		struct task_struct* t = task_of_list_node(it);
		/* detach from pending_adds up front so qnode can be reused below */
		list_del_init(it);
		u_extra = to_fp(NR_CPUS) - sys_util;
		if (u_extra >= tsk_rt(t)->task_params.utilization) {
			for (i = 0; i < NR_CPUS; i++) {
				s64 leftover = to_fp(1) - container_domains[i].future_f_util
					- tsk_rt(t)->task_params.utilization;
				if (leftover >= 0) {
					container = &(container_domains[i]);
					break;
				}
			}
			if (container) {
				/* becomes a fixed task of that container */
				tsk_rt(t)->edfsc_params.container_domain = container;
				tsk_rt(t)->edfsc_params.container_task = container->container;
				requeue(t);
				container->f_util += tsk_rt(t)->task_params.utilization;
				container->future_f_util += tsk_rt(t)->task_params.utilization;
			} else {
				/* becomes a migrating task in the global domain */
				tsk_rt(t)->edfsc_params.container_domain = (cont_domain_t*)&gsched_domain;
				requeue(t);
				m_util += tsk_rt(t)->task_params.utilization;
				future_m_util += tsk_rt(t)->task_params.utilization;
				list_add(&tsk_rt(t)->edfsc_params.qnode, &migrating_tasks);
			}
			sys_util += tsk_rt(t)->task_params.utilization;
			need_reweight = 1;
		} else {
			/* not schedulable: hand the task back to the stock scheduler */
			struct sched_param param = { .sched_priority = 0 };
			sched_setscheduler_nocheck(t, SCHED_NORMAL, &param);
			//TODO: how to make the task not scheduled by us anymore?
		}
	}

	/* decide which migrating tasks can become fixed tasks at their next release */
	list_for_each(it, &migrating_tasks) {
		struct task_struct* t = task_of_list_node(it);
		if (!(tsk_rt(t)->edfsc_params.will_remove) && !is_released(t, litmus_clock())
		    && get_deadline(t) < get_deadline(container_tasks[0]) + get_period(container_tasks[0])) {
			cont_domain_t* container = NULL;
			tsk_rt(t)->edfsc_params.will_remove = 1;
			tsk_rt(t)->edfsc_params.move_to = NULL;
			for (i = 0; i < NR_CPUS; i++) {
				s64 leftover = to_fp(1) - container_domains[i].future_f_util
					- tsk_rt(t)->task_params.utilization;
				if (leftover >= 0) {
					container = &(container_domains[i]);
					break;
				}
			}
			if (container) {
				container->future_f_util += tsk_rt(t)->task_params.utilization;
				tsk_rt(t)->edfsc_params.move_to = container;
				need_reweight = 1;
			}
		}
	}

	/* redistribute the spare capacity among the container servers */
	if (need_reweight) {
		int remaining;
		struct task_struct* t;
		sort(container_list, NR_CPUS, sizeof(cont_domain_t*), container_lower_prio, NULL);
		u_extra = to_fp(NR_CPUS) - future_sys_util;
		i = 0;
		while (i < NR_CPUS && u_extra >= to_fp(1) - container_list[i]->future_f_util) {
			t = container_list[i]->container;
			tsk_rt(t)->task_params.exec_cost = tsk_rt(t)->task_params.period;
			tsk_rt(t)->task_params.utilization = to_fp(1);
			u_extra -= to_fp(1) - container_list[i]->future_f_util;
			i++;
		}
		remaining = NR_CPUS - i;
		while (i < NR_CPUS) {
			t = container_list[i]->container;
			tsk_rt(t)->task_params.utilization =
				container_list[i]->future_f_util + u_extra / remaining;
			tsk_rt(t)->task_params.exec_cost =
				from_fp(tsk_rt(t)->task_params.utilization * tsk_rt(t)->task_params.period);
			i++;
		}
	}

	/* release the containers whose budgets are exhausted; the others release
	 * themselves when their current job completes (can_release) */
	for (i = 0; i < NR_CPUS; i++) {
		struct task_struct* t = container_tasks[i];
		if (budget_exhausted(t)) {
			prepare_for_next_period(t);
			if (is_early_releasing(t) || is_released(t, litmus_clock()))
				sched_trace_task_release(t);
			/* requeue
			 * But don't requeue a blocking task. */
			if (is_current_running()) { // since we don't support blocking, this should always be true
				if (is_container(t) && is_migrating(tsk_rt(t)->edfsc_params.container_domain->scheduled)) {
					requeue(tsk_rt(t)->edfsc_params.container_domain->scheduled);
				}
				requeue(t);
				g_preempt_check();
			}
		} else {
			tsk_rt(container_tasks[i])->edfsc_params.can_release = 1;
		}
	}

	raw_spin_unlock(&g_lock);

	/* the draft never re-arms this timer here, so keep it one-shot */
	return HRTIMER_NORESTART;
}

// preempts whatever is scheduled on that core; if it's a container, preempt its fixed task
// works if entry->scheduled is NULL
static void preempt(cpu_entry_t *entry)
{
	if (is_container(entry->scheduled))
		preempt_if_preemptable(tsk_rt(entry->scheduled)->edfsc_params.container_domain->scheduled,
				       entry->cpu);
	else
		preempt_if_preemptable(entry->scheduled, entry->cpu);
}

// requeues task in the domain recorded in its edfsc_params
static noinline void requeue(struct task_struct* task)
{
	rt_domain_t* dom;

	BUG_ON(!task);
	/* sanity check before insertion */
	BUG_ON(is_queued(task));

	/* rt_domain_t is the first member of cont_domain_t, so this cast is
	 * valid for fixed tasks as well as for migrating tasks */
	dom = (rt_domain_t*)tsk_rt(task)->edfsc_params.container_domain;

	if (is_early_releasing(task) || is_released(task, litmus_clock()))
		__add_ready(dom, task);
	else {
		/* it has got to wait */
		add_release(dom, task);
	}
}

/////////////////////////////////////////////////////////////////////////////////////
/*
 *
 * CPU ORDERING
 *
 */

static struct bheap_node edfsc_cpu_heap_node[NR_CPUS];
static struct bheap edfsc_cpu_heap;

static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
	cpu_entry_t *a, *b;
	a = _a->value;
	b = _b->value;
	/* Note that a and b are inverted: we want the lowest-priority CPU at
	 * the top of the heap.
	 */
	return (is_container(b->linked) && tsk_rt(b->linked)->task_params.utilization == to_fp(1))
		|| edf_higher_prio(b->linked, a->linked);
}

/* caller must hold g_lock */
static cpu_entry_t* lowest_prio_cpu(void)
{
	struct bheap_node* hn;
	hn = bheap_peek(cpu_lower_prio, &edfsc_cpu_heap);
	return hn->value;
}

/* update_cpu_position - Move the cpu entry to the correct place to maintain
 *                       order in the cpu queue. Caller must hold g_lock.
 */
static void update_cpu_position(cpu_entry_t *entry)
{
	if (likely(bheap_node_in_heap(entry->hn)))
		bheap_delete(cpu_lower_prio, &edfsc_cpu_heap, entry->hn);
	bheap_insert(cpu_lower_prio, &edfsc_cpu_heap, entry->hn);
}

/* link_task_to_cpu - Links a migrating task or container to a CPU
 *                    Update the link of a CPU.
 *                    Handles the case where the to-be-linked task is already
 *                    scheduled on a different CPU.
 */
static noinline void link_task_to_cpu(struct task_struct* linked, cpu_entry_t *entry)
{
	cpu_entry_t *sched;
	struct task_struct* tmp;
	int on_cpu;

	BUG_ON(linked && !is_realtime(linked));
	BUG_ON(is_fixed(linked));

	/* Currently linked task is set to be unlinked. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
	}

	/* Link new task to CPU. */
	if (linked) {
		/* handle task is already scheduled somewhere! */
		on_cpu = linked->rt_param.scheduled_on;
		if (on_cpu != NO_CPU) {
			sched = &per_cpu(edfsc_cpu_entries, on_cpu);
			/* this should only happen if not linked already */
			BUG_ON(sched->linked == linked);

			/* If we are already scheduled on the CPU to which we
			 * wanted to link, we don't need to do the swap --
			 * we just link ourselves to the CPU and depend on
			 * the caller to get things right.
			 */
			if (entry != sched) {
				TRACE_TASK(linked,
					   "already scheduled on %d, updating link.\n",
					   sched->cpu);
				tmp = sched->linked;
				linked->rt_param.linked_on = sched->cpu;
				sched->linked = linked;
				update_cpu_position(sched);
				linked = tmp;
			}
		}
		if (linked) /* might be NULL due to swap */
			linked->rt_param.linked_on = entry->cpu;
	}
	entry->linked = linked;
#ifdef WANT_ALL_SCHED_EVENTS
	if (linked)
		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
	else
		TRACE("NULL linked to %d.\n", entry->cpu);
#endif
	update_cpu_position(entry);
}

/* unlink - Make sure a task is not linked any longer to an entry
 *          where it was linked before. Must hold g_lock.
 */
static noinline void unlink(struct task_struct* t)
{
	cpu_entry_t *entry;

	if (t->rt_param.linked_on != NO_CPU) {
		/* unlink */
		entry = &per_cpu(edfsc_cpu_entries, t->rt_param.linked_on);
		t->rt_param.linked_on = NO_CPU;
		link_task_to_cpu(NULL, entry);
	} else if (is_queued(t)) {
		/* This is an interesting situation: t is scheduled,
		 * but was just recently unlinked.  It cannot be
		 * linked anywhere else (because then it would have
		 * been relinked to this CPU), thus it must be in some
		 * queue. We must remove it from the list in this
		 * case.
		 */
		remove(&gsched_domain, t);
	}
}

//TODO change local linking
static void g_preempt_check(void)
{
	struct task_struct *task;
	cpu_entry_t *last;
	cpu_entry_t *target;

#ifdef CONFIG_PREFER_LOCAL_LINKING
	cpu_entry_t *local;

	/* Before linking to other CPUs, check first whether the local CPU is
	 * idle.
	 */
	local = this_cpu_ptr(&edfsc_cpu_entries);
	task  = __peek_ready(&gsched_domain);

	if (task && !local->linked) {
		task = __take_ready(&gsched_domain);
		TRACE_TASK(task, "linking to local CPU %d to avoid IPI\n", local->cpu);
		link_task_to_cpu(task, local);
		preempt(local);
	}
#endif

	for (last = lowest_prio_cpu();
	     edf_preemption_needed(&gsched_domain, last->linked);
	     last = lowest_prio_cpu()) {
		target = last;
		/* a fully provisioned container is never preempted */
		if (is_container(last->linked)
		    && tsk_rt(last->linked)->task_params.utilization == to_fp(1))
			break;
		/* preemption necessary */
		task = __take_ready(&gsched_domain);
		/* containers are pinned to their dedicated CPU */
		if (is_container(task))
			target = edfsc_cpus[tsk_rt(task)->edfsc_params.id];
		if (requeue_preempted_job(last->linked))
			requeue(last->linked);
		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
		      task->pid, target->cpu);
		if (target != last) {
			TRACE("check_for_preemptions: swapping tasks linked on %d and %d\n",
			      last->cpu, target->cpu);
			link_task_to_cpu(target->linked, last);
			preempt(last);
		}
		link_task_to_cpu(task, target);
		preempt(target);
	}
}

static int c_preempt_check(cont_domain_t* container)
{
	if (is_migrating(container->scheduled)
	    || edf_preemption_needed(&container->domain, container->scheduled)) {
		/* preempt the CPU on which this container is currently running */
		int cpu = tsk_rt(container->container)->scheduled_on;
		if (cpu != NO_CPU)
			preempt(&per_cpu(edfsc_cpu_entries, cpu));
		return 1;
	} else
		return 0;
}

static void g_remove_task(struct task_struct *t)
{
	m_util -= tsk_rt(t)->task_params.utilization;
	future_m_util -= tsk_rt(t)->task_params.utilization;
	if (tsk_rt(t)->edfsc_params.move_to) {
		/* the task becomes a fixed task of the chosen container */
		tsk_rt(t)->edfsc_params.container_domain = tsk_rt(t)->edfsc_params.move_to;
		tsk_rt(t)->edfsc_params.container_task = tsk_rt(t)->edfsc_params.move_to->container;
		requeue(t);
		tsk_rt(t)->edfsc_params.container_domain->f_util += tsk_rt(t)->task_params.utilization;
	}
}

static void c_remove_task(struct task_struct *t)
{
	struct task_struct* container_task = tsk_rt(t)->edfsc_params.container_task;
	tsk_rt(container_task)->edfsc_params.container_domain->f_util -=
		tsk_rt(t)->task_params.utilization;
	tsk_rt(container_task)->edfsc_params.container_domain->future_f_util -=
		tsk_rt(t)->task_params.utilization;
}

// migrating or container task job_completion, called from edfsc_gschedule
static noinline void g_job_completion(struct task_struct* t, int forced)
{
	BUG_ON(!t);

	sched_trace_task_completion(t, forced);

	TRACE_TASK(t, "job_completion(forced=%d).\n", forced);

	/* set flags */
	tsk_rt(t)->completed = 0;
	/* unlink */
	unlink(t);
	if (is_migrating(t) && tsk_rt(t)->edfsc_params.will_remove) {
		if (tsk_rt(t)->job_params.lateness > 0) {
			// remove the task now
			if (is_queued(t))
				remove((rt_domain_t*)tsk_rt(t)->edfsc_params.container_domain, t);
			g_remove_task(t);
		}
	} else if (is_migrating(t) || (is_container(t) && tsk_rt(t)->edfsc_params.can_release)) {
		tsk_rt(t)->edfsc_params.can_release = 0; // only matters for containers
		/* prepare for next period */
		prepare_for_next_period(t);
		if (is_early_releasing(t) || is_released(t, litmus_clock()))
			sched_trace_task_release(t);
		/* requeue
		 * But don't requeue a blocking task.
		 */
		if (is_current_running()) { // since we don't support blocking, this should always be true
			if (is_container(t) && is_migrating(tsk_rt(t)->edfsc_params.container_domain->scheduled)) {
				requeue(tsk_rt(t)->edfsc_params.container_domain->scheduled);
			}
			requeue(t);
			g_preempt_check();
		}
	}
}

// fixed task job_completion, called from edfsc_cschedule
static void c_job_completion(struct task_struct* t, int forced)
{
	sched_trace_task_completion(t, forced);
	TRACE_TASK(t, "job_completion(forced=%d).\n", forced);

	tsk_rt(t)->completed = 0;
	if (tsk_rt(t)->edfsc_params.will_remove) {
		if (tsk_rt(t)->job_params.lateness > 0) {
			// remove the task now
			if (is_queued(t))
				remove((rt_domain_t*)tsk_rt(t)->edfsc_params.container_domain, t);
			c_remove_task(t);
		}
	} else {
		prepare_for_next_period(t);
	}
}

// need to update cpu entries after global scheduling
static void g_finish_switch(struct task_struct *prev)
{
	cpu_entry_t* entry = this_cpu_ptr(&edfsc_cpu_entries);

	entry->scheduled = is_realtime(current) ? current : NULL;
	/* if a fixed task is running, the core is logically occupied by its container */
	if (entry->scheduled && is_fixed(entry->scheduled))
		entry->scheduled = tsk_rt(entry->scheduled)->edfsc_params.container_task;
#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "switched away from\n");
#endif
}

static int fifo_prio(struct bheap_node* _a, struct bheap_node* _b)
{
	return 0;
}

// takes in the container_domain pointer in container task_struct
// assuming prev is previous task running on the processor before calling schedule
// global lock in effect
static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
{
	rt_domain_t* edf = &cedf->domain;

	struct task_struct* next;
	int out_of_time, sleep, preempt, np, exists, blocks, resched;

	raw_spin_lock(&cedf->c_lock);

	/* sanity checking
	 * differently from gedf, when a task exits (dead)
	 * cedf->scheduled may be null and prev _is_ realtime
	 */
	BUG_ON(cedf->scheduled && cedf->scheduled != prev);
	BUG_ON(cedf->scheduled && !is_realtime(prev));
	BUG_ON(is_migrating(cedf->scheduled));

	/* (0) Determine state */
	exists      = cedf->scheduled != NULL;
	blocks      = exists && !is_current_running();
	out_of_time = exists && budget_enforced(cedf->scheduled)
			     && budget_exhausted(cedf->scheduled);
	np          = exists && is_np(cedf->scheduled);
	sleep       = exists && is_completed(cedf->scheduled);
	preempt     = is_migrating(prev) || edf_preemption_needed(edf, prev);

	/* If we need to preempt do so.
	 * The following checks set resched to 1 in case of special
	 * circumstances.
	 */
	resched = preempt;

	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		resched = 1;

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * Multiple calls to request_exit_np() don't hurt.
	 */
	if (np && (out_of_time || preempt || sleep))
		request_exit_np(cedf->scheduled);

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this.
	 */
	if (!np && (out_of_time || sleep)) {
		if (is_fixed(cedf->scheduled))
			c_job_completion(cedf->scheduled, !sleep);
		else
			g_job_completion(cedf->scheduled, !sleep);
		resched = 1;
	}

	/* The final scheduling decision. Do we need to switch for some reason?
	 * Switch if we are in RT mode and have no task or if we need to
	 * resched.
	 */
	next = NULL;
	if ((!np || blocks) && (resched || !exists)) {
		/* When preempting a task that does not block, then
		 * re-insert it into either the ready queue or the
		 * release queue (if it completed). requeue() picks
		 * the appropriate queue.
		 */
		if (cedf->scheduled && !blocks)
			requeue(cedf->scheduled);
		next = __take_ready(edf);
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	if (next) {
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	} else {
		/* the container has nothing to run: background-schedule a migrating
		 * task from the global domain, skipping over container tasks */
		struct bheap temp;
		bheap_init(&temp);
		next = __take_ready(&gsched_domain);
		while (is_container(next)) {
			bheap_insert(fifo_prio, &temp, tsk_rt(next)->heap_node);
			next = __take_ready(&gsched_domain);
		}
		if (next) {
			TRACE("stealing stack at %llu\n", litmus_clock());
		} else {
			TRACE("cpu becomes idle at %llu\n", litmus_clock());
		}
		/* put the skipped containers back into the global ready queue */
		while (bheap_peek(fifo_prio, &temp)) {
			struct bheap_node* hn = bheap_take(fifo_prio, &temp);
			__add_ready(&gsched_domain, (struct task_struct*)hn->value);
		}
	}

	if (next) {
		/* charge the task so that it cannot outrun the container's remaining budget */
		cedf->changed_budget = (budget_remaining(next) > budget_remaining(cedf->container))
			? budget_remaining(cedf->container) : budget_remaining(next);
		cedf->scheduled_last_exec_time = get_exec_time(next);
		tsk_rt(next)->job_params.exec_time += cedf->changed_budget;
	}
	cedf->scheduled = next;

	raw_spin_unlock(&cedf->c_lock);

	return next;
}

// assuming prev is previous task running on the processor before calling schedule
static struct task_struct* edfsc_gschedule(struct task_struct * prev)
{
	cpu_entry_t* entry = this_cpu_ptr(&edfsc_cpu_entries);
	int out_of_time, sleep, preempt, np, exists, blocks, is_cont;
	struct task_struct* next = NULL;

	raw_spin_lock(&g_lock);

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev && !is_container(entry->scheduled));
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	// update container budget if prev was a fixed or background-scheduled task
	if (entry->scheduled && is_container(entry->scheduled) && prev != entry->scheduled) {
		cont_domain_t* cdomain = tsk_rt(entry->scheduled)->edfsc_params.container_domain;
		tsk_rt(prev)->job_params.exec_time -= cdomain->changed_budget;
		tsk_rt(entry->scheduled)->job_params.exec_time +=
			tsk_rt(prev)->job_params.exec_time - cdomain->scheduled_last_exec_time;
		if (cdomain->changed_budget)
			tsk_rt(prev)->completed = 0;
		if (budget_exhausted(entry->scheduled))
			tsk_rt(entry->scheduled)->completed = 1;
	}

	/* (0) Determine state */
	exists      = entry->scheduled != NULL;
	is_cont     = exists && is_container(entry->scheduled);
	blocks      = exists && !is_current_running();
	out_of_time = exists && budget_enforced(entry->scheduled)
			     && budget_exhausted(entry->scheduled);
	np          = exists && is_np(entry->scheduled);
	sleep       = exists && is_completed(entry->scheduled);
	preempt     = entry->scheduled != entry->linked;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "invoked edfsc_gschedule.\n");
#endif

	if (exists)
		TRACE_TASK(prev,
			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
			   "state:%d sig:%d is_cont:%d\n",
			   blocks, out_of_time, np, sleep, preempt,
			   prev->state, signal_pending(prev), is_cont);
	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
			   entry->linked->comm, entry->linked->pid);

	/* If a task blocks we have no choice but to reschedule.
	 * Note: containers never block since their task struct has state = IS_RUNNING
	 */
	if (blocks)
		unlink(entry->scheduled);

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * We need to make sure to update the link structure anyway in case
	 * that we are still linked. Multiple calls to request_exit_np() don't
	 * hurt.
	 */
	if (np && (out_of_time || preempt || sleep)) {
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this.
	 * Don't do a job completion if we block (can't have timers running
	 * for blocked jobs).
	 */
	if (!np && (out_of_time || sleep))
		g_job_completion(entry->scheduled, !sleep);

	/* Link pending task if we became unlinked.
	 */
	if (!entry->linked)
		link_task_to_cpu(__take_ready(&gsched_domain), entry);

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if ((!np || blocks) && entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
			TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	/* containers are never run directly; schedule within them instead */
	if (is_container(next))
		next = edfsc_cschedule(tsk_rt(next)->edfsc_params.container_domain, prev);

	sched_state_task_picked();

	raw_spin_unlock(&g_lock);

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE("g_lock released, next=0x%p\n", next);

	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());
#endif

	return next;
}

static void edfsc_task_new(struct task_struct* t, int on_rq, int is_scheduled)
{
	unsigned long flags;
	cpu_entry_t* entry;

	raw_spin_lock_irqsave(&g_lock, flags);

	tsk_rt(t)->edfsc_params.will_remove = 0;
	tsk_rt(t)->sporadic_release = 0;

	hrtimer_init(&(tsk_rt(t)->edfsc_params.deadline_timer), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	tsk_rt(t)->edfsc_params.timer_armed = 0;

	tsk_rt(t)->task_params.utilization = fp_div(tsk_rt(t)->task_params.exec_cost,
						    tsk_rt(t)->task_params.period);

	/* the task is admitted (or rejected) at the next container boundary */
	list_add_tail(&tsk_rt(t)->edfsc_params.qnode, &pending_adds);

	if (is_scheduled) {
		entry = &per_cpu(edfsc_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);
		preempt(entry);
	}
	t->rt_param.scheduled_on = NO_CPU;
	t->rt_param.linked_on    = NO_CPU;

	raw_spin_unlock_irqrestore(&g_lock, flags);
}

// finds the task_struct of the hrtimer set by task_exit
static struct task_struct* task_of_timer(struct hrtimer* timer)
{
	struct edfsc_params* a = container_of(timer, struct edfsc_params, deadline_timer);
	struct rt_param* b = container_of(a, struct rt_param, edfsc_params);
	return container_of(b, struct task_struct, rt_param);
}

static enum hrtimer_restart task_deadline_callback(struct hrtimer* timer)
{
	struct task_struct* t = task_of_timer(timer);

	BUG_ON(is_container(t));
	/* the exited task's reserved utilization can now be reclaimed */
	if (!is_released(t, litmus_clock()) || budget_exhausted(t)) {
		if (is_fixed(t))
			c_remove_task(t);
		else
			g_remove_task(t);
	}
	return HRTIMER_NORESTART;
}

static void edfsc_task_exit(struct task_struct* t)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&g_lock, flags);
	tsk_rt(t)->edfsc_params.will_remove = 1;
	if (!is_released(t, litmus_clock())) {
		if (lt_after(tsk_rt(t)->edfsc_params.prev_deadline, litmus_clock())) {
			/* defer the utilization reclamation until the previous deadline */
			if (is_queued(t))
				remove((rt_domain_t*)tsk_rt(t)->edfsc_params.container_domain, t);
			tsk_rt(t)->edfsc_params.deadline_timer.function = task_deadline_callback;
			hrtimer_start(&(tsk_rt(t)->edfsc_params.deadline_timer),
				      ns_to_ktime(tsk_rt(t)->edfsc_params.prev_deadline),
				      HRTIMER_MODE_ABS_PINNED);
			tsk_rt(t)->edfsc_params.timer_armed = 1;
		} else {
			/* the previous deadline has passed: reclaim the utilization now,
			 * mirroring task_deadline_callback() */
			if (is_queued(t))
				remove((rt_domain_t*)tsk_rt(t)->edfsc_params.container_domain, t);
			if (is_fixed(t))
				c_remove_task(t);
			else
				g_remove_task(t);
		}
	} else {
		// reserve the utilization, but remove it from being scheduled by litmus
		unlink(t);
		if (tsk_rt(t)->scheduled_on != NO_CPU) {
			edfsc_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
		/* reclaim the utilization once the current deadline passes;
		 * the callback must be set before the timer is started */
		tsk_rt(t)->edfsc_params.deadline_timer.function = task_deadline_callback;
		hrtimer_start(&(tsk_rt(t)->edfsc_params.deadline_timer),
			      ns_to_ktime(get_deadline(t)),
			      HRTIMER_MODE_ABS_PINNED);
		tsk_rt(t)->edfsc_params.timer_armed = 1;
	}
	raw_spin_unlock_irqrestore(&g_lock, flags);
}
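
/*
 * The draft above defines the scheduling callbacks but never registers them
 * with LITMUS^RT. The block below is a minimal sketch of that missing
 * boilerplate, assuming the standard LITMUS^RT sched_plugin interface
 * (register_sched_plugin(), the generic complete_job() helper, and
 * GSN-EDF-style per-CPU initialization). The "EDF-SC" plugin name and the
 * decision to defer wake-up/blocking handling and container-task creation
 * are assumptions, not part of the original draft.
 */
static struct sched_plugin edfsc_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "EDF-SC",
	.finish_switch		= g_finish_switch,
	.task_new		= edfsc_task_new,
	.complete_job		= complete_job,     /* generic LITMUS^RT helper */
	.task_exit		= edfsc_task_exit,
	.schedule		= edfsc_gschedule,
	/* task_wake_up/task_block omitted: the draft assumes tasks never block */
};

static int __init init_edfsc(void)
{
	int cpu;
	cpu_entry_t* entry;

	bheap_init(&edfsc_cpu_heap);
	edf_domain_init(&gsched_domain, NULL, NULL);

	/* initialize per-CPU state, mirroring the GSN-EDF plugin */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		entry = &per_cpu(edfsc_cpu_entries, cpu);
		edfsc_cpus[cpu] = entry;
		entry->cpu = cpu;
		entry->hn  = &edfsc_cpu_heap_node[cpu];
		bheap_node_init(&entry->hn, entry);
	}

	/* NOTE: creating the container tasks/domains and arming
	 * container_release_timer with container_boundary() is still missing
	 * from the draft and would normally live in an activate_plugin hook. */

	return register_sched_plugin(&edfsc_plugin);
}

module_init(init_edfsc);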