/*
 * kernel/rt_domain.c
 *
 * LITMUS real-time infrastructure. This file contains the
 * functions that manipulate RT domains. RT domains are an abstraction
 * of a ready queue and a release queue.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/sched_trace.h>
#include <litmus/rt_domain.h>
#include <litmus/heap.h>
#include <litmus/trace.h>

/* no-op check_resched callback, used when the plugin does not provide one */
static int dummy_resched(rt_domain_t *rt)
{
	return 0;
}

/* no-op priority order: no task is ever considered higher priority */
static int dummy_order(struct heap_node* a, struct heap_node* b)
{
	return 0;
}

/* default release_job() implementation: add the newly released job to the
 * ready queue, taking the ready lock via add_ready() */
static void default_release_job(struct task_struct* t, rt_domain_t* rt)
{
	add_ready(rt, t);
}

/* hrtimer callback: the release time of a job has been reached, so hand the
 * task to its domain's release_job() handler */
static enum hrtimer_restart release_job_timer(struct hrtimer *timer)
{
	struct task_struct *t;

	TS_RELEASE_START;

	t = container_of(timer, struct task_struct,
			 rt_param.release_timer);

	get_domain(t)->release_job(t, get_domain(t));

	TS_RELEASE_END;

	return HRTIMER_NORESTART;
}

/* arm a one-shot hrtimer that fires at the task's next release time */
static void setup_job_release_timer(struct task_struct *task)
{
	hrtimer_init(&release_timer(task), CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS);
	release_timer(task).function = release_job_timer;
#ifdef CONFIG_HIGH_RES_TIMERS
	release_timer(task).cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
#endif
	/* Expiration time of the timer is the release time of the task. */
	release_timer(task).expires = ns_to_ktime(get_release(task));

	TRACE_TASK(task, "arming release timer rel=%llu at %llu\n",
		   get_release(task), litmus_clock());

	hrtimer_start(&release_timer(task), release_timer(task).expires,
		      HRTIMER_MODE_ABS);
}

/* deferred work: splice the release queue onto a private list while holding
 * the release lock, then arm a release timer for each task without the lock */
static void arm_release_timers(unsigned long _rt)
{
	rt_domain_t *rt = (rt_domain_t*) _rt;
	unsigned long flags;
	struct list_head alt;
	struct list_head *pos, *safe;
	struct task_struct* t;

	spin_lock_irqsave(&rt->release_lock, flags);
	list_replace_init(&rt->release_queue, &alt);
	spin_unlock_irqrestore(&rt->release_lock, flags);

	list_for_each_safe(pos, safe, &alt) {
		t = list_entry(pos, struct task_struct, rt_param.list);
		list_del(pos);
		setup_job_release_timer(t);
	}
}

/* rt_domain_init - initialize a domain; any callback that is not provided
 * falls back to a no-op default */
void rt_domain_init(rt_domain_t *rt,
		    heap_prio_t order,
		    check_resched_needed_t check,
		    release_job_t release)
{
	BUG_ON(!rt);
	if (!check)
		check = dummy_resched;
	if (!release)
		release = default_release_job;
	if (!order)
		order = dummy_order;
	heap_init(&rt->ready_queue);
	INIT_LIST_HEAD(&rt->release_queue);
	spin_lock_init(&rt->ready_lock);
	spin_lock_init(&rt->release_lock);
	rt->check_resched = check;
	rt->release_job   = release;
	rt->order         = order;
	init_no_rqlock_work(&rt->arm_timers, arm_release_timers,
			    (unsigned long) rt);
}

/* __add_ready - add a real-time task to the rt ready queue. It must be
 * runnable. Caller must hold rt->ready_lock.
 * @new: the newly released task
 */
void __add_ready(rt_domain_t* rt, struct task_struct *new)
{
	TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
	      new->comm, new->pid, get_exec_cost(new), get_rt_period(new),
	      get_release(new), litmus_clock());

	BUG_ON(heap_node_in_heap(tsk_rt(new)->heap_node));

	heap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node);
	rt->check_resched(rt);
}

/* __add_release - add a real-time task to the rt release queue.
 * Caller must hold rt->release_lock.
 * @task: the sleeping task
 */
void __add_release(rt_domain_t* rt, struct task_struct *task)
{
	TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task));
	list_add(&tsk_rt(task)->list, &rt->release_queue);
	task->rt_param.domain = rt;
	do_without_rqlock(&rt->arm_timers);
}
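
/*
 * Usage sketch (illustrative only, kept under #if 0 so it is never compiled):
 * a minimal example of how a scheduler plugin might set up an rt_domain_t and
 * feed jobs into it. my_domain, my_check_resched() and my_plugin_job_arrival()
 * are hypothetical names; edf_ready_order() is assumed to be provided by the
 * EDF helpers.
 */
#if 0
static rt_domain_t my_domain;

static int my_check_resched(rt_domain_t *rt)
{
	/* a real plugin would trigger a reschedule (e.g. via an IPI) if the
	 * newly ready job preempts the currently scheduled one */
	return 0;
}

static void my_plugin_init(void)
{
	/* passing NULL for release_job selects default_release_job() above */
	rt_domain_init(&my_domain, edf_ready_order, my_check_resched, NULL);
}

static void my_plugin_job_arrival(struct task_struct *t)
{
	if (is_released(t, litmus_clock()))
		add_ready(&my_domain, t);	/* already runnable */
	else
		add_release(&my_domain, t);	/* arm a release timer */
}
#endif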