/*
 * kernel/edf_common.c
 *
 * Common functions for EDF-based schedulers.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/sched_trace.h>
#ifdef CONFIG_LITMUS_NESTED_LOCKING
#include <litmus/locking.h>
#endif
#include <litmus/edf_common.h>

/* edf_higher_prio -  returns true if first has a higher EDF priority
 *                    than second. Deadline ties are broken first by
 *                    period, then by PID, and finally by inheritance
 *                    status.
 *
 * both first and second may be NULL
 */
#ifdef CONFIG_LITMUS_NESTED_LOCKING
int __edf_higher_prio(
	struct task_struct* first, comparison_mode_t first_mode,
	struct task_struct* second, comparison_mode_t second_mode)
#else
int edf_higher_prio(struct task_struct* first, struct task_struct* second)
#endif
{
	struct task_struct *first_task = first;
	struct task_struct *second_task = second;

	/* There is no point in comparing a task to itself. */
	if (first && first == second) {
		TRACE_CUR("WARNING: pointless edf priority comparison: %s/%d\n",
			  first->comm, first->pid);
		WARN_ON(1);
		return 0;
	}

	/* check for NULL tasks */
	if (!first || !second) {
		return first && !second;
	}

#ifdef CONFIG_LITMUS_LOCKING
	/* Check for EFFECTIVE priorities: if a task has inherited a
	 * priority, use the donor task for the comparison instead.
	 */
	if (unlikely(first->rt_param.inh_task)
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	    && (first_mode == EFFECTIVE)
#endif
	   ) {
		first_task = first->rt_param.inh_task;
	}
	if (unlikely(second->rt_param.inh_task)
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	    && (second_mode == EFFECTIVE)
#endif
	   ) {
		second_task = second->rt_param.inh_task;
	}

	/* Check for priority boosting. Tie-break by start of boosting. */
	if (unlikely(is_priority_boosted(first_task))) {
		/* first_task is boosted, how about second_task? */
		if (!is_priority_boosted(second_task) ||
		    lt_before(get_boost_start(first_task),
			      get_boost_start(second_task))) {
			return 1;
		} else {
			return 0;
		}
	} else if (unlikely(is_priority_boosted(second_task))) {
		/* second_task is boosted, first is not */
		return 0;
	}
#endif

	/* Standard EDF: earlier deadline wins; ties are broken by period,
	 * then by PID, then by inheritance status.
	 */
	if (!is_realtime(second_task)) {
		return true;
	}

	if (earlier_deadline(first_task, second_task)) {
		return true;
	}

	if (get_deadline(first_task) == get_deadline(second_task)) {
		if (shorter_period(first_task, second_task)) {
			return true;
		}
		if (get_rt_period(first_task) == get_rt_period(second_task)) {
			if (first_task->pid < second_task->pid) {
				return true;
			}
			if (first_task->pid == second_task->pid) {
#ifdef CONFIG_LITMUS_SOFTIRQD
				/* Proxy threads lose to non-proxy tasks
				 * of otherwise equal priority. */
				if (first_task->rt_param.is_proxy_thread <
				    second_task->rt_param.is_proxy_thread) {
					return true;
				}
				if (first_task->rt_param.is_proxy_thread ==
				    second_task->rt_param.is_proxy_thread) {
					return !second->rt_param.inh_task;
				}
#else
				/* If the PIDs are equal, the task with the
				 * inherited priority wins; note that second,
				 * not second_task, is inspected here. */
				return !second->rt_param.inh_task;
#endif
			}
		}
	}

	return false;
}
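
/* Illustrative use (a sketch; ta and tb stand for two runnable real-time
 * tasks and are not defined in this file):
 *
 *	edf_higher_prio(ta, tb)
 *
 * is nonzero iff ta should be scheduled ahead of tb. With
 * CONFIG_LITMUS_NESTED_LOCKING, inheritance can be ignored by comparing
 * BASE priorities instead:
 *
 *	__edf_higher_prio(ta, BASE, tb, BASE)
 */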

#ifdef CONFIG_LITMUS_NESTED_LOCKING
int edf_higher_prio(struct task_struct* first, struct task_struct* second)
{
	return __edf_higher_prio(first, EFFECTIVE, second, EFFECTIVE);
}

int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b)
{
	struct nested_info *l_a = binheap_entry(a, struct nested_info,
						hp_binheap_node);
	struct nested_info *l_b = binheap_entry(b, struct nested_info,
						hp_binheap_node);

	return __edf_higher_prio(l_a->hp_waiter_eff_prio, EFFECTIVE,
				 l_b->hp_waiter_eff_prio, EFFECTIVE);
}

int edf_min_heap_order(struct binheap_node *a, struct binheap_node *b)
{
	/* swap the arguments to turn the max-order into a min-order */
	return edf_max_heap_order(b, a);
}

int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
{
	struct nested_info *l_a = binheap_entry(a, struct nested_info,
						hp_binheap_node);
	struct nested_info *l_b = binheap_entry(b, struct nested_info,
						hp_binheap_node);

	return __edf_higher_prio(l_a->hp_waiter_eff_prio, BASE,
				 l_b->hp_waiter_eff_prio, BASE);
}

int edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
{
	/* swap the arguments to turn the max-order into a min-order */
	return edf_max_heap_base_priority_order(b, a);
}
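
/* These comparators are meant to be plugged into the binheap API. For
 * example, a heap of blocked tasks ordered by base priority might be set
 * up as follows (a sketch, assuming binheap.h provides a
 * struct binheap_handle and an INIT_BINHEAP_HANDLE() initializer):
 *
 *	struct binheap_handle hp_blocked_tasks;
 *	INIT_BINHEAP_HANDLE(&hp_blocked_tasks,
 *			    edf_max_heap_base_priority_order);
 */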
#endif

int edf_ready_order(struct bheap_node* a, struct bheap_node* b)
{
	return edf_higher_prio(bheap2task(a), bheap2task(b));
}

void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
		     release_jobs_t release)
{
	rt_domain_init(rt, edf_ready_order, resched, release);
}
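
/* For example, a partitioned-EDF-style plugin might set up one domain per
 * CPU during plugin initialization (a sketch; my_domain and
 * my_check_resched are hypothetical):
 *
 *	static rt_domain_t my_domain;
 *	edf_domain_init(&my_domain, my_check_resched, NULL);
 *
 * Passing NULL for release falls back to the domain's default release
 * handler.
 */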

/* edf_preemption_needed - check whether the task t needs to be preempted
 *                         call only with irqs disabled and with the
 *                         ready_lock acquired
 *                         THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS
 *                         INTO ACCOUNT!
 */
int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t)
{
	/* we need the read lock for edf_ready_queue */
	/* no need to preempt if there is nothing pending */
	if (!__jobs_pending(rt))
		return 0;

	/* we need to reschedule if t doesn't exist */
	if (!t)
		return 1;

	/* NOTE: We cannot check for non-preemptibility since we
	 *       don't know what address space we're currently in.
	 */

	/* make sure to get non-rt stuff out of the way */
	return !is_realtime(t) || edf_higher_prio(__next_ready(rt), t);
}
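
/* Typical call pattern (a sketch; "scheduled" names whatever task the
 * plugin currently has running, and the reschedule request itself is
 * plugin-specific):
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 *	if (edf_preemption_needed(rt, scheduled))
 *		... request a reschedule (plugin-specific) ...
 *	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 */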