aboutsummaryrefslogblamecommitdiffstats
path: root/litmus/sched_edzl.c
blob: 0664b78e540b0b3648fa6775815f978ff3248760 (plain) (tree)






































































































































































































































































































































                                                                                                                          
/*
 * litmus/sched_edzl.c
 *
 * Implementation of the EDZL scheduling algorithm.
 *
 * This version uses the simple approach and serializes all scheduling
 * decisions by the use of a queue lock. This is probably not the
 * best way to do it, but it should suffice for now.
 */

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_global_plugin.h>
#include <litmus/edzl_common.h>
#include <litmus/sched_trace.h>

#include <litmus/preempt.h>

#include <litmus/bheap.h>

#include <linux/module.h>

static struct task_struct* __edzl_take_ready(rt_domain_t* rt);
static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new);
static void edzl_job_arrival(struct task_struct* task);
static void edzl_task_new(struct task_struct * t, int on_rq, int running);
static void edzl_task_wake_up(struct task_struct *task);
static void edzl_task_exit(struct task_struct * t);
static int edzl_preemption_needed(struct task_struct *t);


/* EDZL Plugin object */
static struct sched_global_plugin edzl_plugin __cacheline_aligned_in_smp = {
    .plugin = {
        .finish_switch	= gblv_finish_switch,
        .tick			= gblv_tick,
        .complete_job	= complete_job,
        .schedule		= gblv_schedule,
        .task_block		= gblv_task_block,
        .admit_task		= gblv_admit_task,
        .activate_plugin	= gbl_activate_plugin,
        
        .plugin_name	= "EDZL",        
        .task_new		= edzl_task_new,
        .task_wake_up	= edzl_task_wake_up,
        .task_exit		= edzl_task_exit,
    },
    
    .job_completion = gbl_job_completion,
    
    .prio_order = edzl_higher_prio,
    .take_ready = __edzl_take_ready,
    .add_ready = __edzl_add_ready,
    .job_arrival = edzl_job_arrival,
    .preemption_needed = edzl_preemption_needed
};


#define active_gbl_domain (active_gbl_plugin->domain)
#define active_gbl_domain_lock (active_gbl_domain.ready_lock)

DEFINE_PER_CPU(cpu_entry_t, edzl_cpu_entries);


static enum hrtimer_restart on_zero_laxity(struct hrtimer *timer)
{
	unsigned long flags;
	struct task_struct* t;
    
	lt_t now = litmus_clock();
    
	TRACE("Zero-laxity timer went off!\n");
    
	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
    
	t = container_of(container_of(timer, struct rt_param, zl_timer),
                     struct task_struct,
                     rt_param);
    
	TRACE_TASK(t, "Reached zero-laxity. (now: %llu, zl-pt: %lld, time remaining (now): %lld)\n",
               now,
               get_deadline(t) - budget_remaining(t),
               get_deadline(t) - now);
    
	set_zerolaxity(t);
	gbl_update_queue_position(t);
    
	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
    
	return HRTIMER_NORESTART;
}

/* __edzl_take_ready - call's __take_ready with EDZL timer cancelation side-effect. */
static struct task_struct* __edzl_take_ready(rt_domain_t* rt)
{
	struct task_struct* t = __take_ready(rt);
    
	if(t)
	{
		if(get_zerolaxity(t) == 0)
		{
            if(hrtimer_active(&tsk_rt(t)->zl_timer))
			{
				int cancel_ret;
                
				TRACE_TASK(t, "Canceling zero-laxity timer.\n");
				cancel_ret = hrtimer_try_to_cancel(&tsk_rt(t)->zl_timer);
				WARN_ON(cancel_ret == 0); /* should never be inactive. */
			}
		}
		else
		{
			TRACE_TASK(t, "Task already has zero-laxity flagged.\n");
		}
	}
    
	return t;
}

/* __edzl_add_ready - call's __add_ready with EDZL setting timer side-effect. */
static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new)
{
	__add_ready(rt, new);
    
	if(get_zerolaxity(new) == 0)
	{
		lt_t when_to_fire;
        
		when_to_fire = get_deadline(new) - budget_remaining(new);
        
		TRACE_TASK(new, "Setting zero-laxity timer for %llu. (deadline: %llu, remaining: %llu)\n",
                   when_to_fire,
                   get_deadline(new),
                   budget_remaining(new));
        
		__hrtimer_start_range_ns(&tsk_rt(new)->zl_timer,
                                 ns_to_ktime(when_to_fire),
                                 0,
                                 HRTIMER_MODE_ABS_PINNED,
                                 0);
	}
	else
	{
		TRACE_TASK(new, "Already has zero-laxity when added to ready queue. (deadline: %llu, remaining: %llu))\n",
                   get_deadline(new),
                   budget_remaining(new));
	}
}



/* edzl_job_arrival: task is either resumed or released */
static void edzl_job_arrival(struct task_struct* task)
{
	BUG_ON(!task);
    
	/* clear old laxity flag or tag zero-laxity upon release */
	if(laxity_remaining(task))
		clear_zerolaxity(task);
	else
		set_zerolaxity(task);
    
	gbl_requeue(task);
	gbl_check_for_preemptions();
}


/*	Prepare a task for running in RT mode
 */
static void edzl_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long 		flags;
	cpu_entry_t* 		entry;
    
	TRACE("edzl: task new %d\n", t->pid);
    
	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
    
	hrtimer_init(&t->rt_param.zl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	t->rt_param.zl_timer.function = on_zero_laxity;
    
	/* setup job params */
	release_at(t, litmus_clock());
    
	if (running) {
        entry = active_gbl_plugin->cpus[task_cpu(t)];
		BUG_ON(entry->scheduled);
        
#ifdef CONFIG_RELEASE_MASTER
		if (entry->cpu != active_gbl_domain.release_master) {
#endif
			entry->scheduled = t;
			tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			/* do not schedule on release master */
			gbl_preempt(entry); /* force resched */
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
#endif
	} else {
		t->rt_param.scheduled_on = NO_CPU;
	}
	t->rt_param.linked_on          = NO_CPU;
    
	active_gbl_plugin->job_arrival(t);
	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
}


static void edzl_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	lt_t now;
    
	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
    
	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
		set_rt_flags(task, RT_F_RUNNING);
	} else {
		now = litmus_clock();
		if (is_tardy(task, now)) {
			/* new sporadic release */
			release_at(task, now);
			sched_trace_task_release(task);
		}
		else {
			if (task->rt.time_slice) {
				/* came back in time before deadline
                 */
				set_rt_flags(task, RT_F_RUNNING);
			}
		}
	}
	active_gbl_plugin->job_arrival(task);
	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
}


static void edzl_task_exit(struct task_struct * t)
{
	unsigned long flags;
    
	/* unlink if necessary */
	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
	gbl_unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		active_gbl_plugin->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
    
    if(hrtimer_active(&tsk_rt(t)->zl_timer))
	{
		/* BUG if reached? */
		TRACE_TASK(t, "Canceled armed timer while exiting.\n");
		hrtimer_cancel(&tsk_rt(t)->zl_timer);
	}
    
	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
    
	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "RIP\n");
}


/* need_to_preempt - check whether the task t needs to be preempted
 *                   call only with irqs disabled and with ready_lock acquired
 *                   THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
 */
static int edzl_preemption_needed(struct task_struct *t)
{
	/* we need the read lock for edf_ready_queue */
	/* no need to preempt if there is nothing pending */
	if (!__jobs_pending(&active_gbl_domain))
		return 0;
	/* we need to reschedule if t doesn't exist */
	if (!t)
		return 1;
	/* make sure to get non-rt stuff out of the way */
	if (!is_realtime(t))
		return 1;
    
	/* NOTE: We cannot check for non-preemptibility since we
	 *       don't know what address space we're currently in.
	 */
    
	/* Detect zero-laxity as needed.  Easier to do it here than in tick.
     (No timer is used to detect zero-laxity while a job is running.) */
	if(unlikely(!get_zerolaxity(t) && laxity_remaining(t) == 0))
	{
		set_zerolaxity(t);
	}
    
	return edzl_higher_prio(__next_ready(&active_gbl_domain), t);
}


static int __init init_edzl(void)
{
	int cpu;
	cpu_entry_t *entry;
    
	bheap_init(&edzl_plugin.cpu_heap);
	/* initialize CPU state */
	for (cpu = 0; cpu < NR_CPUS; cpu++)  {
		entry = &per_cpu(edzl_cpu_entries, cpu);
		edzl_plugin.cpus[cpu] = entry;
		entry->cpu 	 = cpu;
		entry->hn        = &edzl_plugin.heap_node[cpu];
		bheap_node_init(&entry->hn, entry);
	}
	gbl_domain_init(&edzl_plugin, NULL, gbl_release_jobs);
    
	return register_sched_plugin(&edzl_plugin.plugin);
}


module_init(init_edzl);