/*
 * litmus/sched_gsn_edf.c
 *
 * Implementation of the GSN-EDF scheduling algorithm.
 *
 * This version uses the simple approach and serializes all scheduling
 * decisions by the use of a queue lock. This is probably not the
 * best way to do it, but it should suffice for now.
 */

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>


#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>

#include <litmus/preempt.h>

#include <litmus/bheap.h>

#include <linux/module.h>

#ifdef CONFIG_SCHED_CPU_AFFINITY
#include <litmus/affinity.h>
#endif

#ifdef CONFIG_LITMUS_SOFTIRQD
#include <litmus/litmus_softirq.h>
#endif

#ifdef CONFIG_LITMUS_NVIDIA
#include <litmus/nvidia_info.h>
#endif


/* Overview of GSN-EDF operations.
 *
 * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This
 * description only covers how the individual operations are implemented in
 * LITMUS.
 *
 * link_task_to_cpu(T, cpu) 	- Low-level operation to update the linkage
 *                                structure (NOT the actually scheduled
 *                                task). If there is another linked task To
 *                                already it will set To->linked_on = NO_CPU
 *                                (thereby removing its association with this
 *                                CPU). However, it will not requeue the
 *                                previously linked task (if any). It will set
 *                                T's state to RT_F_RUNNING and check whether
 *                                it is already running somewhere else. If T
 *                                is scheduled somewhere else it will link
 *                                it to that CPU instead (and pull the linked
 *                                task to cpu). T may be NULL.
 *
 * unlink(T)			- Unlink removes T from all scheduler data
 *                                structures. If it is linked to some CPU it
 *                                will link NULL to that CPU. If it is
 *                                currently queued in the gsnedf queue it will
 *                                be removed from the rt_domain. It is safe to
 *                                call unlink(T) if T is not linked. T may not
 *                                be NULL.
 *
 * requeue(T)			- Requeue will insert T into the appropriate
 *                                queue. If T is already released, it will go
 *                                into the ready queue; if T's release time is
 *                                in the future, it will go into the release
 *                                queue. That means that T's release time/job
 *                                no/etc. has to be updated before requeue(T)
 *                                is called. It is not safe to call requeue(T)
 *                                when T is already queued. T may not be NULL.
 *
 * gsnedf_job_arrival(T)	- This is the catch all function when T enters
 *                                the system after either a suspension or at a
 *                                job release. It will queue T (which means it
 *                                is not safe to call gsnedf_job_arrival(T) if
 *                                T is already queued) and then check whether a
 *                                preemption is necessary. If a preemption is
 *                                necessary it will update the linkage
 *                                accordingly and cause schedule() to be called
 *                                (either with an IPI or need_resched). It is
 *                                safe to call gsnedf_job_arrival(T) if T's
 *                                next job has not been actually released yet
 *                                (release time in the future). T will be put
 *                                on the release queue in that case.
 *
 * job_completion(T)		- Take care of everything that needs to be done
 *                                to prepare T for its next release and place
 *                                it in the right queue with
 *                                gsnedf_job_arrival().
 *
 *
 * When we know that T is linked to a CPU, then link_task_to_cpu(NULL, CPU) is
 * equivalent to unlink(T). Note that if you unlink a task from a CPU, none of
 * the functions will automatically propagate a pending task from the ready
 * queue to a linked task. This is the job of the calling function (by means
 * of __take_ready()).
 */

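/* Example control flow for a job release (summarizing the code below): the
 * release timer fires and gsnedf_release_jobs() merges the newly released
 * jobs into the ready queue and calls check_for_preemptions(). As long as
 * the lowest-priority CPU (the top of the CPU heap) is idle or linked to a
 * job with a later deadline than the head of the ready queue, the head is
 * pulled via __take_ready(), linked with link_task_to_cpu(), and preempt()
 * triggers the reschedule (by IPI or local need_resched).
 */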

/* cpu_entry_t - maintain the linked and scheduled state
 */
typedef struct {
	int 			cpu;
	struct task_struct*	linked;		/* only RT tasks */
	struct task_struct*	scheduled;	/* only RT tasks */
	struct bheap_node*	hn;
} cpu_entry_t;
DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);

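/* table of all CPU entries, indexed by CPU id (filled in by init_gsn_edf()) */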
cpu_entry_t* gsnedf_cpus[NR_CPUS];

/* the cpus queue themselves according to priority in here */
static struct bheap_node gsnedf_heap_node[NR_CPUS];
static struct bheap      gsnedf_cpu_heap;

static rt_domain_t gsnedf;
#define gsnedf_lock (gsnedf.ready_lock)


/* Uncomment this if you want to see all scheduling decisions in the
 * TRACE() log.
#define WANT_ALL_SCHED_EVENTS
 */

static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
	cpu_entry_t *a, *b;
	a = _a->value;
	b = _b->value;
	/* Note that a and b are inverted: we want the lowest-priority CPU at
	 * the top of the heap.
	 */
	return edf_higher_prio(b->linked, a->linked);
}

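/* Illustration of the inverted order (hypothetical deadlines): NULL links
 * compare as lowest priority, so with CPU0->linked having deadline 10,
 * CPU1->linked deadline 25, and CPU2 idle, the heap top is CPU2, then CPU1,
 * then CPU0. lowest_prio_cpu() thus always yields the most attractive
 * preemption target.
 */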
/* update_cpu_position - Move the cpu entry to the correct place to maintain
 *                       order in the cpu queue. Caller must hold gsnedf lock.
 */
static void update_cpu_position(cpu_entry_t *entry)
{
	if (likely(bheap_node_in_heap(entry->hn)))
		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
	bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
}

/* caller must hold gsnedf lock */
static cpu_entry_t* lowest_prio_cpu(void)
{
	struct bheap_node* hn;
	hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap);
	return hn->value;
}


/* link_task_to_cpu - Update the link of a CPU.
 *                    Handles the case where the to-be-linked task is already
 *                    scheduled on a different CPU.
 */
static noinline void link_task_to_cpu(struct task_struct* linked,
				      cpu_entry_t *entry)
{
	cpu_entry_t *sched;
	struct task_struct* tmp;
	int on_cpu;

	BUG_ON(linked && !is_realtime(linked));

	/* Currently linked task is set to be unlinked. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
	}

	/* Link new task to CPU. */
	if (linked) {
		set_rt_flags(linked, RT_F_RUNNING);
		/* handle the case that the task is already scheduled somewhere! */
		on_cpu = linked->rt_param.scheduled_on;
		if (on_cpu != NO_CPU) {
			sched = &per_cpu(gsnedf_cpu_entries, on_cpu);
			/* this should only happen if not linked already */
			BUG_ON(sched->linked == linked);

			/* If we are already scheduled on the CPU to which we
			 * wanted to link, we don't need to do the swap --
			 * we just link ourselves to the CPU and depend on
			 * the caller to get things right.
			 */
			if (entry != sched) {
				TRACE_TASK(linked,
					   "already scheduled on %d, updating link.\n",
					   sched->cpu);
				tmp = sched->linked;
				linked->rt_param.linked_on = sched->cpu;
				sched->linked = linked;
				update_cpu_position(sched);
				linked = tmp;
			}
		}
		if (linked) /* might be NULL due to swap */
			linked->rt_param.linked_on = entry->cpu;
	}
	entry->linked = linked;
#ifdef WANT_ALL_SCHED_EVENTS
	if (linked)
		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
	else
		TRACE("NULL linked to %d.\n", entry->cpu);
#endif
	update_cpu_position(entry);
}

/* unlink - Make sure a task is not linked any longer to an entry
 *          where it was linked before. Must hold gsnedf_lock.
 */
static noinline void unlink(struct task_struct* t)
{
	cpu_entry_t *entry;

	if (t->rt_param.linked_on != NO_CPU) {
		/* unlink */
		entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on);
		t->rt_param.linked_on = NO_CPU;
		link_task_to_cpu(NULL, entry);
	} else if (is_queued(t)) {
		/* This is an interesting situation: t is scheduled,
		 * but was just recently unlinked.  It cannot be
		 * linked anywhere else (because then it would have
		 * been relinked to this CPU), thus it must be in some
		 * queue. We must remove it from the list in this
		 * case.
		 */
		remove(&gsnedf, t);
	}
}


/* preempt - force a CPU to reschedule
 */
static void preempt(cpu_entry_t *entry)
{
	preempt_if_preemptable(entry->scheduled, entry->cpu);
}

/* requeue - Put an unlinked task into the gsn-edf domain.
 *           Caller must hold gsnedf_lock.
 */
static noinline void requeue(struct task_struct* task)
{
	BUG_ON(!task);
	/* sanity check before insertion */
	BUG_ON(is_queued(task));

	if (is_released(task, litmus_clock()))
		__add_ready(&gsnedf, task);
	else {
		/* it has got to wait */
		add_release(&gsnedf, task);
	}
}

#ifdef CONFIG_SCHED_CPU_AFFINITY
static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start)
{
	cpu_entry_t* affinity;

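	/* note: get_nearest_available_cpu() is a macro (litmus/affinity.h)
	 * that stores its result in 'affinity' */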
	get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
#ifdef CONFIG_RELEASE_MASTER
					gsnedf.release_master
#else
					-1
#endif
					);

	return(affinity);
}
#endif

/* check for any necessary preemptions */
static void check_for_preemptions(void)
{
	struct task_struct *task;
	cpu_entry_t *last;

	for(last = lowest_prio_cpu();
	    edf_preemption_needed(&gsnedf, last->linked);
	    last = lowest_prio_cpu()) {
		/* preemption necessary */
		task = __take_ready(&gsnedf);

#ifdef CONFIG_SCHED_CPU_AFFINITY
		{
			cpu_entry_t* affinity = gsnedf_get_nearest_available_cpu(
							&per_cpu(gsnedf_cpu_entries, task_cpu(task)));
			if(affinity)
				last = affinity;
			else if(last->linked)
				requeue(last->linked);
		}
#else
		if (last->linked)
			requeue(last->linked);
#endif

		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
		      task->pid, last->cpu);

		link_task_to_cpu(task, last);
		preempt(last);
	}
}

/* gsnedf_job_arrival: task is either resumed or released */
static noinline void gsnedf_job_arrival(struct task_struct* task)
{
	BUG_ON(!task);
    
	requeue(task);
	check_for_preemptions();
}

static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&gsnedf_lock, flags);

	__merge_ready(rt, tasks);
	check_for_preemptions();

	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}

/* caller holds gsnedf_lock */
static noinline void job_completion(struct task_struct *t, int forced)
{
	BUG_ON(!t);
	
	sched_trace_task_completion(t, forced);

#ifdef CONFIG_LITMUS_NVIDIA
	atomic_set(&tsk_rt(t)->nv_int_count, 0);
#endif

	TRACE_TASK(t, "job_completion().\n");

	/* set flags */
	set_rt_flags(t, RT_F_SLEEP);
	/* prepare for next period */
	prepare_for_next_period(t);
	if (is_released(t, litmus_clock()))
		sched_trace_task_release(t);
	/* unlink */
	unlink(t);
	/* requeue
	 * But don't requeue a blocking task. */
	if (is_running(t))
		gsnedf_job_arrival(t);
}

/* gsnedf_tick - this function is called for every local timer interrupt.
 *
 *               It checks whether the current task has exhausted its budget
 *               and, if so, either triggers a preemption or, for a
 *               non-preemptable task, requests that it exit its
 *               non-preemptive section.
 */
static void gsnedf_tick(struct task_struct* t)
{
	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
		if (!is_np(t)) {
			/* np tasks will be preempted when they become
			 * preemptable again
			 */
			litmus_reschedule_local();
			TRACE("gsnedf_scheduler_tick: "
			      "%d is preemptable => FORCE_RESCHED\n", t->pid);
		} else if (is_user_np(t)) {
			TRACE("gsnedf_scheduler_tick: "
			      "%d is non-preemptable, "
			      "preemption delayed.\n", t->pid);
			request_exit_np(t);
		}
	}
}

/* Getting schedule() right is a bit tricky. schedule() may not make any
 * assumptions on the state of the current task since it may be called for a
 * number of reasons: because a scheduler_tick() determined it was necessary,
 * because sys_exit_np() was called, because some Linux subsystem determined
 * so, or even (in the worst case) because there is a bug hidden somewhere.
 * Thus, we must take extreme care to determine what the current state is.
 *
 * The CPU could currently be scheduling a task (or not) and be linked to a
 * task (or not).
 *
 * The following assertions for the scheduled task could hold:
 *
 *	- !is_running(scheduled)	// the job blocks
 *	- scheduled->timeslice == 0	// the job completed (forcefully)
 *	- get_rt_flag() == RT_F_SLEEP	// the job completed (by syscall)
 *	- linked != scheduled		// we need to reschedule (for any reason)
 *	- is_np(scheduled)		// rescheduling must be delayed,
 *					   sys_exit_np must be requested
 *
 * Any of these can occur together.
 */
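/* For example, a job that exhausts its budget while inside a non-preemptive
 * section yields exists && out_of_time && np: we must not switch yet, so the
 * code below only unlinks the job and requests sys_exit_np(); the actual
 * job_completion() is deferred until the task leaves its non-preemptive
 * section.
 */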
static struct task_struct* gsnedf_schedule(struct task_struct * prev)
{
	cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
	int out_of_time, sleep, preempt, np, exists, blocks;
	struct task_struct* next = NULL;

#ifdef CONFIG_RELEASE_MASTER
	/* Bail out early if we are the release master.
	 * The release master never schedules any real-time tasks.
	 */
	if (gsnedf.release_master == entry->cpu) {
		sched_state_task_picked();
		return NULL;
	}
#endif

	raw_spin_lock(&gsnedf_lock);

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev);
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	/* (0) Determine state */
	exists      = entry->scheduled != NULL;
	blocks      = exists && !is_running(entry->scheduled);
	out_of_time = exists &&
				  budget_enforced(entry->scheduled) &&
				  budget_exhausted(entry->scheduled);
	np 	    = exists && is_np(entry->scheduled);
	sleep	    = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
	preempt     = entry->scheduled != entry->linked;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "invoked gsnedf_schedule.\n");
#endif

	/*
	if (exists)
		TRACE_TASK(prev,
			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
			   "state:%d sig:%d\n",
			   blocks, out_of_time, np, sleep, preempt,
			   prev->state, signal_pending(prev));
	 */
	
	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
			   entry->linked->comm, entry->linked->pid);

	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		unlink(entry->scheduled);

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * We need to make sure to update the link structure anyway in case
	 * that we are still linked. Multiple calls to request_exit_np() don't
	 * hurt.
	 */
	if (np && (out_of_time || preempt || sleep)) {
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. Don't do a job completion if we block (we can't have timers
	 * running for blocked jobs). Preemptions go first for the same reason.
	 */
	if (!np && (out_of_time || sleep) && !blocks && !preempt)
		job_completion(entry->scheduled, !sleep);

	/* Link pending task if we became unlinked.
	 */
	if (!entry->linked)
		link_task_to_cpu(__take_ready(&gsnedf), entry);

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if ((!np || blocks) &&
	    entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
			TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	}
	else
	{
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;
	}

	sched_state_task_picked();

	raw_spin_unlock(&gsnedf_lock);

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE("gsnedf_lock released, next=0x%p\n", next);

	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());
#endif


	return next;
}


/* _finish_switch - we just finished the switch away from prev
 */
static void gsnedf_finish_switch(struct task_struct *prev)
{
	cpu_entry_t* 	entry = &__get_cpu_var(gsnedf_cpu_entries);
	
	entry->scheduled = is_realtime(current) ? current : NULL;
	
#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "switched away from\n");
#endif
}


/*	Prepare a task for running in RT mode
 */
static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long 		flags;
	cpu_entry_t* 		entry;

	TRACE("gsn edf: task new %d\n", t->pid);

	raw_spin_lock_irqsave(&gsnedf_lock, flags);

	/* setup job params */
	release_at(t, litmus_clock());

	if (running) {
		entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);

#ifdef CONFIG_RELEASE_MASTER
		if (entry->cpu != gsnedf.release_master) {
#endif
			entry->scheduled = t;
			tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			/* do not schedule on release master */
			preempt(entry); /* force resched */
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
#endif
	} else {
		t->rt_param.scheduled_on = NO_CPU;
	}
	t->rt_param.linked_on          = NO_CPU;

	gsnedf_job_arrival(t);
	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}

static void gsnedf_task_wake_up(struct task_struct *task)
{
	unsigned long flags;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

	raw_spin_lock_irqsave(&gsnedf_lock, flags);

#if 0 /* sporadic task model */
	lt_t now;
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
		set_rt_flags(task, RT_F_RUNNING);
	} else {
		now = litmus_clock();
		if (is_tardy(task, now)) {
			/* new sporadic release */
			release_at(task, now);
			sched_trace_task_release(task);
		}
		else {
			if (task->rt.time_slice) {
				/* came back in time before deadline
				*/
				set_rt_flags(task, RT_F_RUNNING);
			}
		}
	}
#else /* periodic task model */
	set_rt_flags(task, RT_F_RUNNING);
#endif

	gsnedf_job_arrival(task);
	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}

static void gsnedf_task_block(struct task_struct *t)
{
	// TODO: is this called on preemption??
	unsigned long flags;

	TRACE_TASK(t, "block at %llu\n", litmus_clock());

	/* unlink if necessary */
	raw_spin_lock_irqsave(&gsnedf_lock, flags);
	
	unlink(t);
	
	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

	BUG_ON(!is_realtime(t));
}


static void gsnedf_task_exit(struct task_struct * t)
{
	unsigned long flags;

	/* unlink if necessary */
	raw_spin_lock_irqsave(&gsnedf_lock, flags);
	unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "RIP\n");
}


static long gsnedf_admit_task(struct task_struct* tsk)
{
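	/* every task is admitted; schedulability analysis is left to user space */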
	return 0;
}

#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/fdso.h>


static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
{
	int linked_on;
	int check_preempt = 0;	
	
	if(prio_inh != NULL)
		TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
	else
		TRACE_TASK(t, "inherits priority from %p\n", prio_inh);
	
	sched_trace_eff_prio_change(t, prio_inh);
	
	tsk_rt(t)->inh_task = prio_inh;
	
	linked_on  = tsk_rt(t)->linked_on;
	
	/* If it is scheduled, then we need to reorder the CPU heap. */
	if (linked_on != NO_CPU) {
		TRACE_TASK(t, "%s: linked  on %d\n",
				   __FUNCTION__, linked_on);
		/* Holder is scheduled; need to re-order CPUs.
		 * We can't use heap_decrease() here since
		 * the cpu_heap is ordered in reverse direction, so
		 * it is actually an increase. */
		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
                     gsnedf_cpus[linked_on]->hn);
		bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
                     gsnedf_cpus[linked_on]->hn);
	} else {
		/* holder may be queued: first stop queue changes */
		raw_spin_lock(&gsnedf.release_lock);
		if (is_queued(t)) {
			TRACE_TASK(t, "%s: is queued\n", __FUNCTION__);

			/* We need to update the position of the holder in
			 * some heap. Note that this could be a release heap
			 * if budget enforcement is used and this job overran. */
			check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node);

		} else {
			/* Nothing to do: if it is not queued and not linked
			 * then it is either sleeping or currently being moved
			 * by other code (e.g., a timer interrupt handler) that
			 * will use the correct priority when enqueuing the
			 * task. */
			TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__);
		}
		raw_spin_unlock(&gsnedf.release_lock);

		/* If holder was enqueued in a release heap, then the following
		 * preemption check is pointless, but we can't easily detect
		 * that case. If you want to fix this, then consider that
		 * simply adding a state flag requires O(n) time to update when
		 * releasing n tasks, which conflicts with the goal to have
		 * O(log n) merges. */
		if (check_preempt) {
			/* heap_decrease() hit the top level of the heap: make
			 * sure preemption checks get the right task, not the
			 * potentially stale cache. */
			bheap_uncache_min(edf_ready_order, &gsnedf.ready_queue);
			check_for_preemptions();
		}
	}
}

/* called with IRQs off */
static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
{
	raw_spin_lock(&gsnedf_lock);

	__set_priority_inheritance(t, prio_inh);

#ifdef CONFIG_LITMUS_SOFTIRQD
	if(tsk_rt(t)->cur_klitirqd != NULL)
	{
		TRACE_TASK(t, "%s/%d inherits a new priority!\n",
				tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);

		__set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
	}
#endif
	
	raw_spin_unlock(&gsnedf_lock);
}


/* called with IRQs off */
static void __clear_priority_inheritance(struct task_struct* t)
{
	TRACE_TASK(t, "priority restored\n");

	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		sched_trace_eff_prio_change(t, NULL);

		tsk_rt(t)->inh_task = NULL;

		/* Check if rescheduling is necessary. We can't use
		 * heap_decrease() since the priority was effectively
		 * lowered. */
		unlink(t);
		gsnedf_job_arrival(t);
	} else {
		__set_priority_inheritance(t, NULL);
	}

#ifdef CONFIG_LITMUS_SOFTIRQD
	if(tsk_rt(t)->cur_klitirqd != NULL)
	{
		TRACE_TASK(t, "%s/%d inheritance set back to owner.\n",
				tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);

		if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU)
		{
			sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t);
			
			tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t;
			
			/* Check if rescheduling is necessary. We can't use heap_decrease()
			 * since the priority was effectively lowered. */
			unlink(tsk_rt(t)->cur_klitirqd);
			gsnedf_job_arrival(tsk_rt(t)->cur_klitirqd);
		}
		else
		{
			__set_priority_inheritance(tsk_rt(t)->cur_klitirqd, t);
		}
	}
#endif
}

/* called with IRQs off */
static void clear_priority_inheritance(struct task_struct* t)
{
	raw_spin_lock(&gsnedf_lock);
	__clear_priority_inheritance(t);
	raw_spin_unlock(&gsnedf_lock);
}

#ifdef CONFIG_LITMUS_SOFTIRQD
/* called with IRQs off */
static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd,
											  struct task_struct* old_owner,
											  struct task_struct* new_owner)
{
	BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
	
	raw_spin_lock(&gsnedf_lock);
	
	if(old_owner != new_owner)
	{
		if(old_owner)
		{
			// unreachable?
			tsk_rt(old_owner)->cur_klitirqd = NULL;
		}
	
		TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n",
				   new_owner->comm, new_owner->pid);

		tsk_rt(new_owner)->cur_klitirqd = klitirqd;
	}
	
	__set_priority_inheritance(klitirqd,
			(tsk_rt(new_owner)->inh_task == NULL) ?
				new_owner :
				tsk_rt(new_owner)->inh_task);
	
	raw_spin_unlock(&gsnedf_lock);
}

/* called with IRQs off */
static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd,
												struct task_struct* old_owner)
{
	BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
	
	raw_spin_lock(&gsnedf_lock);

	TRACE_TASK(klitirqd, "priority restored\n");

	if (tsk_rt(klitirqd)->scheduled_on != NO_CPU) {
		tsk_rt(klitirqd)->inh_task = NULL;

		/* Check if rescheduling is necessary. We can't use
		 * heap_decrease() since the priority was effectively
		 * lowered. */
		unlink(klitirqd);
		gsnedf_job_arrival(klitirqd);
	} else {
		__set_priority_inheritance(klitirqd, NULL);
	}

	tsk_rt(old_owner)->cur_klitirqd = NULL;
	
	raw_spin_unlock(&gsnedf_lock);
}
#endif


/* ******************** FMLP support ********************** */

/* struct for semaphore with priority inheritance */
struct fmlp_semaphore {
	struct litmus_lock litmus_lock;

	/* current resource holder */
	struct task_struct *owner;

	/* highest-priority waiter */
	struct task_struct *hp_waiter;

	/* FIFO queue of waiting tasks */
	wait_queue_head_t wait;
};

static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct fmlp_semaphore, litmus_lock);
}

/* caller is responsible for locking */
struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem,
				   struct task_struct* skip)
{
	struct list_head	*pos;
	struct task_struct 	*queued, *found = NULL;

	list_for_each(pos, &sem->wait.task_list) {
		queued  = (struct task_struct*) list_entry(pos, wait_queue_t,
							   task_list)->private;

		/* Compare task prios, find high prio task. */
		if (queued != skip && edf_higher_prio(queued, found))
			found = queued;
	}
	return found;
}

int gsnedf_fmlp_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	wait_queue_t wait;
	unsigned long flags;

	if (!is_realtime(t))
		return -EPERM;

	spin_lock_irqsave(&sem->wait.lock, flags);

	if (sem->owner) {
		/* resource is not free => must suspend and wait */

		init_waitqueue_entry(&wait, t);

		/* FIXME: interruptible would be nice some day */
		set_task_state(t, TASK_UNINTERRUPTIBLE);

		__add_wait_queue_tail_exclusive(&sem->wait, &wait);

		/* check if we need to activate priority inheritance */
		if (edf_higher_prio(t, sem->hp_waiter)) {
			sem->hp_waiter = t;
			if (edf_higher_prio(t, sem->owner))
				set_priority_inheritance(sem->owner, sem->hp_waiter);
		}

		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		/* We depend on the FIFO order.  Thus, we don't need to recheck
		 * when we wake up; we are guaranteed to have the lock since
		 * there is only one wake up per release.
		 */

		schedule();

		/* Since we hold the lock, no other task will change
		 * ->owner. We can thus check it without acquiring the spin
		 * lock. */
		BUG_ON(sem->owner != t);
	} else {
		/* it's ours now */
		sem->owner = t;

		spin_unlock_irqrestore(&sem->wait.lock, flags);
	}

	return 0;
}

int gsnedf_fmlp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current, *next;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&sem->wait.lock, flags);

	if (sem->owner != t) {
		err = -EINVAL;
		goto out;
	}

	/* check if there are jobs waiting for this resource */
	next = __waitqueue_remove_first(&sem->wait);
	if (next) {
		/* next becomes the resource holder */
		sem->owner = next;
		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);

		/* determine new hp_waiter if necessary */
		if (next == sem->hp_waiter) {
			TRACE_TASK(next, "was highest-prio waiter\n");
			/* next has the highest priority --- it doesn't need to
			 * inherit.  However, we need to make sure that the
			 * next-highest priority in the queue is reflected in
			 * hp_waiter. */
			sem->hp_waiter = find_hp_waiter(sem, next);
			if (sem->hp_waiter)
				TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
			else
				TRACE("no further waiters\n");
		} else {
			/* Well, if next is not the highest-priority waiter,
			 * then it ought to inherit the highest-priority
			 * waiter's priority. */
			set_priority_inheritance(next, sem->hp_waiter);
		}

		/* wake up next */
		wake_up_process(next);
	} else
		/* becomes available */
		sem->owner = NULL;

	/* we lose the benefit of priority inheritance (if any) */
	if (tsk_rt(t)->inh_task)
		clear_priority_inheritance(t);

out:
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	return err;
}

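/* close() is invoked from the fdso layer when the semaphore's file
 * descriptor is closed; if the departing task still owns the lock, drop it
 * on its behalf so the resource cannot be leaked.
 */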
int gsnedf_fmlp_close(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	unsigned long flags;

	int owner;

	spin_lock_irqsave(&sem->wait.lock, flags);

	owner = sem->owner == t;

	spin_unlock_irqrestore(&sem->wait.lock, flags);

	if (owner)
		gsnedf_fmlp_unlock(l);

	return 0;
}

void gsnedf_fmlp_free(struct litmus_lock* lock)
{
	kfree(fmlp_from_lock(lock));
}

static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
	.close  = gsnedf_fmlp_close,
	.lock   = gsnedf_fmlp_lock,
	.unlock = gsnedf_fmlp_unlock,
	.deallocate = gsnedf_fmlp_free,
};

static struct litmus_lock* gsnedf_new_fmlp(void)
{
	struct fmlp_semaphore* sem;

	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;

	sem->owner   = NULL;
	sem->hp_waiter = NULL;
	init_waitqueue_head(&sem->wait);
	sem->litmus_lock.ops = &gsnedf_fmlp_lock_ops;

	return &sem->litmus_lock;
}

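/* Rough usage sketch (in-kernel view; user space actually goes through the
 * fdso layer and gsnedf_allocate_lock() below):
 *
 *	struct litmus_lock *l = gsnedf_new_fmlp();
 *	if (l) {
 *		l->ops->lock(l);	// may suspend in gsnedf_fmlp_lock()
 *		// ... critical section ...
 *		l->ops->unlock(l);	// FIFO hand-off to the next waiter
 *	}
 */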






/* ******************** KFMLP support ********************** */

/* struct for semaphore with priority inheritance */
struct kfmlp_queue
{
	wait_queue_head_t wait;
	struct task_struct* owner;
	struct task_struct* hp_waiter;
	int count; /* number of waiters + holder */
};

struct kfmlp_semaphore
{
	struct litmus_lock litmus_lock;
	
	spinlock_t	lock;
	
	int num_resources; /* aka k */
	
	struct kfmlp_queue *queues; /* array */
	struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */
};

static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct kfmlp_semaphore, litmus_lock);
}

static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem,
								struct kfmlp_queue* queue)
{
	return (queue - &sem->queues[0]);
}

static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem,
												  struct task_struct* holder)
{
	int i;
	for(i = 0; i < sem->num_resources; ++i)
		if(sem->queues[i].owner == holder)
			return(&sem->queues[i]);
	return(NULL);
}

/* caller is responsible for locking */
static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue,
										 struct task_struct *skip)
{
	struct list_head	*pos;
	struct task_struct 	*queued, *found = NULL;
	
	list_for_each(pos, &kqueue->wait.task_list) {
		queued  = (struct task_struct*) list_entry(pos, wait_queue_t,
												   task_list)->private;
		
		/* Compare task prios, find high prio task. */
		if (queued != skip && edf_higher_prio(queued, found))
			found = queued;
	}
	return found;
}

static inline struct kfmlp_queue* kfmlp_find_shortest(
										struct kfmlp_semaphore* sem,
										struct kfmlp_queue* search_start)
{
	// we start our search at search_start instead of at the beginning of the
	// queue list to load-balance across all resources.
	struct kfmlp_queue* step = search_start;
	struct kfmlp_queue* shortest = sem->shortest_queue;
	
	do
	{
		step = (step+1 != &sem->queues[sem->num_resources]) ?
			step+1 : &sem->queues[0];

		if(step->count < shortest->count)
		{
			shortest = step;
			if(step->count == 0)
				break; /* can't get any shorter */
		}

	}while(step != search_start);
	
	return(shortest);
}
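/* Example (k = 3, hypothetical counts {2, 0, 1}, search starting at
 * queues[0]): the scan leaves queues[0], visits queues[1], and settles on
 * it, typically breaking out early since a count of zero cannot be beaten.
 * Starting at the caller's queue rather than always at queues[0] spreads
 * contenders across the replicas.
 */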

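/* Steal the highest-priority waiter in the whole semaphore: pick, among all
 * queues with count > 1, the one whose hp_waiter has the highest priority,
 * fix up the victim owner's inheritance, remove that waiter from the queue,
 * and return it (NULL if no queue has anyone to steal).
 */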
static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
{
	/* must hold sem->lock */
	
	struct kfmlp_queue *my_queue = NULL;
	struct task_struct *max_hp = NULL;

	
	struct list_head	*pos;
	struct task_struct 	*queued;
	int i;
	
	for(i = 0; i < sem->num_resources; ++i)
	{
		if( (sem->queues[i].count > 1) &&
			((my_queue == NULL) ||
			 (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) )
		{
			my_queue = &sem->queues[i];
		}
	}
	
	if(my_queue)
	{		
		max_hp = my_queue->hp_waiter;
		
		BUG_ON(!max_hp);
		
		TRACE_CUR("stealing %s/%d from queue %d\n",
				  max_hp->comm, max_hp->pid,
				  kfmlp_get_idx(sem, my_queue));
		
		my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp);
		
		/*
		if(my_queue->hp_waiter)
			TRACE_CUR("queue %d: new hp_waiter is %s/%d\n",
					  kfmlp_get_idx(sem, my_queue),
					  my_queue->hp_waiter->comm,
					  my_queue->hp_waiter->pid);
		else
			TRACE_CUR("queue %d: new hp_waiter is %p\n",
					  kfmlp_get_idx(sem, my_queue), NULL);
		 */
		
		raw_spin_lock(&gsnedf_lock);
		
		/*
		if(my_queue->owner)
			TRACE_CUR("queue %d: owner is %s/%d\n",
					  kfmlp_get_idx(sem, my_queue),
					  my_queue->owner->comm,
					  my_queue->owner->pid);
		else
			TRACE_CUR("queue %d: owner is %p\n",
					  kfmlp_get_idx(sem, my_queue),
					  NULL);
		 */
		
		if(tsk_rt(my_queue->owner)->inh_task == max_hp)
		{
			__clear_priority_inheritance(my_queue->owner);
			if(my_queue->hp_waiter != NULL)
			{
				__set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
			}
		}
		raw_spin_unlock(&gsnedf_lock);
		
		list_for_each(pos, &my_queue->wait.task_list)
		{
			queued  = (struct task_struct*) list_entry(pos, wait_queue_t,
													   task_list)->private;
			/* Compare task prios, find high prio task. */
			if (queued == max_hp)
			{
				/*
				TRACE_CUR("queue %d: found entry in wait queue.  REMOVING!\n",
						  kfmlp_get_idx(sem, my_queue));
				*/
				__remove_wait_queue(&my_queue->wait,
									list_entry(pos, wait_queue_t, task_list));
				break;
			}
		}
		--(my_queue->count);
	}
	
	return(max_hp);
}

int gsnedf_kfmlp_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	struct kfmlp_queue* my_queue;
	wait_queue_t wait;
	unsigned long flags;
	
	if (!is_realtime(t))
		return -EPERM;
	
	spin_lock_irqsave(&sem->lock, flags);
	
	my_queue = sem->shortest_queue;
	
	if (my_queue->owner) {
		/* resource is not free => must suspend and wait */
		TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n",
				  kfmlp_get_idx(sem, my_queue));
		
		init_waitqueue_entry(&wait, t);
		
		/* FIXME: interruptible would be nice some day */
		set_task_state(t, TASK_UNINTERRUPTIBLE);
		
		__add_wait_queue_tail_exclusive(&my_queue->wait, &wait);
		
		/* check if we need to activate priority inheritance */
		if (edf_higher_prio(t, my_queue->hp_waiter))
		{
			my_queue->hp_waiter = t;
			if (edf_higher_prio(t, my_queue->owner))
			{
				set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
			}
		}
		
		++(my_queue->count);
		sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
		
		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->lock, flags);
		
		/* We depend on the FIFO order.  Thus, we don't need to recheck
		 * when we wake up; we are guaranteed to have the lock since
		 * there is only one wake up per release (or steal).
		 */
		schedule();


		if(my_queue->owner == t)
		{
			TRACE_CUR("queue %d: acquired through waiting\n",
					  kfmlp_get_idx(sem, my_queue));
		}
		else
		{
			/* This case may happen if our wait entry was stolen
			   between queues.  Record which queue we ended up in. */
			my_queue = kfmlp_get_queue(sem, t);

			BUG_ON(!my_queue);
			TRACE_CUR("queue %d: acquired through stealing\n",
					  kfmlp_get_idx(sem, my_queue));
		}
	}
	else
	{
		TRACE_CUR("queue %d: acquired immediately\n",
				  kfmlp_get_idx(sem, my_queue));

		my_queue->owner = t;
		
		++(my_queue->count);
		sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);		
		
		spin_unlock_irqrestore(&sem->lock, flags);
	}
	
	return kfmlp_get_idx(sem, my_queue);
}

int gsnedf_kfmlp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current, *next;
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	struct kfmlp_queue *my_queue;
	unsigned long flags;
	int err = 0;
	
	spin_lock_irqsave(&sem->lock, flags);
	
	my_queue = kfmlp_get_queue(sem, t);
	
	if (!my_queue) {
		err = -EINVAL;
		goto out;
	}
	
	/* check if there are jobs waiting for this resource */
	next = __waitqueue_remove_first(&my_queue->wait);
	if (next) {
		/*
		TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
				  kfmlp_get_idx(sem, my_queue),
				  next->comm, next->pid);
		*/
		/* next becomes the resource holder */
		my_queue->owner = next;
		
		--(my_queue->count);
		// The '=' in '<=' is a crude attempt to build affinity
		// until tasks can tell us where they last ran...
		if(my_queue->count <= sem->shortest_queue->count)
		{
			sem->shortest_queue = my_queue;
		}	
		
		TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
				  kfmlp_get_idx(sem, my_queue), next->comm, next->pid);
		
		/* determine new hp_waiter if necessary */
		if (next == my_queue->hp_waiter) {
			TRACE_TASK(next, "was highest-prio waiter\n");
			/* next has the highest priority --- it doesn't need to
			 * inherit.  However, we need to make sure that the
			 * next-highest priority in the queue is reflected in
			 * hp_waiter. */
			my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next);
			if (my_queue->hp_waiter)
				TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue));
			else
				TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue));
		} else {
			/* Well, if next is not the highest-priority waiter,
			 * then it ought to inherit the highest-priority
			 * waiter's priority. */
			set_priority_inheritance(next, my_queue->hp_waiter);
		}
		
		/* wake up next */
		wake_up_process(next);
	}
	else
	{
		TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue));
		
		next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */

		/*
		if(next)
			TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n",
					  kfmlp_get_idx(sem, my_queue),
					  next->comm, next->pid);
		*/
		
		my_queue->owner = next;
		
		if(next)
		{
			TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n",
					  kfmlp_get_idx(sem, my_queue),
					  next->comm, next->pid);
			
			/* wake up next */
			wake_up_process(next);			
		}
		else
		{
			TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue));
			
			--(my_queue->count);
			// The '=' in '<=' is a crude attempt to build affinity
			// until tasks can tell us where they last ran...
			if(my_queue->count <= sem->shortest_queue->count)
			{
				sem->shortest_queue = my_queue;
			}
		}
	}
	
	/* we lose the benefit of priority inheritance (if any) */
	if (tsk_rt(t)->inh_task)
		clear_priority_inheritance(t);
	
out:
	spin_unlock_irqrestore(&sem->lock, flags);
	
	return err;
}

int gsnedf_kfmlp_close(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	struct kfmlp_queue *my_queue;
	unsigned long flags;
	
	int owner;
	
	spin_lock_irqsave(&sem->lock, flags);
	
	my_queue = kfmlp_get_queue(sem, t);	
	owner = (my_queue) ? (my_queue->owner == t) : 0;

	spin_unlock_irqrestore(&sem->lock, flags);
	
	if (owner)
		gsnedf_kfmlp_unlock(l);
	
	return 0;
}

void gsnedf_kfmlp_free(struct litmus_lock* l)
{
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	kfree(sem->queues);
	kfree(sem);
}

static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = {
	.close  = gsnedf_kfmlp_close,
	.lock   = gsnedf_kfmlp_lock,
	.unlock = gsnedf_kfmlp_unlock,
	.deallocate = gsnedf_kfmlp_free,
};

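/* Allocate a k-FMLP semaphore. 'arg' is a user-space pointer to an int
 * giving the number of replicas (k >= 1); on failure *ret_code is set to a
 * negative errno and NULL is returned.
 */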
static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg, int* ret_code)
{
	struct kfmlp_semaphore* sem;
	int num_resources = 0;
	int i;
	
	if(!access_ok(VERIFY_READ, arg, sizeof(num_resources)))
	{
		*ret_code = -EINVAL;
		return(NULL);
	}
	if(__copy_from_user(&num_resources, arg, sizeof(num_resources)))
	{
		*ret_code = -EINVAL;
		return(NULL);
	}
	if(num_resources < 1)
	{
		*ret_code = -EINVAL;
		return(NULL);		
	}
	
	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if(!sem)
	{
		*ret_code = -ENOMEM;
		return NULL;
	}
	
	sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL);
	if(!sem->queues)
	{
		kfree(sem);
		*ret_code = -ENOMEM;
		return NULL;		
	}
	
	sem->litmus_lock.ops = &gsnedf_kfmlp_lock_ops;
	spin_lock_init(&sem->lock);
	sem->num_resources = num_resources;
	
	for(i = 0; i < num_resources; ++i)
	{
		sem->queues[i].owner = NULL;
		sem->queues[i].hp_waiter = NULL;
		init_waitqueue_head(&sem->queues[i].wait);
		sem->queues[i].count = 0;
	}
	
	sem->shortest_queue = &sem->queues[0];
	
	*ret_code = 0;
	return &sem->litmus_lock;
}





/* **** lock constructor **** */


static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
				 void* __user arg)
{
	int err = -ENXIO;

	/* GSN-EDF currently supports the FMLP and KFMLP for global resources. */
	switch (type) {

	case FMLP_SEM:
		/* Flexible Multiprocessor Locking Protocol */
		*lock = gsnedf_new_fmlp();
		if (*lock)
			err = 0;
		else
			err = -ENOMEM;
		break;
			
	case KFMLP_SEM:
		*lock = gsnedf_new_kfmlp(arg, &err);
		break;
	}

	return err;
}

#endif

static long gsnedf_activate_plugin(void)
{
	int cpu;
	cpu_entry_t *entry;

	bheap_init(&gsnedf_cpu_heap);
#ifdef CONFIG_RELEASE_MASTER
	gsnedf.release_master = atomic_read(&release_master_cpu);
#endif

	for_each_online_cpu(cpu) {
		entry = &per_cpu(gsnedf_cpu_entries, cpu);
		bheap_node_init(&entry->hn, entry);
		entry->linked    = NULL;
		entry->scheduled = NULL;
#ifdef CONFIG_RELEASE_MASTER
		if (cpu != gsnedf.release_master) {
#endif
			TRACE("GSN-EDF: Initializing CPU #%d.\n", cpu);
			update_cpu_position(entry);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			TRACE("GSN-EDF: CPU %d is release master.\n", cpu);
		}
#endif
	}
    
#ifdef CONFIG_LITMUS_SOFTIRQD
	spawn_klitirqd(NULL);
#endif

#ifdef CONFIG_LITMUS_NVIDIA
	init_nvidia_info();
#endif
	
	return 0;
}

/*	Plugin object	*/
static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "GSN-EDF",
	.finish_switch		= gsnedf_finish_switch,
	.tick			= gsnedf_tick,
	.task_new		= gsnedf_task_new,
	.complete_job		= complete_job,
	.task_exit		= gsnedf_task_exit,
	.schedule		= gsnedf_schedule,
	.task_wake_up		= gsnedf_task_wake_up,
	.task_block		= gsnedf_task_block,
	.admit_task		= gsnedf_admit_task,
	.activate_plugin	= gsnedf_activate_plugin,
#ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock		= gsnedf_allocate_lock,
	.set_prio_inh		= set_priority_inheritance,
	.clear_prio_inh		= clear_priority_inheritance,
#endif
#ifdef CONFIG_LITMUS_SOFTIRQD
	.set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
	.clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
#endif
};


static int __init init_gsn_edf(void)
{
	int cpu;
	cpu_entry_t *entry;

	bheap_init(&gsnedf_cpu_heap);
	/* initialize CPU state */
	for (cpu = 0; cpu < NR_CPUS; cpu++)  {
		entry = &per_cpu(gsnedf_cpu_entries, cpu);
		gsnedf_cpus[cpu] = entry;
		entry->cpu 	 = cpu;
		entry->hn        = &gsnedf_heap_node[cpu];
		bheap_node_init(&entry->hn, entry);
	}
	edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
	return register_sched_plugin(&gsn_edf_plugin);
}


module_init(init_gsn_edf);