/*
 * litmus/sched_gsn_edf.c
 *
 * Implementation of the GSN-EDF scheduling algorithm.
 *
 * This version uses the simple approach and serializes all scheduling
 * decisions by the use of a queue lock. This is probably not the
 * best way to do it, but it should suffice for now.
 */

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>

#include <litmus/preempt.h>
#include <litmus/budget.h>

#include <litmus/bheap.h>
#include <litmus/binheap.h>
#include <litmus/trace.h>

#ifdef CONFIG_LITMUS_LOCKING
#include <litmus/kfmlp_lock.h>
#endif

#ifdef CONFIG_LITMUS_NESTED_LOCKING
#include <litmus/fifo_lock.h>
#include <litmus/ikglp_lock.h>
#endif

#ifdef CONFIG_SCHED_CPU_AFFINITY
#include <litmus/affinity.h>
#endif

#ifdef CONFIG_REALTIME_AUX_TASKS
#include <litmus/aux_tasks.h>
#endif

#ifdef CONFIG_LITMUS_SOFTIRQD
#include <litmus/litmus_softirq.h>
#endif

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
#include <linux/interrupt.h>
#endif

#ifdef CONFIG_LITMUS_NVIDIA
#include <litmus/nvidia_info.h>
#endif

#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
#include <litmus/gpu_affinity.h>
#endif

/* Overview of GSN-EDF operations.
 *
 * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This
 * description only covers how the individual operations are implemented in
 * LITMUS.
 *
 * link_task_to_cpu(T, cpu) 	- Low-level operation to update the linkage
 *                                structure (NOT the actually scheduled
 *                                task). If there is another linked task To
 *                                already, it will set To->linked_on = NO_CPU
 *                                (thereby removing its association with this
 *                                CPU). However, it will not requeue the
 *                                previously linked task (if any). It will set
 *                                T's state to not completed and check whether
 *                                it is already running somewhere else. If T
 *                                is scheduled somewhere else it will link
 *                                it to that CPU instead (and pull the linked
 *                                task to cpu). T may be NULL.
 *
 * unlink(T)			- Unlink removes T from all scheduler data
 *                                structures. If it is linked to some CPU it
 *                                will link NULL to that CPU. If it is
 *                                currently queued in the gsnedf queue it will
 *                                be removed from the rt_domain. It is safe to
 *                                call unlink(T) if T is not linked. T may not
 *                                be NULL.
 *
 * requeue(T)			- Requeue will insert T into the appropriate
 *                                queue. If the system is in real-time mode and
 *                                T has been released already, it will go into
 *                                the ready queue. If the system is not in
 *                                real-time mode, T will go into the release
 *                                queue. If T's release time is in the future,
 *                                it will also go into the release queue. That
 *                                means that T's release time/job no/etc. has
 *                                to be updated before requeue(T) is called. It
 *                                is not safe to call requeue(T) when T is
 *                                already queued. T may not be NULL.
 *
 * gsnedf_job_arrival(T)	- This is the catch all function when T enters
 *                                the system after either a suspension or at a
 *                                job release. It will queue T (which means it
 *                                is not safe to call gsnedf_job_arrival(T) if
 *                                T is already queued) and then check whether a
 *                                preemption is necessary. If a preemption is
 *                                necessary it will update the linkage
 *                                accordingly and cause scheduled to be called
 *                                (either with an IPI or need_resched). It is
 *                                safe to call gsnedf_job_arrival(T) if T's
 *                                next job has not been actually released yet
 *                                (release time in the future). T will be put
 *                                on the release queue in that case.
 *
 * job_completion(T)		- Take care of everything that needs to be done
 *                                to prepare T for its next release and place
 *                                it in the right queue with
 *                                gsnedf_job_arrival().
 *
 *
 * When we know that T is linked to CPU then link_task_to_cpu(NULL, CPU) is
 * equivalent to unlink(T). Note that if you unlink a task from a CPU none of
 * the functions will automatically propagate a pending task from the ready
 * queue to a linked task. This is the job of the calling function (by means
 * of __take_ready).
 */

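/* Example control flow (informal sketch): when jobs are released,
 * gsnedf_release_jobs() merges them into the ready queue and calls
 * check_for_preemptions(), which repeatedly takes the highest-priority
 * ready job, links it to the lowest-priority CPU via link_task_to_cpu(),
 * and kicks that CPU with preempt().
 */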

/* cpu_entry_t - maintain the linked and scheduled state
 */
typedef struct {
	int 			cpu;
	struct task_struct*	linked;		/* only RT tasks */
	struct task_struct*	scheduled;	/* only RT tasks */
	struct binheap_node hn;
} cpu_entry_t;
DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);

cpu_entry_t* gsnedf_cpus[NR_CPUS];

/* the cpus queue themselves according to priority in here */
static struct binheap gsnedf_cpu_heap;

static rt_domain_t gsnedf;
#define gsnedf_lock (gsnedf.ready_lock)

#ifdef CONFIG_LITMUS_DGL_SUPPORT
static raw_spinlock_t dgl_lock;

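/* All dynamic group locks in this plugin share one global spinlock,
 * independent of the requesting task.
 */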
static raw_spinlock_t* gsnedf_get_dgl_spinlock(struct task_struct *t)
{
	return(&dgl_lock);
}
#endif

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
struct tasklet_head gsnedf_pending_tasklets;
#endif


/* Uncomment this if you want to see all scheduling decisions in the
 * TRACE() log.
#define WANT_ALL_SCHED_EVENTS
 */

static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b)
{
	cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn);
	cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn);

	/* Note that a and b are inverted: we want the lowest-priority CPU at
	 * the top of the heap.
	 */
	return edf_higher_prio(b->linked, a->linked);
}


/* update_cpu_position - Move the cpu entry to the correct place to maintain
 *                       order in the cpu queue. Caller must hold gsnedf lock.
 */
static void update_cpu_position(cpu_entry_t *entry)
{
	if (likely(binheap_is_in_heap(&entry->hn))) {
		binheap_delete(&entry->hn, &gsnedf_cpu_heap);
	}
	binheap_add(&entry->hn, &gsnedf_cpu_heap, cpu_entry_t, hn);
}

/* caller must hold gsnedf lock */
static cpu_entry_t* lowest_prio_cpu(void)
{
	return binheap_top_entry(&gsnedf_cpu_heap, cpu_entry_t, hn);
}


/* link_task_to_cpu - Update the link of a CPU.
 *                    Handles the case where the to-be-linked task is already
 *                    scheduled on a different CPU.
 */
static noinline void link_task_to_cpu(struct task_struct* linked,
				      cpu_entry_t *entry)
{
	cpu_entry_t *sched;
	struct task_struct* tmp;
	int on_cpu;

	//int print = (linked != NULL || entry->linked != NULL);

	BUG_ON(linked && !is_realtime(linked));

	/*
	if (print) {
		TRACE_CUR("linked = %s/%d\n", (linked) ? linked->comm : "(null)", (linked)? linked->pid : 0);
		TRACE_CUR("entry->linked = %s/%d\n", (entry->linked) ? entry->linked->comm : "(null)", (entry->linked)? entry->linked->pid : 0);
	}
	*/

	/* Currently linked task is set to be unlinked. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
	}

	/* Link new task to CPU. */
	if (linked) {
		tsk_rt(linked)->completed = 0;
		/* handle task is already scheduled somewhere! */
		on_cpu = linked->rt_param.scheduled_on;
		if (on_cpu != NO_CPU) {
			sched = &per_cpu(gsnedf_cpu_entries, on_cpu);
			/* this should only happen if not linked already */
			BUG_ON(sched->linked == linked);

			/* If we are already scheduled on the CPU to which we
			 * wanted to link, we don't need to do the swap --
			 * we just link ourselves to the CPU and depend on
			 * the caller to get things right.
			 */
			if (entry != sched) {
				TRACE_TASK(linked,
					   "already scheduled on %d, updating link.\n",
					   sched->cpu);
				tmp = sched->linked;
				linked->rt_param.linked_on = sched->cpu;
				sched->linked = linked;
				update_cpu_position(sched);
				linked = tmp;
			}
		}
		if (linked) /* might be NULL due to swap */
			linked->rt_param.linked_on = entry->cpu;
	}
	entry->linked = linked;

	/*
	if (print) {
		//#ifdef WANT_ALL_SCHED_EVENTS
		if (linked)
			TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
		else
			TRACE("NULL linked to %d.\n", entry->cpu);
		//#endif
	}
	*/

	update_cpu_position(entry);
}

/* unlink - Make sure a task is not linked any longer to an entry
 *          where it was linked before. Must hold gsnedf_lock.
 */
static noinline void unlink(struct task_struct* t)
{
	cpu_entry_t *entry;

	if (t->rt_param.linked_on != NO_CPU) {
		/* unlink */
		entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on);
		t->rt_param.linked_on = NO_CPU;
		link_task_to_cpu(NULL, entry);
	} else if (is_queued(t)) {
		/* This is an interesting situation: t is scheduled,
		 * but was just recently unlinked.  It cannot be
		 * linked anywhere else (because then it would have
		 * been relinked to this CPU), thus it must be in some
		 * queue. We must remove it from the list in this
		 * case.
		 */
		remove(&gsnedf, t);
	}
}


/* preempt - force a CPU to reschedule
 */
static void preempt(cpu_entry_t *entry)
{
	preempt_if_preemptable(entry->scheduled, entry->cpu);
}

/* requeue - Put an unlinked task into gsn-edf domain.
 *           Caller must hold gsnedf_lock.
 */
static noinline void requeue(struct task_struct* task)
{
	BUG_ON(!task);
	/* sanity check before insertion */
	BUG_ON(is_queued(task));

	if (is_released(task, litmus_clock())) {
#ifdef CONFIG_REALTIME_AUX_TASKS
		if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) {
			/* aux_task probably transitioned to real-time while it was blocked */
			TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid);
			unlink(task); /* really needed? */
		}
		else
#endif
			__add_ready(&gsnedf, task);
	}
	else {
		/* it has got to wait */
		add_release(&gsnedf, task);
	}
}

#ifdef CONFIG_SCHED_CPU_AFFINITY
static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t *start)
{
	cpu_entry_t *affinity;

	get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
#ifdef CONFIG_RELEASE_MASTER
			gsnedf.release_master
#else
			NO_CPU
#endif
			);

	return(affinity);
}
#endif

/* check for any necessary preemptions */
static void check_for_preemptions(void)
{
	struct task_struct *task;
	cpu_entry_t *last;

	for (last = lowest_prio_cpu();
	     edf_preemption_needed(&gsnedf, last->linked);
	     last = lowest_prio_cpu()) {
		/* preemption necessary */
		task = __take_ready(&gsnedf);
		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
		      task->pid, last->cpu);

#ifdef CONFIG_SCHED_CPU_AFFINITY
		{
			cpu_entry_t *affinity =
					gsnedf_get_nearest_available_cpu(
						&per_cpu(gsnedf_cpu_entries, task_cpu(task)));
			if (affinity)
				last = affinity;
			else if (requeue_preempted_job(last->linked))
				requeue(last->linked);
		}
#else
		if (requeue_preempted_job(last->linked))
			requeue(last->linked);
#endif

		link_task_to_cpu(task, last);
		preempt(last);
	}
}

/* gsnedf_job_arrival: task is either resumed or released */
static noinline void gsnedf_job_arrival(struct task_struct* task)
{
	BUG_ON(!task);

	requeue(task);
	check_for_preemptions();
}

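/* gsnedf_release_jobs - rt_domain callback for a batch of released jobs:
 *                       merge them into the ready queue and trigger any
 *                       necessary preemptions.
 */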
static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&gsnedf_lock, flags);

	__merge_ready(rt, tasks);

	check_for_preemptions();

	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}

/* caller holds gsnedf_lock */
static noinline void job_completion(struct task_struct *t, int forced)
{
	BUG_ON(!t);

	sched_trace_task_completion(t, forced);

	TRACE_TASK(t, "job_completion().\n");

	/* set flags */
	tsk_rt(t)->completed = 1;
	/* prepare for next period */
	prepare_for_next_period(t);

	if (is_released(t, litmus_clock()))
		sched_trace_task_release(t);
	/* unlink */
	unlink(t);
	/* requeue
	 * But don't requeue a blocking task. */
	if (is_running(t))
		gsnedf_job_arrival(t);
}

/* gsnedf_tick - this function is called for every local timer
 *               interrupt.
 *
 *               checks whether the current task's budget has been exhausted
 *               and, if so, triggers a preemption (or requests one via
 *               sys_exit_np() for non-preemptable tasks)
 */
static void gsnedf_tick(struct task_struct* t)
{
	if (is_realtime(t) && budget_exhausted(t))
	{
		if (budget_signalled(t) && !sigbudget_sent(t)) {
			/* signal exhaustion */
			send_sigbudget(t);
		}

		if (budget_enforced(t)) {
			if (!is_np(t)) {
				/* np tasks will be preempted when they become
				 * preemptable again
				 */
				litmus_reschedule_local();
				TRACE("gsnedf_scheduler_tick: "
					  "%d is preemptable "
					  " => FORCE_RESCHED\n", t->pid);
			} else if (is_user_np(t)) {
				TRACE("gsnedf_scheduler_tick: "
					  "%d is non-preemptable, "
					  "preemption delayed.\n", t->pid);
				request_exit_np(t);
			}
		}
	}

	/*
	if(is_realtime(t)) {
		TRACE_TASK(t, "tick %llu\n", litmus_clock());
	}
	 */
}




#ifdef CONFIG_LITMUS_PAI_SOFTIRQD


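/* Run a single LITMUS tasklet on the current CPU. The tasklet must be
 * enabled (count == 0) and still marked TASKLET_STATE_SCHED; 'flushed' is
 * only passed through to the tracing hooks.
 */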
static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
{
	if (!atomic_read(&tasklet->count)) {
		if(tasklet->owner) {
			sched_trace_tasklet_begin(tasklet->owner);
		}

		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
		{
			BUG();
		}
		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
			  __FUNCTION__,
			  (tasklet->owner) ? tasklet->owner->pid : 0,
			  (tasklet->owner) ? 0 : 1);
		tasklet->func(tasklet->data);
		tasklet_unlock(tasklet);

		if(tasklet->owner) {
			sched_trace_tasklet_end(tasklet->owner, flushed);
		}
	}
	else {
		BUG();
	}
}

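/* Drain pending tasklets whose owners outrank sched_task under EDF,
 * highest priority first. gsnedf_lock is dropped while each tasklet runs.
 */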
static void do_lit_tasklets(struct task_struct* sched_task)
{
	int work_to_do = 1;
	struct tasklet_struct *tasklet = NULL;
	unsigned long flags;

	while(work_to_do) {

		TS_NV_SCHED_BOTISR_START;

		// execute one tasklet that has higher priority
		raw_spin_lock_irqsave(&gsnedf_lock, flags);

		if(gsnedf_pending_tasklets.head != NULL) {
			struct tasklet_struct *prev = NULL;
			tasklet = gsnedf_pending_tasklets.head;

			while(tasklet && edf_higher_prio(sched_task, tasklet->owner)) {
				prev = tasklet;
				tasklet = tasklet->next;
			}

			// remove the tasklet from the queue (if an eligible one was found)
			if(tasklet) {
				if(prev) {
					prev->next = tasklet->next;
					if(prev->next == NULL) {
						TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
						// the tail must point at the last 'next' pointer,
						// not at the local variable 'prev'.
						gsnedf_pending_tasklets.tail = &(prev->next);
					}
				}
				else {
					gsnedf_pending_tasklets.head = tasklet->next;
					if(tasklet->next == NULL) {
						TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
						gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
					}
				}
			}
		}
		else {
			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
		}

		raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

		if(tasklet) {
			__do_lit_tasklet(tasklet, 0ul);
			tasklet = NULL;
		}
		else {
			work_to_do = 0;
		}

		TS_NV_SCHED_BOTISR_END;
	}
}

//static void do_lit_tasklets(struct task_struct* sched_task)
//{
//	int work_to_do = 1;
//	struct tasklet_struct *tasklet = NULL;
//	//struct tasklet_struct *step;
//	unsigned long flags;
//
//	while(work_to_do) {
//
//		TS_NV_SCHED_BOTISR_START;
//
//		// remove tasklet at head of list if it has higher priority.
//		raw_spin_lock_irqsave(&gsnedf_lock, flags);
//
//		if(gsnedf_pending_tasklets.head != NULL) {
//			// remove tasklet at head.
//			tasklet = gsnedf_pending_tasklets.head;
//
//			if(edf_higher_prio(tasklet->owner, sched_task)) {
//
//				if(NULL == tasklet->next) {
//					// tasklet is at the head, list only has one element
//					TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
//					gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
//				}
//
//				// remove the tasklet from the queue
//				gsnedf_pending_tasklets.head = tasklet->next;
//
//				TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
//			}
//			else {
//				TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
//				tasklet = NULL;
//			}
//		}
//		else {
//			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
//		}
//
//		raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
//
//		TS_NV_SCHED_BOTISR_END;
//
//		if(tasklet) {
//			__do_lit_tasklet(tasklet, 0ul);
//			tasklet = NULL;
//		}
//		else {
//			work_to_do = 0;
//		}
//	}
//
//	//TRACE("%s: exited.\n", __FUNCTION__);
//}

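/* Insert a tasklet into the pending queue, which is kept sorted by the EDF
 * priority of each tasklet's owner (highest priority at the head). Caller
 * must hold gsnedf_lock.
 */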
static void __add_pai_tasklet(struct tasklet_struct* tasklet)
{
	struct tasklet_struct* step;

	tasklet->next = NULL;  // make sure there are no old values floating around

	step = gsnedf_pending_tasklets.head;
	if(step == NULL) {
		TRACE("%s: tasklet queue empty.  inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
		// insert at tail.
		*(gsnedf_pending_tasklets.tail) = tasklet;
		gsnedf_pending_tasklets.tail = &(tasklet->next);
	}
	else if((*(gsnedf_pending_tasklets.tail) != NULL) &&
			edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) {
		// insert at tail.
		TRACE("%s: tasklet belongs at end.  inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);

		*(gsnedf_pending_tasklets.tail) = tasklet;
		gsnedf_pending_tasklets.tail = &(tasklet->next);
	}
	else {
		// insert the tasklet at the head or somewhere in the middle.

		TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);

		if(edf_higher_prio(tasklet->owner, step->owner)) {
			// outranks the current head: insert at the head.
			TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
			tasklet->next = step;
			gsnedf_pending_tasklets.head = tasklet;
		}
		else {
			while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
				step = step->next;
			}

			// insert tasklet right before step->next.
			TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : 0);

			tasklet->next = step->next;
			step->next = tasklet;

			// patch up the tail if the tasklet was inserted at the end.
			if(tasklet->next == NULL) {
				gsnedf_pending_tasklets.tail = &(tasklet->next);
			}
		}
	}
}

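/* Plugin callback: run any pending PAI tasklets that outrank sched_task.
 * Preemption is disabled so that we stay on this CPU while draining.
 */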
static void gsnedf_run_tasklets(struct task_struct* sched_task)
{
	preempt_disable();

	if(gsnedf_pending_tasklets.head != NULL) {
		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
		do_lit_tasklets(sched_task);
	}

	preempt_enable_no_resched();
}

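/* Plugin callback: hand a tasklet to the scheduler. Depending on the EDF
 * priority of the tasklet's owner, it is run immediately on this CPU, run
 * remotely by preempting the lowest-priority CPU, or queued for later.
 * Returns 1 on success, 0 if the tasklet has no real-time owner.
 */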
static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
{
	cpu_entry_t *targetCPU = NULL;
	int thisCPU;
	int runLocal = 0;
	int runNow = 0;
	unsigned long flags;

	if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) {
		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
		return 0;
	}


	raw_spin_lock_irqsave(&gsnedf_lock, flags);

	thisCPU = smp_processor_id();

#ifdef CONFIG_SCHED_CPU_AFFINITY
	{
		cpu_entry_t* affinity = NULL;

		// use this CPU if it isn't running any RT work.
		if(
#ifdef CONFIG_RELEASE_MASTER
		   (thisCPU != gsnedf.release_master) &&
#endif
		   (__get_cpu_var(gsnedf_cpu_entries).linked == NULL)) {
			affinity = &(__get_cpu_var(gsnedf_cpu_entries));
		}
		else {
			// this CPU is busy or shouldn't run the tasklet.
			// look for an available CPU nearby.
			// NOTE: Affinity is towards the owner, not this CPU.  Is this right?
			affinity =
				gsnedf_get_nearest_available_cpu(
					&per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner)));
		}

		targetCPU = affinity;
	}
#endif

	if (targetCPU == NULL) {
		targetCPU = lowest_prio_cpu();
	}

	if (edf_higher_prio(tasklet->owner, targetCPU->linked)) {
		if (thisCPU == targetCPU->cpu) {
			TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
			runLocal = 1;
			runNow = 1;
		}
		else {
			TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
			runLocal = 0;
			runNow = 1;
		}
	}
	else {
		runLocal = 0;
		runNow = 0;
	}

	if(!runLocal) {
		// enqueue the tasklet
		__add_pai_tasklet(tasklet);
	}

	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);


	if (runLocal /*&& runNow */) {  // runNow == 1 is implied
		TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
		__do_lit_tasklet(tasklet, 0ul);
	}
	else if (runNow /*&& !runLocal */) {  // runLocal == 0 is implied
		TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
		preempt(targetCPU);  // need to be protected by gsnedf_lock?
	}
	else {
		TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
	}

	return(1); // success
}

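/* Re-assign pending tasklets owned by old_prio to new_prio; used when a
 * task's priority changes or when it exits (new_prio == NULL).
 */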
static void gsnedf_change_prio_pai_tasklet(struct task_struct *old_prio,
										   struct task_struct *new_prio)
{
	struct tasklet_struct* step;
	unsigned long flags;

	if(gsnedf_pending_tasklets.head != NULL) {
		raw_spin_lock_irqsave(&gsnedf_lock, flags);
		for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) {
			if(step->owner == old_prio) {
				TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid);
				step->owner = new_prio;
			}
		}
		raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
	}
}

#endif  // end PAI


/* Getting schedule() right is a bit tricky. schedule() may not make any
 * assumptions on the state of the current task since it may be called for a
 * number of reasons. The reasons include that a scheduler_tick() determined
 * it was necessary, that sys_exit_np() was called, that some Linux
 * subsystem determined so, or even (in the worst case) that there is a bug
 * hidden somewhere. Thus, we must take extreme care to determine what the
 * current state is.
 *
 * The CPU could currently be scheduling a task (or not) and be linked to a
 * task (or not).
 *
 * The following assertions for the scheduled task could hold:
 *
 *      - !is_running(scheduled)        // the job blocks
 *	- scheduled->timeslice == 0	// the job completed (forcefully)
 *	- is_completed()		// the job completed (by syscall)
 * 	- linked != scheduled		// we need to reschedule (for any reason)
 * 	- is_np(scheduled)		// rescheduling must be delayed,
 *					   sys_exit_np must be requested
 *
 * Any of these can occur together.
 */
static struct task_struct* gsnedf_schedule(struct task_struct * prev)
{
	cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
	int out_of_time, signal_budget, sleep, preempt, np, exists, blocks;
	struct task_struct* next = NULL;

	//int completion = 0;

#ifdef CONFIG_RELEASE_MASTER
	/* Bail out early if we are the release master.
	 * The release master never schedules any real-time tasks.
	 */
	if (unlikely(gsnedf.release_master == entry->cpu)) {
		sched_state_task_picked();
		return NULL;
	}
#endif

	raw_spin_lock(&gsnedf_lock);

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev);
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	/* (0) Determine state */
	exists      = entry->scheduled != NULL;
	blocks      = exists && !is_running(entry->scheduled);
	out_of_time = exists &&
		budget_enforced(entry->scheduled) &&
		budget_exhausted(entry->scheduled);
	signal_budget = exists &&
		budget_signalled(entry->scheduled) &&
		budget_exhausted(entry->scheduled) &&
		!sigbudget_sent(entry->scheduled);
	np 	    = exists && is_np(entry->scheduled);
	sleep	    = exists && is_completed(entry->scheduled);
	preempt     = entry->scheduled != entry->linked;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "invoked gsnedf_schedule.\n");
#endif

	if (exists) {
		TRACE_TASK(prev,
			   "blocks:%d out_of_time:%d signal_budget: %d np:%d sleep:%d preempt:%d "
			   "state:%d sig:%d\n",
			   blocks, out_of_time, signal_budget, np, sleep, preempt,
			   prev->state, signal_pending(prev));
	}

	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
			   entry->linked->comm, entry->linked->pid);

	/* Send the signal that the budget has been exhausted */
	if (signal_budget) {
		send_sigbudget(entry->scheduled);
	}

	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks) {
		unlink(entry->scheduled);
	}

#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
	if(exists && is_realtime(entry->scheduled) && tsk_rt(entry->scheduled)->held_gpus) {
		if(!blocks || tsk_rt(entry->scheduled)->suspend_gpu_tracker_on_block) {
			stop_gpu_tracker(entry->scheduled);
		}
	}
#endif

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * We need to make sure to update the link structure anyway in case
	 * that we are still linked. Multiple calls to request_exit_np() don't
	 * hurt.
	 */
	if (np && (out_of_time || preempt || sleep)) {
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. Don't do a job completion if we block (can't have timers running
	 * for blocked jobs).
	 */
	if (!np && (out_of_time || sleep) && !blocks) {
		job_completion(entry->scheduled, !sleep);
		//completion = 1;
	}

	/* Link pending task if we became unlinked.
	 */
	if (!entry->linked)
		link_task_to_cpu(__take_ready(&gsnedf), entry);

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if ((!np || blocks) &&
	    entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
			TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	}
	else
	{
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;
	}

#if 0
	if (completion) {
		TRACE_CUR("switching away from a completion\n");
	}
#endif

	sched_state_task_picked();

	raw_spin_unlock(&gsnedf_lock);

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE("gsnedf_lock released, next=0x%p\n", next);

	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());
#endif

	return next;
}


/* _finish_switch - we just finished the switch away from prev
 */
static void gsnedf_finish_switch(struct task_struct *prev)
{
	cpu_entry_t* 	entry = &__get_cpu_var(gsnedf_cpu_entries);

	entry->scheduled = is_realtime(current) ? current : NULL;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "switched away from\n");
#endif
}


/*	Prepare a task for running in RT mode
 */
static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long 		flags;
	cpu_entry_t* 		entry;

	TRACE("gsn edf: task new = %d on_rq = %d running = %d\n", t->pid, on_rq, running);

	raw_spin_lock_irqsave(&gsnedf_lock, flags);

	/* setup job params */
	release_at(t, litmus_clock());

	if (running) {
		entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);

#ifdef CONFIG_RELEASE_MASTER
		if (entry->cpu != gsnedf.release_master) {
#endif
			entry->scheduled = t;
			tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			/* do not schedule on release master */
			preempt(entry); /* force resched */
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
#endif
	} else {
		t->rt_param.scheduled_on = NO_CPU;
	}
	t->rt_param.linked_on          = NO_CPU;

	gsnedf_job_arrival(t);
	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}

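/* Called when a task resumes after a suspension: clear the completion flag
 * and re-enter it into the scheduler state via gsnedf_job_arrival().
 */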
static void gsnedf_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	//lt_t now;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

	raw_spin_lock_irqsave(&gsnedf_lock, flags);

#if 0
	/* sporadic task model. will increment job numbers automatically */
	now = litmus_clock();
	if (is_tardy(task, now)) {
		/* new sporadic release */
		release_at(task, now);
		sched_trace_task_release(task);
	}
	else {
		if (task->rt.time_slice) {
			/* came back in time before deadline
			*/
			tsk_rt(task)->completed = 0;
		}
	}
#else
	/* don't force job to end.  rely on user to say when jobs complete */
	tsk_rt(task)->completed = 0;
#endif

#ifdef CONFIG_REALTIME_AUX_TASKS
	if (tsk_rt(task)->has_aux_tasks && !tsk_rt(task)->hide_from_aux_tasks) {
		TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid);
		disable_aux_task_owner(task);
	}
#endif

#ifdef CONFIG_LITMUS_NVIDIA
	if (tsk_rt(task)->held_gpus && !tsk_rt(task)->hide_from_gpu) {
		TRACE_CUR("%s/%d is ready so gpu klmirqd tasks may not inherit.\n", task->comm, task->pid);
		disable_gpu_owner(task);
	}
#endif

	gsnedf_job_arrival(task);
	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}

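/* Called when a task suspends: unlink it from its CPU so that another job
 * can be scheduled there.
 */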
static void gsnedf_task_block(struct task_struct *t)
{
	unsigned long flags;

	TRACE_TASK(t, "block at %llu\n", litmus_clock());

	/* unlink if necessary */
	raw_spin_lock_irqsave(&gsnedf_lock, flags);

	unlink(t);

#ifdef CONFIG_REALTIME_AUX_TASKS
	if (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks) {

		TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid);
		enable_aux_task_owner(t);
	}
#endif

#ifdef CONFIG_LITMUS_NVIDIA
	if (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu) {

		TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid);
		enable_gpu_owner(t);
	}
#endif

	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

	BUG_ON(!is_realtime(t));
}


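/* Called when a real-time task leaves the system: tear down any aux-task
 * and GPU state, unlink the task, and clear its scheduled_on bookkeeping.
 */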
static void gsnedf_task_exit(struct task_struct * t)
{
	unsigned long flags;

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	gsnedf_change_prio_pai_tasklet(t, NULL);
#endif

	/* unlink if necessary */
	raw_spin_lock_irqsave(&gsnedf_lock, flags);

#ifdef CONFIG_REALTIME_AUX_TASKS
	/* make sure we clean up on our way out */
	if (unlikely(tsk_rt(t)->is_aux_task)) {
		exit_aux_task(t);
	}
	else if(tsk_rt(t)->has_aux_tasks) {
		disable_aux_task_owner(t);
	}
#endif

#ifdef CONFIG_LITMUS_NVIDIA
	/* make sure we clean up on our way out */
	if(tsk_rt(t)->held_gpus) {
		disable_gpu_owner(t);
	}
#endif

	unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "RIP\n");
}


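/* Plugin callback: any task may be admitted; just initialize the binheap
 * used to track this task's highest-priority blocked tasks (for nested
 * locking) before it starts running as a real-time task.
 */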
static long gsnedf_admit_task(struct task_struct* tsk)
{
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks,
						edf_max_heap_base_priority_order);
#endif

	return 0;
}






#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/fdso.h>

/* called with IRQs off */
static int __increase_priority_inheritance(struct task_struct* t,
										    struct task_struct* prio_inh)
{
	int success = 1;
	int linked_on;
	int check_preempt = 0;

	if (prio_inh && prio_inh == effective_priority(t)) {
		/* relationship already established. */
		TRACE_TASK(t, "already has effective priority of %s/%d\n",
					prio_inh->comm, prio_inh->pid);
		goto out;
	}

#ifdef CONFIG_LITMUS_NESTED_LOCKING
	/* this sanity check allows for weaker locking in protocols */
	if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) {
#endif
		TRACE_TASK(t, "inherits priority from %s/%d\n",
				   prio_inh->comm, prio_inh->pid);
		tsk_rt(t)->inh_task = prio_inh;

		linked_on  = tsk_rt(t)->linked_on;

		/* If it is scheduled, then we need to reorder the CPU heap. */
		if (linked_on != NO_CPU) {
			TRACE_TASK(t, "%s: linked  on %d\n",
				   __FUNCTION__, linked_on);
			/* Holder is scheduled; need to re-order CPUs.
			 * We can't use heap_decrease() here since
			 * the cpu_heap is ordered in reverse direction, so
			 * it is actually an increase. */
			binheap_delete(&gsnedf_cpus[linked_on]->hn, &gsnedf_cpu_heap);
			binheap_add(&gsnedf_cpus[linked_on]->hn,
					&gsnedf_cpu_heap, cpu_entry_t, hn);
		} else {
			/* holder may be queued: first stop queue changes */
			raw_spin_lock(&gsnedf.release_lock);
			if (is_queued(t)) {
				TRACE_TASK(t, "%s: is queued\n",
					   __FUNCTION__);
				/* We need to update the position of the holder in
				 * some heap. Note that this could be a release heap
				 * if budget enforcement is used and this job overran. */
				check_preempt =
					!bheap_decrease(edf_ready_order,
							   tsk_rt(t)->heap_node);
			} else {
				/* Nothing to do: if it is not queued and not linked
				 * then it is either sleeping or currently being moved
				 * by other code (e.g., a timer interrupt handler) that
				 * will use the correct priority when enqueuing the
				 * task. */
				TRACE_TASK(t, "%s: is NOT queued => Done.\n",
					   __FUNCTION__);
			}
			raw_spin_unlock(&gsnedf.release_lock);

			/* If holder was enqueued in a release heap, then the following
			 * preemption check is pointless, but we can't easily detect
			 * that case. If you want to fix this, then consider that
			 * simply adding a state flag requires O(n) time to update when
			 * releasing n tasks, which conflicts with the goal to have
			 * O(log n) merges. */
			if (check_preempt) {
				/* heap_decrease() hit the top level of the heap: make
				 * sure preemption checks get the right task, not the
				 * potentially stale cache. */
				bheap_uncache_min(edf_ready_order,
						 &gsnedf.ready_queue);
				check_for_preemptions();
			}

#ifdef CONFIG_REALTIME_AUX_TASKS
			/* propagate to aux tasks */
			if (tsk_rt(t)->has_aux_tasks) {
				aux_task_owner_increase_priority(t);
			}
#endif

#ifdef CONFIG_LITMUS_NVIDIA
			/* propagate to gpu klmirqd */
			if (tsk_rt(t)->held_gpus) {
				gpu_owner_increase_priority(t);
			}
#endif

		}
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	}
	else {
		TRACE_TASK(t, "Spurious invalid priority increase. "
					  "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n"
					  "Occurrence is likely okay: probably due to (hopefully safe) concurrent priority updates.\n",
				   t->comm, t->pid,
				   effective_priority(t)->comm, effective_priority(t)->pid,
				   (prio_inh) ? prio_inh->comm : "null",
				   (prio_inh) ? prio_inh->pid : 0);
		WARN_ON(!prio_inh);
		success = 0;
	}
#endif

out:
	return success;
}

/* called with IRQs off */
static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
{
	int success;

	raw_spin_lock(&gsnedf_lock);

	success = __increase_priority_inheritance(t, prio_inh);

	raw_spin_unlock(&gsnedf_lock);

#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
	if(tsk_rt(t)->held_gpus) {
		int i;
		for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
			i < NV_DEVICE_NUM;
			i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) {
			pai_check_priority_increase(t, i);
		}
	}
#endif
}


/* called with IRQs off */
static int __decrease_priority_inheritance(struct task_struct* t,
											struct task_struct* prio_inh)
{
	int success = 1;

	if (prio_inh == tsk_rt(t)->inh_task) {
		/* relationship already established. */
		TRACE_TASK(t, "already inherits priority from %s/%d\n",
				   (prio_inh) ? prio_inh->comm : "(null)",
				   (prio_inh) ? prio_inh->pid : 0);
		goto out;
	}

#ifdef CONFIG_LITMUS_NESTED_LOCKING
	if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) {
#endif
		/* A job only stops inheriting a priority when it releases a
		 * resource. Thus we can make the following assumption.*/
		if(prio_inh)
			TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n",
					   prio_inh->comm, prio_inh->pid);
		else
			TRACE_TASK(t, "base priority restored.\n");

		tsk_rt(t)->inh_task = prio_inh;

		if(tsk_rt(t)->scheduled_on != NO_CPU) {
			TRACE_TASK(t, "is scheduled.\n");

			/* Check if rescheduling is necessary. We can't use
			 * bheap_decrease() since the priority was effectively
			 * lowered. */
			unlink(t);
			gsnedf_job_arrival(t);
		}
		else {
			/* task is queued */
			raw_spin_lock(&gsnedf.release_lock);
			if (is_queued(t)) {
				TRACE_TASK(t, "is queued.\n");

				/* decrease in priority, so we have to re-add to binomial heap */
				unlink(t);
				gsnedf_job_arrival(t);
			}
			else {
				TRACE_TASK(t, "is not in scheduler. Probably on wait queue somewhere.\n");
			}
			raw_spin_unlock(&gsnedf.release_lock);
		}

#ifdef CONFIG_REALTIME_AUX_TASKS
		/* propagate to aux tasks */
		if (tsk_rt(t)->has_aux_tasks) {
			aux_task_owner_decrease_priority(t);
		}
#endif

#ifdef CONFIG_LITMUS_NVIDIA
		/* propagate to gpu */
		if (tsk_rt(t)->held_gpus) {
			gpu_owner_decrease_priority(t);
		}
#endif


#ifdef CONFIG_LITMUS_NESTED_LOCKING
	}
	else {
		TRACE_TASK(t, "Spurious invalid priority decrease. "
				   "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n"
				   "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n",
				   t->comm, t->pid,
				   effective_priority(t)->comm, effective_priority(t)->pid,
				   (prio_inh) ? prio_inh->comm : "null",
				   (prio_inh) ? prio_inh->pid : 0);
		success = 0;
	}
#endif

out:
	return success;
}

static void decrease_priority_inheritance(struct task_struct* t,
										  struct task_struct* prio_inh)
{
	int success;

	raw_spin_lock(&gsnedf_lock);

	success = __decrease_priority_inheritance(t, prio_inh);

	raw_spin_unlock(&gsnedf_lock);

#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
	if(tsk_rt(t)->held_gpus) {
		int i;
		/* find_{first,next}_bit() take the bitmap size in bits, not
		 * bytes */
		for(i = find_first_bit(&tsk_rt(t)->held_gpus,
					sizeof(tsk_rt(t)->held_gpus) * BITS_PER_BYTE);
			i < NV_DEVICE_NUM;
			i = find_next_bit(&tsk_rt(t)->held_gpus,
					sizeof(tsk_rt(t)->held_gpus) * BITS_PER_BYTE,
					i+1)) {
			pai_check_priority_decrease(t, i);
		}
	}
#endif
}



#ifdef CONFIG_LITMUS_NESTED_LOCKING

/* called with IRQs off */
/* preconditions:
 (1) The 'hp_blocked_tasks_lock' of task 't' is held.
 (2) The lock 'to_unlock' is held.
 */
static void nested_increase_priority_inheritance(struct task_struct* t,
												 struct task_struct* prio_inh,
												 raw_spinlock_t *to_unlock,
												 unsigned long irqflags)
{
	struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock;

	if(tsk_rt(t)->inh_task != prio_inh) {		// avoid redundant calls.
		increase_priority_inheritance(t, prio_inh);  // increase our prio.
	}

	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);  // unlock t's heap lock.

	if(blocked_lock) {
		if(blocked_lock->ops->propagate_increase_inheritance) {
			TRACE_TASK(t, "Inheritor is blocked (...perhaps).  Checking lock %d.\n",
					   blocked_lock->ident);

			// beware: recursion
			blocked_lock->ops->propagate_increase_inheritance(blocked_lock,
															  t, to_unlock,
															  irqflags);
		}
		else {
			TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n",
					   blocked_lock->ident);
			unlock_fine_irqrestore(to_unlock, irqflags);
		}
	}
	else {
		TRACE_TASK(t, "is not blocked.  No propagation.\n");
		unlock_fine_irqrestore(to_unlock, irqflags);
	}
}

/* called with IRQs off */
/* preconditions:
 (1) The 'hp_blocked_tasks_lock' of task 't' is held.
 (2) The lock 'to_unlock' is held.
 */
static void nested_decrease_priority_inheritance(struct task_struct* t,
												 struct task_struct* prio_inh,
												 raw_spinlock_t *to_unlock,
												 unsigned long irqflags)
{
	struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock;
	decrease_priority_inheritance(t, prio_inh);

	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);  // unlock t's heap lock.

	if(blocked_lock) {
		if(blocked_lock->ops->propagate_decrease_inheritance) {
			TRACE_TASK(t, "Inheritor is blocked (...perhaps).  Checking lock %d.\n",
					   blocked_lock->ident);

			// beware: recursion
			blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t,
															  to_unlock,
															  irqflags);
		}
		else {
			TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n",
					   blocked_lock);
			unlock_fine_irqrestore(to_unlock, irqflags);
		}
	}
	else {
		TRACE_TASK(t, "is not blocked.  No propagation.\n");
		unlock_fine_irqrestore(to_unlock, irqflags);
	}
}
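
/* Illustrative chain (a sketch of the recursion above, not code from this
 * file): suppose T1 blocks on lock L1 held by T2, and T2 is itself blocked
 * on lock L2 held by T3.  nested_increase_priority_inheritance(T2, T1, ...)
 * first raises T2's effective priority, then follows
 * tsk_rt(T2)->blocked_lock (== L2) and recurses through
 * propagate_increase_inheritance(), which in turn raises T3.  The
 * hand-over-hand release of hp_blocked_tasks_lock and 'to_unlock' keeps the
 * walk along the blocking chain free of lock-ordering deadlocks. */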


/* ******************** FIFO MUTEX ********************** */

static struct litmus_lock_ops gsnedf_fifo_mutex_lock_ops = {
	.lock   = fifo_mutex_lock,
	.unlock = fifo_mutex_unlock,
	.close  = fifo_mutex_close,
	.deallocate = fifo_mutex_free,

	.propagate_increase_inheritance = fifo_mutex_propagate_increase_inheritance,
	.propagate_decrease_inheritance = fifo_mutex_propagate_decrease_inheritance,

#ifdef CONFIG_LITMUS_DGL_SUPPORT
	.dgl_lock = fifo_mutex_dgl_lock,
	.is_owner = fifo_mutex_is_owner,
	.enable_priority = fifo_mutex_enable_priority,
#endif
};

static struct litmus_lock* gsnedf_new_fifo_mutex(void)
{
	return fifo_mutex_new(&gsnedf_fifo_mutex_lock_ops);
}

/* ******************** IKGLP ********************** */

static struct litmus_lock_ops gsnedf_ikglp_lock_ops = {
	.lock   = ikglp_lock,
	.unlock = ikglp_unlock,
	.close  = ikglp_close,
	.deallocate = ikglp_free,

	// ikglp can only be an outer-most lock.
	.propagate_increase_inheritance = NULL,
	.propagate_decrease_inheritance = NULL,
};

static struct litmus_lock* gsnedf_new_ikglp(void* __user arg)
{
	return ikglp_new(num_online_cpus(), &gsnedf_ikglp_lock_ops, arg);
}

#endif  /* CONFIG_LITMUS_NESTED_LOCKING */


/* ******************** KFMLP support ********************** */

static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = {
	.lock   = kfmlp_lock,
	.unlock = kfmlp_unlock,
	.close  = kfmlp_close,
	.deallocate = kfmlp_free,

	// kfmlp can only be an outer-most lock.
	.propagate_increase_inheritance = NULL,
	.propagate_decrease_inheritance = NULL,
};


static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg)
{
	return kfmlp_new(&gsnedf_kfmlp_lock_ops, arg);
}

/* ******************** FMLP support ********************** */

/* struct for semaphore with priority inheritance */
struct fmlp_semaphore {
	struct litmus_lock litmus_lock;

	/* current resource holder */
	struct task_struct *owner;

	/* highest-priority waiter */
	struct task_struct *hp_waiter;

	/* FIFO queue of waiting tasks */
	wait_queue_head_t wait;
};

static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct fmlp_semaphore, litmus_lock);
}

/* caller is responsible for locking */
static struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem,
					  struct task_struct* skip)
{
	struct list_head	*pos;
	struct task_struct 	*queued, *found = NULL;

	list_for_each(pos, &sem->wait.task_list) {
		queued  = (struct task_struct*) list_entry(pos, wait_queue_t,
							   task_list)->private;

		/* Compare task prios, find high prio task. */
		if (queued != skip && edf_higher_prio(queued, found))
			found = queued;
	}
	return found;
}
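
/* Note: the scan above is O(n) in the number of waiters and runs with
 * sem->wait.lock held and IRQs off (see the callers below), so long wait
 * queues directly lengthen these critical sections. */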

static int gsnedf_fmlp_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	wait_queue_t wait;
	unsigned long flags;

	if (!is_realtime(t))
		return -EPERM;

	spin_lock_irqsave(&sem->wait.lock, flags);

	if (sem->owner) {
		/* resource is not free => must suspend and wait */

		init_waitqueue_entry(&wait, t);

		/* FIXME: interruptible would be nice some day */
		set_task_state(t, TASK_UNINTERRUPTIBLE);

		__add_wait_queue_tail_exclusive(&sem->wait, &wait);

		/* check if we need to activate priority inheritance */
		if (edf_higher_prio(t, sem->hp_waiter)) {
			sem->hp_waiter = t;
			if (edf_higher_prio(t, sem->owner))
				increase_priority_inheritance(sem->owner, sem->hp_waiter);
		}

		TS_LOCK_SUSPEND;

		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		/* We depend on the FIFO order.  Thus, we don't need to recheck
		 * when we wake up; we are guaranteed to have the lock since
		 * there is only one wake up per release.
		 */

		suspend_for_lock();

		TS_LOCK_RESUME;

		/* Since we hold the lock, no other task will change
		 * ->owner. We can thus check it without acquiring the spin
		 * lock. */
		BUG_ON(sem->owner != t);
	} else {
		/* it's ours now */
		sem->owner = t;

		spin_unlock_irqrestore(&sem->wait.lock, flags);
	}

	return 0;
}
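
/* Worked example for the priority-inheritance check above (illustrative
 * only): let prio(A) > prio(B) > prio(C).  If C owns the semaphore and B is
 * the current hp_waiter, then when A calls gsnedf_fmlp_lock(), A replaces B
 * as hp_waiter and, since A also out-prioritizes the owner, C inherits A's
 * priority via increase_priority_inheritance(C, A). */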

static int gsnedf_fmlp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current, *next;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&sem->wait.lock, flags);

	if (sem->owner != t) {
		err = -EINVAL;
		goto out;
	}

	/* check if there are jobs waiting for this resource */
	next = __waitqueue_remove_first(&sem->wait);
	if (next) {
		/* next becomes the resource holder */
		sem->owner = next;
		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);

		/* determine new hp_waiter if necessary */
		if (next == sem->hp_waiter) {
			TRACE_TASK(next, "was highest-prio waiter\n");
			/* next has the highest priority --- it doesn't need to
			 * inherit.  However, we need to make sure that the
			 * next-highest priority in the queue is reflected in
			 * hp_waiter. */
			sem->hp_waiter = find_hp_waiter(sem, next);
			if (sem->hp_waiter)
				TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
			else
				TRACE("no further waiters\n");
		} else {
			/* Well, if next is not the highest-priority waiter,
			 * then it ought to inherit the highest-priority
			 * waiter's priority. */
			increase_priority_inheritance(next, sem->hp_waiter);
		}

		/* wake up next */
		wake_up_process(next);
	} else
		/* becomes available */
		sem->owner = NULL;

	/* we lose the benefit of priority inheritance (if any) */
	if (tsk_rt(t)->inh_task)
		decrease_priority_inheritance(t, NULL);

out:
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	return err;
}

static int gsnedf_fmlp_close(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	unsigned long flags;

	int owner;

	spin_lock_irqsave(&sem->wait.lock, flags);

	owner = sem->owner == t;

	spin_unlock_irqrestore(&sem->wait.lock, flags);

	if (owner)
		gsnedf_fmlp_unlock(l);

	return 0;
}

static void gsnedf_fmlp_free(struct litmus_lock* lock)
{
	kfree(fmlp_from_lock(lock));
}

static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
	.close  = gsnedf_fmlp_close,
	.lock   = gsnedf_fmlp_lock,
	.unlock = gsnedf_fmlp_unlock,
	.deallocate = gsnedf_fmlp_free,

#ifdef CONFIG_LITMUS_NESTED_LOCKING
	.propagate_increase_inheritance = NULL,
	.propagate_decrease_inheritance = NULL
#endif
};

static struct litmus_lock* gsnedf_new_fmlp(void)
{
	struct fmlp_semaphore* sem;

	sem = kzalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;

	sem->owner   = NULL;
	sem->hp_waiter = NULL;
	init_waitqueue_head(&sem->wait);
	sem->litmus_lock.ops = &gsnedf_fmlp_lock_ops;

	return &sem->litmus_lock;
}


static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
				 void* __user args)
{
	int err;

	switch (type) {

	case FMLP_SEM:
		/* Flexible Multiprocessor Locking Protocol */
		*lock = gsnedf_new_fmlp();
		break;
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	case FIFO_MUTEX:
		*lock = gsnedf_new_fifo_mutex();
		break;

	case IKGLP_SEM:
		*lock = gsnedf_new_ikglp(args);
		break;
#endif
	case KFMLP_SEM:
		*lock = gsnedf_new_kfmlp(args);
		break;
	default:
		err = -ENXIO;
		goto UNSUPPORTED_LOCK;
	}

	if (*lock)
		err = 0;
	else
		err = -ENOMEM;

UNSUPPORTED_LOCK:
	return err;
}
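
#if 0
/* Userspace sketch (illustrative only; never compiled).  It assumes the
 * standard liblitmus API -- open_fmlp_sem(), litmus_lock() and
 * litmus_unlock() are liblitmus calls, not symbols from this file -- and
 * an FMLP_SEM allocated through the type switch above.  The "locks" file
 * merely provides a shared inode under which the lock is named. */
#include <sys/stat.h>
#include <fcntl.h>
#include <litmus.h>

void example(void)
{
	int fd = open("locks", O_RDONLY | O_CREAT, S_IRUSR);
	int od = open_fmlp_sem(fd, 0 /* resource id */);
	if (od >= 0) {
		litmus_lock(od);	/* may suspend; see gsnedf_fmlp_lock() */
		/* ... critical section ... */
		litmus_unlock(od);	/* see gsnedf_fmlp_unlock() */
	}
}
#endif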

#endif  // CONFIG_LITMUS_LOCKING


#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
static struct affinity_observer_ops gsnedf_kfmlp_affinity_ops __attribute__ ((unused)) = {
	.close = kfmlp_aff_obs_close,
	.deallocate = kfmlp_aff_obs_free,
};

#ifdef CONFIG_LITMUS_NESTED_LOCKING
static struct affinity_observer_ops gsnedf_ikglp_affinity_ops __attribute__ ((unused)) = {
	.close = ikglp_aff_obs_close,
	.deallocate = ikglp_aff_obs_free,
};
#endif

static long gsnedf_allocate_affinity_observer(
								struct affinity_observer **aff_obs,
								int type,
								void* __user args)
{
	int err;

	switch (type) {
#ifdef CONFIG_LITMUS_NVIDIA
		case KFMLP_SIMPLE_GPU_AFF_OBS:
			*aff_obs = kfmlp_simple_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args);
			break;

		case KFMLP_GPU_AFF_OBS:
			*aff_obs = kfmlp_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args);
			break;

#ifdef CONFIG_LITMUS_NESTED_LOCKING
		case IKGLP_SIMPLE_GPU_AFF_OBS:
			*aff_obs = ikglp_simple_gpu_aff_obs_new(&gsnedf_ikglp_affinity_ops, args);
			break;

		case IKGLP_GPU_AFF_OBS:
			*aff_obs = ikglp_gpu_aff_obs_new(&gsnedf_ikglp_affinity_ops, args);
			break;
#endif
#endif
		default:
			err = -ENXIO;
			goto UNSUPPORTED_AFF_OBS;
		}

	if (*aff_obs)
		err = 0;
	else
		err = -ENOMEM;

UNSUPPORTED_AFF_OBS:
	return err;
}
#endif


#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
static int gsnedf_map_gpu_to_cpu(int gpu)
{
	return -1;  // No CPU affinity needed.
}
#endif


static long gsnedf_activate_plugin(void)
{
	int cpu;
	cpu_entry_t *entry;

	INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio);
#ifdef CONFIG_RELEASE_MASTER
	gsnedf.release_master = atomic_read(&release_master_cpu);
#endif

	for_each_online_cpu(cpu) {
		entry = &per_cpu(gsnedf_cpu_entries, cpu);
		INIT_BINHEAP_NODE(&entry->hn);
		entry->linked    = NULL;
		entry->scheduled = NULL;
#ifdef CONFIG_RELEASE_MASTER
		if (cpu != gsnedf.release_master) {
#endif
			TRACE("GSN-EDF: Initializing CPU #%d.\n", cpu);
			update_cpu_position(entry);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			TRACE("GSN-EDF: CPU %d is release master.\n", cpu);
		}
#endif
	}

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	gsnedf_pending_tasklets.head = NULL;
	gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
#endif

#ifdef CONFIG_LITMUS_SOFTIRQD
	init_klmirqd();
#endif

#ifdef CONFIG_LITMUS_NVIDIA
	init_nvidia_info();
#endif

	return 0;
}
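
/* Note (per standard LITMUS^RT conventions; verify against this kernel's
 * litmus/ proc layout): activate_plugin() runs when the plugin is switched
 * in from userspace, e.g.:
 *
 *	echo GSN-EDF > /proc/litmus/active_plugin
 */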

/*	Plugin object	*/
static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "GSN-EDF",
	.finish_switch		= gsnedf_finish_switch,
	.tick			= gsnedf_tick,
	.task_new		= gsnedf_task_new,
	.complete_job		= complete_job,
	.task_exit		= gsnedf_task_exit,
	.schedule		= gsnedf_schedule,
	.task_wake_up		= gsnedf_task_wake_up,
	.task_block		= gsnedf_task_block,
	.admit_task		= gsnedf_admit_task,
	.activate_plugin	= gsnedf_activate_plugin,
	.compare		= edf_higher_prio,
#ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock		= gsnedf_allocate_lock,
	.increase_prio		= increase_priority_inheritance,
	.decrease_prio		= decrease_priority_inheritance,
	.__increase_prio	= __increase_priority_inheritance,
	.__decrease_prio	= __decrease_priority_inheritance,
#endif
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	.nested_increase_prio		= nested_increase_priority_inheritance,
	.nested_decrease_prio		= nested_decrease_priority_inheritance,
	.__compare					= __edf_higher_prio,
#endif
#ifdef CONFIG_LITMUS_DGL_SUPPORT
	.get_dgl_spinlock = gsnedf_get_dgl_spinlock,
#endif
#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	.allocate_aff_obs = gsnedf_allocate_affinity_observer,
#endif
#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	.enqueue_pai_tasklet = gsnedf_enqueue_pai_tasklet,
	.change_prio_pai_tasklet = gsnedf_change_prio_pai_tasklet,
	.run_tasklets = gsnedf_run_tasklets,
#endif
#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
	.map_gpu_to_cpu = gsnedf_map_gpu_to_cpu,
#endif
};


static int __init init_gsn_edf(void)
{
	int cpu;
	cpu_entry_t *entry;

	INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio);
	/* initialize CPU state */
	for (cpu = 0; cpu < NR_CPUS; ++cpu)  {
		entry = &per_cpu(gsnedf_cpu_entries, cpu);
		gsnedf_cpus[cpu] = entry;
		entry->cpu 	 = cpu;

		INIT_BINHEAP_NODE(&entry->hn);
	}

#ifdef CONFIG_LITMUS_DGL_SUPPORT
	raw_spin_lock_init(&dgl_lock);
#endif

	edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
	return register_sched_plugin(&gsn_edf_plugin);
}
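
/* Note: init_gsn_edf() initializes per-CPU entries for all NR_CPUS possible
 * CPUs, while gsnedf_activate_plugin() (above) re-initializes only the CPUs
 * online at activation time, honoring CONFIG_RELEASE_MASTER if set. */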


module_init(init_gsn_edf);