/*
 * litmus/sched_crm_srt.c
 *
 * Implementation of the C-RM-SRT scheduling algorithm.
 *
 * This implementation is based on G-EDF:
 * - CPUs are clustered around L2 or L3 caches.
 * - The cluster topology is automatically detected (this is arch dependent
 *   and works only on x86 at the moment --- and only with modern
 *   CPUs that export cpuid4 information).
 * - The plugin _does not_ attempt to put tasks in the right cluster, i.e.,
 *   the programmer needs to be aware of the topology to place tasks
 *   in the desired cluster.
 * - The default clustering is around the L2 cache (cache index = 2);
 *   supported clusters are: L1 (private cache: pedf), L2, L3, ALL (all
 *   online CPUs are placed in a single cluster).
 *
 *   For details on functions, take a look at sched_gsn_edf.c
 *
 * Currently, we do not support changes in the number of online CPUs.
 * If num_online_cpus() changes at runtime, the plugin is broken.
 *
 * This version uses the simple approach and serializes all scheduling
 * decisions by the use of a queue lock. This is probably not the
 * best way to do it, but it should suffice for now.
 */
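
/*
 * Illustrative example (not taken from this file): on a hypothetical 8-CPU
 * machine where CPU pairs {0,1}, {2,3}, {4,5}, {6,7} each share an L2
 * cache, L2 clustering yields four 2-CPU clusters, while L3 or ALL
 * clustering collapses them into fewer (or one) cluster(s). The cluster
 * level is selected through the proc file created by create_cluster_file()
 * below; with the usual LITMUS^RT proc layout this looks something like
 *
 *	echo L3 > /proc/litmus/plugins/C-RM-SRT/cluster
 *
 * (the exact path depends on the LITMUS^RT version in use). Since
 * cluster_config is only read in crm_srt_activate_plugin(), the plugin
 * must be (re-)activated for a new configuration to take effect.
 */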

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <linux/module.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/preempt.h>
#include <litmus/sched_plugin.h>
#include <litmus/rm_srt_common.h>
#include <litmus/sched_trace.h>

#include <litmus/clustered.h>

#include <litmus/bheap.h>

/* to configure the cluster size */
#include <litmus/litmus_proc.h>

#ifdef CONFIG_SCHED_CPU_AFFINITY
#include <litmus/affinity.h>
#endif

#ifdef CONFIG_LITMUS_SOFTIRQD
#include <litmus/litmus_softirq.h>
#endif

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
#include <linux/interrupt.h>
#include <litmus/trace.h>
#endif

#ifdef CONFIG_LITMUS_NVIDIA
#include <litmus/nvidia_info.h>
#endif

/* Reference configuration variable. Determines which cache level is used to
 * group CPUs into clusters.  GLOBAL_CLUSTER, which is the default, means that
 * all CPUs form a single cluster (just like GSN-EDF).
 */
static enum cache_level cluster_config = GLOBAL_CLUSTER;

struct clusterdomain;

/* cpu_entry_t - maintains the linked and scheduled state of a CPU.
 *
 * Each CPU also contains a pointer to the crm_srt_domain_t cluster
 * that owns it (struct clusterdomain*).
 */
typedef struct {
	int 			cpu;
	struct clusterdomain*	cluster;	/* owning cluster */
	struct task_struct*	linked;		/* only RT tasks */
	struct task_struct*	scheduled;	/* only RT tasks */
	atomic_t		will_schedule;	/* prevent unneeded IPIs */
	struct bheap_node*	hn;
} cpu_entry_t;

/* one cpu_entry_t per CPU */
DEFINE_PER_CPU(cpu_entry_t, crm_srt_cpu_entries);

#define set_will_schedule() \
	(atomic_set(&__get_cpu_var(crm_srt_cpu_entries).will_schedule, 1))
#define clear_will_schedule() \
	(atomic_set(&__get_cpu_var(crm_srt_cpu_entries).will_schedule, 0))
#define test_will_schedule(cpu) \
	(atomic_read(&per_cpu(crm_srt_cpu_entries, cpu).will_schedule))
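
/* The will_schedule flag is set by crm_srt_tick() when it requests a local
 * reschedule and cleared at the top of crm_srt_schedule(); remote CPUs can
 * consult it via test_will_schedule() to avoid sending redundant reschedule
 * IPIs to a CPU that is already on its way into the scheduler.
 */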


#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};
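
/* Invariant: head points to the first pending tasklet (NULL if the list is
 * empty) and tail always points to the next-pointer slot where the next
 * append goes, i.e., &head while empty and &(last->next) otherwise. A
 * minimal append under this invariant looks like:
 *
 *	tasklet->next = NULL;
 *	*(queue->tail) = tasklet;
 *	queue->tail = &(tasklet->next);
 */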
#endif

/*
 * In C-RM-SRT there is a crm_srt domain _per_ cluster.
 * The number of clusters is dynamically determined according to the
 * total CPU count and the cluster size.
 */
typedef struct clusterdomain {
	/* rt_domain for this cluster */
	rt_domain_t	domain;
	/* cpus in this cluster */
	cpu_entry_t*	*cpus;
	/* map of this cluster cpus */
	cpumask_var_t	cpu_map;
	/* the cpus queue themselves according to priority in here */
	struct bheap_node *heap_node;
	struct bheap      cpu_heap;
	/* lock for this cluster */
#define crm_srt_lock domain.ready_lock
	
	
#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	struct tasklet_head pending_tasklets;
#endif	
	
} crm_srt_domain_t;

/* a crm_srt_domain per cluster; allocation is done at init/activation time */
crm_srt_domain_t *crm_srt;

#define remote_cluster(cpu)	((crm_srt_domain_t *) per_cpu(crm_srt_cpu_entries, cpu).cluster)
#define task_cpu_cluster(task)	remote_cluster(get_partition(task))

/* Uncomment WANT_ALL_SCHED_EVENTS if you want to see all scheduling
 * decisions in the TRACE() log; uncomment VERBOSE_INIT for verbose
 * information during the initialization of the plugin (e.g., topology)
#define WANT_ALL_SCHED_EVENTS
 */
#define VERBOSE_INIT

static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
	cpu_entry_t *a, *b;
	a = _a->value;
	b = _b->value;
	/* Note that a and b are inverted: we want the lowest-priority CPU at
	 * the top of the heap.
	 */
	return rm_srt_higher_prio(b->linked, a->linked);
}
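
/* Because of this inversion, bheap_peek() on cpu_heap returns the CPU whose
 * linked task has the *lowest* priority (or no linked task at all), which is
 * exactly the candidate that check_for_preemptions() wants to preempt first.
 */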

/* update_cpu_position - Move the cpu entry to the correct place to maintain
 *                       order in the cpu queue. Caller must hold crm_srt lock.
 */
static void update_cpu_position(cpu_entry_t *entry)
{
	crm_srt_domain_t *cluster = entry->cluster;

	if (likely(bheap_node_in_heap(entry->hn)))
		bheap_delete(cpu_lower_prio,
				&cluster->cpu_heap,
				entry->hn);

	bheap_insert(cpu_lower_prio, &cluster->cpu_heap, entry->hn);
}

/* caller must hold crm_srt lock */
static cpu_entry_t* lowest_prio_cpu(crm_srt_domain_t *cluster)
{
	struct bheap_node* hn;
	hn = bheap_peek(cpu_lower_prio, &cluster->cpu_heap);
	return hn->value;
}


/* link_task_to_cpu - Update the link of a CPU.
 *                    Handles the case where the to-be-linked task is already
 *                    scheduled on a different CPU.
 */
static noinline void link_task_to_cpu(struct task_struct* linked,
				      cpu_entry_t *entry)
{
	cpu_entry_t *sched;
	struct task_struct* tmp;
	int on_cpu;

	BUG_ON(linked && !is_realtime(linked));

	/* The currently linked task is about to be unlinked. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
	}

	/* Link new task to CPU. */
	if (linked) {
		set_rt_flags(linked, RT_F_RUNNING);
		/* handle the case where the task is already scheduled somewhere else! */
		on_cpu = linked->rt_param.scheduled_on;
		if (on_cpu != NO_CPU) {
			sched = &per_cpu(crm_srt_cpu_entries, on_cpu);
			/* this should only happen if not linked already */
			BUG_ON(sched->linked == linked);

			/* If we are already scheduled on the CPU to which we
			 * wanted to link, we don't need to do the swap --
			 * we just link ourselves to the CPU and depend on
			 * the caller to get things right.
			 */
			if (entry != sched) {
				TRACE_TASK(linked,
					   "already scheduled on %d, updating link.\n",
					   sched->cpu);
				tmp = sched->linked;
				linked->rt_param.linked_on = sched->cpu;
				sched->linked = linked;
				update_cpu_position(sched);
				linked = tmp;
			}
		}
		if (linked) /* might be NULL due to swap */
			linked->rt_param.linked_on = entry->cpu;
	}
	entry->linked = linked;
#ifdef WANT_ALL_SCHED_EVENTS
	if (linked)
		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
	else
		TRACE("NULL linked to %d.\n", entry->cpu);
#endif
	update_cpu_position(entry);
}
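
/* Example of the swap case above: suppose T is to be linked to CPU0 but is
 * still scheduled on CPU1 (scheduled_on == 1), where task U is linked.
 * Rather than forcing a migration, T is linked back to CPU1 (where it
 * already runs) and U, the task it displaced, is linked to CPU0 instead.
 */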

/* unlink - Make sure a task is not linked any longer to an entry
 *          where it was linked before. Must hold crm_srt_lock.
 */
static noinline void unlink(struct task_struct* t)
{
	cpu_entry_t *entry;

	if (t->rt_param.linked_on != NO_CPU) {
		/* unlink */
		entry = &per_cpu(crm_srt_cpu_entries, t->rt_param.linked_on);
		t->rt_param.linked_on = NO_CPU;
		link_task_to_cpu(NULL, entry);
	} else if (is_queued(t)) {
		/* This is an interesting situation: t is scheduled,
		 * but was just recently unlinked.  It cannot be
		 * linked anywhere else (because then it would have
		 * been relinked to this CPU), thus it must be in some
		 * queue. We must remove it from the list in this
		 * case.
		 *
		 * in the C-RM-SRT case it should be somewhere in the queue
		 * of its domain; therefore we can get the domain via
		 * task_cpu_cluster().
		 */
		remove(&(task_cpu_cluster(t))->domain, t);
	}
}


/* preempt - force a CPU to reschedule
 */
static void preempt(cpu_entry_t *entry)
{
	preempt_if_preemptable(entry->scheduled, entry->cpu);
}

/* requeue - Put an unlinked task into c-rm-srt domain.
 *           Caller must hold crm_srt_lock.
 */
static noinline void requeue(struct task_struct* task)
{
	crm_srt_domain_t *cluster;

	BUG_ON(!task);
	/* sanity check before insertion */
	BUG_ON(is_queued(task));

	/* only dereference task after the NULL check above */
	cluster = task_cpu_cluster(task);

	if (is_released(task, litmus_clock()))
		__add_ready(&cluster->domain, task);
	else {
		/* it has got to wait */
		add_release(&cluster->domain, task);
	}
}

#ifdef CONFIG_SCHED_CPU_AFFINITY
static cpu_entry_t* crm_srt_get_nearest_available_cpu(
				crm_srt_domain_t *cluster, cpu_entry_t* start)
{
	cpu_entry_t* affinity;

	get_nearest_available_cpu(affinity, start, crm_srt_cpu_entries, -1);

	/* make sure CPU is in our cluster */
	if(affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
		return(affinity);
	else
		return(NULL);
}
#endif


/* check for any necessary preemptions */
static void check_for_preemptions(crm_srt_domain_t *cluster)
{
	struct task_struct *task;
	cpu_entry_t *last;

	for(last = lowest_prio_cpu(cluster);
	    rm_srt_preemption_needed(&cluster->domain, last->linked);
	    last = lowest_prio_cpu(cluster)) {
		/* preemption necessary */
		task = __take_ready(&cluster->domain);
#ifdef CONFIG_SCHED_CPU_AFFINITY
		{
			cpu_entry_t* affinity =
					crm_srt_get_nearest_available_cpu(cluster,
							&per_cpu(crm_srt_cpu_entries, task_cpu(task)));
			if(affinity)
				last = affinity;
			else if(last->linked)
				requeue(last->linked);
		}
#else
		if (last->linked)
			requeue(last->linked);
#endif
		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
				task->pid, last->cpu);
		link_task_to_cpu(task, last);
		preempt(last);
	}
}
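
/* Example: in a two-CPU cluster where CPU0 is linked to T1 (period 10) and
 * CPU1 to T2 (period 20), the release of T3 (period 5) makes
 * rm_srt_preemption_needed() true for the heap top (CPU1, the
 * lowest-priority CPU under rate-monotonic ordering): T2 is requeued, T3 is
 * linked to CPU1, and CPU1 is sent a reschedule request.
 */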

/* crm_srt_job_arrival: task is either resumed or released */
static noinline void crm_srt_job_arrival(struct task_struct* task)
{
	crm_srt_domain_t *cluster;

	BUG_ON(!task);
	/* only dereference task after the NULL check above */
	cluster = task_cpu_cluster(task);

	requeue(task);
	check_for_preemptions(cluster);
}

static void crm_srt_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	crm_srt_domain_t* cluster = container_of(rt, crm_srt_domain_t, domain);
	unsigned long flags;

	raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);

	__merge_ready(&cluster->domain, tasks);
	check_for_preemptions(cluster);

	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
}

/* caller holds crm_srt_lock */
static noinline void job_completion(struct task_struct *t, int forced)
{
	BUG_ON(!t);

	sched_trace_task_completion(t, forced);

#ifdef CONFIG_LITMUS_NVIDIA
	atomic_set(&tsk_rt(t)->nv_int_count, 0);
#endif

	TRACE_TASK(t, "job_completion().\n");

	/* set flags */
	set_rt_flags(t, RT_F_SLEEP);
	/* prepare for next period */
	prepare_for_next_period(t);
	if (is_released(t, litmus_clock()))
		sched_trace_task_release(t);
	/* unlink */
	unlink(t);
	/* requeue
	 * But don't requeue a blocking task. */
	if (is_running(t))
		crm_srt_job_arrival(t);
}

/* crm_srt_tick - this function is called for every local timer
 *                interrupt.
 *
 *                It checks whether the current task's budget has expired
 *                and triggers a (possibly delayed) preemption if so.
 */
static void crm_srt_tick(struct task_struct* t)
{
	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
		if (!is_np(t)) {
			/* np tasks will be preempted when they become
			 * preemptable again
			 */
			litmus_reschedule_local();
			set_will_schedule();
			TRACE("crm_srt_scheduler_tick: "
			      "%d is preemptable "
			      " => FORCE_RESCHED\n", t->pid);
		} else if (is_user_np(t)) {
			TRACE("crm_srt_scheduler_tick: "
			      "%d is non-preemptable, "
			      "preemption delayed.\n", t->pid);
			request_exit_np(t);
		}
	}
}











#ifdef CONFIG_LITMUS_PAI_SOFTIRQD


static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
{
	if (!atomic_read(&tasklet->count)) {
		sched_trace_tasklet_begin(tasklet->owner);
		
		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
		{
			BUG();
		}
		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
		tasklet->func(tasklet->data);
		tasklet_unlock(tasklet);
		
		sched_trace_tasklet_end(tasklet->owner, flushed);
	}
	else {
		BUG();
	}
}


static void __extract_tasklets(crm_srt_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets)
{
	struct tasklet_struct* step;
	struct tasklet_struct* next;
	struct tasklet_struct* prev;
	
	task_tasklets->head = NULL;
	task_tasklets->tail = &(task_tasklets->head);
	
	prev = NULL;
	for(step = cluster->pending_tasklets.head; step != NULL; step = next)
	{
		/* save the successor now; step->next is overwritten below if
		 * step is moved to the flush list. */
		next = step->next;
		
		if(step->owner == task)
		{
			TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
			
			// unlink step from the pending queue.
			if(prev) {
				prev->next = next;
			}
			else {
				// we're at the head.
				cluster->pending_tasklets.head = next;
			}
			
			if(cluster->pending_tasklets.tail == &(step->next)) {
				// we're at the tail; back the tail pointer up.
				if(prev) {
					cluster->pending_tasklets.tail = &(prev->next);
				}
				else {
					cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
				}
			}
			
			// append step to the flush list.
			step->next = NULL;
			*(task_tasklets->tail) = step;
			task_tasklets->tail = &(step->next);
		}
		else {
			prev = step;
		}
	}
}
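
/* __extract_tasklets() is called by flush_tasklets() below with the
 * cluster's crm_srt_lock held; it splices all tasklets owned by @task out
 * of the pending queue (preserving their relative order) so they can be
 * executed outside the lock with the "flushed" flag set.
 */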

static void flush_tasklets(crm_srt_domain_t* cluster, struct task_struct* task)
{
	unsigned long flags;
	struct tasklet_head task_tasklets;
	struct tasklet_struct* step;
	
	raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
	__extract_tasklets(cluster, task, &task_tasklets);
	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
	
	if(task_tasklets.head != NULL) {
		TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid);
	}
	
	// now execute the flushed tasklets (not the remaining pending queue!).
	for(step = task_tasklets.head; step != NULL; /**/)
	{
		struct tasklet_struct* temp = step->next;
		
		step->next = NULL;
		__do_lit_tasklet(step, 1ul);
		
		step = temp;
	}
}


static void do_lit_tasklets(crm_srt_domain_t* cluster, struct task_struct* sched_task)
{
	int work_to_do = 1;
	struct tasklet_struct *tasklet = NULL;
	unsigned long flags;
	
	while(work_to_do) {
		
		TS_NV_SCHED_BOTISR_START;
		
		// remove tasklet at head of list if it has higher priority.
		raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);	
		
		if(cluster->pending_tasklets.head != NULL) {
			// remove tasklet at head.
			tasklet = cluster->pending_tasklets.head;
			
			if(rm_srt_higher_prio(tasklet->owner, sched_task)) {
				
				if(NULL == tasklet->next) {
					// tasklet is at the head, list only has one element
					TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
					cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
				}
				
				// remove the tasklet from the queue
				cluster->pending_tasklets.head = tasklet->next;
				
				TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
			}
			else {
				TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
				tasklet = NULL;
			}
		}
		else {
			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
		}
		
		raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
		
		TS_NV_SCHED_BOTISR_END;
		
		if(tasklet) {
			__do_lit_tasklet(tasklet, 0ul);
			tasklet = NULL;	
		}
		else {
			work_to_do = 0;
		}
	}
	
}


static void run_tasklets(struct task_struct* sched_task)
{
	crm_srt_domain_t* cluster;
	
	preempt_disable();
	
	cluster = (is_realtime(sched_task)) ?
		task_cpu_cluster(sched_task) :
		remote_cluster(smp_processor_id());
	
	if(cluster && cluster->pending_tasklets.head != NULL) {
		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
		do_lit_tasklets(cluster, sched_task);
	}
	
	preempt_enable_no_resched();
}


static void __add_pai_tasklet(struct tasklet_struct* tasklet, crm_srt_domain_t* cluster)
{
	struct tasklet_struct* step;
	
	tasklet->next = NULL;  // make sure there are no old values floating around
	
	step = cluster->pending_tasklets.head;
	if(step == NULL) {
		TRACE("%s: tasklet queue empty.  inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
		// insert at tail (which currently points at the head slot).
		*(cluster->pending_tasklets.tail) = tasklet;
		cluster->pending_tasklets.tail = &(tasklet->next);
	}
	else if(rm_srt_higher_prio(tasklet->owner, step->owner)) {
		// the new tasklet outranks everything queued: it becomes the new head.
		TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
		
		tasklet->next = step;
		cluster->pending_tasklets.head = tasklet;
	}
	else {
		// walk the queue and insert in priority order.
		// NOTE: by the queue invariant, *(tail) is always NULL, so the
		// last element cannot be inspected through the tail pointer;
		// instead the tail is patched up after an end insertion below.
		TRACE("%s: tasklet belongs somewhere in the middle (or at the tail).\n", __FUNCTION__);
		
		while(step->next && rm_srt_higher_prio(step->next->owner, tasklet->owner)) {
			step = step->next;
		}
		
		// insert tasklet right after step.
		TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
		
		tasklet->next = step->next;
		step->next = tasklet;
		
		// patch up the tail if we inserted at the very end.
		if(tasklet->next == NULL) {
			cluster->pending_tasklets.tail = &(tasklet->next);
		}
	}
}

static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
{
	crm_srt_domain_t *cluster = NULL;
	cpu_entry_t *targetCPU = NULL;
	int thisCPU;
	int runLocal = 0;
	int runNow = 0;
	unsigned long flags;
	
	if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
	{
		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
		return 0;
	}
	
	cluster = task_cpu_cluster(tasklet->owner);
	
	raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);		
	
	thisCPU = smp_processor_id();
	
#ifdef CONFIG_SCHED_CPU_AFFINITY
	{
		cpu_entry_t* affinity = NULL;
		
		// use this CPU if it is in our cluster and isn't running any RT work.
		if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(crm_srt_cpu_entries).linked == NULL)) {
			affinity = &(__get_cpu_var(crm_srt_cpu_entries));
		}
		else {
			// this CPU is busy or shouldn't run tasklet in this cluster.
			// look for available near by CPUs.
			// NOTE: Affinity towards owner and not this CPU.  Is this right?
			affinity = 
				crm_srt_get_nearest_available_cpu(cluster,
						&per_cpu(crm_srt_cpu_entries, task_cpu(tasklet->owner)));
		}
		
		targetCPU = affinity;
	}
#endif
	
	if (targetCPU == NULL) {
		targetCPU = lowest_prio_cpu(cluster);
	}
	
	if (rm_srt_higher_prio(tasklet->owner, targetCPU->linked)) {
		if (thisCPU == targetCPU->cpu) {
			TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
			runLocal = 1;
			runNow = 1;
		}
		else {
			TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
			runLocal = 0;
			runNow = 1;
		}
	}
	else {
		runLocal = 0;
		runNow = 0;
	}
	
	if(!runLocal) {
		// enqueue the tasklet
		__add_pai_tasklet(tasklet, cluster);
	}
	
	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
	
	
	if (runLocal /*&& runNow */) {  // runNow == 1 is implied
		TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
		__do_lit_tasklet(tasklet, 0ul);
	}
	else if (runNow /*&& !runLocal */) {  // runLocal == 0 is implied
		TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
		preempt(targetCPU);  // need to be protected by crm_srt_lock?
	}
	else {
		TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
	}
	
	return(1); // success
}


#endif



















/* Getting schedule() right is a bit tricky. schedule() may not make any
 * assumptions on the state of the current task since it may be called for a
 * number of reasons. The reasons include a scheduler_tick() determined that it
 * was necessary, because sys_exit_np() was called, because some Linux
 * subsystem determined so, or even (in the worst case) because there is a bug
 * hidden somewhere. Thus, we must take extreme care to determine what the
 * current state is.
 *
 * The CPU could currently be scheduling a task (or not), be linked (or not).
 *
 * The following assertions for the scheduled task could hold:
 *
 *      - !is_running(scheduled)        // the job blocks
 *      - scheduled->timeslice == 0     // the job completed (forcefully)
 *      - get_rt_flag() == RT_F_SLEEP   // the job completed (by syscall)
 *      - linked != scheduled           // we need to reschedule (for any reason)
 *      - is_np(scheduled)              // rescheduling must be delayed,
 *                                         sys_exit_np must be requested
 *
 * Any of these can occur together.
 */
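
/* For instance, a job that exhausts its budget while it is non-preemptable
 * yields out_of_time && np: rather than completing it immediately, the code
 * below unlinks it and requests a sys_exit_np() call, deferring the actual
 * job completion until the task leaves its non-preemptable section.
 */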
static struct task_struct* crm_srt_schedule(struct task_struct * prev)
{
	cpu_entry_t* entry = &__get_cpu_var(crm_srt_cpu_entries);
	crm_srt_domain_t *cluster = entry->cluster;
	int out_of_time, sleep, preempt, np, exists, blocks;
	struct task_struct* next = NULL;

	raw_spin_lock(&cluster->crm_srt_lock);
	clear_will_schedule();

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev);
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	/* (0) Determine state */
	exists      = entry->scheduled != NULL;
	blocks      = exists && !is_running(entry->scheduled);
	out_of_time = exists &&
				  budget_enforced(entry->scheduled) &&
				  budget_exhausted(entry->scheduled);
	np 	    = exists && is_np(entry->scheduled);
	sleep	    = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
	preempt     = entry->scheduled != entry->linked;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "invoked crm_srt_schedule.\n");
#endif

	if (exists)
		TRACE_TASK(prev,
			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
			   "state:%d sig:%d\n",
			   blocks, out_of_time, np, sleep, preempt,
			   prev->state, signal_pending(prev));
	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
			   entry->linked->comm, entry->linked->pid);


	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		unlink(entry->scheduled);

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * We need to make sure to update the link structure anyway in case
	 * that we are still linked. Multiple calls to request_exit_np() don't
	 * hurt.
	 */
	if (np && (out_of_time || preempt || sleep)) {
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. Don't do a job completion if we block (we can't have timers
	 * running for blocked jobs). Preemptions go first for the same reason.
	 */
	if (!np && (out_of_time || sleep) && !blocks && !preempt)
		job_completion(entry->scheduled, !sleep);

	/* Link pending task if we became unlinked.
	 */
	if (!entry->linked)
		link_task_to_cpu(__take_ready(&cluster->domain), entry);

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if ((!np || blocks) &&
	    entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	sched_state_task_picked();
	raw_spin_unlock(&cluster->crm_srt_lock);

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE("crm_srt_lock released, next=0x%p\n", next);

	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());
#endif


	return next;
}


/* _finish_switch - we just finished the switch away from prev
 */
static void crm_srt_finish_switch(struct task_struct *prev)
{
	cpu_entry_t* 	entry = &__get_cpu_var(crm_srt_cpu_entries);

	entry->scheduled = is_realtime(current) ? current : NULL;
#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "switched away from\n");
#endif
}


/*	Prepare a task for running in RT mode
 */
static void crm_srt_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long 		flags;
	cpu_entry_t* 		entry;
	crm_srt_domain_t*		cluster;

	TRACE("crm srt: task new %d\n", t->pid);

	/* the cluster doesn't change even if t is running */
	cluster = task_cpu_cluster(t);

	raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);

	/* setup job params */
	release_at(t, litmus_clock());

	if (running) {
		entry = &per_cpu(crm_srt_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);

		entry->scheduled = t;
		tsk_rt(t)->scheduled_on = task_cpu(t);
	} else {
		t->rt_param.scheduled_on = NO_CPU;
	}
	t->rt_param.linked_on          = NO_CPU;

	crm_srt_job_arrival(t);
	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
}

static void crm_srt_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	crm_srt_domain_t *cluster;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

	cluster = task_cpu_cluster(task);

	raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);

	/* We assume the periodic task model here: a wake-up is simply a job
	 * arrival and is never treated as a new sporadic release (contrast
	 * with sched_gsn_edf.c, which re-releases tardy jobs that resume
	 * after a suspension). */
	set_rt_flags(task, RT_F_RUNNING);

	if(tsk_rt(task)->linked_on == NO_CPU)
		crm_srt_job_arrival(task);
	else
		TRACE_TASK(task, "already linked on %d at wake-up; not requeuing.\n",
			   tsk_rt(task)->linked_on);

	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
}

static void crm_srt_task_block(struct task_struct *t)
{
	unsigned long flags;
	crm_srt_domain_t *cluster;

	TRACE_TASK(t, "block at %llu\n", litmus_clock());

	cluster = task_cpu_cluster(t);

	/* unlink if necessary */
	raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
	unlink(t);
	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);

	BUG_ON(!is_realtime(t));
}


static void crm_srt_task_exit(struct task_struct * t)
{
	unsigned long flags;
	crm_srt_domain_t *cluster = task_cpu_cluster(t);

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	flush_tasklets(cluster, t);
#endif	
	
	/* unlink if necessary */
	raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
	unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		cpu_entry_t *cpu;
		cpu = &per_cpu(crm_srt_cpu_entries, tsk_rt(t)->scheduled_on);
		cpu->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
		
	BUG_ON(!is_realtime(t));
        TRACE_TASK(t, "RIP\n");
}

static long crm_srt_admit_task(struct task_struct* tsk)
{
	return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
}













#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/fdso.h>


static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
{
	int linked_on;
	int check_preempt = 0;	
	
	crm_srt_domain_t* cluster = task_cpu_cluster(t);
	
	if(prio_inh != NULL)
		TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
	else
		TRACE_TASK(t, "inherits priority from %p\n", prio_inh);
	
	sched_trace_eff_prio_change(t, prio_inh);
	
	tsk_rt(t)->inh_task = prio_inh;
	
	linked_on  = tsk_rt(t)->linked_on;
	
	/* If it is scheduled, then we need to reorder the CPU heap. */
	if (linked_on != NO_CPU) {
		TRACE_TASK(t, "%s: linked  on %d\n",
				   __FUNCTION__, linked_on);
		/* Holder is scheduled; need to re-order CPUs.
		 * We can't use heap_decrease() here since
		 * the cpu_heap is ordered in reverse direction, so
		 * it is actually an increase. */
		bheap_delete(cpu_lower_prio, &cluster->cpu_heap,
                     per_cpu(crm_srt_cpu_entries, linked_on).hn);
		bheap_insert(cpu_lower_prio, &cluster->cpu_heap,
                     per_cpu(crm_srt_cpu_entries, linked_on).hn);
	} else {
		/* holder may be queued: first stop queue changes */
		raw_spin_lock(&cluster->domain.release_lock);
		if (is_queued(t)) {
			TRACE_TASK(t, "%s: is queued\n", __FUNCTION__);
			
			/* We need to update the position of holder in some
			 * heap. Note that this could be a release heap if
			 * budget enforcement is used and this job overran. */
			check_preempt = !bheap_decrease(rm_srt_ready_order, tsk_rt(t)->heap_node);
			
		} else {
			/* Nothing to do: if it is not queued and not linked
			 * then it is either sleeping or currently being moved
			 * by other code (e.g., a timer interrupt handler) that
			 * will use the correct priority when enqueuing the
			 * task. */
			TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__);
		}
		raw_spin_unlock(&cluster->domain.release_lock);
		
		/* If holder was enqueued in a release heap, then the following
		 * preemption check is pointless, but we can't easily detect
		 * that case. If you want to fix this, then consider that
		 * simply adding a state flag requires O(n) time to update when
		 * releasing n tasks, which conflicts with the goal to have
		 * O(log n) merges. */
		if (check_preempt) {
			/* heap_decrease() hit the top level of the heap: make
			 * sure preemption checks get the right task, not the
			 * potentially stale cache. */
			bheap_uncache_min(rm_srt_ready_order, &cluster->domain.ready_queue);
			check_for_preemptions(cluster);
		}
	}
}

/* called with IRQs off */
static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
{
	crm_srt_domain_t* cluster = task_cpu_cluster(t);
	
	raw_spin_lock(&cluster->crm_srt_lock);
	
	__set_priority_inheritance(t, prio_inh);
	
#ifdef CONFIG_LITMUS_SOFTIRQD
	if(tsk_rt(t)->cur_klitirqd != NULL)
	{
		TRACE_TASK(t, "%s/%d inherits a new priority!\n",
				   tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
		
		__set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
	}
#endif
	
	raw_spin_unlock(&cluster->crm_srt_lock);
}


/* called with IRQs off */
static void __clear_priority_inheritance(struct task_struct* t)
{
    TRACE_TASK(t, "priority restored\n");
	
    if(tsk_rt(t)->scheduled_on != NO_CPU)
    {
		sched_trace_eff_prio_change(t, NULL);
		
        tsk_rt(t)->inh_task = NULL;
        
        /* Check if rescheduling is necessary. We can't use heap_decrease()
         * since the priority was effectively lowered. */
        unlink(t);
        crm_srt_job_arrival(t);
    }
    else
    {
        __set_priority_inheritance(t, NULL);
    }
	
#ifdef CONFIG_LITMUS_SOFTIRQD
	if(tsk_rt(t)->cur_klitirqd != NULL)
	{
		TRACE_TASK(t, "%s/%d inheritance set back to owner.\n",
				   tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
		
		if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU)
		{
			sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t);
			
			tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t;
			
			/* Check if rescheduling is necessary. We can't use heap_decrease()
			 * since the priority was effectively lowered. */
			unlink(tsk_rt(t)->cur_klitirqd);
			crm_srt_job_arrival(tsk_rt(t)->cur_klitirqd);
		}
		else
		{
			__set_priority_inheritance(tsk_rt(t)->cur_klitirqd, t);
		}
	}
#endif
}

/* called with IRQs off */
static void clear_priority_inheritance(struct task_struct* t)
{
	crm_srt_domain_t* cluster = task_cpu_cluster(t);
	
	raw_spin_lock(&cluster->crm_srt_lock);
	__clear_priority_inheritance(t);
	raw_spin_unlock(&cluster->crm_srt_lock);
}



#ifdef CONFIG_LITMUS_SOFTIRQD
/* called with IRQs off */
static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd,
											  struct task_struct* old_owner,
											  struct task_struct* new_owner)
{
	crm_srt_domain_t* cluster = task_cpu_cluster(klitirqd);
	
	BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
	
	raw_spin_lock(&cluster->crm_srt_lock);
	
	if(old_owner != new_owner)
	{
		if(old_owner)
		{
			// unreachable?
			tsk_rt(old_owner)->cur_klitirqd = NULL;
		}
		
		TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n",
				   new_owner->comm, new_owner->pid);
		
		tsk_rt(new_owner)->cur_klitirqd = klitirqd;
	}
	
	__set_priority_inheritance(klitirqd,
							   (tsk_rt(new_owner)->inh_task == NULL) ?
							   new_owner :
							   tsk_rt(new_owner)->inh_task);
	
	raw_spin_unlock(&cluster->crm_srt_lock);
}

/* called with IRQs off */
static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd,
												struct task_struct* old_owner)
{
	crm_srt_domain_t* cluster = task_cpu_cluster(klitirqd);
	
	BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
	
	raw_spin_lock(&cluster->crm_srt_lock);
    
    TRACE_TASK(klitirqd, "priority restored\n");
	
    if(tsk_rt(klitirqd)->scheduled_on != NO_CPU)
    {
        tsk_rt(klitirqd)->inh_task = NULL;
        
        /* Check if rescheduling is necessary. We can't use heap_decrease()
         * since the priority was effectively lowered. */
        unlink(klitirqd);
        crm_srt_job_arrival(klitirqd);
    }
    else
    {
        __set_priority_inheritance(klitirqd, NULL);
    }
	
	tsk_rt(old_owner)->cur_klitirqd = NULL;
	
	raw_spin_unlock(&cluster->crm_srt_lock);
}
#endif  // CONFIG_LITMUS_SOFTIRQD


/* ******************** KFMLP support ********************** */

/* struct for semaphore with priority inheritance */
struct kfmlp_queue
{
	wait_queue_head_t wait;
	struct task_struct* owner;
	struct task_struct* hp_waiter;
	int count; /* number of waiters + holder */
};

struct kfmlp_semaphore
{
	struct litmus_lock litmus_lock;
	
	spinlock_t lock;
	
	int num_resources; /* aka k */
	struct kfmlp_queue *queues; /* array */
	struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */
};
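
/* Example: a KFMLP semaphore with num_resources = 2 manages two replicas of
 * a resource, each with its own FIFO wait queue. A blocking task joins the
 * currently shortest queue (see kfmlp_find_shortest()), a queue's owner can
 * inherit the priority of its highest-priority waiter, and when a queue
 * drains on unlock, a waiter may be stolen from another queue (see
 * kfmlp_remove_hp_waiter()).
 */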

static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct kfmlp_semaphore, litmus_lock);
}

static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem,
								struct kfmlp_queue* queue)
{
	return (queue - &sem->queues[0]);
}

static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem,
												  struct task_struct* holder)
{
	int i;
	for(i = 0; i < sem->num_resources; ++i)
		if(sem->queues[i].owner == holder)
			return(&sem->queues[i]);
	return(NULL);
}

/* caller is responsible for locking */
static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue,
										 struct task_struct *skip)
{
	struct list_head	*pos;
	struct task_struct 	*queued, *found = NULL;
	
	list_for_each(pos, &kqueue->wait.task_list) {
		queued  = (struct task_struct*) list_entry(pos, wait_queue_t,
												   task_list)->private;
		
		/* Compare task prios, find high prio task. */
		if (queued != skip && rm_srt_higher_prio(queued, found))
			found = queued;
	}
	return found;
}

static inline struct kfmlp_queue* kfmlp_find_shortest(
										  struct kfmlp_semaphore* sem,
										  struct kfmlp_queue* search_start)
{
	// we start our search at search_start instead of at the beginning of the
	// queue list to load-balance across all resources.
	struct kfmlp_queue* step = search_start;
	struct kfmlp_queue* shortest = sem->shortest_queue;
	
	do
	{
		step = (step+1 != &sem->queues[sem->num_resources]) ?
		step+1 : &sem->queues[0];
		if(step->count < shortest->count)
		{
			shortest = step;
			if(step->count == 0)
				break; /* can't get any shorter */
		}
	} while(step != search_start);
	
	return(shortest);
}

static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
{
	/* must hold sem->lock */
	
	struct kfmlp_queue *my_queue = NULL;
	struct task_struct *max_hp = NULL;
	
	
	struct list_head	*pos;
	struct task_struct 	*queued;
	int i;
	
	for(i = 0; i < sem->num_resources; ++i)
	{
		if( (sem->queues[i].count > 1) &&
		   ((my_queue == NULL) ||
			(rm_srt_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) )
		{
			my_queue = &sem->queues[i];
		}
	}
	
	if(my_queue)
	{
		crm_srt_domain_t* cluster;
		
		max_hp = my_queue->hp_waiter;
		BUG_ON(!max_hp);

		TRACE_CUR("queue %d: stealing %s/%d from queue %d\n",
				  kfmlp_get_idx(sem, my_queue),
				  max_hp->comm, max_hp->pid,
				  kfmlp_get_idx(sem, my_queue));
		
		my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp);
		
	
		cluster = task_cpu_cluster(max_hp);

		raw_spin_lock(&cluster->crm_srt_lock);
		
		if(tsk_rt(my_queue->owner)->inh_task == max_hp)
		{
			__clear_priority_inheritance(my_queue->owner);
			if(my_queue->hp_waiter != NULL)
			{
				__set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
			}
		}
		raw_spin_unlock(&cluster->crm_srt_lock);
		
		list_for_each(pos, &my_queue->wait.task_list)
		{
			queued  = (struct task_struct*) list_entry(pos, wait_queue_t,
													   task_list)->private;
			/* Compare task prios, find high prio task. */
			if (queued == max_hp)
			{
				__remove_wait_queue(&my_queue->wait,
									list_entry(pos, wait_queue_t, task_list));
				break;
			}
		}
		--(my_queue->count);
	}
	
	return(max_hp);
}

int crm_srt_kfmlp_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	struct kfmlp_queue* my_queue;
	wait_queue_t wait;
	unsigned long flags;
	
	if (!is_realtime(t))
		return -EPERM;
	
	spin_lock_irqsave(&sem->lock, flags);
	
	my_queue = sem->shortest_queue;
	
	if (my_queue->owner) {
		/* resource is not free => must suspend and wait */
		TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n",
				  kfmlp_get_idx(sem, my_queue));
		
		init_waitqueue_entry(&wait, t);
		
		/* FIXME: interruptible would be nice some day */
		set_task_state(t, TASK_UNINTERRUPTIBLE);
		
		__add_wait_queue_tail_exclusive(&my_queue->wait, &wait);
		
		/* check if we need to activate priority inheritance */
		if (rm_srt_higher_prio(t, my_queue->hp_waiter))
		{
			my_queue->hp_waiter = t;
			if (rm_srt_higher_prio(t, my_queue->owner))
			{
				set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
			}
		}
		
		++(my_queue->count);
		sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
		
		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->lock, flags);
		
		/* We depend on the FIFO order.  Thus, we don't need to recheck
		 * when we wake up; we are guaranteed to have the lock since
		 * there is only one wake up per release (or steal).
		 */
		schedule();
		
		
		if(my_queue->owner == t)
		{
			TRACE_CUR("queue %d: acquired through waiting\n",
					  kfmlp_get_idx(sem, my_queue));
		}
		else
		{
			/* this case may happen if our wait entry was stolen
			   between queues; record where we ended up. */
			my_queue = kfmlp_get_queue(sem, t);
			BUG_ON(!my_queue);
			TRACE_CUR("queue %d: acquired through stealing\n",
					  kfmlp_get_idx(sem, my_queue));
		}
	}
	else
	{
		TRACE_CUR("queue %d: acquired immediately\n",
				  kfmlp_get_idx(sem, my_queue));
		
		my_queue->owner = t;
		
		++(my_queue->count);
		sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);		
		
		spin_unlock_irqrestore(&sem->lock, flags);
	}
	
	return kfmlp_get_idx(sem, my_queue);
}

int crm_srt_kfmlp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current, *next;
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	struct kfmlp_queue *my_queue;
	unsigned long flags;
	int err = 0;
	
	spin_lock_irqsave(&sem->lock, flags);
	
	my_queue = kfmlp_get_queue(sem, t);
	
	if (!my_queue) {
		err = -EINVAL;
		goto out;
	}
	
	/* check if there are jobs waiting for this resource */
	next = __waitqueue_remove_first(&my_queue->wait);
	if (next) {
		/* next becomes the resource holder */
		my_queue->owner = next;
		
		--(my_queue->count);
		if(my_queue->count < sem->shortest_queue->count)
		{
			sem->shortest_queue = my_queue;
		}	
		
		TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
				  kfmlp_get_idx(sem, my_queue), next->comm, next->pid);
		
		/* determine new hp_waiter if necessary */
		if (next == my_queue->hp_waiter) {
			TRACE_TASK(next, "was highest-prio waiter\n");
			/* next has the highest priority --- it doesn't need to
			 * inherit.  However, we need to make sure that the
			 * next-highest priority in the queue is reflected in
			 * hp_waiter. */
			my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next);
			if (my_queue->hp_waiter)
				TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue));
			else
				TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue));
		} else {
			/* Well, if next is not the highest-priority waiter,
			 * then it ought to inherit the highest-priority
			 * waiter's priority. */
			set_priority_inheritance(next, my_queue->hp_waiter);
		}
		
		/* wake up next */
		wake_up_process(next);
	}
	else
	{
		TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue));
		
		next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */
		
		my_queue->owner = next;
		
		if(next)
		{
			TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n",
					  kfmlp_get_idx(sem, my_queue),
					  next->comm, next->pid);
			
			/* wake up next */
			wake_up_process(next);			
		}
		else
		{
			TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue));
			
			--(my_queue->count);
			if(my_queue->count < sem->shortest_queue->count)
			{
				sem->shortest_queue = my_queue;
			}
		}
	}
	
	/* we lose the benefit of priority inheritance (if any) */
	if (tsk_rt(t)->inh_task)
		clear_priority_inheritance(t);
	
out:
	spin_unlock_irqrestore(&sem->lock, flags);
	
	return err;
}

int crm_srt_kfmlp_close(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	struct kfmlp_queue *my_queue;
	unsigned long flags;
	
	int owner;
	
	spin_lock_irqsave(&sem->lock, flags);
	
	my_queue = kfmlp_get_queue(sem, t);	
	owner = (my_queue) ? (my_queue->owner == t) : 0;
	
	spin_unlock_irqrestore(&sem->lock, flags);
	
	if (owner)
		crm_srt_kfmlp_unlock(l);
	
	return 0;
}

void crm_srt_kfmlp_free(struct litmus_lock* l)
{
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	kfree(sem->queues);
	kfree(sem);
}

static struct litmus_lock_ops crm_srt_kfmlp_lock_ops = {
	.close  = crm_srt_kfmlp_close,
	.lock   = crm_srt_kfmlp_lock,
	.unlock = crm_srt_kfmlp_unlock,
	.deallocate = crm_srt_kfmlp_free,
};

static struct litmus_lock* crm_srt_new_kfmlp(void* __user arg, int* ret_code)
{
	struct kfmlp_semaphore* sem;
	int num_resources = 0;
	int i;
	
	if(!access_ok(VERIFY_READ, arg, sizeof(num_resources)))
	{
		*ret_code = -EINVAL;
		return(NULL);
	}
	if(__copy_from_user(&num_resources, arg, sizeof(num_resources)))
	{
		*ret_code = -EINVAL;
		return(NULL);
	}
	if(num_resources < 1)
	{
		*ret_code = -EINVAL;
		return(NULL);		
	}
	
	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if(!sem)
	{
		*ret_code = -ENOMEM;
		return NULL;
	}
	
	sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL);
	if(!sem->queues)
	{
		kfree(sem);
		*ret_code = -ENOMEM;
		return NULL;		
	}
	
	sem->litmus_lock.ops = &crm_srt_kfmlp_lock_ops;
	spin_lock_init(&sem->lock);
	sem->num_resources = num_resources;
	
	for(i = 0; i < num_resources; ++i)
	{
		sem->queues[i].owner = NULL;
		sem->queues[i].hp_waiter = NULL;
		init_waitqueue_head(&sem->queues[i].wait);
		sem->queues[i].count = 0;
	}
	
	sem->shortest_queue = &sem->queues[0];
	
	*ret_code = 0;
	return &sem->litmus_lock;
}
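
/* A rough userspace sketch (hypothetical helper names; the exact liblitmus
 * API for passing the replica count varies by version): the lock-allocation
 * syscall hands 'arg' to crm_srt_new_kfmlp() above, which reads the number
 * of replicas (k) from it, and crm_srt_kfmlp_lock() returns the index of
 * the replica actually acquired:
 *
 *	int k = 2;
 *	int od = open_kfmlp_sem(fd, name, &k);  // hypothetical wrapper
 *	int replica = litmus_lock(od);          // blocks; returns queue idx
 *	... access replica ...
 *	litmus_unlock(od);
 */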


/* **** lock constructor **** */

static long crm_srt_allocate_lock(struct litmus_lock **lock, int type,
								 void* __user arg)
{
	int err = -ENXIO;
	
	/* C-RM-SRT currently only supports the KFMLP for global resources
	   WITHIN a given cluster.  DO NOT USE CROSS-CLUSTER! */
	switch (type) {
		case KFMLP_SEM:
			*lock = crm_srt_new_kfmlp(arg, &err);
			break;
	}
	
	return err;
}

#endif  // CONFIG_LITMUS_LOCKING






/* total number of clusters */
static int num_clusters;
/* we do not support clusters of different sizes */
static unsigned int cluster_size;

#ifdef VERBOSE_INIT
static void print_cluster_topology(cpumask_var_t mask, int cpu)
{
	int chk;
	char buf[255];

	chk = cpulist_scnprintf(buf, 254, mask);
	buf[chk] = '\0';
	printk(KERN_INFO "CPU = %d, shared cpu(s) = %s\n", cpu, buf);

}
#endif

static int clusters_allocated = 0;

static void cleanup_crm_srt(void)
{
	int i;

	if (clusters_allocated) {
		for (i = 0; i < num_clusters; i++) {
			kfree(crm_srt[i].cpus);
			kfree(crm_srt[i].heap_node);
			free_cpumask_var(crm_srt[i].cpu_map);
		}

		kfree(crm_srt);
	}
}

static long crm_srt_activate_plugin(void)
{
	int i, j, cpu, ccpu, cpu_count;
	cpu_entry_t *entry;

	cpumask_var_t mask;
	int chk = 0;

	/* de-allocate old clusters, if any */
	cleanup_crm_srt();

	printk(KERN_INFO "C-RM-SRT: Activate Plugin, cluster configuration = %d\n",
			cluster_config);

	/* need to get cluster_size first */
	if(!zalloc_cpumask_var(&mask, GFP_ATOMIC))
		return -ENOMEM;

	if (unlikely(cluster_config == GLOBAL_CLUSTER)) {
		cluster_size = num_online_cpus();
	} else {
		chk = get_shared_cpu_map(mask, 0, cluster_config);
		if (chk) {
			/* if chk != 0 then it is the max allowed index */
			printk(KERN_INFO "C-RM-SRT: Cluster configuration = %d "
			       "is not supported on this hardware.\n",
			       cluster_config);
			/* User should notice that the configuration failed, so
			 * let's bail out. */
			free_cpumask_var(mask);
			return -EINVAL;
		}

		cluster_size = cpumask_weight(mask);
	}

	if ((num_online_cpus() % cluster_size) != 0) {
		/* this can't be right, some cpus are left out */
		printk(KERN_ERR "C-RM-SRT: Trying to group %d cpus in %d!\n",
				num_online_cpus(), cluster_size);
		free_cpumask_var(mask);
		return -EINVAL;
	}

	num_clusters = num_online_cpus() / cluster_size;
	printk(KERN_INFO "C-RM-SRT: %d cluster(s) of size = %d\n",
			num_clusters, cluster_size);

	/* initialize clusters */
	crm_srt = kmalloc(num_clusters * sizeof(crm_srt_domain_t), GFP_ATOMIC);
	if (!crm_srt) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}
	for (i = 0; i < num_clusters; i++) {

		crm_srt[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t),
				GFP_ATOMIC);
		crm_srt[i].heap_node = kmalloc(
				cluster_size * sizeof(struct bheap_node),
				GFP_ATOMIC);
		bheap_init(&(crm_srt[i].cpu_heap));
		rm_srt_domain_init(&(crm_srt[i].domain), NULL, crm_srt_release_jobs);

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
		crm_srt[i].pending_tasklets.head = NULL;
		crm_srt[i].pending_tasklets.tail = &(crm_srt[i].pending_tasklets.head);
#endif		
		
		if(!zalloc_cpumask_var(&crm_srt[i].cpu_map, GFP_ATOMIC)) {
			free_cpumask_var(mask);
			return -ENOMEM;
		}
	}

	/* cycle through cluster and add cpus to them */
	for (i = 0; i < num_clusters; i++) {

		for_each_online_cpu(cpu) {
			/* check if the cpu is already in a cluster */
			for (j = 0; j < num_clusters; j++)
				if (cpumask_test_cpu(cpu, crm_srt[j].cpu_map))
					break;
			/* if it is in a cluster go to next cpu */
			if (j < num_clusters &&
					cpumask_test_cpu(cpu, crm_srt[j].cpu_map))
				continue;

			/* this cpu isn't in any cluster */
			/* get the shared cpus */
			if (unlikely(cluster_config == GLOBAL_CLUSTER))
				cpumask_copy(mask, cpu_online_mask);
			else
				get_shared_cpu_map(mask, cpu, cluster_config);

			cpumask_copy(crm_srt[i].cpu_map, mask);
#ifdef VERBOSE_INIT
			print_cluster_topology(mask, cpu);
#endif
			/* add cpus to current cluster and init cpu_entry_t */
			cpu_count = 0;
			for_each_cpu(ccpu, crm_srt[i].cpu_map) {

				entry = &per_cpu(crm_srt_cpu_entries, ccpu);
				crm_srt[i].cpus[cpu_count] = entry;
				atomic_set(&entry->will_schedule, 0);
				entry->cpu = ccpu;
				entry->cluster = &crm_srt[i];
				entry->hn = &(crm_srt[i].heap_node[cpu_count]);
				bheap_node_init(&entry->hn, entry);

				cpu_count++;

				entry->linked = NULL;
				entry->scheduled = NULL;
				update_cpu_position(entry);
			}
			/* done with this cluster */
			break;
		}
	}
	
#ifdef CONFIG_LITMUS_SOFTIRQD
	{
		/* distribute the daemons evenly across the clusters. */
		int* affinity = kmalloc(NR_LITMUS_SOFTIRQD * sizeof(int), GFP_ATOMIC);
		int num_daemons_per_cluster = NR_LITMUS_SOFTIRQD / num_clusters;
		int left_over = NR_LITMUS_SOFTIRQD % num_clusters;
		
		int daemon = 0;
		for(i = 0; i < num_clusters; ++i)
		{
			int num_on_this_cluster = num_daemons_per_cluster;
			if(left_over)
			{
				++num_on_this_cluster;
				--left_over;
			}
			
			for(j = 0; j < num_on_this_cluster; ++j)
			{
				// first CPU of this cluster
				affinity[daemon++] = i*cluster_size;
			}
		}
	
		spawn_klitirqd(affinity);
		
		kfree(affinity);
	}
#endif
	
#ifdef CONFIG_LITMUS_NVIDIA
	init_nvidia_info();
#endif	

	free_cpumask_var(mask);
	clusters_allocated = 1;
	return 0;
}

/*	Plugin object	*/
static struct sched_plugin crm_srt_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "C-RM-SRT",
	.finish_switch		= crm_srt_finish_switch,
	.tick			= crm_srt_tick,
	.task_new		= crm_srt_task_new,
	.complete_job		= complete_job,
	.task_exit		= crm_srt_task_exit,
	.schedule		= crm_srt_schedule,
	.task_wake_up		= crm_srt_task_wake_up,
	.task_block		= crm_srt_task_block,
	.admit_task		= crm_srt_admit_task,
	.activate_plugin	= crm_srt_activate_plugin,
#ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock		= crm_srt_allocate_lock,
	.set_prio_inh		= set_priority_inheritance,
	.clear_prio_inh		= clear_priority_inheritance,
#endif
#ifdef CONFIG_LITMUS_SOFTIRQD
	.set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
	.clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
#endif
#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	.enqueue_pai_tasklet = enqueue_pai_tasklet,
	.run_tasklets = run_tasklets,
#endif	
};

static struct proc_dir_entry *cluster_file = NULL, *crm_srt_dir = NULL;

static int __init init_crm_srt(void)
{
	int err, fs;

	err = register_sched_plugin(&crm_srt_plugin);
	if (!err) {
		fs = make_plugin_proc_dir(&crm_srt_plugin, &crm_srt_dir);
		if (!fs)
			cluster_file = create_cluster_file(crm_srt_dir, &cluster_config);
		else
			printk(KERN_ERR "Could not allocate C-RM-SRT procfs dir.\n");
	}
	return err;
}

static void clean_crm_srt(void)
{
	cleanup_crm_srt();
	if (cluster_file)
		remove_proc_entry("cluster", crm_srt_dir);
	if (crm_srt_dir)
		remove_plugin_proc_dir(&crm_srt_plugin);
}

module_init(init_crm_srt);
module_exit(clean_crm_srt);