/*
 * litmus/sched_cedf.c
 *
 * Implementation of the C-EDF scheduling algorithm.
 *
 * This implementation is based on G-EDF:
 * - CPUs are clustered around L2 or L3 caches.
 * - Cluster topology is automatically detected (this is arch-dependent
 *   and currently works only on x86 --- and only with modern
 *   CPUs that export cpuid4 information)
 * - The plugin _does not_ attempt to put tasks in the right cluster, i.e.,
 *   the programmer needs to be aware of the topology to place tasks
 *   in the desired cluster
 * - default clustering is around L2 cache (cache index = 2)
 *   supported clusters are: L1 (private cache: pedf), L2, L3, ALL (all
 *   online_cpus are placed in a single cluster).
 *
 *   For details on functions, take a look at sched_gsn_edf.c
 *
 * Currently, we do not support changes in the number of online cpus.
 * If the num_online_cpus() dynamically changes, the plugin is broken.
 *
 * This version uses the simple approach and serializes all scheduling
 * decisions by the use of a queue lock. This is probably not the
 * best way to do it, but it should suffice for now.
 */
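
/* Illustrative (hypothetical) example: on an 8-CPU machine with two sockets
 * that each share an L3 cache, cluster_config = L3 yields two clusters of
 * four CPUs each, while the default GLOBAL_CLUSTER places all eight CPUs in
 * a single cluster and thus behaves like GSN-EDF.
 */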

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/preempt.h>
#include <litmus/budget.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>

#include <litmus/clustered.h>

#include <litmus/bheap.h>
#include <litmus/binheap.h>

#ifdef CONFIG_LITMUS_LOCKING
#include <litmus/kfmlp_lock.h>
#endif

#ifdef CONFIG_LITMUS_NESTED_LOCKING
#include <litmus/rsm_lock.h>
#include <litmus/ikglp_lock.h>
#endif

#ifdef CONFIG_SCHED_CPU_AFFINITY
#include <litmus/affinity.h>
#endif

#ifdef CONFIG_REALTIME_AUX_TASKS
#include <litmus/aux_tasks.h>
#endif

/* to configure the cluster size */
#include <litmus/litmus_proc.h>

#ifdef CONFIG_LITMUS_SOFTIRQD
#include <litmus/litmus_softirq.h>
#endif

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
#include <linux/interrupt.h>
#include <litmus/trace.h>
#endif

#ifdef CONFIG_LITMUS_NVIDIA
#include <litmus/nvidia_info.h>
#endif

#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
#include <litmus/gpu_affinity.h>
#endif

/* Reference configuration variable. Determines which cache level is used to
 * group CPUs into clusters.  GLOBAL_CLUSTER, which is the default, means that
 * all CPUs form a single cluster (just like GSN-EDF).
 */
static enum cache_level cluster_config = GLOBAL_CLUSTER;

struct clusterdomain;

/* cpu_entry_t - maintain the linked and scheduled state
 *
 * A cpu also contains a pointer to the cedf_domain_t cluster
 * that owns it (struct clusterdomain*)
 */
typedef struct  {
	int 			cpu;
	struct clusterdomain*	cluster;	/* owning cluster */
	struct task_struct*	linked;		/* only RT tasks */
	struct task_struct*	scheduled;	/* only RT tasks */
	atomic_t		will_schedule;	/* prevent unneeded IPIs */
	struct binheap_node hn;
} cpu_entry_t;

/* one cpu_entry_t per CPU */
DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries);

#define set_will_schedule() \
	(atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 1))
#define clear_will_schedule() \
	(atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 0))
#define test_will_schedule(cpu) \
	(atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule))

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};
#endif

/*
 * In C-EDF there is a cedf domain _per_ cluster
 * The number of clusters is dynamically determined according to the
 * total CPU count and the cluster size.
 */
typedef struct clusterdomain {
	/* rt_domain for this cluster */
	rt_domain_t	domain;
	/* cpus in this cluster */
	cpu_entry_t*	*cpus;
	/* map of this cluster cpus */
	cpumask_var_t	cpu_map;
	/* the cpus queue themselves according to priority in here */
	struct binheap cpu_heap;
	/* lock for this cluster */
#define cluster_lock domain.ready_lock

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	struct tasklet_head pending_tasklets;
#endif

#ifdef CONFIG_LITMUS_DGL_SUPPORT
	raw_spinlock_t dgl_lock;
#endif
} cedf_domain_t;
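
/* Note: cluster_lock is an alias for domain.ready_lock (see the #define in
 * struct clusterdomain above), so raw_spin_lock(&cluster->cluster_lock)
 * serializes both per-cluster scheduling decisions and ready-queue updates
 * with a single lock.
 */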

/* a cedf_domain per cluster; allocation is done at init/activation time */
cedf_domain_t *cedf;

#define remote_cluster(cpu)	((cedf_domain_t *) per_cpu(cedf_cpu_entries, cpu).cluster)
#define task_cpu_cluster(task)	remote_cluster(get_partition(task))

/* total number of clusters */
static int num_clusters;
/* we do not support clusters of different sizes */
static unsigned int cluster_size;

static int clusters_allocated = 0;

#ifdef CONFIG_LITMUS_DGL_SUPPORT
static raw_spinlock_t* cedf_get_dgl_spinlock(struct task_struct *t)
{
	cedf_domain_t *cluster = task_cpu_cluster(t);
	return(&cluster->dgl_lock);
}
#endif


/* Uncomment WANT_ALL_SCHED_EVENTS if you want to see all scheduling
 * decisions in the TRACE() log; uncomment VERBOSE_INIT for verbose
 * information during the initialization of the plugin (e.g., topology)
#define WANT_ALL_SCHED_EVENTS
 */
#define VERBOSE_INIT

static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b)
{
	cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn);
	cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn);

	/* Note that a and b are inverted: we want the lowest-priority CPU at
	 * the top of the heap.
	 */
	return edf_higher_prio(b->linked, a->linked);
}
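
/* Example of the inverted ordering (illustrative only): if CPU 0 is linked to
 * a job with deadline 10 and CPU 1 to a job with deadline 20, cpu_lower_prio()
 * places CPU 1 nearer the top of the heap, so lowest_prio_cpu() returns CPU 1,
 * the preferred preemption target. An idle CPU (linked == NULL) sorts above
 * any CPU with a linked real-time job.
 */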

/* update_cpu_position - Move the cpu entry to the correct place to maintain
 *                       order in the cpu queue. Caller must hold cedf lock.
 */
static void update_cpu_position(cpu_entry_t *entry)
{
	cedf_domain_t *cluster = entry->cluster;

	if (likely(binheap_is_in_heap(&entry->hn))) {
		binheap_delete(&entry->hn, &cluster->cpu_heap);
	}

	binheap_add(&entry->hn, &cluster->cpu_heap, cpu_entry_t, hn);
}

/* caller must hold cedf lock */
static cpu_entry_t* lowest_prio_cpu(cedf_domain_t *cluster)
{
	return binheap_top_entry(&cluster->cpu_heap, cpu_entry_t, hn);
}


/* link_task_to_cpu - Update the link of a CPU.
 *                    Handles the case where the to-be-linked task is already
 *                    scheduled on a different CPU.
 */
static noinline void link_task_to_cpu(struct task_struct* linked,
				      cpu_entry_t *entry)
{
	cpu_entry_t *sched;
	struct task_struct* tmp;
	int on_cpu;

	BUG_ON(linked && !is_realtime(linked));

	/* Currently linked task is set to be unlinked. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
	}

	/* Link new task to CPU. */
	if (linked) {
		set_rt_flags(linked, RT_F_RUNNING);
		/* handle the case where the task is already scheduled somewhere! */
		on_cpu = linked->rt_param.scheduled_on;
		if (on_cpu != NO_CPU) {
			sched = &per_cpu(cedf_cpu_entries, on_cpu);
			/* this should only happen if not linked already */
			BUG_ON(sched->linked == linked);

			/* If we are already scheduled on the CPU to which we
			 * wanted to link, we don't need to do the swap --
			 * we just link ourselves to the CPU and depend on
			 * the caller to get things right.
			 */
			if (entry != sched) {
				TRACE_TASK(linked,
					   "already scheduled on %d, updating link.\n",
					   sched->cpu);
				tmp = sched->linked;
				linked->rt_param.linked_on = sched->cpu;
				sched->linked = linked;
				update_cpu_position(sched);
				linked = tmp;
			}
		}
		if (linked) /* might be NULL due to swap */
			linked->rt_param.linked_on = entry->cpu;
	}
	entry->linked = linked;
#ifdef WANT_ALL_SCHED_EVENTS
	if (linked)
		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
	else
		TRACE("NULL linked to %d.\n", entry->cpu);
#endif
	update_cpu_position(entry);
}

/* unlink - Make sure a task is not linked any longer to an entry
 *          where it was linked before. Must hold cluster_lock.
 */
static noinline void unlink(struct task_struct* t)
{
	cpu_entry_t *entry;

	if (t->rt_param.linked_on != NO_CPU) {
		/* unlink */
		entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on);
		t->rt_param.linked_on = NO_CPU;
		link_task_to_cpu(NULL, entry);
	} else if (is_queued(t)) {
		/* This is an interesting situation: t is scheduled,
		 * but was just recently unlinked.  It cannot be
		 * linked anywhere else (because then it would have
		 * been relinked to this CPU), thus it must be in some
		 * queue. We must remove it from the list in this
		 * case.
		 *
		 * In the C-EDF case it should be somewhere in the queue
		 * for its domain; therefore we can get the domain using
		 * task_cpu_cluster().
		 */
		remove(&(task_cpu_cluster(t))->domain, t);
	}
}


/* preempt - force a CPU to reschedule
 */
static void preempt(cpu_entry_t *entry)
{
	preempt_if_preemptable(entry->scheduled, entry->cpu);
}

/* requeue - Put an unlinked task into its C-EDF domain.
 *           Caller must hold cluster_lock.
 */
static noinline void requeue(struct task_struct* task)
{
	cedf_domain_t *cluster = task_cpu_cluster(task);
	BUG_ON(!task);
	/* sanity check before insertion */
	BUG_ON(is_queued(task));

	if (is_released(task, litmus_clock()))
#ifdef CONFIG_REALTIME_AUX_TASKS
		if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) {
			/* aux_task probably transitioned to real-time while it was blocked */
			TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid);
			unlink(task); /* really needed? */
		}
		else
#endif
			__add_ready(&cluster->domain, task);
	else {
		/* it has got to wait */
		add_release(&cluster->domain, task);
	}
}

#ifdef CONFIG_SCHED_CPU_AFFINITY
static cpu_entry_t* cedf_get_nearest_available_cpu(
				cedf_domain_t *cluster, cpu_entry_t *start)
{
	cpu_entry_t *affinity;

	get_nearest_available_cpu(affinity, start, cedf_cpu_entries,
#ifdef CONFIG_RELEASE_MASTER
		cluster->domain.release_master
#else
		NO_CPU
#endif
		);

	/* make sure CPU is in our cluster */
	if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
		return(affinity);
	else
		return(NULL);
}
#endif


/* check for any necessary preemptions */
static void check_for_preemptions(cedf_domain_t *cluster)
{
	struct task_struct *task;
	cpu_entry_t *last;

	for(last = lowest_prio_cpu(cluster);
	    edf_preemption_needed(&cluster->domain, last->linked);
	    last = lowest_prio_cpu(cluster)) {
		/* preemption necessary */
		task = __take_ready(&cluster->domain);
		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
		      task->pid, last->cpu);
#ifdef CONFIG_SCHED_CPU_AFFINITY
		{
			cpu_entry_t *affinity =
					cedf_get_nearest_available_cpu(cluster,
						&per_cpu(cedf_cpu_entries, task_cpu(task)));
			if(affinity)
				last = affinity;
			else if(requeue_preempted_job(last->linked))
				requeue(last->linked);
		}
#else
		if (requeue_preempted_job(last->linked))
			requeue(last->linked);
#endif
		link_task_to_cpu(task, last);
		preempt(last);
	}
}
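
/* Sketch of the loop above: each iteration pops the highest-priority ready
 * job and links it to the cluster's lowest-priority CPU (or, with
 * CONFIG_SCHED_CPU_AFFINITY, to a nearby available CPU within the cluster),
 * requeueing that CPU's previously linked job if it should still run and
 * triggering a reschedule on the chosen CPU. The loop terminates once no
 * ready job has higher priority than the lowest-priority linked job.
 */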

/* cedf_job_arrival: task is either resumed or released */
static noinline void cedf_job_arrival(struct task_struct* task)
{
	cedf_domain_t *cluster = task_cpu_cluster(task);
	BUG_ON(!task);

	requeue(task);
	check_for_preemptions(cluster);
}

static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
	unsigned long flags;

	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);

	__merge_ready(&cluster->domain, tasks);
	check_for_preemptions(cluster);

	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
}

/* caller holds cluster_lock */
static noinline void job_completion(struct task_struct *t, int forced)
{
	BUG_ON(!t);

	sched_trace_task_completion(t, forced);

#ifdef CONFIG_LITMUS_NVIDIA
	atomic_set(&tsk_rt(t)->nv_int_count, 0);
#endif

	TRACE_TASK(t, "job_completion().\n");

	/* set flags */
	set_rt_flags(t, RT_F_SLEEP);
	/* prepare for next period */
	prepare_for_next_period(t);
	if (is_released(t, litmus_clock()))
		sched_trace_task_release(t);
	/* unlink */
	unlink(t);
	/* requeue
	 * But don't requeue a blocking task. */
	if (is_running(t))
		cedf_job_arrival(t);
}

/* cedf_tick - this function is called for every local timer interrupt.
 *
 * It checks whether the currently running task has exhausted its budget and,
 * if so, signals the task and/or requests a preemption (the preemption is
 * delayed if the task is in a non-preemptive section).
 */
static void cedf_tick(struct task_struct* t)
{
	if (is_realtime(t) && budget_exhausted(t))
	{
		if (budget_signalled(t) && !sigbudget_sent(t)) {
			/* signal exhaustion */
			send_sigbudget(t);
		}

		if (budget_enforced(t)) {
			if (!is_np(t)) {
				/* np tasks will be preempted when they become
				 * preemptable again
				 */
				litmus_reschedule_local();
				set_will_schedule();
				TRACE("cedf_scheduler_tick: "
					  "%d is preemptable "
					  " => FORCE_RESCHED\n", t->pid);
			} else if (is_user_np(t)) {
				TRACE("cedf_scheduler_tick: "
					  "%d is non-preemptable, "
					  "preemption delayed.\n", t->pid);
				request_exit_np(t);
			}
		}
	}
}













#ifdef CONFIG_LITMUS_PAI_SOFTIRQD


static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
{
	if (!atomic_read(&tasklet->count)) {
		if(tasklet->owner) {
			sched_trace_tasklet_begin(tasklet->owner);
		}

		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
		{
			BUG();
		}
		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
			  __FUNCTION__,
			  (tasklet->owner) ? tasklet->owner->pid : -1,
			  (tasklet->owner) ? 0 : 1);
		tasklet->func(tasklet->data);
		tasklet_unlock(tasklet);

		if(tasklet->owner) {
			sched_trace_tasklet_end(tasklet->owner, flushed);
		}
	}
	else {
		BUG();
	}
}


static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task)
{
	int work_to_do = 1;
	struct tasklet_struct *tasklet = NULL;
	unsigned long flags;

	while(work_to_do) {

		TS_NV_SCHED_BOTISR_START;

		raw_spin_lock_irqsave(&cluster->cluster_lock, flags);

		if(cluster->pending_tasklets.head != NULL) {
			// remove tasklet at head.
			struct tasklet_struct *prev = NULL;
			tasklet = cluster->pending_tasklets.head;

			// find a tasklet with prio to execute; skip ones where
			// sched_task has a higher priority.
			// We use the '!edf' test instead of swapping function arguments since
			// both sched_task and owner could be NULL.  In this case, we want to
			// still execute the tasklet.
			while(tasklet && !edf_higher_prio(tasklet->owner, sched_task)) {
				prev = tasklet;
				tasklet = tasklet->next;
			}

			if(tasklet) {  // found something to execute
				// remove the tasklet from the queue
				if(prev) {
					prev->next = tasklet->next;
					if(prev->next == NULL) {
						TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
						/* tail must point at the last element's next pointer */
						cluster->pending_tasklets.tail = &(prev->next);
					}
				}
				else {
					cluster->pending_tasklets.head = tasklet->next;
					if(tasklet->next == NULL) {
						TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
						cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
					}
				}
			}
			else {
				TRACE("%s: No tasklets with eligible priority.\n", __FUNCTION__);
			}
		}
		else {
			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
		}

		raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);

		if(tasklet) {
			__do_lit_tasklet(tasklet, 0ul);
			tasklet = NULL;
		}
		else {
			work_to_do = 0;
		}

		TS_NV_SCHED_BOTISR_END;
	}
}

static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
{
	struct tasklet_struct* step;

	tasklet->next = NULL;  // make sure there are no old values floating around

	step = cluster->pending_tasklets.head;
	if(step == NULL) {
		TRACE("%s: tasklet queue empty.  inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
		// insert at tail.
		*(cluster->pending_tasklets.tail) = tasklet;
		cluster->pending_tasklets.tail = &(tasklet->next);
	}
	else if((*(cluster->pending_tasklets.tail) != NULL) &&
			edf_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) {
		// insert at tail.
		TRACE("%s: tasklet belongs at end.  inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);

		*(cluster->pending_tasklets.tail) = tasklet;
		cluster->pending_tasklets.tail = &(tasklet->next);
	}
	else {

		// insert the tasklet somewhere in the middle.

		TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);

		while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
			step = step->next;
		}

		// insert tasklet right before step->next.

		TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__,
			  tasklet->owner->pid,
			  (step->owner) ?
			  step->owner->pid :
			  -1,
			  (step->next) ?
			  ((step->next->owner) ?
			   step->next->owner->pid :
			   -1) :
			  -1);

		tasklet->next = step->next;
		step->next = tasklet;

		// patch up the head if needed.
		if(cluster->pending_tasklets.head == step)
		{
			TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
			cluster->pending_tasklets.head = tasklet;
		}
	}
}
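
/* Invariant maintained above (a sketch): the pending_tasklets list is kept
 * sorted by the EDF priority of each tasklet's owner, highest priority at the
 * head, so do_lit_tasklets() can scan from the head for the first tasklet
 * whose owner out-prioritizes the currently scheduled task.
 */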

static void cedf_run_tasklets(struct task_struct* sched_task)
{
	cedf_domain_t* cluster;

	preempt_disable();

	cluster = (is_realtime(sched_task)) ?
		task_cpu_cluster(sched_task) :
		remote_cluster(smp_processor_id());

	if(cluster && cluster->pending_tasklets.head != NULL) {
		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
		do_lit_tasklets(cluster, sched_task);
	}

	preempt_enable_no_resched();
}



static int cedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
{
#if 0
	cedf_domain_t *cluster = NULL;
	cpu_entry_t *targetCPU = NULL;
	int thisCPU;
	int runLocal = 0;
	int runNow = 0;
	unsigned long flags;

    if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
    {
        TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
		return 0;
    }

	cluster = task_cpu_cluster(tasklet->owner);

	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);

	thisCPU = smp_processor_id();

#ifdef CONFIG_SCHED_CPU_AFFINITY
	{
		cpu_entry_t* affinity = NULL;

		// use this CPU if it is in our cluster and isn't running any RT work.
		if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(cedf_cpu_entries).linked == NULL)) {
			affinity = &(__get_cpu_var(cedf_cpu_entries));
		}
		else {
			// this CPU is busy or shouldn't run tasklet in this cluster.
			// look for available near by CPUs.
			// NOTE: Affinity towards owner and not this CPU.  Is this right?
			affinity =
				cedf_get_nearest_available_cpu(cluster,
								&per_cpu(cedf_cpu_entries, task_cpu(tasklet->owner)));
		}

		targetCPU = affinity;
	}
#endif

	if (targetCPU == NULL) {
		targetCPU = lowest_prio_cpu(cluster);
	}

	if (edf_higher_prio(tasklet->owner, targetCPU->linked)) {
		if (thisCPU == targetCPU->cpu) {
			TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
			runLocal = 1;
			runNow = 1;
		}
		else {
			TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
			runLocal = 0;
			runNow = 1;
		}
	}
	else {
		runLocal = 0;
		runNow = 0;
	}

	if(!runLocal) {
		// enqueue the tasklet
		__add_pai_tasklet(tasklet, cluster);
	}

	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);


	if (runLocal /*&& runNow */) {  // runNow == 1 is implied
		TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
		__do_lit_tasklet(tasklet, 0ul);
	}
	else if (runNow /*&& !runLocal */) {  // runLocal == 0 is implied
		TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
		preempt(targetCPU);  // need to be protected by cluster_lock?
	}
	else {
		TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
	}
#else
	TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
	__do_lit_tasklet(tasklet, 0ul);
#endif
	return(1); // success
}

static void cedf_change_prio_pai_tasklet(struct task_struct *old_prio,
										 struct task_struct *new_prio)
{
	struct tasklet_struct* step;
	unsigned long flags;
	cedf_domain_t *cluster;
	struct task_struct *probe;

	// identify the cluster by the assignment of these tasks.  one should
	// be non-NULL.
	probe = (old_prio) ? old_prio : new_prio;

	if(probe) {
		cluster = task_cpu_cluster(probe);

		if(cluster->pending_tasklets.head != NULL) {
			raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
			for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) {
				if(step->owner == old_prio) {
					TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid);
					step->owner = new_prio;
				}
			}
			raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
		}
	}
	else {
		TRACE("%s: Both priorities were NULL\n", __FUNCTION__);
	}
}

#endif  // PAI

/* Getting schedule() right is a bit tricky. schedule() may not make any
 * assumptions on the state of the current task since it may be called for a
 * number of reasons. The reasons include a scheduler_tick() determined that it
 * was necessary, because sys_exit_np() was called, because some Linux
 * subsystem determined so, or even (in the worst case) because there is a bug
 * hidden somewhere. Thus, we must take extreme care to determine what the
 * current state is.
 *
 * The CPU could currently be scheduling a task (or not), be linked (or not).
 *
 * The following assertions for the scheduled task could hold:
 *
 *      - !is_running(scheduled)        // the job blocks
 *	- scheduled->timeslice == 0	// the job completed (forcefully)
 *	- get_rt_flag() == RT_F_SLEEP	// the job completed (by syscall)
 * 	- linked != scheduled		// we need to reschedule (for any reason)
 * 	- is_np(scheduled)		// rescheduling must be delayed,
 *					   sys_exit_np must be requested
 *
 * Any of these can occur together.
 */
static struct task_struct* cedf_schedule(struct task_struct * prev)
{
	cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries);
	cedf_domain_t *cluster = entry->cluster;
	int out_of_time, signal_budget, sleep, preempt, np, exists, blocks;
	struct task_struct* next = NULL;

#ifdef CONFIG_RELEASE_MASTER
	/* Bail out early if we are the release master.
	 * The release master never schedules any real-time tasks.
	 */
	if (unlikely(cluster->domain.release_master == entry->cpu)) {
		sched_state_task_picked();
		return NULL;
	}
#endif

	raw_spin_lock(&cluster->cluster_lock);
	clear_will_schedule();

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev);
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	/* (0) Determine state */
	exists      = entry->scheduled != NULL;
	blocks      = exists && !is_running(entry->scheduled);
	out_of_time = exists &&
				  budget_enforced(entry->scheduled) &&
				  budget_exhausted(entry->scheduled);
	signal_budget = exists &&
					budget_signalled(entry->scheduled) &&
					budget_exhausted(entry->scheduled) &&
					!sigbudget_sent(entry->scheduled);
	np 	    = exists && is_np(entry->scheduled);
	sleep	    = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
	preempt     = entry->scheduled != entry->linked;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "invoked cedf_schedule.\n");
#endif

	if (exists)
		TRACE_TASK(prev,
			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
			   "state:%d sig:%d\n",
			   blocks, out_of_time, np, sleep, preempt,
			   prev->state, signal_pending(prev));
	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
			   entry->linked->comm, entry->linked->pid);

	/* Send the signal that the budget has been exhausted */
	if (signal_budget)
		send_sigbudget(entry->scheduled);

	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		unlink(entry->scheduled);

#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
	if(exists && is_realtime(entry->scheduled) && tsk_rt(entry->scheduled)->held_gpus) {
		if(!blocks || tsk_rt(entry->scheduled)->suspend_gpu_tracker_on_block) {
			// don't track preemptions or locking protocol suspensions.
			TRACE_TASK(entry->scheduled, "stopping GPU tracker.\n");
			stop_gpu_tracker(entry->scheduled);
		}
		else if(blocks && !tsk_rt(entry->scheduled)->suspend_gpu_tracker_on_block) {
			TRACE_TASK(entry->scheduled, "GPU tracker remains on during suspension.\n");
		}
	}
#endif

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * We need to make sure to update the link structure anyway in case
	 * that we are still linked. Multiple calls to request_exit_np() don't
	 * hurt.
	 */
	if (np && (out_of_time || preempt || sleep)) {
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. Don't do a job completion if we block (can't have timers running
	 * for blocked jobs).
	 */
	if (!np && (out_of_time || sleep) && !blocks)
		job_completion(entry->scheduled, !sleep);

	/* Link pending task if we became unlinked.
	 */
	if (!entry->linked)
		link_task_to_cpu(__take_ready(&cluster->domain), entry);

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if ((!np || blocks) &&
	    entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	sched_state_task_picked();
	raw_spin_unlock(&cluster->cluster_lock);

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE("cluster_lock released, next=0x%p\n", next);

	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());
#endif

	return next;
}


/* _finish_switch - we just finished the switch away from prev
 */
static void cedf_finish_switch(struct task_struct *prev)
{
	cpu_entry_t* 	entry = &__get_cpu_var(cedf_cpu_entries);

	entry->scheduled = is_realtime(current) ? current : NULL;
#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "switched away from\n");
#endif
}


/*	Prepare a task for running in RT mode
 */
static void cedf_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long 		flags;
	cpu_entry_t* 		entry;
	cedf_domain_t*		cluster;

	TRACE("c-edf: task new %d\n", t->pid);

	/* the cluster doesn't change even if t is running */
	cluster = task_cpu_cluster(t);

	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);

	/* setup job params */
	release_at(t, litmus_clock());

	if (running) {
		entry = &per_cpu(cedf_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);

#ifdef CONFIG_RELEASE_MASTER
		if (entry->cpu != cluster->domain.release_master) {
#endif
			entry->scheduled = t;
			tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			/* do not schedule on release master */
			preempt(entry); /* force resched */
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
#endif
	} else {
		t->rt_param.scheduled_on = NO_CPU;
	}
	t->rt_param.linked_on          = NO_CPU;

	cedf_job_arrival(t);
	raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags);
}

static void cedf_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	//lt_t now;
	cedf_domain_t *cluster;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

	cluster = task_cpu_cluster(task);

	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);

#if 0 // sporadic task model
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
		set_rt_flags(task, RT_F_RUNNING);
	} else {
		now = litmus_clock();
		if (is_tardy(task, now)) {
			/* new sporadic release */
			release_at(task, now);
			sched_trace_task_release(task);
		}
		else {
			if (task->rt.time_slice) {
				/* came back in time before deadline
				*/
				set_rt_flags(task, RT_F_RUNNING);
			}
		}
	}
#else
	set_rt_flags(task, RT_F_RUNNING);  // periodic model
#endif

#ifdef CONFIG_REALTIME_AUX_TASKS
	if (tsk_rt(task)->has_aux_tasks && !tsk_rt(task)->hide_from_aux_tasks) {
		TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid);
		disable_aux_task_owner(task);
	}
#endif

	cedf_job_arrival(task);
	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
}

static void cedf_task_block(struct task_struct *t)
{
	unsigned long flags;
	cedf_domain_t *cluster;

	TRACE_TASK(t, "block at %llu\n", litmus_clock());

	cluster = task_cpu_cluster(t);

	/* unlink if necessary */
	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);

	unlink(t);

#ifdef CONFIG_REALTIME_AUX_TASKS
	if (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks) {

		TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid);
		enable_aux_task_owner(t);
	}
#endif

	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);

	BUG_ON(!is_realtime(t));
}


static void cedf_task_exit(struct task_struct * t)
{
	unsigned long flags;
	cedf_domain_t *cluster = task_cpu_cluster(t);

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	cedf_change_prio_pai_tasklet(t, NULL);
#endif

	/* unlink if necessary */
	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);

#ifdef CONFIG_REALTIME_AUX_TASKS
	/* make sure we clean up on our way out */
	if (unlikely(tsk_rt(t)->is_aux_task)) {
		exit_aux_task(t);
	}
	else if(tsk_rt(t)->has_aux_tasks) {
		disable_aux_task_owner(t);
	}
#endif

	unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		cpu_entry_t *cpu;
		cpu = &per_cpu(cedf_cpu_entries, tsk_rt(t)->scheduled_on);
		cpu->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);

	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "RIP\n");
}

static long cedf_admit_task(struct task_struct* tsk)
{
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks,
						edf_max_heap_base_priority_order);
#endif

	return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
}



#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/fdso.h>



/* called with IRQs off */
static int __increase_priority_inheritance(struct task_struct* t,
										    struct task_struct* prio_inh)
{
	int success = 1;
	int linked_on;
	int check_preempt = 0;
	cedf_domain_t* cluster;

	if (prio_inh && prio_inh == effective_priority(t)) {
		/* relationship already established. */
		TRACE_TASK(t, "already has effective priority of %s/%d\n",
				   prio_inh->comm, prio_inh->pid);
		goto out;
	}

	cluster = task_cpu_cluster(t);

#ifdef CONFIG_LITMUS_NESTED_LOCKING
	/* this sanity check allows for weaker locking in protocols */
	/* TODO (klitirqd): Skip this check if 't' is a proxy thread (???) */
	if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) {
#endif
		TRACE_TASK(t, "inherits priority from %s/%d\n",
				   prio_inh->comm, prio_inh->pid);
		tsk_rt(t)->inh_task = prio_inh;

		linked_on  = tsk_rt(t)->linked_on;

		/* If it is scheduled, then we need to reorder the CPU heap. */
		if (linked_on != NO_CPU) {
			TRACE_TASK(t, "%s: linked  on %d\n",
					   __FUNCTION__, linked_on);
			/* Holder is scheduled; need to re-order CPUs.
			 * We can't use heap_decrease() here since
			 * the cpu_heap is ordered in reverse direction, so
			 * it is actually an increase. */
			binheap_delete(&per_cpu(cedf_cpu_entries, linked_on).hn,
						   &cluster->cpu_heap);
			binheap_add(&per_cpu(cedf_cpu_entries, linked_on).hn,
						&cluster->cpu_heap, cpu_entry_t, hn);

		} else {
			/* holder may be queued: first stop queue changes */
			raw_spin_lock(&cluster->domain.release_lock);
			if (is_queued(t)) {
				TRACE_TASK(t, "%s: is queued\n",
						   __FUNCTION__);
				/* We need to update the position of holder in some
				 * heap. Note that this could be a release heap if
				 * budget enforcement is used and this job overran. */
				check_preempt =
					!bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node);
			} else {
				/* Nothing to do: if it is not queued and not linked
				 * then it is either sleeping or currently being moved
				 * by other code (e.g., a timer interrupt handler) that
				 * will use the correct priority when enqueuing the
				 * task. */
				TRACE_TASK(t, "%s: is NOT queued => Done.\n",
						   __FUNCTION__);
			}
			raw_spin_unlock(&cluster->domain.release_lock);

			/* If holder was enqueued in a release heap, then the following
			 * preemption check is pointless, but we can't easily detect
			 * that case. If you want to fix this, then consider that
			 * simply adding a state flag requires O(n) time to update when
			 * releasing n tasks, which conflicts with the goal to have
			 * O(log n) merges. */
			if (check_preempt) {
				/* heap_decrease() hit the top level of the heap: make
				 * sure preemption checks get the right task, not the
				 * potentially stale cache. */
				bheap_uncache_min(edf_ready_order,
								  &cluster->domain.ready_queue);
				check_for_preemptions(cluster);
			}

#ifdef CONFIG_REALTIME_AUX_TASKS
			/* propagate to aux tasks */
			if (tsk_rt(t)->has_aux_tasks) {
				aux_task_owner_increase_priority(t);
			}
#endif
		}
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	}
	else {
		TRACE_TASK(t, "Spurious invalid priority increase. "
				   "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n"
				   "Occurrence is likely okay: probably due to (hopefully safe) concurrent priority updates.\n",
				   t->comm, t->pid,
				   effective_priority(t)->comm, effective_priority(t)->pid,
				   (prio_inh) ? prio_inh->comm : "nil",
				   (prio_inh) ? prio_inh->pid : -1);
		WARN_ON(!prio_inh);
		success = 0;
	}
#endif

out:
	return success;
}

/* called with IRQs off */
static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
{
	cedf_domain_t* cluster = task_cpu_cluster(t);

	raw_spin_lock(&cluster->cluster_lock);

	__increase_priority_inheritance(t, prio_inh);

#ifdef CONFIG_LITMUS_SOFTIRQD
	if(tsk_rt(t)->cur_klitirqd != NULL)
	{
		TRACE_TASK(t, "%s/%d inherits a new priority!\n",
				   tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);

		__increase_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
	}
#endif

	raw_spin_unlock(&cluster->cluster_lock);

#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
	if(tsk_rt(t)->held_gpus) {
		int i;
		for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
			i < NV_DEVICE_NUM;
			i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) {
			pai_check_priority_increase(t, i);
		}
	}
#endif
}

/* called with IRQs off */
static int __decrease_priority_inheritance(struct task_struct* t,
											struct task_struct* prio_inh)
{
	int success = 1;

	if (prio_inh == tsk_rt(t)->inh_task) {
		/* relationship already established. */
		TRACE_TASK(t, "already inherits priority from %s/%d\n",
				   (prio_inh) ? prio_inh->comm : "(nil)",
				   (prio_inh) ? prio_inh->pid : 0);
		goto out;
	}

#ifdef CONFIG_LITMUS_NESTED_LOCKING
	if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) {
#endif
		/* A job only stops inheriting a priority when it releases a
		 * resource. Thus we can make the following assumption.*/
		if(prio_inh)
			TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n",
					   prio_inh->comm, prio_inh->pid);
		else
			TRACE_TASK(t, "base priority restored.\n");

		tsk_rt(t)->inh_task = prio_inh;

		if(tsk_rt(t)->scheduled_on != NO_CPU) {
			TRACE_TASK(t, "is scheduled.\n");

			/* Check if rescheduling is necessary. We can't use heap_decrease()
			 * since the priority was effectively lowered. */
			unlink(t);
			cedf_job_arrival(t);
		}
		else {
			cedf_domain_t* cluster = task_cpu_cluster(t);
			/* task is queued */
			raw_spin_lock(&cluster->domain.release_lock);
			if (is_queued(t)) {
				TRACE_TASK(t, "is queued.\n");

				/* decrease in priority, so we have to re-add to binomial heap */
				unlink(t);
				cedf_job_arrival(t);
			}
			else {
				TRACE_TASK(t, "is not in scheduler. Probably on wait queue somewhere.\n");
			}
			raw_spin_unlock(&cluster->domain.release_lock);
		}

#ifdef CONFIG_REALTIME_AUX_TASKS
		/* propagate to aux tasks */
		if (tsk_rt(t)->has_aux_tasks) {
			aux_task_owner_decrease_priority(t);
		}
#endif

#ifdef CONFIG_LITMUS_NESTED_LOCKING
	}
	else {
		TRACE_TASK(t, "Spurious invalid priority decrease. "
				   "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n"
				   "Occurrence is likely okay: probably due to (hopefully safe) concurrent priority updates.\n",
				   t->comm, t->pid,
				   effective_priority(t)->comm, effective_priority(t)->pid,
				   (prio_inh) ? prio_inh->comm : "nil",
				   (prio_inh) ? prio_inh->pid : -1);
		success = 0;
	}
#endif

out:
	return success;
}

static void decrease_priority_inheritance(struct task_struct* t,
										struct task_struct* prio_inh)
{
	cedf_domain_t* cluster = task_cpu_cluster(t);

	raw_spin_lock(&cluster->cluster_lock);
	__decrease_priority_inheritance(t, prio_inh);

#ifdef CONFIG_LITMUS_SOFTIRQD
	if(tsk_rt(t)->cur_klitirqd != NULL)
	{
		TRACE_TASK(t, "%s/%d decreases in priority!\n",
				   tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);

		__decrease_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
	}
#endif

	raw_spin_unlock(&cluster->cluster_lock);

#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
	if(tsk_rt(t)->held_gpus) {
		int i;
		for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
			i < NV_DEVICE_NUM;
			i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) {
			pai_check_priority_decrease(t, i);
		}
	}
#endif
}





#ifdef CONFIG_LITMUS_SOFTIRQD
/* called with IRQs off */
static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd,
											  struct task_struct* old_owner,
											  struct task_struct* new_owner)
{
	cedf_domain_t* cluster = task_cpu_cluster(klitirqd);

	BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));

	raw_spin_lock(&cluster->cluster_lock);

	if(old_owner != new_owner)
	{
		if(old_owner)
		{
			// unreachable?
			tsk_rt(old_owner)->cur_klitirqd = NULL;
		}

		TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n",
				   new_owner->comm, new_owner->pid);

		tsk_rt(new_owner)->cur_klitirqd = klitirqd;
	}

	__decrease_priority_inheritance(klitirqd, NULL);  // kludge to clear out cur prio.

	__increase_priority_inheritance(klitirqd,
			(tsk_rt(new_owner)->inh_task == NULL) ?
				new_owner :
				tsk_rt(new_owner)->inh_task);

	raw_spin_unlock(&cluster->cluster_lock);
}


/* called with IRQs off */
static void decrease_priority_inheritance_klitirqd(struct task_struct* klitirqd,
												   struct task_struct* old_owner,
												   struct task_struct* new_owner)
{
	cedf_domain_t* cluster = task_cpu_cluster(klitirqd);

	BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));

	raw_spin_lock(&cluster->cluster_lock);

    TRACE_TASK(klitirqd, "priority restored\n");

	__decrease_priority_inheritance(klitirqd, new_owner);

	tsk_rt(old_owner)->cur_klitirqd = NULL;

	raw_spin_unlock(&cluster->cluster_lock);
}
#endif // CONFIG_LITMUS_SOFTIRQD







#ifdef CONFIG_LITMUS_NESTED_LOCKING

/* called with IRQs off */
/* preconditions:
 (1) The 'hp_blocked_tasks_lock' of task 't' is held.
 (2) The lock 'to_unlock' is held.
 */
static void nested_increase_priority_inheritance(struct task_struct* t,
												 struct task_struct* prio_inh,
												 raw_spinlock_t *to_unlock,
												 unsigned long irqflags)
{
	struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock;

	if(tsk_rt(t)->inh_task != prio_inh) { 		// shield redundent calls.
		increase_priority_inheritance(t, prio_inh);  // increase our prio.
	}

	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);  // unlock the t's heap.


	if(blocked_lock) {
		if(blocked_lock->ops->propagate_increase_inheritance) {
			TRACE_TASK(t, "Inheritor is blocked (...perhaps).  Checking lock %d.\n",
					   blocked_lock->ident);

			// beware: recursion
			blocked_lock->ops->propagate_increase_inheritance(blocked_lock,
															  t, to_unlock,
															  irqflags);
		}
		else {
			TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n",
					   blocked_lock->ident);
			unlock_fine_irqrestore(to_unlock, irqflags);
		}
	}
	else {
		TRACE_TASK(t, "is not blocked.  No propagation.\n");
		unlock_fine_irqrestore(to_unlock, irqflags);
	}
}

/* called with IRQs off */
/* preconditions:
 (1) The 'hp_blocked_tasks_lock' of task 't' is held.
 (2) The lock 'to_unlock' is held.
 */
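/* postcondition:
 same locking protocol as the increase path above; 't->hp_blocked_tasks_lock'
 is dropped, and 'to_unlock' is released either directly or by the
 propagate_decrease_inheritance() callback.
 */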
static void nested_decrease_priority_inheritance(struct task_struct* t,
												 struct task_struct* prio_inh,
												 raw_spinlock_t *to_unlock,
												 unsigned long irqflags)
{
	struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock;
	decrease_priority_inheritance(t, prio_inh);

	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);  // release t's blocked-tasks heap lock.

	if(blocked_lock) {
		if(blocked_lock->ops->propagate_decrease_inheritance) {
			TRACE_TASK(t, "Inheritor is blocked (...perhaps).  Checking lock %d.\n",
					   blocked_lock->ident);

			// beware: recursion
			blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t,
															  to_unlock,
															  irqflags);
		}
		else {
			TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n",
					   blocked_lock->ident);
			unlock_fine_irqrestore(to_unlock, irqflags);
		}
	}
	else {
		TRACE_TASK(t, "is not blocked.  No propagation.\n");
		unlock_fine_irqrestore(to_unlock, irqflags);
	}
}


/* ******************** RSM MUTEX ********************** */
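/* The ops table below binds the plugin-independent RSM mutex implementation
 * to C-EDF.  The DGL entry points are only used when dynamic group lock
 * support is compiled in. */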

static struct litmus_lock_ops cedf_rsm_mutex_lock_ops = {
	.lock   = rsm_mutex_lock,
	.unlock = rsm_mutex_unlock,
	.close  = rsm_mutex_close,
	.deallocate = rsm_mutex_free,

	.propagate_increase_inheritance = rsm_mutex_propagate_increase_inheritance,
	.propagate_decrease_inheritance = rsm_mutex_propagate_decrease_inheritance,

#ifdef CONFIG_LITMUS_DGL_SUPPORT
	.dgl_lock = rsm_mutex_dgl_lock,
	.is_owner = rsm_mutex_is_owner,
	.enable_priority = rsm_mutex_enable_priority,
#endif
};

static struct litmus_lock* cedf_new_rsm_mutex(void)
{
	return rsm_mutex_new(&cedf_rsm_mutex_lock_ops);
}

/* ******************** IKGLP ********************** */

static struct litmus_lock_ops cedf_ikglp_lock_ops = {
	.lock   = ikglp_lock,
	.unlock = ikglp_unlock,
	.close  = ikglp_close,
	.deallocate = ikglp_free,

	// ikglp can only be an outer-most lock.
	.propagate_increase_inheritance = NULL,
	.propagate_decrease_inheritance = NULL,
};

static struct litmus_lock* cedf_new_ikglp(void* __user arg)
{
	/* size the IKGLP for the CPUs of one cluster; cluster_size is the
	 * per-cluster CPU count computed in cedf_activate_plugin() (clusters
	 * are assumed to be of uniform size). */
	return ikglp_new(cluster_size, &cedf_ikglp_lock_ops, arg);
}

#endif  /* CONFIG_LITMUS_NESTED_LOCKING */




/* ******************** KFMLP support ********************** */

static struct litmus_lock_ops cedf_kfmlp_lock_ops = {
	.lock   = kfmlp_lock,
	.unlock = kfmlp_unlock,
	.close  = kfmlp_close,
	.deallocate = kfmlp_free,

	// kfmlp can only be an outer-most lock.
	.propagate_increase_inheritance = NULL,
	.propagate_decrease_inheritance = NULL,
};


static struct litmus_lock* cedf_new_kfmlp(void* __user arg)
{
	return kfmlp_new(&cedf_kfmlp_lock_ops, arg);
}


/* **** lock constructor **** */
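/* cedf_allocate_lock() is the FDSO back-end for this plugin: when a task
 * creates a lock object of one of the types below, the generic locking layer
 * calls in here to construct the plugin-specific instance.  Unknown or
 * compiled-out types yield -ENXIO; a failed allocation yields -ENOMEM.
 *
 * Rough userspace flow (a sketch using liblitmus-style calls; the exact
 * helpers and configuration arguments differ per lock type):
 *
 *   int od = od_open(fd, KFMLP_SEM, 0);  // ends up in cedf_allocate_lock()
 *   litmus_lock(od);
 *   ... critical section ...
 *   litmus_unlock(od);
 *   od_close(od);
 */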

static long cedf_allocate_lock(struct litmus_lock **lock, int type,
								 void* __user args)
{
	int err;

	switch (type) {
#ifdef CONFIG_LITMUS_NESTED_LOCKING
		case RSM_MUTEX:
			*lock = cedf_new_rsm_mutex();
			break;

		case IKGLP_SEM:
			*lock = cedf_new_ikglp(args);
			break;
#endif
		case KFMLP_SEM:
			*lock = cedf_new_kfmlp(args);
			break;

		default:
			err = -ENXIO;
			goto UNSUPPORTED_LOCK;
	}

	if (*lock)
		err = 0;
	else
		err = -ENOMEM;

UNSUPPORTED_LOCK:
	return err;
}

#endif  // CONFIG_LITMUS_LOCKING


#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
static struct affinity_observer_ops cedf_kfmlp_affinity_ops = {
	.close = kfmlp_aff_obs_close,
	.deallocate = kfmlp_aff_obs_free,
};

#ifdef CONFIG_LITMUS_NESTED_LOCKING
static struct affinity_observer_ops cedf_ikglp_affinity_ops = {
	.close = ikglp_aff_obs_close,
	.deallocate = ikglp_aff_obs_free,
};
#endif
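/* Affinity observers attach GPU-affinity information to KFMLP/IKGLP queues
 * so that replica (GPU) assignment can favor the device a task last used;
 * the "simple" variants use a cheaper heuristic.  They are created through
 * the same FDSO-style path as locks, via cedf_allocate_affinity_observer()
 * below. */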

static long cedf_allocate_affinity_observer(struct affinity_observer **aff_obs,
											int type,
											void* __user args)
{
	int err;

	switch (type) {

		case KFMLP_SIMPLE_GPU_AFF_OBS:
			*aff_obs = kfmlp_simple_gpu_aff_obs_new(&cedf_kfmlp_affinity_ops, args);
			break;

		case KFMLP_GPU_AFF_OBS:
			*aff_obs = kfmlp_gpu_aff_obs_new(&cedf_kfmlp_affinity_ops, args);
			break;

#ifdef CONFIG_LITMUS_NESTED_LOCKING
		case IKGLP_SIMPLE_GPU_AFF_OBS:
			*aff_obs = ikglp_simple_gpu_aff_obs_new(&cedf_ikglp_affinity_ops, args);
			break;

		case IKGLP_GPU_AFF_OBS:
			*aff_obs = ikglp_gpu_aff_obs_new(&cedf_ikglp_affinity_ops, args);
			break;
#endif
		default:
			err = -ENXIO;
			goto UNSUPPORTED_AFF_OBS;
	}

	if (*aff_obs)
		err = 0;
	else
		err = -ENOMEM;

UNSUPPORTED_AFF_OBS:
	return err;
}
#endif




#ifdef VERBOSE_INIT
static void print_cluster_topology(cpumask_var_t mask, int cpu)
{
	int chk;
	char buf[255];

	chk = cpulist_scnprintf(buf, 254, mask);
	buf[chk] = '\0';
	printk(KERN_INFO "CPU = %d, shared cpu(s) = %s\n", cpu, buf);

}
#endif

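/* Free all per-cluster state.  Called both from cedf_activate_plugin() (to
 * discard a previous cluster layout) and on plugin/module teardown. */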
static void cleanup_cedf(void)
{
	int i;

#ifdef CONFIG_LITMUS_NVIDIA
	shutdown_nvidia_info();
#endif

	if (clusters_allocated) {
		for (i = 0; i < num_clusters; i++) {
			kfree(cedf[i].cpus);
			free_cpumask_var(cedf[i].cpu_map);
		}

		kfree(cedf);
	}
}

static long cedf_activate_plugin(void)
{
	int i, j, cpu, ccpu, cpu_count;
	cpu_entry_t *entry;

	cpumask_var_t mask;
	int chk = 0;

	/* de-allocate old clusters, if any */
	cleanup_cedf();

	printk(KERN_INFO "C-EDF: Activate Plugin, cluster configuration = %d\n",
			cluster_config);

	/* need to get cluster_size first */
	if(!zalloc_cpumask_var(&mask, GFP_ATOMIC))
		return -ENOMEM;

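	/* 'cluster_config' selects either one global cluster or the cache level
	 * whose sharers form a cluster; get_shared_cpu_map() reports the CPUs
	 * that share that cache level with CPU 0, which fixes the (uniform)
	 * cluster size. */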
	if (unlikely(cluster_config == GLOBAL_CLUSTER)) {
		cluster_size = num_online_cpus();
	} else {
		chk = get_shared_cpu_map(mask, 0, cluster_config);
		if (chk) {
			/* if chk != 0 then it is the max allowed index */
			printk(KERN_INFO "C-EDF: Cluster configuration = %d "
			       "is not supported on this hardware.\n",
			       cluster_config);
			/* The user should notice that the configuration failed,
			 * so bail out. */
			free_cpumask_var(mask);
			return -EINVAL;
		}

		cluster_size = cpumask_weight(mask);
	}

	if ((num_online_cpus() % cluster_size) != 0) {
		/* this can't be right, some cpus would be left out */
		printk(KERN_ERR "C-EDF: cannot group %d cpus into clusters of size %d!\n",
				num_online_cpus(), cluster_size);
		free_cpumask_var(mask);
		return -EINVAL;
	}

	num_clusters = num_online_cpus() / cluster_size;
	printk(KERN_INFO "C-EDF: %d cluster(s) of size = %d\n",
			num_clusters, cluster_size);

	/* initialize clusters */
	cedf = kmalloc(num_clusters * sizeof(cedf_domain_t), GFP_ATOMIC);
	if (!cedf) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}
	for (i = 0; i < num_clusters; i++) {

		cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t),
				GFP_ATOMIC);
		INIT_BINHEAP_HANDLE(&(cedf[i].cpu_heap), cpu_lower_prio);
		edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs);


#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
		cedf[i].pending_tasklets.head = NULL;
		cedf[i].pending_tasklets.tail = &(cedf[i].pending_tasklets.head);
#endif


		if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC))
			return -ENOMEM;
#ifdef CONFIG_RELEASE_MASTER
		cedf[i].domain.release_master = atomic_read(&release_master_cpu);
#endif
	}

	/* cycle through the clusters and add cpus to them */
	for (i = 0; i < num_clusters; i++) {

#ifdef CONFIG_LITMUS_DGL_SUPPORT
		raw_spin_lock_init(&cedf[i].dgl_lock);
#endif

		for_each_online_cpu(cpu) {
			/* check if the cpu is already in a cluster */
			for (j = 0; j < num_clusters; j++)
				if (cpumask_test_cpu(cpu, cedf[j].cpu_map))
					break;
			/* if it is in a cluster go to next cpu */
			if (j < num_clusters &&
					cpumask_test_cpu(cpu, cedf[j].cpu_map))
				continue;

			/* this cpu isn't in any cluster */
			/* get the shared cpus */
			if (unlikely(cluster_config == GLOBAL_CLUSTER))
				cpumask_copy(mask, cpu_online_mask);
			else
				get_shared_cpu_map(mask, cpu, cluster_config);

			cpumask_copy(cedf[i].cpu_map, mask);
#ifdef VERBOSE_INIT
			print_cluster_topology(mask, cpu);
#endif
			/* add cpus to current cluster and init cpu_entry_t */
			cpu_count = 0;
			for_each_cpu(ccpu, cedf[i].cpu_map) {

				entry = &per_cpu(cedf_cpu_entries, ccpu);
				cedf[i].cpus[cpu_count] = entry;
				atomic_set(&entry->will_schedule, 0);
				entry->cpu = ccpu;
				entry->cluster = &cedf[i];

				INIT_BINHEAP_NODE(&entry->hn);

				cpu_count++;

				entry->linked = NULL;
				entry->scheduled = NULL;
#ifdef CONFIG_RELEASE_MASTER
				/* only add CPUs that should schedule jobs */
				if (entry->cpu != entry->cluster->domain.release_master)
#endif
					update_cpu_position(entry);
			}
			/* done with this cluster */
			break;
		}
	}

#ifdef CONFIG_LITMUS_SOFTIRQD
	{
		/* distribute the daemons evenly across the clusters. */
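		/* e.g., NR_LITMUS_SOFTIRQD == 8 and num_clusters == 3 gives
		 * num_daemons_per_cluster == 2 and left_over == 2, so the clusters
		 * receive 3, 3, and 2 daemons respectively. */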
		int* affinity = kmalloc(NR_LITMUS_SOFTIRQD * sizeof(int), GFP_ATOMIC);
		int num_daemons_per_cluster = NR_LITMUS_SOFTIRQD / num_clusters;
		int left_over = NR_LITMUS_SOFTIRQD % num_clusters;
		int daemon = 0;

		if (!affinity) {
			free_cpumask_var(mask);
			return -ENOMEM;
		}
		for(i = 0; i < num_clusters; ++i)
		{
			int num_on_this_cluster = num_daemons_per_cluster;
			if(left_over)
			{
				++num_on_this_cluster;
				--left_over;
			}

			for(j = 0; j < num_on_this_cluster; ++j)
			{
				// id of the first CPU in cluster i (assumes contiguous per-cluster CPU numbering)
				affinity[daemon++] = i*cluster_size;
			}
		}

		spawn_klitirqd(affinity);

		kfree(affinity);
	}
#endif

#ifdef CONFIG_LITMUS_NVIDIA
	init_nvidia_info();
#endif

	free_cpumask_var(mask);
	clusters_allocated = 1;
	return 0;
}

/*	Plugin object	*/
static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "C-EDF",
	.finish_switch		= cedf_finish_switch,
	.tick			= cedf_tick,
	.task_new		= cedf_task_new,
	.complete_job		= complete_job,
	.task_exit		= cedf_task_exit,
	.schedule		= cedf_schedule,
	.task_wake_up		= cedf_task_wake_up,
	.task_block		= cedf_task_block,
	.admit_task		= cedf_admit_task,
	.activate_plugin	= cedf_activate_plugin,
	.compare		= edf_higher_prio,
#ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock		= cedf_allocate_lock,
	.increase_prio		= increase_priority_inheritance,
	.decrease_prio		= decrease_priority_inheritance,
	.__increase_prio	= __increase_priority_inheritance,
	.__decrease_prio	= __decrease_priority_inheritance,
#endif
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	.nested_increase_prio		= nested_increase_priority_inheritance,
	.nested_decrease_prio		= nested_decrease_priority_inheritance,
	.__compare					= __edf_higher_prio,
#endif
#ifdef CONFIG_LITMUS_DGL_SUPPORT
	.get_dgl_spinlock = cedf_get_dgl_spinlock,
#endif
#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	.allocate_aff_obs = cedf_allocate_affinity_observer,
#endif
#ifdef CONFIG_LITMUS_SOFTIRQD
	.increase_prio_klitirqd = increase_priority_inheritance_klitirqd,
	.decrease_prio_klitirqd = decrease_priority_inheritance_klitirqd,
#endif
#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	.enqueue_pai_tasklet = cedf_enqueue_pai_tasklet,
	.change_prio_pai_tasklet = cedf_change_prio_pai_tasklet,
	.run_tasklets = cedf_run_tasklets,
#endif
};

static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL;

static int __init init_cedf(void)
{
	int err, fs;

	err = register_sched_plugin(&cedf_plugin);
	if (!err) {
		fs = make_plugin_proc_dir(&cedf_plugin, &cedf_dir);
		if (!fs)
			cluster_file = create_cluster_file(cedf_dir, &cluster_config);
		else
			printk(KERN_ERR "Could not allocate C-EDF procfs dir.\n");
	}
	return err;
}

static void clean_cedf(void)
{
	cleanup_cedf();
	if (cluster_file)
		remove_proc_entry("cluster", cedf_dir);
	if (cedf_dir)
		remove_plugin_proc_dir(&cedf_plugin);
}

module_init(init_cedf);
module_exit(clean_cedf);