From f4aef3b7d845324eb79a226d87f232dcd8867f3b Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Sun, 15 Apr 2012 18:06:04 -0400 Subject: Update PAI to support multiGPUs (todo: klitirqd) --- include/litmus/edf_common.h | 6 - include/litmus/nvidia_info.h | 10 +- include/litmus/rt_param.h | 1 + include/litmus/sched_plugin.h | 22 ++++ kernel/softirq.c | 6 +- kernel/workqueue.c | 2 +- litmus/ikglp_lock.c | 64 ++++++----- litmus/kfmlp_lock.c | 14 ++- litmus/litmus.c | 4 + litmus/nvidia_info.c | 248 +++++++++++++++++++++++++----------------- litmus/rsm_lock.c | 35 +++--- litmus/sched_gsn_edf.c | 203 ++++++++++++++++++++++------------ litmus/sched_plugin.c | 23 ++++ 13 files changed, 412 insertions(+), 226 deletions(-) diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h index 818f4094b53c..63dff7efe8fb 100644 --- a/include/litmus/edf_common.h +++ b/include/litmus/edf_common.h @@ -27,12 +27,6 @@ int edf_min_heap_order(struct binheap_node *a, struct binheap_node *b); int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b); int edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b); -typedef enum -{ - BASE, - EFFECTIVE -} comparison_mode_t; - int __edf_higher_prio(struct task_struct* first, comparison_mode_t first_mode, struct task_struct* second, comparison_mode_t second_mode); diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h index 9e07a27fdee3..dd41c4c72b85 100644 --- a/include/litmus/nvidia_info.h +++ b/include/litmus/nvidia_info.h @@ -28,11 +28,17 @@ int init_nv_device_reg(void); int reg_nv_device(int reg_device_id, int register_device); -struct task_struct* get_nv_device_owner(u32 target_device_id); +struct task_struct* get_nv_max_device_owner(u32 target_device_id); +//int is_nv_device_owner(u32 target_device_id); void lock_nv_registry(u32 reg_device_id, unsigned long* flags); void unlock_nv_registry(u32 reg_device_id, unsigned long* flags); -void increment_nv_int_count(u32 device); +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD +void pai_check_priority_increase(struct task_struct *t, int reg_device_id); +void pai_check_priority_decrease(struct task_struct *t, int reg_device_id); +#endif + +//void increment_nv_int_count(u32 device); #endif diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index d0040bfd2d0c..b4eb8ee95687 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h @@ -154,6 +154,7 @@ struct rt_param { #ifdef CONFIG_LITMUS_NVIDIA /* number of top-half interrupts handled on behalf of current job */ atomic_t nv_int_count; + long unsigned int held_gpus; // bitmap of held GPUs. 
#endif #ifdef CONFIG_LITMUS_LOCKING diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h index e31008fcdd59..8e65555d9b7f 100644 --- a/include/litmus/sched_plugin.h +++ b/include/litmus/sched_plugin.h @@ -74,10 +74,28 @@ typedef void (*decrease_prio_klitirqd_t)(struct task_struct* klitirqd, typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet); +typedef void (*change_prio_pai_tasklet_t)(struct task_struct *old_prio, + struct task_struct *new_prio); typedef void (*run_tasklets_t)(struct task_struct* next); typedef raw_spinlock_t* (*get_dgl_spinlock_t) (struct task_struct *t); + +typedef int (*higher_prio_t)(struct task_struct* a, struct task_struct* b); + +#ifdef CONFIG_LITMUS_NESTED_LOCKING + +typedef enum +{ + BASE, + EFFECTIVE +} comparison_mode_t; + +typedef int (*__higher_prio_t)(struct task_struct* a, comparison_mode_t a_mod, + struct task_struct* b, comparison_mode_t b_mod); +#endif + + /********************* sys call backends ********************/ /* This function causes the caller to sleep until the next release */ typedef long (*complete_job_t) (void); @@ -112,6 +130,8 @@ struct sched_plugin { task_block_t task_block; task_exit_t task_exit; + higher_prio_t compare; + #ifdef CONFIG_LITMUS_LOCKING /* locking protocols */ allocate_lock_t allocate_lock; @@ -121,6 +141,7 @@ struct sched_plugin { #ifdef CONFIG_LITMUS_NESTED_LOCKING nested_increase_prio_t nested_increase_prio; nested_decrease_prio_t nested_decrease_prio; + __higher_prio_t __compare; #endif #ifdef CONFIG_LITMUS_DGL_SUPPORT get_dgl_spinlock_t get_dgl_spinlock; @@ -132,6 +153,7 @@ struct sched_plugin { #endif #ifdef CONFIG_LITMUS_PAI_SOFTIRQD enqueue_pai_tasklet_t enqueue_pai_tasklet; + change_prio_pai_tasklet_t change_prio_pai_tasklet; run_tasklets_t run_tasklets; #endif } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); diff --git a/kernel/softirq.c b/kernel/softirq.c index 7c562558a863..1c42e08fdfaa 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -427,7 +427,7 @@ void __tasklet_schedule(struct tasklet_struct *t) lock_nv_registry(nvidia_device, &flags); - device_owner = get_nv_device_owner(nvidia_device); + device_owner = get_nv_max_device_owner(nvidia_device); if(device_owner==NULL) { @@ -497,7 +497,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) lock_nv_registry(nvidia_device, &flags); - device_owner = get_nv_device_owner(nvidia_device); + device_owner = get_nv_max_device_owner(nvidia_device); if(device_owner==NULL) { @@ -564,7 +564,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) lock_nv_registry(nvidia_device, &flags); - device_owner = get_nv_device_owner(nvidia_device); + device_owner = get_nv_max_device_owner(nvidia_device); if(device_owner==NULL) { diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2ceb7b43a045..6b59d59ce3cf 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2704,7 +2704,7 @@ int schedule_work(struct work_struct *work) lock_nv_registry(nvidiaDevice, &flags); - device_owner = get_nv_device_owner(nvidiaDevice); + device_owner = get_nv_max_device_owner(nvidiaDevice); //2) If there is an owner, set work->owner to the owner's task struct. 
if(device_owner==NULL) diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c index 0ae9994111fb..a41a9d9a3627 100644 --- a/litmus/ikglp_lock.c +++ b/litmus/ikglp_lock.c @@ -5,9 +5,9 @@ #include #include -#include +//#include -int ikglp_edf_max_heap_base_priority_order(struct binheap_node *a, +int ikglp_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) { ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); @@ -16,29 +16,32 @@ int ikglp_edf_max_heap_base_priority_order(struct binheap_node *a, BUG_ON(!d_a); BUG_ON(!d_b); - return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); + //return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); + return litmus->__compare(d_a->task, BASE, d_b->task, BASE); } -int ikglp_edf_min_heap_base_priority_order(struct binheap_node *a, +int ikglp_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) { ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node); - return __edf_higher_prio(d_b->task, BASE, d_a->task, BASE); + //return __edf_higher_prio(d_b->task, BASE, d_a->task, BASE); + return litmus->__compare(d_b->task, BASE, d_a->task, BASE); } -int ikglp_donor_edf_max_heap_base_priority_order(struct binheap_node *a, +int ikglp_donor_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) { ikglp_wait_state_t *d_a = binheap_entry(a, ikglp_wait_state_t, node); ikglp_wait_state_t *d_b = binheap_entry(b, ikglp_wait_state_t, node); - return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); + //return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); + return litmus->__compare(d_a->task, BASE, d_b->task, BASE); } -int ikglp_edf_min_heap_donee_order(struct binheap_node *a, +int ikglp_min_heap_donee_order(struct binheap_node *a, struct binheap_node *b) { struct task_struct *prio_a, *prio_b; @@ -65,7 +68,8 @@ int ikglp_edf_min_heap_donee_order(struct binheap_node *a, } // note reversed order - return __edf_higher_prio(prio_b, BASE, prio_a, BASE); + //return __edf_higher_prio(prio_b, BASE, prio_a, BASE); + return litmus->__compare(prio_b, BASE, prio_a, BASE); } @@ -99,7 +103,8 @@ static struct task_struct* ikglp_find_hp_waiter(struct fifo_queue *kqueue, wait_queue_t, task_list)->private; /* Compare task prios, find high prio task. */ - if (queued != skip && edf_higher_prio(queued, found)) + //if (queued != skip && edf_higher_prio(queued, found)) + if(queued != skip && litmus->compare(queued, found)) found = queued; } return found; @@ -241,7 +246,8 @@ static void ikglp_add_global_list(struct ikglp_semaphore *sem, // TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); // print_global_list(sem->top_m.root, 1); } - else if(__edf_higher_prio(t, BASE, ikglp_mth_highest(sem), BASE)) { + //else if(__edf_higher_prio(t, BASE, ikglp_mth_highest(sem), BASE)) { + else if(litmus->__compare(t, BASE, ikglp_mth_highest(sem), BASE)) { ikglp_heap_node_t *evicted = binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node); @@ -361,7 +367,8 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t, unsigned long flags) { // priority of 't' has increased (note: 't' might already be hp_waiter). 
- if ((t == fq->hp_waiter) || edf_higher_prio(t, fq->hp_waiter)) { + // if ((t == fq->hp_waiter) || edf_higher_prio(t, fq->hp_waiter)) { + if ((t == fq->hp_waiter) || litmus->compare(t, fq->hp_waiter)) { struct task_struct *old_max_eff_prio; struct task_struct *new_max_eff_prio; struct task_struct *new_prio = NULL; @@ -397,7 +404,7 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t, TRACE_TASK(t, "is new hp_waiter.\n"); if ((effective_priority(owner) == old_max_eff_prio) || - (__edf_higher_prio(new_max_eff_prio, BASE, + (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))){ new_prio = new_max_eff_prio; } @@ -471,7 +478,8 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", ikglp_get_idx(sem, fq)); - if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { + //if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { + if(litmus->__compare(new_max_eff_prio, BASE, owner, BASE)) { TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of fq %d.\n", (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", (new_max_eff_prio) ? new_max_eff_prio->pid : -1, @@ -532,7 +540,8 @@ static void ikglp_remove_donation_from_owner(struct binheap_node *n, TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", ikglp_get_idx(sem, fq)); - if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { + //if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { + if(litmus->__compare(new_max_eff_prio, BASE, owner, BASE)) { TRACE_CUR("has greater base priority than base priority of owner of fq %d.\n", ikglp_get_idx(sem, fq)); decreased_prio = new_max_eff_prio; @@ -573,7 +582,8 @@ static void ikglp_remove_donation_from_fq_waiter(struct task_struct *t, // Need to set new effective_priority for owner struct task_struct *decreased_prio; - if(__edf_higher_prio(new_max_eff_prio, BASE, t, BASE)) { + //if(__edf_higher_prio(new_max_eff_prio, BASE, t, BASE)) { + if(litmus->__compare(new_max_eff_prio, BASE, t, BASE)) { decreased_prio = new_max_eff_prio; } else { @@ -803,7 +813,8 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, if(new_max_eff_prio != old_max_eff_prio) { if ((effective_priority(donee) == old_max_eff_prio) || - (__edf_higher_prio(new_max_eff_prio, BASE, donee, EFFECTIVE))){ + //(__edf_higher_prio(new_max_eff_prio, BASE, donee, EFFECTIVE))){ + (litmus->__compare(new_max_eff_prio, BASE, donee, EFFECTIVE))){ TRACE_TASK(t, "Donation increases %s/%d's effective priority\n", donee->comm, donee->pid); new_prio = new_max_eff_prio; @@ -907,7 +918,8 @@ int ikglp_lock(struct litmus_lock* l) // no room in fifos. Go to PQ or donors. 
- if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { + //if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { + if(litmus->__compare(ikglp_mth_highest(sem), BASE, t, BASE)) { // enqueue on PQ ikglp_enqueue_on_pq(sem, &wait); unlock_fine_irqrestore(&sem->lock, flags); @@ -994,7 +1006,8 @@ static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal( for(i = 0; i < sem->nr_replicas; ++i) { if( (sem->fifo_queues[i].count > 1) && - (!fq || edf_higher_prio(sem->fifo_queues[i].hp_waiter, fq->hp_waiter)) ) { + //(!fq || edf_higher_prio(sem->fifo_queues[i].hp_waiter, fq->hp_waiter)) ) { + (!fq || litmus->compare(sem->fifo_queues[i].hp_waiter, fq->hp_waiter)) ) { TRACE_CUR("hp_waiter on fq %d (%s/%d) has higher prio than hp_waiter on fq %d (%s/%d)\n", ikglp_get_idx(sem, &sem->fifo_queues[i]), @@ -1331,7 +1344,8 @@ int ikglp_unlock(struct litmus_lock* l) fq->hp_waiter->comm, fq->hp_waiter->pid); fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); // set this just to be sure... } - else if(edf_higher_prio(new_on_fq, fq->hp_waiter)) { + //else if(edf_higher_prio(new_on_fq, fq->hp_waiter)) { + else if(litmus->compare(new_on_fq, fq->hp_waiter)) { if(fq->hp_waiter) TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", fq->hp_waiter->comm, fq->hp_waiter->pid); @@ -1577,11 +1591,11 @@ struct litmus_lock* ikglp_new(int m, sem->top_m_size = 0; // init heaps - INIT_BINHEAP_HANDLE(&sem->top_m, ikglp_edf_min_heap_base_priority_order); - INIT_BINHEAP_HANDLE(&sem->not_top_m, ikglp_edf_max_heap_base_priority_order); - INIT_BINHEAP_HANDLE(&sem->donees, ikglp_edf_min_heap_donee_order); - INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_edf_max_heap_base_priority_order); - INIT_BINHEAP_HANDLE(&sem->donors, ikglp_donor_edf_max_heap_base_priority_order); + INIT_BINHEAP_HANDLE(&sem->top_m, ikglp_min_heap_base_priority_order); + INIT_BINHEAP_HANDLE(&sem->not_top_m, ikglp_max_heap_base_priority_order); + INIT_BINHEAP_HANDLE(&sem->donees, ikglp_min_heap_donee_order); + INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_max_heap_base_priority_order); + INIT_BINHEAP_HANDLE(&sem->donors, ikglp_donor_max_heap_base_priority_order); return &sem->litmus_lock; } diff --git a/litmus/kfmlp_lock.c b/litmus/kfmlp_lock.c index 37302064bd8c..f7bb17103383 100644 --- a/litmus/kfmlp_lock.c +++ b/litmus/kfmlp_lock.c @@ -5,7 +5,7 @@ #include #include -#include +//#include static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem, struct kfmlp_queue* queue) @@ -35,7 +35,8 @@ static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue, task_list)->private; /* Compare task prios, find high prio task. 
*/ - if (queued != skip && edf_higher_prio(queued, found)) + //if (queued != skip && edf_higher_prio(queued, found)) + if (queued != skip && litmus->compare(queued, found)) found = queued; } return found; @@ -82,7 +83,8 @@ static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem) { if( (sem->queues[i].count > 1) && ((my_queue == NULL) || - (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) ) + //(edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) ) + (litmus->compare(sem->queues[i].hp_waiter, my_queue->hp_waiter))) ) { my_queue = &sem->queues[i]; } @@ -156,10 +158,12 @@ int kfmlp_lock(struct litmus_lock* l) __add_wait_queue_tail_exclusive(&my_queue->wait, &wait); /* check if we need to activate priority inheritance */ - if (edf_higher_prio(t, my_queue->hp_waiter)) + //if (edf_higher_prio(t, my_queue->hp_waiter)) + if (litmus->compare(t, my_queue->hp_waiter)) { my_queue->hp_waiter = t; - if (edf_higher_prio(t, my_queue->owner)) + //if (edf_higher_prio(t, my_queue->owner)) + if (litmus->compare(t, my_queue->owner)) { litmus->increase_prio(my_queue->owner, my_queue->hp_waiter); } diff --git a/litmus/litmus.c b/litmus/litmus.c index 4a40c571d8c6..2f9079421ec7 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -374,6 +374,10 @@ static void reinit_litmus_state(struct task_struct* p, int restore) up_and_set_stat(p, NOT_HELD, &p->rt_param.klitirqd_sem); #endif +#ifdef CONFIG_LITMUS_NVIDIA + WARN_ON(p->rt_param.held_gpus != 0); +#endif + /* Cleanup everything else. */ memset(&p->rt_param, 0, sizeof(p->rt_param)); diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c index d17152138c63..66181515186a 100644 --- a/litmus/nvidia_info.c +++ b/litmus/nvidia_info.c @@ -6,6 +6,10 @@ #include #include +#include + +#include + typedef unsigned char NvV8; /* "void": enumerated or multiple fields */ typedef unsigned short NvV16; /* "void": enumerated or multiple fields */ typedef unsigned char NvU8; /* 0 to 255 */ @@ -316,9 +320,13 @@ u32 get_work_nv_device_num(const struct work_struct *t) +#define MAX_NR_OWNERS 3 + typedef struct { raw_spinlock_t lock; - struct task_struct *device_owner; + int nr_owners; + struct task_struct* max_prio_owner; + struct task_struct* owners[MAX_NR_OWNERS]; }nv_device_registry_t; static nv_device_registry_t NV_DEVICE_REG[NV_DEVICE_NUM]; @@ -327,12 +335,11 @@ int init_nv_device_reg(void) { int i; - //memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG)); + memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG)); for(i = 0; i < NV_DEVICE_NUM; ++i) { raw_spin_lock_init(&NV_DEVICE_REG[i].lock); - NV_DEVICE_REG[i].device_owner = NULL; } return(1); @@ -357,107 +364,148 @@ int get_nv_device_id(struct task_struct* owner) } */ +static struct task_struct* find_hp_owner(nv_device_registry_t *reg, struct task_struct *skip) { + int i; + struct task_struct *found = NULL; + for(i = 0; i < reg->nr_owners; ++i) { + if(reg->owners[i] != skip && litmus->compare(reg->owners[i], found)) { + found = reg->owners[i]; + } + } + return found; +} +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD +void pai_check_priority_increase(struct task_struct *t, int reg_device_id) +{ + unsigned long flags; + nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; + + if(reg->max_prio_owner != t) { + + raw_spin_lock_irqsave(®->lock, flags); + + if(reg->max_prio_owner != t) { + if(litmus->compare(t, reg->max_prio_owner)) { + litmus->change_prio_pai_tasklet(reg->max_prio_owner, t); + reg->max_prio_owner = t; + } + } + + raw_spin_unlock_irqrestore(®->lock, flags); + } +} + + +void 
pai_check_priority_decrease(struct task_struct *t, int reg_device_id) +{ + unsigned long flags; + nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; + + if(reg->max_prio_owner == t) { + + raw_spin_lock_irqsave(®->lock, flags); + + if(reg->max_prio_owner == t) { + reg->max_prio_owner = find_hp_owner(reg, NULL); + if(reg->max_prio_owner != t) { + litmus->change_prio_pai_tasklet(t, reg->max_prio_owner); + } + } + + raw_spin_unlock_irqrestore(®->lock, flags); + } +} +#endif static int __reg_nv_device(int reg_device_id) { int ret = 0; - struct task_struct* old = - cmpxchg(&NV_DEVICE_REG[reg_device_id].device_owner, - NULL, - current); - - mb(); - - if(likely(old == NULL)) - { + int i; + struct task_struct *t = current; + struct task_struct *old_max = NULL; + unsigned long flags; + nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; + + raw_spin_lock_irqsave(®->lock, flags); + + if(reg->nr_owners < MAX_NR_OWNERS) { + for(i = 0; i < MAX_NR_OWNERS; ++i) { + if(reg->owners[i] == NULL) { + reg->owners[i] = t; + + //if(edf_higher_prio(t, reg->max_prio_owner)) { + if(litmus->compare(t, reg->max_prio_owner)) { + old_max = reg->max_prio_owner; + reg->max_prio_owner = t; + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + litmus->change_prio_pai_tasklet(old_max, t); +#endif + } + #ifdef CONFIG_LITMUS_SOFTIRQD - down_and_set_stat(current, HELD, &tsk_rt(current)->klitirqd_sem); -#endif - TRACE_CUR("%s: device %d registered.\n", __FUNCTION__, reg_device_id); - } + down_and_set_stat(t, HELD, &tsk_rt(t)->klitirqd_sem); +#endif + ++(reg->nr_owners); + + break; + } + } + } else - { + { TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id); - ret = -EBUSY; + ret = -EBUSY; } - return(ret); - + raw_spin_unlock_irqrestore(®->lock, flags); + __set_bit(reg_device_id, &tsk_rt(t)->held_gpus); -#if 0 - //unsigned long flags; - //raw_spin_lock_irqsave(&NV_DEVICE_REG[reg_device_id].lock, flags); - //lock_nv_registry(reg_device_id, &flags); - - if(likely(NV_DEVICE_REG[reg_device_id].device_owner == NULL)) - { - NV_DEVICE_REG[reg_device_id].device_owner = current; - mb(); // needed? - - // release spin lock before chance of going to sleep. 
- //raw_spin_unlock_irqrestore(&NV_DEVICE_REG[reg_device_id].lock, flags); - //unlock_nv_registry(reg_device_id, &flags); - - down_and_set_stat(current, HELD, &tsk_rt(current)->klitirqd_sem); - TRACE_CUR("%s: device %d registered.\n", __FUNCTION__, reg_device_id); - return(0); - } - else - { - //raw_spin_unlock_irqrestore(&NV_DEVICE_REG[reg_device_id].lock, flags); - //unlock_nv_registry(reg_device_id, &flags); - - TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id); - return(-EBUSY); - } -#endif + return(ret); } static int __clear_reg_nv_device(int de_reg_device_id) { - int ret = 0; - struct task_struct* old; + int ret = 0; + int i; + struct task_struct *t = current; + unsigned long flags; + nv_device_registry_t *reg = &NV_DEVICE_REG[de_reg_device_id]; #ifdef CONFIG_LITMUS_SOFTIRQD - unsigned long flags; struct task_struct* klitirqd_th = get_klitirqd(de_reg_device_id); - lock_nv_registry(de_reg_device_id, &flags); #endif - old = cmpxchg(&NV_DEVICE_REG[de_reg_device_id].device_owner, - current, - NULL); + raw_spin_lock_irqsave(®->lock, flags); - mb(); - -#ifdef CONFIG_LITMUS_SOFTIRQD - if(likely(old == current)) - { - flush_pending(klitirqd_th, current); - //unlock_nv_registry(de_reg_device_id, &flags); - - up_and_set_stat(current, NOT_HELD, &tsk_rt(current)->klitirqd_sem); - - unlock_nv_registry(de_reg_device_id, &flags); - ret = 0; - - TRACE_CUR("%s: semaphore released.\n",__FUNCTION__); - } - else - { - unlock_nv_registry(de_reg_device_id, &flags); - ret = -EINVAL; - - if(old) - TRACE_CUR("%s: device %d is not registered for this process's use! %s/%d is!\n", - __FUNCTION__, de_reg_device_id, old->comm, old->pid); - else - TRACE_CUR("%s: device %d is not registered for this process's use! No one is!\n", - __FUNCTION__, de_reg_device_id); - } + for(i = 0; i < reg->nr_owners; ++i) { + if(reg->owners[i] == t) { +#ifdef CONFIG_LITMUS_SOFTIRQD + flush_pending(klitirqd_th, t); +#endif + if(reg->max_prio_owner == t) { + reg->max_prio_owner = find_hp_owner(reg, t); +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + litmus->change_prio_pai_tasklet(t, reg->max_prio_owner); +#endif + } + +#ifdef CONFIG_LITMUS_SOFTIRQD + up_and_set_stat(t, NOT_HELD, &tsk_rt(t)->klitirqd_sem); #endif + + reg->owners[i] = NULL; + --(reg->nr_owners); + + break; + } + } + + raw_spin_unlock_irqrestore(®->lock, flags); + + __clear_bit(de_reg_device_id, &tsk_rt(t)->held_gpus); return(ret); } @@ -483,11 +531,11 @@ int reg_nv_device(int reg_device_id, int reg_action) } /* use to get the owner of nv_device_id. 
*/ -struct task_struct* get_nv_device_owner(u32 target_device_id) +struct task_struct* get_nv_max_device_owner(u32 target_device_id) { - struct task_struct* owner; + struct task_struct *owner = NULL; BUG_ON(target_device_id >= NV_DEVICE_NUM); - owner = NV_DEVICE_REG[target_device_id].device_owner; + owner = NV_DEVICE_REG[target_device_id].max_prio_owner; return(owner); } @@ -516,21 +564,21 @@ void unlock_nv_registry(u32 target_device_id, unsigned long* flags) } -void increment_nv_int_count(u32 device) -{ - unsigned long flags; - struct task_struct* owner; - - lock_nv_registry(device, &flags); - - owner = NV_DEVICE_REG[device].device_owner; - if(owner) - { - atomic_inc(&tsk_rt(owner)->nv_int_count); - } - - unlock_nv_registry(device, &flags); -} -EXPORT_SYMBOL(increment_nv_int_count); +//void increment_nv_int_count(u32 device) +//{ +// unsigned long flags; +// struct task_struct* owner; +// +// lock_nv_registry(device, &flags); +// +// owner = NV_DEVICE_REG[device].device_owner; +// if(owner) +// { +// atomic_inc(&tsk_rt(owner)->nv_int_count); +// } +// +// unlock_nv_registry(device, &flags); +//} +//EXPORT_SYMBOL(increment_nv_int_count); diff --git a/litmus/rsm_lock.c b/litmus/rsm_lock.c index 11d119210ef9..aaca93c1e5d1 100644 --- a/litmus/rsm_lock.c +++ b/litmus/rsm_lock.c @@ -5,7 +5,7 @@ #include #include -#include +//#include /* caller is responsible for locking */ @@ -41,7 +41,8 @@ static struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex, #endif /* Compare task prios, find high prio task. */ - if (queued && queued != skip && edf_higher_prio(queued, found)) { + //if (queued && queued != skip && edf_higher_prio(queued, found)) { + if (queued && queued != skip && litmus->compare(queued, found)) { found = queued; } } @@ -107,7 +108,8 @@ void rsm_mutex_enable_priority(struct litmus_lock *l, tsk_rt(t)->blocked_lock = l; mb(); - if (edf_higher_prio(t, mutex->hp_waiter)) { + //if (edf_higher_prio(t, mutex->hp_waiter)) { + if (litmus->compare(t, mutex->hp_waiter)) { struct task_struct *old_max_eff_prio; struct task_struct *new_max_eff_prio; @@ -132,7 +134,8 @@ void rsm_mutex_enable_priority(struct litmus_lock *l, TRACE_TASK(t, "is new hp_waiter.\n"); if ((effective_priority(owner) == old_max_eff_prio) || - (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ + //(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ + (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))){ new_prio = new_max_eff_prio; } } @@ -215,7 +218,8 @@ int rsm_mutex_lock(struct litmus_lock* l) __add_wait_queue_tail_exclusive(&mutex->wait, &wait); /* check if we need to activate priority inheritance */ - if (edf_higher_prio(t, mutex->hp_waiter)) { + //if (edf_higher_prio(t, mutex->hp_waiter)) { + if (litmus->compare(t, mutex->hp_waiter)) { struct task_struct *old_max_eff_prio; struct task_struct *new_max_eff_prio; @@ -240,8 +244,8 @@ int rsm_mutex_lock(struct litmus_lock* l) TRACE_TASK(t, "is new hp_waiter.\n"); if ((effective_priority(owner) == old_max_eff_prio) || - (__edf_higher_prio(new_max_eff_prio, BASE, - owner, EFFECTIVE))){ + //(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ + (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))){ new_prio = new_max_eff_prio; } } @@ -353,7 +357,8 @@ int rsm_mutex_unlock(struct litmus_lock* l) { // old_max_eff_prio > new_max_eff_prio - if(__edf_higher_prio(new_max_eff_prio, BASE, t, EFFECTIVE)) { + //if(__edf_higher_prio(new_max_eff_prio, BASE, t, EFFECTIVE)) { + if(litmus->__compare(new_max_eff_prio, BASE, t, 
EFFECTIVE)) { TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n", new_max_eff_prio->comm, new_max_eff_prio->pid, t->comm, t->pid, tsk_rt(t)->inh_task->comm, @@ -460,8 +465,8 @@ int rsm_mutex_unlock(struct litmus_lock* l) { if(dgl_wait && tsk_rt(next)->blocked_lock) { BUG_ON(wake_up_task); - if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, - next, EFFECTIVE)) { + //if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) { + if(litmus->__compare(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) { litmus->nested_increase_prio(next, l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock. goto out; // all spinlocks are released. bail out now. @@ -532,7 +537,8 @@ void rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) { + //if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) { + if((t != mutex->hp_waiter) && litmus->compare(t, mutex->hp_waiter)) { TRACE_TASK(t, "is new highest-prio waiter by propagation.\n"); mutex->hp_waiter = t; } @@ -554,8 +560,8 @@ void rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, if(new_max_eff_prio != old_max_eff_prio) { // new_max_eff_prio > old_max_eff_prio holds. if ((effective_priority(owner) == old_max_eff_prio) || - (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) { - + //(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) { + (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))) { TRACE_CUR("Propagating inheritance to holder of lock %d.\n", l->ident); @@ -649,7 +655,8 @@ void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n", l->ident); - if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { + //if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { + if(litmus->__compare(new_max_eff_prio, BASE, owner, BASE)) { TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n", (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", (new_max_eff_prio) ? new_max_eff_prio->pid : -1, diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index e009b7f34aca..37f7821dca50 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -436,14 +436,6 @@ static void gsnedf_tick(struct task_struct* t) - - - - - - - - #ifdef CONFIG_LITMUS_PAI_SOFTIRQD @@ -467,96 +459,117 @@ static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flush } } - -static void flush_tasklets(struct task_struct* task) -{ - // lazy flushing. - // just change ownership to NULL and let idel processor - // take care of it. :P - - struct tasklet_struct* step; - unsigned long flags; - - raw_spin_lock_irqsave(&gsnedf_lock, flags); - for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) { - if(step->owner == task) { - TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid); - step->owner = NULL; - } - } - raw_spin_unlock_irqrestore(&gsnedf_lock, flags); -} - - static void do_lit_tasklets(struct task_struct* sched_task) { int work_to_do = 1; struct tasklet_struct *tasklet = NULL; - //struct tasklet_struct *step; unsigned long flags; while(work_to_do) { TS_NV_SCHED_BOTISR_START; - // remove tasklet at head of list if it has higher priority. 
+ // execute one tasklet that has higher priority raw_spin_lock_irqsave(&gsnedf_lock, flags); if(gsnedf_pending_tasklets.head != NULL) { - // remove tasklet at head. - tasklet = gsnedf_pending_tasklets.head; + struct tasklet_struct *prev = NULL; + tasklet = gsnedf_pending_tasklets.head; + + while(tasklet && edf_higher_prio(sched_task, tasklet->owner)) { + prev = tasklet; + tasklet = tasklet->next; + } - if(edf_higher_prio(tasklet->owner, sched_task)) { - - if(NULL == tasklet->next) { - // tasklet is at the head, list only has one element + // remove the tasklet from the queue + if(prev) { + prev->next = tasklet->next; + if(prev->next == NULL) { TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); - gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); - } - - // remove the tasklet from the queue - gsnedf_pending_tasklets.head = tasklet->next; - - TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); + gsnedf_pending_tasklets.tail = &(prev); + } } else { - TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id()); - tasklet = NULL; + gsnedf_pending_tasklets.head = tasklet->next; + if(tasklet->next == NULL) { + TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); + gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); + } } } else { TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); - } + } raw_spin_unlock_irqrestore(&gsnedf_lock, flags); - TS_NV_SCHED_BOTISR_END; - if(tasklet) { __do_lit_tasklet(tasklet, 0ul); - tasklet = NULL; + tasklet = NULL; } else { work_to_do = 0; } + + TS_NV_SCHED_BOTISR_END; } - - //TRACE("%s: exited.\n", __FUNCTION__); -} - - -static void run_tasklets(struct task_struct* sched_task) -{ - preempt_disable(); - - if(gsnedf_pending_tasklets.head != NULL) { - TRACE("%s: There are tasklets to process.\n", __FUNCTION__); - do_lit_tasklets(sched_task); - } - - preempt_enable_no_resched(); } +//static void do_lit_tasklets(struct task_struct* sched_task) +//{ +// int work_to_do = 1; +// struct tasklet_struct *tasklet = NULL; +// //struct tasklet_struct *step; +// unsigned long flags; +// +// while(work_to_do) { +// +// TS_NV_SCHED_BOTISR_START; +// +// // remove tasklet at head of list if it has higher priority. +// raw_spin_lock_irqsave(&gsnedf_lock, flags); +// +// if(gsnedf_pending_tasklets.head != NULL) { +// // remove tasklet at head. 
+// tasklet = gsnedf_pending_tasklets.head; +// +// if(edf_higher_prio(tasklet->owner, sched_task)) { +// +// if(NULL == tasklet->next) { +// // tasklet is at the head, list only has one element +// TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); +// gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); +// } +// +// // remove the tasklet from the queue +// gsnedf_pending_tasklets.head = tasklet->next; +// +// TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); +// } +// else { +// TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id()); +// tasklet = NULL; +// } +// } +// else { +// TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); +// } +// +// raw_spin_unlock_irqrestore(&gsnedf_lock, flags); +// +// TS_NV_SCHED_BOTISR_END; +// +// if(tasklet) { +// __do_lit_tasklet(tasklet, 0ul); +// tasklet = NULL; +// } +// else { +// work_to_do = 0; +// } +// } +// +// //TRACE("%s: exited.\n", __FUNCTION__); +//} static void __add_pai_tasklet(struct tasklet_struct* tasklet) { @@ -604,7 +617,19 @@ static void __add_pai_tasklet(struct tasklet_struct* tasklet) } } -static int enqueue_pai_tasklet(struct tasklet_struct* tasklet) +static void gsnedf_run_tasklets(struct task_struct* sched_task) +{ + preempt_disable(); + + if(gsnedf_pending_tasklets.head != NULL) { + TRACE("%s: There are tasklets to process.\n", __FUNCTION__); + do_lit_tasklets(sched_task); + } + + preempt_enable_no_resched(); +} + +static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet) { cpu_entry_t *targetCPU = NULL; int thisCPU; @@ -692,6 +717,23 @@ static int enqueue_pai_tasklet(struct tasklet_struct* tasklet) return(1); // success } +static void gsnedf_change_prio_pai_tasklet(struct task_struct *old_prio, + struct task_struct *new_prio) +{ + struct tasklet_struct* step; + unsigned long flags; + + raw_spin_lock_irqsave(&gsnedf_lock, flags); + + for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) { + if(step->owner == old_prio) { + TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid); + step->owner = new_prio; + } + } + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); +} + #endif // end PAI @@ -954,7 +996,7 @@ static void gsnedf_task_exit(struct task_struct * t) unsigned long flags; #ifdef CONFIG_LITMUS_PAI_SOFTIRQD - flush_tasklets(t); + gsnedf_change_prio_pai_tasklet(t, NULL); #endif /* unlink if necessary */ @@ -1072,6 +1114,8 @@ static void __increase_priority_inheritance(struct task_struct* t, /* called with IRQs off */ static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) { + int i = 0; + raw_spin_lock(&gsnedf_lock); __increase_priority_inheritance(t, prio_inh); @@ -1087,6 +1131,14 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str #endif raw_spin_unlock(&gsnedf_lock); + +#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) + for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); + i < NV_DEVICE_NUM; + i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i)) { + pai_check_priority_increase(t, i); + } +#endif } @@ -1147,6 +1199,8 @@ static void __decrease_priority_inheritance(struct task_struct* t, static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) { + int i; + raw_spin_lock(&gsnedf_lock); 
__decrease_priority_inheritance(t, prio_inh); @@ -1160,7 +1214,15 @@ static void decrease_priority_inheritance(struct task_struct* t, } #endif - raw_spin_unlock(&gsnedf_lock); + raw_spin_unlock(&gsnedf_lock); + +#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) + for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); + i < NV_DEVICE_NUM; + i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i)) { + pai_check_priority_decrease(t, i); + } +#endif } @@ -1687,8 +1749,9 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { .decrease_prio_klitirqd = decrease_priority_inheritance_klitirqd, #endif #ifdef CONFIG_LITMUS_PAI_SOFTIRQD - .enqueue_pai_tasklet = enqueue_pai_tasklet, - .run_tasklets = run_tasklets, + .enqueue_pai_tasklet = gsnedf_enqueue_pai_tasklet, + .change_prio_pai_tasklet = gsnedf_change_prio_pai_tasklet, + .run_tasklets = gsnedf_run_tasklets, #endif }; diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index 75694350a9ad..24326ce4657e 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c @@ -110,6 +110,11 @@ static long litmus_dummy_deactivate_plugin(void) return 0; } +static int litmus_dummy_compare(struct task_struct* a, struct task_struct* b) +{ + return 0; +} + #ifdef CONFIG_LITMUS_LOCKING static long litmus_dummy_allocate_lock(struct litmus_lock **lock, int type, void* __user config) @@ -146,6 +151,12 @@ static int litmus_dummy_enqueue_pai_tasklet(struct tasklet_struct* t) return(0); // failure. } +static void litmus_dummy_change_prio_pai_tasklet(struct task_struct *old_prio, + struct task_struct *new_prio) +{ + TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__); +} + static void litmus_dummy_run_tasklets(struct task_struct* t) { //TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__); @@ -162,6 +173,12 @@ static void litmus_dummy_nested_decrease_prio(struct task_struct* t, struct task raw_spinlock_t *to_unlock, unsigned long irqflags) { } + +static int litmus_dummy___compare(struct task_struct* a, comparison_mode_t a_mod, + struct task_struct* b, comparison_mode_t b_mode) +{ + return 0; +} #endif #ifdef CONFIG_LITMUS_DGL_SUPPORT @@ -188,6 +205,7 @@ struct sched_plugin linux_sched_plugin = { .finish_switch = litmus_dummy_finish_switch, .activate_plugin = litmus_dummy_activate_plugin, .deactivate_plugin = litmus_dummy_deactivate_plugin, + .compare = litmus_dummy_compare, #ifdef CONFIG_LITMUS_LOCKING .allocate_lock = litmus_dummy_allocate_lock, .increase_prio = litmus_dummy_increase_prio, @@ -196,6 +214,7 @@ struct sched_plugin linux_sched_plugin = { #ifdef CONFIG_LITMUS_NESTED_LOCKING .nested_increase_prio = litmus_dummy_nested_increase_prio, .nested_decrease_prio = litmus_dummy_nested_decrease_prio, + .__compare = litmus_dummy___compare, #endif #ifdef CONFIG_LITMUS_SOFTIRQD .increase_prio_klitirqd = litmus_dummy_increase_prio_klitirqd, @@ -203,6 +222,7 @@ struct sched_plugin linux_sched_plugin = { #endif #ifdef CONFIG_LITMUS_PAI_SOFTIRQD .enqueue_pai_tasklet = litmus_dummy_enqueue_pai_tasklet, + .change_prio_pai_tasklet = litmus_dummy_change_prio_pai_tasklet, .run_tasklets = litmus_dummy_run_tasklets, #endif #ifdef CONFIG_LITMUS_DGL_SUPPORT @@ -243,6 +263,7 @@ int register_sched_plugin(struct sched_plugin* plugin) CHECK(complete_job); CHECK(activate_plugin); CHECK(deactivate_plugin); + CHECK(compare); #ifdef CONFIG_LITMUS_LOCKING CHECK(allocate_lock); CHECK(increase_prio); @@ -251,6 +272,7 @@ int register_sched_plugin(struct 
sched_plugin* plugin) #ifdef CONFIG_LITMUS_NESTED_LOCKING CHECK(nested_increase_prio); CHECK(nested_decrease_prio); + CHECK(__compare); #endif #ifdef CONFIG_LITMUS_SOFTIRQD CHECK(increase_prio_klitirqd); @@ -258,6 +280,7 @@ int register_sched_plugin(struct sched_plugin* plugin) #endif #ifdef CONFIG_LITMUS_PAI_SOFTIRQD CHECK(enqueue_pai_tasklet); + CHECK(change_prio_pai_tasklet); CHECK(run_tasklets); #endif #ifdef CONFIG_LITMUS_DGL_SUPPORT -- cgit v1.2.2
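
The heart of the change is in litmus/nvidia_info.c: the per-device registry no longer tracks a single device_owner but a small table of owners plus a cached max_prio_owner, so interrupts raised by a GPU can always be charged to the highest-priority task registered on that device. Below is a minimal, compilable user-space model of that bookkeeping, not the kernel code itself: "struct task" with a numeric prio and the plain compare() stand in for task_struct and litmus->compare(), and the registry spinlock, klitirqd hand-off, and PAI tasklet re-targeting calls are omitted.

#include <stdio.h>
#include <stddef.h>

#define MAX_NR_OWNERS 3                 /* same per-GPU owner limit as the patch */

struct task { const char *name; int prio; };    /* stand-in for task_struct */

/* stand-in for litmus->compare(a, b): nonzero iff a outranks b; NULL loses to anything */
static int compare(const struct task *a, const struct task *b)
{
    if (!a) return 0;
    if (!b) return 1;
    return a->prio > b->prio;
}

typedef struct {
    int nr_owners;
    const struct task *max_prio_owner;
    const struct task *owners[MAX_NR_OWNERS];
} nv_device_registry_t;

/* mirrors find_hp_owner(): highest-priority registered owner, optionally skipping one task */
static const struct task *find_hp_owner(nv_device_registry_t *reg, const struct task *skip)
{
    const struct task *found = NULL;
    for (int i = 0; i < MAX_NR_OWNERS; ++i)
        if (reg->owners[i] && reg->owners[i] != skip && compare(reg->owners[i], found))
            found = reg->owners[i];
    return found;
}

/* mirrors __reg_nv_device(): claim a free slot and promote max_prio_owner if needed */
static int reg_nv_device(nv_device_registry_t *reg, const struct task *t)
{
    if (reg->nr_owners >= MAX_NR_OWNERS)
        return -1;                              /* -EBUSY in the patch */
    for (int i = 0; i < MAX_NR_OWNERS; ++i) {
        if (!reg->owners[i]) {
            reg->owners[i] = t;
            ++reg->nr_owners;
            if (compare(t, reg->max_prio_owner))
                reg->max_prio_owner = t;        /* the patch also re-targets pending tasklets here */
            return 0;
        }
    }
    return -1;
}

/* mirrors __clear_reg_nv_device(): release the slot; recompute max_prio_owner if t held it */
static void clear_reg_nv_device(nv_device_registry_t *reg, const struct task *t)
{
    for (int i = 0; i < MAX_NR_OWNERS; ++i) {
        if (reg->owners[i] == t) {
            reg->owners[i] = NULL;
            --reg->nr_owners;
            if (reg->max_prio_owner == t)
                reg->max_prio_owner = find_hp_owner(reg, NULL);
            return;
        }
    }
}

int main(void)
{
    nv_device_registry_t gpu0 = { 0, NULL, { NULL } };
    struct task a = { "A", 10 }, b = { "B", 30 }, c = { "C", 20 };

    reg_nv_device(&gpu0, &a);
    reg_nv_device(&gpu0, &b);
    reg_nv_device(&gpu0, &c);
    printf("charge GPU 0 interrupts to: %s\n", gpu0.max_prio_owner->name);  /* B */

    clear_reg_nv_device(&gpu0, &b);
    printf("after B unregisters:        %s\n", gpu0.max_prio_owner->name);  /* C */
    return 0;
}

One deliberate simplification: the model always scans all MAX_NR_OWNERS slots and skips empty entries rather than bounding the loop with nr_owners, since unregistering can leave holes in owners[].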
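
On the scheduler side, sched_gsn_edf.c gains the two pieces that the multi-owner registry depends on: gsnedf_change_prio_pai_tasklet() walks the plugin's pending-tasklet list and re-assigns tasklet ownership whenever a device's max-priority owner changes (passing NULL reproduces the old flush_tasklets() behaviour at task exit), and do_lit_tasklets() now skips past pending tasklets whose owners the currently scheduled task outranks instead of only examining the head. The following is a compilable sketch of just that list handling, under the same stand-in task/compare assumptions as above; the real queue also maintains a tail pointer and is protected by gsnedf_lock, both omitted here.

#include <stdio.h>
#include <stddef.h>

struct task { const char *name; int prio; };

static int compare(const struct task *a, const struct task *b)
{
    if (!a) return 0;
    if (!b) return 1;
    return a->prio > b->prio;
}

/* stand-in for tasklet_struct: only the fields the PAI path touches */
struct tasklet {
    const struct task *owner;    /* task this bottom half is charged to */
    struct tasklet *next;
};

struct tasklet_head { struct tasklet *head; };

/* mirrors gsnedf_change_prio_pai_tasklet(): re-own every pending tasklet of old_owner */
static void change_prio_pai_tasklet(struct tasklet_head *q,
                                    const struct task *old_owner,
                                    const struct task *new_owner)
{
    for (struct tasklet *step = q->head; step; step = step->next)
        if (step->owner == old_owner)
            step->owner = new_owner;
}

/* mirrors the selection loop in do_lit_tasklets(): unlink and return the first
 * pending tasklet whose owner outranks the task scheduled on this CPU, or NULL */
static struct tasklet *pop_eligible_tasklet(struct tasklet_head *q,
                                            const struct task *sched_task)
{
    struct tasklet *prev = NULL, *t = q->head;

    while (t && compare(sched_task, t->owner)) {   /* skip tasklets we outrank */
        prev = t;
        t = t->next;
    }
    if (!t)
        return NULL;
    if (prev)
        prev->next = t->next;    /* unlink from the middle or end */
    else
        q->head = t->next;       /* unlink from the head */
    t->next = NULL;
    return t;
}

int main(void)
{
    struct task cpu_job = { "cpu_job", 10 }, gpu_job = { "gpu_job", 20 };
    struct tasklet t2 = { &gpu_job, NULL }, t1 = { &cpu_job, &t2 };
    struct tasklet_head pending = { &t1 };

    /* a CPU running a priority-15 task skips t1 (owner prio 10) and runs t2 (owner prio 20) */
    struct task running = { "running", 15 };
    struct tasklet *next = pop_eligible_tasklet(&pending, &running);
    printf("run tasklet owned by: %s\n", next ? next->owner->name : "none");

    /* at task exit the patch calls the callback with NULL, orphaning leftover tasklets */
    change_prio_pai_tasklet(&pending, &cpu_job, NULL);
    printf("head tasklet now owned by: %s\n",
           pending.head && pending.head->owner ? pending.head->owner->name : "none");
    return 0;
}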
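
Finally, rt_param gains a held_gpus bitmap so that increase_priority_inheritance() and decrease_priority_inheritance() can revisit every device registry the task currently appears in and re-run the PAI ownership check for each one. The kernel code iterates the bitmap with find_first_bit()/find_next_bit(), which expect the bitmap length in bits; the short model below uses a plain unsigned long and direct bit tests to show the same pattern, with the per-device check reduced to a stub.

#include <stdio.h>

#define NV_DEVICE_NUM 8                 /* small device count, for this demo only */

/* stub for pai_check_priority_increase()/_decrease(): just report which GPU is revisited */
static void pai_check_priority(int device)
{
    printf("re-evaluating max-prio owner of GPU %d\n", device);
}

/* mirrors the loops added to (in|de)crease_priority_inheritance(): visit every
 * device whose bit is set in the task's held_gpus bitmap after a priority change */
static void check_held_gpus(unsigned long held_gpus)
{
    for (int i = 0; i < NV_DEVICE_NUM; ++i)
        if (held_gpus & (1UL << i))
            pai_check_priority(i);
}

int main(void)
{
    unsigned long held_gpus = 0;

    held_gpus |= 1UL << 1;          /* __set_bit(1, &held_gpus) when GPU 1 is registered */
    held_gpus |= 1UL << 5;          /* __set_bit(5, &held_gpus) */
    check_held_gpus(held_gpus);     /* visits GPUs 1 and 5 only */

    held_gpus &= ~(1UL << 1);       /* __clear_bit(1, &held_gpus) at unregister */
    check_held_gpus(held_gpus);     /* now only GPU 5 */
    return 0;
}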