From 8eb55f8fa1a2c3854f0f77b9b8663178c0129f6c Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Wed, 11 Apr 2012 15:57:59 -0400
Subject: Added support for Dynamic Group Locks (DGLs)

Added support for Dynamic Group Locks (DGLs). Locks are FIFO-ordered
(no timestamps), so a single coarse DGL lock is needed to enqueue on
all requested resources atomically. Unfortunately, this requires nested
inheritance to use coarse-grain locking: coarse-grain locking is used
when DGLs are enabled, and fine-grain locking is used when DGLs are
disabled.

TODO: Clean up the IKGLP implementation. There is a lot of needless
debug/TRACE work.
---
 include/litmus/locking.h      |  76 ++++++-
 include/litmus/sched_plugin.h |   5 +
 include/litmus/trace.h        |  14 +-
 include/litmus/unistd_32.h    |   4 +-
 include/litmus/unistd_64.h    |   6 +-
 litmus/Kconfig                |  19 ++
 litmus/binheap.c              |  60 +++++-
 litmus/locking.c              | 339 +++++++++++++++++++++++++++++--
 litmus/sched_gsn_edf.c        | 460 ++++++++++++++++++++++++++++++++++--------
 litmus/sched_plugin.c         |  17 ++
 10 files changed, 893 insertions(+), 107 deletions(-)

diff --git a/include/litmus/locking.h b/include/litmus/locking.h index e0c13f4c31e5..972cbdb7fdd5 100644 --- a/include/litmus/locking.h +++ b/include/litmus/locking.h @@ -36,6 +36,29 @@ struct litmus_lock { #endif }; +#ifdef CONFIG_LITMUS_DGL_SUPPORT + +#define MAX_DGL_SIZE CONFIG_LITMUS_MAX_DGL_SIZE + +typedef struct dgl_wait_state { + struct task_struct *task; + struct litmus_lock *locks[MAX_DGL_SIZE]; + int size; + int nr_remaining; + + int last_primary; + + wait_queue_t wq_nodes[MAX_DGL_SIZE]; +} dgl_wait_state_t; + +void wake_or_wait_on_next_lock(dgl_wait_state_t *dgl_wait); +void select_next_lock(dgl_wait_state_t* dgl_wait, struct litmus_lock* prev_lock); + +void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait); +int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key); +void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task); +#endif + struct litmus_lock_ops { /* Current task tries to obtain / drop a reference to a lock. * Optional methods, allowed by default. */ @@ -45,7 +68,7 @@ struct litmus_lock_ops { /* Current tries to lock/unlock this lock (mandatory methods). */ int (*lock)(struct litmus_lock*); int (*unlock)(struct litmus_lock*); - + /* The lock is no longer being referenced (mandatory method).
*/ void (*deallocate)(struct litmus_lock*); @@ -53,6 +76,57 @@ struct litmus_lock_ops { void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); #endif + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l); + int (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node); + int (*is_owner)(struct litmus_lock *l, struct task_struct *t); + void (*enable_priority)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait); +#endif }; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT +#define lock_global_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags)) +#define lock_global(lock) raw_spin_lock((lock)) +#define unlock_global_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags)) +#define unlock_global(lock) raw_spin_unlock((lock)) + +/* fine-grain locking are no-ops with DGL support */ +#define lock_fine_irqsave(lock, flags) +#define lock_fine(lock) +#define unlock_fine_irqrestore(lock, flags) +#define unlock_fine(lock) + +#elif CONFIG_LITMUS_NESTED_LOCKING + +/* global locking are no-ops without DGL support */ +#define lock_global_irqsave(lock, flags) +#define lock_global(lock) +#define unlock_global_irqrestore(lock, flags) +#define unlock_global(lock) + +#define lock_fine_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags)) +#define lock_fine(lock) raw_spin_lock((lock)) +#define unlock_fine_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags)) +#define unlock_fine(lock) raw_spin_unlock((lock)) + #endif + + +#endif + + + + + + + + + + + + + + + diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h index 6e7cabdddae8..ae11e3ac9266 100644 --- a/include/litmus/sched_plugin.h +++ b/include/litmus/sched_plugin.h @@ -58,6 +58,7 @@ typedef void (*task_exit_t) (struct task_struct *); typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type, void* __user config); +typedef raw_spinlock_t* (*get_dgl_spinlock_t) (struct task_struct *t); /********************* sys call backends ********************/ /* This function causes the caller to sleep until the next release */ @@ -97,6 +98,10 @@ struct sched_plugin { /* locking protocols */ allocate_lock_t allocate_lock; #endif + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + get_dgl_spinlock_t get_dgl_spinlock; +#endif } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); diff --git a/include/litmus/trace.h b/include/litmus/trace.h index e809376d6487..1a1b0d479f61 100644 --- a/include/litmus/trace.h +++ b/include/litmus/trace.h @@ -103,11 +103,23 @@ feather_callback void save_task_latency(unsigned long event, unsigned long when_ #define TS_LOCK_START TIMESTAMP(170) #define TS_LOCK_SUSPEND TIMESTAMP(171) #define TS_LOCK_RESUME TIMESTAMP(172) -#define TS_LOCK_END TIMESTAMP(173) +#define TS_LOCK_END TIMESTAMP(173) + +#ifdef CONFIG_LITMUS_DGL_SUPPORT +#define TS_DGL_LOCK_START TIMESTAMP(175) +#define TS_DGL_LOCK_SUSPEND TIMESTAMP(176) +#define TS_DGL_LOCK_RESUME TIMESTAMP(177) +#define TS_DGL_LOCK_END TIMESTAMP(178) +#endif #define TS_UNLOCK_START TIMESTAMP(180) #define TS_UNLOCK_END TIMESTAMP(181) +#ifdef CONFIG_LITMUS_DGL_SUPPORT +#define TS_DGL_UNLOCK_START TIMESTAMP(185) +#define TS_DGL_UNLOCK_END TIMESTAMP(186) +#endif + #define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) #define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN) diff --git 
a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h index 94264c27d9ac..941231c8184b 100644 --- a/include/litmus/unistd_32.h +++ b/include/litmus/unistd_32.h @@ -17,5 +17,7 @@ #define __NR_wait_for_ts_release __LSC(9) #define __NR_release_ts __LSC(10) #define __NR_null_call __LSC(11) +#define __NR_litmus_dgl_lock __LSC(12) +#define __NR_litmus_dgl_unlock __LSC(13) -#define NR_litmus_syscalls 12 +#define NR_litmus_syscalls 14 diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h index d5ced0d2642c..bf2ffeac2dbb 100644 --- a/include/litmus/unistd_64.h +++ b/include/litmus/unistd_64.h @@ -29,5 +29,9 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release) __SYSCALL(__NR_release_ts, sys_release_ts) #define __NR_null_call __LSC(11) __SYSCALL(__NR_null_call, sys_null_call) +#define __NR_litmus_dgl_lock __LSC(12) +__SYSCALL(__NR_litmus_dgl_lock, sys_litmus_dgl_lock) +#define __NR_litmus_dgl_unlock __LSC(13) +__SYSCALL(__NR_litmus_dgl_unlock, sys_litmus_dgl_unlock) -#define NR_litmus_syscalls 12 +#define NR_litmus_syscalls 14 diff --git a/litmus/Kconfig b/litmus/Kconfig index 841a7e4e9723..97200506e31c 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig @@ -67,6 +67,25 @@ config LITMUS_NESTED_LOCKING help Enable nested priority inheritance. +config LITMUS_DGL_SUPPORT + bool "Support for dynamic group locks" + depends on LITMUS_NESTED_LOCKING + default n + help + Enable dynamic group lock support. + +config LITMUS_MAX_DGL_SIZE + int "Maximum size of a dynamic group lock." + depends on LITMUS_DGL_SUPPORT + range 1 128 + default "10" + help + Dynamic group lock data structures are allocated on the process + stack when a group is requested. We set a maximum size of + locks in a dynamic group lock to avoid dynamic allocation. + + TODO: Batch DGL requests exceeding LITMUS_MAX_DGL_SIZE. 
+ endmenu menu "Performance Enhancements" diff --git a/litmus/binheap.c b/litmus/binheap.c index f76260e64b0b..22feea614e50 100644 --- a/litmus/binheap.c +++ b/litmus/binheap.c @@ -1,5 +1,7 @@ #include +//extern void dump_node_data(struct binheap_node* parent, struct binheap_node* child); +//extern void dump_node_data2(struct binheap_handle *handle, struct binheap_node* bad_node); int binheap_is_in_this_heap(struct binheap_node *node, struct binheap_handle* heap) @@ -29,6 +31,11 @@ static void __update_ref(struct binheap_node *parent, static void __binheap_swap(struct binheap_node *parent, struct binheap_node *child) { +// if(parent == BINHEAP_POISON || child == BINHEAP_POISON) { +// dump_node_data(parent, child); +// BUG(); +// } + swap(parent->data, child->data); __update_ref(parent, child); } @@ -185,12 +192,24 @@ static void __binheap_bubble_up( struct binheap_handle *handle, struct binheap_node *node) { - /* Note: NULL data pointers are used internally for arbitrary delete */ + //BUG_ON(!binheap_is_in_heap(node)); +// if(!binheap_is_in_heap(node)) +// { +// dump_node_data2(handle, node); +// BUG(); +// } + while((node->parent != NULL) && ((node->data == BINHEAP_POISON) /* let BINHEAP_POISON data bubble to the top */ || handle->compare(node, node->parent))) { __binheap_swap(node->parent, node); node = node->parent; + +// if(!binheap_is_in_heap(node)) +// { +// dump_node_data2(handle, node); +// BUG(); +// } } } @@ -228,6 +247,12 @@ void __binheap_add(struct binheap_node *new_node, struct binheap_handle *handle, void *data) { +// if(binheap_is_in_heap(new_node)) +// { +// dump_node_data2(handle, new_node); +// BUG(); +// } + new_node->data = data; new_node->ref = new_node; new_node->ref_ptr = &(new_node->ref); @@ -284,6 +309,12 @@ void __binheap_delete_root(struct binheap_handle *handle, { struct binheap_node *root = handle->root; +// if(!binheap_is_in_heap(container)) +// { +// dump_node_data2(handle, container); +// BUG(); +// } + if(root != container) { /* coalesce */ __binheap_swap_safe(handle, root, container); @@ -366,6 +397,18 @@ void __binheap_delete(struct binheap_node *node_to_delete, struct binheap_node *target = node_to_delete->ref; void *temp_data = target->data; +// if(!binheap_is_in_heap(node_to_delete)) +// { +// dump_node_data2(handle, node_to_delete); +// BUG(); +// } +// +// if(!binheap_is_in_heap(target)) +// { +// dump_node_data2(handle, target); +// BUG(); +// } + /* temporarily set data to null to allow node to bubble up to the top. 
*/ target->data = BINHEAP_POISON; @@ -373,7 +416,7 @@ void __binheap_delete(struct binheap_node *node_to_delete, __binheap_delete_root(handle, node_to_delete); node_to_delete->data = temp_data; /* restore node data pointer */ - node_to_delete->parent = BINHEAP_POISON; /* poison the node */ + //node_to_delete->parent = BINHEAP_POISON; /* poison the node */ } /** @@ -383,5 +426,18 @@ void __binheap_decrease(struct binheap_node *orig_node, struct binheap_handle *handle) { struct binheap_node *target = orig_node->ref; + +// if(!binheap_is_in_heap(orig_node)) +// { +// dump_node_data2(handle, orig_node); +// BUG(); +// } +// +// if(!binheap_is_in_heap(target)) +// { +// dump_node_data2(handle, target); +// BUG(); +// } +// __binheap_bubble_up(handle, target); } diff --git a/litmus/locking.c b/litmus/locking.c index 19ed5a8e16e9..b2f4a205cd04 100644 --- a/litmus/locking.c +++ b/litmus/locking.c @@ -6,6 +6,10 @@ #include #include +#ifdef CONFIG_LITMUS_DGL_SUPPORT +#include +#endif + static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg); static int open_generic_lock(struct od_table_entry* entry, void* __user arg); static int close_generic_lock(struct od_table_entry* entry); @@ -31,7 +35,7 @@ static inline struct litmus_lock* get_lock(struct od_table_entry* entry) atomic_t lock_id_gen = ATOMIC_INIT(0); -raw_spinlock_t rsm_global_lock; +//raw_spinlock_t rsm_global_lock; static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg) @@ -50,9 +54,9 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar lock->ident = atomic_inc_return(&lock_id_gen); - if(lock->ident == 1) { - raw_spin_lock_init(&rsm_global_lock); - } +// if(lock->ident == 1) { +// raw_spin_lock_init(&rsm_global_lock); +// } #endif *obj_ref = lock; } @@ -142,25 +146,322 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq) } -//#ifdef CONFIG_LITMUS_NESTED_LOCKING -///* not "lock_nest" ... get it? */ -//void nest_lock(struct litmus_lock *l, struct task_struct *t) -//{ -// if(tsk_rt(t)->last_lock) { -// /* push new lock to front of old lock */ -// struct litmus_lock *old = tsk_rt(t)->last_lock; -// -// list_add(&l->lock_chain, &old->lock_chain); -// } +#ifdef CONFIG_LITMUS_DGL_SUPPORT + +void select_next_lock(dgl_wait_state_t* dgl_wait, struct litmus_lock* prev_lock) +{ +// int i = dgl_wait->size - 1; + + + BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock); + + WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock); // -// tsk_rt(t)->last_lock = l; +// // since dgl_wait->task->blocked_lock, all locks after prev_lock +// // are already held. // -// // local inh now becomes transitive inh -// tsk_rt(t)->trans_prio = tsk_rt(t)->local_prio; // what about old transitive prio??? -// tsk_rt(t)->local_prio = NULL; +// // find the lock after prev. 
+// if(prev_lock) { +// for(/**/; i >= 0; --i) { +// if(prev_lock == dgl_wait->locks[i]) { +// --i; +// break; +// } +// else { +// BUG_ON(!dgl_wait->locks[i]->ops->is_owner(dgl_wait->locks[i], dgl_wait->task)); +// } +// } +// } + + for(dgl_wait->last_primary = dgl_wait->last_primary - 1; + dgl_wait->last_primary >= 0; + --(dgl_wait->last_primary)){ + if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner(dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) { + + tsk_rt(dgl_wait->task)->blocked_lock = dgl_wait->locks[dgl_wait->last_primary]; + mb(); + + TRACE_CUR("New blocked lock is %d\n", dgl_wait->locks[dgl_wait->last_primary]->ident); + + break; + } + } + +// for(/**/; i >= 0; --i) { +// struct litmus_lock *l = dgl_wait->locks[i]; +// if(!l->ops->is_owner(l, dgl_wait->task)) { +// +// tsk_rt(dgl_wait->task)->blocked_lock = l; +// mb(); +// +// TRACE_CUR("New blocked lock is %d\n", l->ident); +// +// if(dgl_wait->last_primary >= 0) +// { +// TRACE_CUR("old meth = %d; new meth = %d\n", l->ident, dgl_wait->locks[dgl_wait->last_primary]->ident); +// WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != l); +// } +// +// break; +// } +// else { +// TRACE_CUR("Lock %d is actually held!\n", l->ident); +// } +// } +} + +int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key) +{ + // should never be called. + BUG(); + return 1; +} + +void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task) +{ + wait_queue_t *q; + + *dgl_wait = NULL; + *task = NULL; + + if (waitqueue_active(wq)) { + q = list_entry(wq->task_list.next, + wait_queue_t, task_list); + + if(q->func == dgl_wake_up) { + *dgl_wait = (dgl_wait_state_t*) q->private; + } + else { + *task = (struct task_struct*) q->private; + } + + __remove_wait_queue(wq, q); + } +} + +void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait) +{ + init_waitqueue_entry(wq_node, dgl_wait->task); + wq_node->private = dgl_wait; + wq_node->func = dgl_wake_up; +} + + +static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait) +{ + int i; + unsigned long irqflags; //, dummyflags; + raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task); + + BUG_ON(dgl_wait->task != current); + + raw_spin_lock_irqsave(dgl_lock, irqflags); + + + dgl_wait->nr_remaining = dgl_wait->size; + //atomic_set(&dgl_wait->nr_remaining, dgl_wait->size); + + // try to acquire each lock. enqueue (non-blocking) if it is unavailable. + for(i = 0; i < dgl_wait->size; ++i) { + struct litmus_lock *l = dgl_wait->locks[i]; + + // dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks. + + if(l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i])) { + --(dgl_wait->nr_remaining); + //atomic_dec(&dgl_wait->nr_remaining); + TRACE_CUR("Acquired lock %d immediatly.\n", l->ident); + } + } + + //if(atomic_read(&dgl_wait->nr_remaining) == 0) { + if(dgl_wait->nr_remaining == 0) { + // acquired entire group immediatly + TRACE_CUR("Acquired all locks in DGL immediatly!\n"); + } + else { + + TRACE_CUR("As many as %d locks in DGL are pending. 
Suspending.\n", dgl_wait->nr_remaining); //atomic_read(&dgl_wait->nr_remaining)); + + for(i = dgl_wait->size - 1; i >= 0; --i) { + struct litmus_lock *l = dgl_wait->locks[i]; + if(!l->ops->is_owner(l, dgl_wait->task)) { // double-check to be thread safe + + TRACE_CUR("Activating priority inheritance on lock %d\n", l->ident); + + TS_DGL_LOCK_SUSPEND; + + l->ops->enable_priority(l, dgl_wait); + dgl_wait->last_primary = i; + + TRACE_CUR("Suspending for lock %d\n", l->ident); + + raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending + + schedule(); // suspend!!! + + TS_DGL_LOCK_RESUME; + + TRACE_CUR("Woken up from DGL suspension.\n"); + + goto all_acquired; // we should hold all locks when we wake up. + } + } + + TRACE_CUR("Didn't have to suspend after all, but calling schedule() anyway.\n"); + BUG(); + } + + raw_spin_unlock_irqrestore(dgl_lock, irqflags); + +all_acquired: + + // FOR SANITY CHECK FOR TESTING + for(i = 0; i < dgl_wait->size; ++i) { + struct litmus_lock *l = dgl_wait->locks[i]; + BUG_ON(!l->ops->is_owner(l, dgl_wait->task)); + } + + TRACE_CUR("Acquired entire DGL\n"); + + return 0; +} + +//static int supports_dgl(struct litmus_lock *l) +//{ +// struct litmus_lock_ops* ops = l->ops; +// +// return (ops->dgl_lock && +// ops->is_owner && +// ops->enable_priority); //} -//#endif +asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) +{ + struct task_struct *t = current; + long err = -EINVAL; + int dgl_ods[MAX_DGL_SIZE]; + int i; + + dgl_wait_state_t dgl_wait_state; // lives on the stack until all resources in DGL are held. + + if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) + goto out; + + if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int)))) + goto out; + + if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) + goto out; + + if (!is_realtime(t)) { + err = -EPERM; + goto out; + } + + for(i = 0; i < dgl_size; ++i) { + struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]); + if(entry && is_lock(entry)) { + dgl_wait_state.locks[i] = get_lock(entry); +// if(!supports_dgl(dgl_wait_state.locks[i])) { +// TRACE_CUR("Lock %d does not support all required DGL operations.\n", +// dgl_wait_state.locks[i]->ident); +// goto out; +// } + } + else { + TRACE_CUR("Invalid lock identifier\n"); + goto out; + } + } + + dgl_wait_state.task = t; + dgl_wait_state.size = dgl_size; + + TS_DGL_LOCK_START; + err = do_litmus_dgl_lock(&dgl_wait_state); + + /* Note: task my have been suspended or preempted in between! Take + * this into account when computing overheads. */ + TS_DGL_LOCK_END; + +out: + return err; +} + +static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size) +{ + int i; + long err = 0; + + TRACE_CUR("Unlocking a DGL of %d size\n", dgl_size); + + for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order + + struct litmus_lock *l = dgl_locks[i]; + long tmp_err; + + TRACE_CUR("Unlocking lock %d of DGL.\n", l->ident); + + tmp_err = l->ops->unlock(l); + + if(tmp_err) { + TRACE_CUR("There was an error unlocking %d: %d.\n", l->ident, tmp_err); + err = tmp_err; + } + } + + TRACE_CUR("DGL unlocked. 
err = %d\n", err); + + return err; +} + +asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size) +{ + long err = -EINVAL; + int dgl_ods[MAX_DGL_SIZE]; + struct od_table_entry* entry; + int i; + + struct litmus_lock* dgl_locks[MAX_DGL_SIZE]; + + if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) + goto out; + + if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int)))) + goto out; + + if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) + goto out; + + for(i = 0; i < dgl_size; ++i) { + entry = get_entry_for_od(dgl_ods[i]); + if(entry && is_lock(entry)) { + dgl_locks[i] = get_lock(entry); +// if(!supports_dgl(dgl_locks[i])) { +// TRACE_CUR("Lock %d does not support all required DGL operations.\n", +// dgl_locks[i]->ident); +// goto out; +// } + } + else { + TRACE_CUR("Invalid lock identifier\n"); + goto out; + } + } + + TS_DGL_UNLOCK_START; + err = do_litmus_dgl_unlock(dgl_locks, dgl_size); + + /* Note: task my have been suspended or preempted in between! Take + * this into account when computing overheads. */ + TS_DGL_UNLOCK_END; + +out: + return err; +} + +#endif #else diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 3d653bdca357..c0316c4a1b35 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -120,6 +120,9 @@ static struct binheap_handle gsnedf_cpu_heap; static rt_domain_t gsnedf; #define gsnedf_lock (gsnedf.ready_lock) +#ifdef CONFIG_LITMUS_DGL_SUPPORT +static raw_spinlock_t dgl_lock; +#endif /* Uncomment this if you want to see all scheduling decisions in the * TRACE() log. @@ -835,6 +838,43 @@ void print_hp_waiters(struct binheap_node* n, int depth) if(n->right) print_hp_waiters(n->right, depth+1); } +void dump_node_data(struct binheap_node* parent, struct binheap_node* child) +{ + struct binheap_node *root = (parent != BINHEAP_POISON) ? parent : child; + struct binheap_node *bad_node = (parent == BINHEAP_POISON) ? parent : child; + struct nested_info *nest; + + while(root->parent != NULL) { + root = root->parent; + } + + if(parent == BINHEAP_POISON) { + TRACE_CUR("parent was bad node.\n"); + } + else { + TRACE_CUR("child was bad node.\n"); + } + TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right); + + nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node); + TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1); + + print_hp_waiters(root, 1); +} + +void dump_node_data2(struct binheap_handle *handle, struct binheap_node* bad_node) +{ + struct binheap_node *root = handle->root; + struct nested_info *nest; + + TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right); + + nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node); + TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1); + + print_hp_waiters(root, 1); +} + /* called with IRQs off */ /* preconditions: @@ -861,12 +901,12 @@ static void nested_increase_priority_inheritance(struct task_struct* t, struct t } else { TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", blocked_lock->ident); - raw_spin_unlock_irqrestore(to_unlock, irqflags); + unlock_fine_irqrestore(to_unlock, irqflags); } } else { TRACE_TASK(t, "is not blocked. 
No propagation.\n"); - raw_spin_unlock_irqrestore(to_unlock, irqflags); + unlock_fine_irqrestore(to_unlock, irqflags); } } @@ -891,12 +931,12 @@ static void nested_decrease_priority_inheritance(struct task_struct* t, struct t } else { TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", blocked_lock); - raw_spin_unlock_irqrestore(to_unlock, irqflags); + unlock_fine_irqrestore(to_unlock, irqflags); } } else { TRACE_TASK(t, "is not blocked. No propagation.\n"); - raw_spin_unlock_irqrestore(to_unlock, irqflags); + unlock_fine_irqrestore(to_unlock, irqflags); } } @@ -930,16 +970,38 @@ static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock) struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex, struct task_struct* skip) { + wait_queue_t *q; struct list_head *pos; - struct task_struct *queued, *found = NULL; + struct task_struct *queued = NULL, *found = NULL; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_wait_state_t *dgl_wait = NULL; +#endif list_for_each(pos, &mutex->wait.task_list) { - queued = (struct task_struct*) list_entry(pos, wait_queue_t, - task_list)->private; + q = list_entry(pos, wait_queue_t, task_list); + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if(q->func == dgl_wake_up) { + dgl_wait = (dgl_wait_state_t*) q->private; + if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) { + queued = dgl_wait->task; + } + else { + queued = NULL; // skip it. + } + } + else { + queued = (struct task_struct*) q->private; + } +#else + queued = (struct task_struct*) q->private; +#endif /* Compare task prios, find high prio task. */ - if (queued != skip && edf_higher_prio(queued, found)) + if (queued && queued != skip && edf_higher_prio(queued, found)) { found = queued; + } } return found; } @@ -951,6 +1013,136 @@ static inline struct task_struct* top_priority(struct binheap_handle* handle) { return NULL; } +#ifdef CONFIG_LITMUS_DGL_SUPPORT +//static void gsnedf_rsm_mutex_reserve(struct litmus_lock *l, unsigned long *irqflags) +//{ +// struct rsm_mutex *mutex = rsm_mutex_from_lock(l); +// raw_spin_lock_irqsave(&mutex->lock, *irqflags); +//} +// +//static void gsnedf_rsm_mutex_unreserve(struct litmus_lock *l, unsigned long irqflags) +//{ +// struct rsm_mutex *mutex = rsm_mutex_from_lock(l); +// raw_spin_unlock_irqrestore(&mutex->lock, irqflags); +//} + +static raw_spinlock_t* gsn_edf_get_dgl_spinlock(struct task_struct *t) +{ + return(&dgl_lock); +} + +static int gsn_edf_rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t) +{ + struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + return(mutex->owner == t); +} + + +// return 1 if resource was immediatly acquired. +// Assumes mutex->lock is held. +// Must set task state to TASK_UNINTERRUPTIBLE if task blocks. 
+static int gsn_edf_rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node) +{ + struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + struct task_struct *t = dgl_wait->task; + + int acquired_immediatly = 0; + + BUG_ON(t != current); + + if (mutex->owner) { + TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident); + + init_dgl_waitqueue_entry(wq_node, dgl_wait); + + set_task_state(t, TASK_UNINTERRUPTIBLE); + __add_wait_queue_tail_exclusive(&mutex->wait, wq_node); + } else { + TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); + + /* it's ours now */ + mutex->owner = t; + + raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); + binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); + raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); + + acquired_immediatly = 1; + } + + return acquired_immediatly; +} + +// Assumes mutex->lock is held. +static void gsn_edf_rsm_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait) +{ + struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + struct task_struct *t = dgl_wait->task; + struct task_struct *owner = mutex->owner; + unsigned long flags = 0; // these are unused under DGL coarse-grain locking + + BUG_ON(owner == t); + + tsk_rt(t)->blocked_lock = l; + mb(); + + if (edf_higher_prio(t, mutex->hp_waiter)) { + + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + struct task_struct *new_prio = NULL; + + if(mutex->hp_waiter) + TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); + else + TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); + + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + //TRACE_TASK(owner, "Heap Before:\n"); + //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + mutex->hp_waiter = t; + l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); + + binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); + + //TRACE_TASK(owner, "Heap After:\n"); + //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); + + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if(new_max_eff_prio != old_max_eff_prio) { + TRACE_TASK(t, "is new hp_waiter.\n"); + + if ((effective_priority(owner) == old_max_eff_prio) || + (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ + new_prio = new_max_eff_prio; + } + } + else { + TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); + } + + //raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + if(new_prio) { + nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. 
+ } + else { + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&mutex->lock, flags); + } + } + else { + TRACE_TASK(t, "no change in hp_waiter.\n"); + unlock_fine_irqrestore(&mutex->lock, flags); + } +} +#endif + int gsnedf_rsm_mutex_lock(struct litmus_lock* l) { struct task_struct *t = current; @@ -962,9 +1154,10 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l) if (!is_realtime(t)) return -EPERM; - raw_spin_lock_irqsave(&mutex->lock, flags); - //raw_spin_lock_irqsave(&rsm_global_lock, flags); - + + lock_global_irqsave(&dgl_lock, flags); + lock_fine_irqsave(&mutex->lock, flags); + if (mutex->owner) { TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); @@ -1023,29 +1216,24 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l) TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); } - //raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - if(new_prio) { nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. } else { raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - raw_spin_unlock_irqrestore(&mutex->lock, flags); + unlock_fine_irqrestore(&mutex->lock, flags); } - } else { TRACE_TASK(t, "no change in hp_waiter.\n"); - raw_spin_unlock_irqrestore(&mutex->lock, flags); + + unlock_fine_irqrestore(&mutex->lock, flags); } - + unlock_global_irqrestore(&dgl_lock, flags); + TS_LOCK_SUSPEND; - - /* release lock before sleeping */ - //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); - //raw_spin_unlock_irqrestore(&mutex->lock, flags); - + /* We depend on the FIFO order. Thus, we don't need to recheck * when we wake up; we are guaranteed to have the lock since * there is only one wake up per release. @@ -1072,32 +1260,56 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l) binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); - raw_spin_unlock_irqrestore(&mutex->lock, flags); - //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); + + unlock_fine_irqrestore(&mutex->lock, flags); + unlock_global_irqrestore(&dgl_lock, flags); } return 0; } +#ifdef CONFIG_LITMUS_DGL_SUPPORT +void select_next_lock_if_primary(struct litmus_lock *l, dgl_wait_state_t *dgl_wait) +{ + if(tsk_rt(dgl_wait->task)->blocked_lock == l) { + TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid); + tsk_rt(dgl_wait->task)->blocked_lock = NULL; + mb(); + select_next_lock(dgl_wait, l); // pick the next lock to be blocked on + } + else { + TRACE_CUR("Got lock early! 
Lock %d in DGL was NOT primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid); + } +} +#endif + + int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) { - struct task_struct *t = current, *next; + struct task_struct *t = current, *next = NULL; struct rsm_mutex *mutex = rsm_mutex_from_lock(l); unsigned long flags; struct task_struct *old_max_eff_prio; + int wake_up_task = 1; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_wait_state_t *dgl_wait = NULL; +#endif int err = 0; - raw_spin_lock_irqsave(&mutex->lock, flags); - //raw_spin_lock_irqsave(&rsm_global_lock, flags); + lock_global_irqsave(&dgl_lock, flags); + lock_fine_irqsave(&mutex->lock, flags); if (mutex->owner != t) { err = -EINVAL; - goto out; + unlock_fine_irqrestore(&mutex->lock, flags); + unlock_global_irqrestore(&dgl_lock, flags); + return err; } @@ -1147,16 +1359,25 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) /* check if there are jobs waiting for this resource */ +#ifdef CONFIG_LITMUS_DGL_SUPPORT + __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next); + if(dgl_wait) { + next = dgl_wait->task; + //select_next_lock_if_primary(l, dgl_wait); + } +#else next = __waitqueue_remove_first(&mutex->wait); +#endif if (next) { /* next becomes the resouce holder */ mutex->owner = next; TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); - - tsk_rt(next)->blocked_lock = NULL; +// if(tsk_rt(next)->blocked_lock == &mutex->litmus_lock) { // might be false for DGL. +// tsk_rt(next)->blocked_lock = NULL; +// mb(); +// } - /* determine new hp_waiter if necessary */ if (next == mutex->hp_waiter) { @@ -1181,10 +1402,19 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node); //TRACE_TASK(next, "Heap After:\n"); - //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); - + //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if(dgl_wait) { + select_next_lock_if_primary(l, dgl_wait); + //wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); + --(dgl_wait->nr_remaining); + wake_up_task = (dgl_wait->nr_remaining == 0); + } +#endif raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); - } else { + } + else { /* Well, if 'next' is not the highest-priority waiter, * then it (probably) ought to inherit the highest-priority * waiter's priority. */ @@ -1198,6 +1428,16 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node); + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if(dgl_wait) { + select_next_lock_if_primary(l, dgl_wait); +// wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); + --(dgl_wait->nr_remaining); + wake_up_task = (dgl_wait->nr_remaining == 0); + } +#endif + //TRACE_TASK(next, "Heap After:\n"); //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); @@ -1209,26 +1449,53 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) * since the effective priority of hp_waiter can change (and the * update has not made it to this lock).) 
*/ +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if((l->nest.hp_waiter_eff_prio != NULL) && (top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) + { + if(dgl_wait && tsk_rt(next)->blocked_lock) { + BUG_ON(wake_up_task); + if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) { + nested_increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock. + goto out; // all spinlocks are released. bail out now. + } + } + else { + increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); + } + } + + raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); +#else if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) { increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); } - raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); +#endif + } + + if(wake_up_task) { + TRACE_TASK(next, "waking up since it is no longer blocked.\n"); + + tsk_rt(next)->blocked_lock = NULL; + mb(); + + wake_up_process(next); + } + else { + TRACE_TASK(next, "is still blocked.\n"); } - - /* wake up next */ - wake_up_process(next); } else { /* becomes available */ mutex->owner = NULL; } - + + unlock_fine_irqrestore(&mutex->lock, flags); + out: - raw_spin_unlock_irqrestore(&mutex->lock, flags); - //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); - + unlock_global_irqrestore(&dgl_lock, flags); + return err; } @@ -1241,8 +1508,8 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, struct rsm_mutex *mutex = rsm_mutex_from_lock(l); // relay-style locking - raw_spin_lock(&mutex->lock); - raw_spin_unlock(to_unlock); + lock_fine(&mutex->lock); + unlock_fine(to_unlock); if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked struct task_struct *owner = mutex->owner; @@ -1261,6 +1528,10 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, if(t == mutex->hp_waiter) { // reflect the decreased priority in the heap node. l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); + + BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node)); + BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks)); + binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); } @@ -1280,13 +1551,13 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, else { TRACE_CUR("Lower priority than holder %s/%d. 
No propagation.\n", owner->comm, owner->pid); raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - raw_spin_unlock_irqrestore(&mutex->lock, irqflags); + unlock_fine_irqrestore(&mutex->lock, irqflags); } } else { TRACE_TASK(mutex->owner, "No change in maxiumum effective priority.\n"); raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - raw_spin_unlock_irqrestore(&mutex->lock, irqflags); + unlock_fine_irqrestore(&mutex->lock, irqflags); } } else { @@ -1303,11 +1574,11 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, } else { TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); - raw_spin_unlock_irqrestore(&mutex->lock, irqflags); + unlock_fine_irqrestore(&mutex->lock, irqflags); } } else { - raw_spin_unlock_irqrestore(&mutex->lock, irqflags); + unlock_fine_irqrestore(&mutex->lock, irqflags); } } } @@ -1321,8 +1592,8 @@ void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, struct rsm_mutex *mutex = rsm_mutex_from_lock(l); // relay-style locking - raw_spin_lock(&mutex->lock); - raw_spin_unlock(to_unlock); + lock_fine(&mutex->lock); + unlock_fine(to_unlock); if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked if(t == mutex->hp_waiter) { @@ -1377,12 +1648,12 @@ void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, } else { raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - raw_spin_unlock_irqrestore(&mutex->lock, irqflags); + unlock_fine_irqrestore(&mutex->lock, irqflags); } } else { TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); - raw_spin_unlock_irqrestore(&mutex->lock, irqflags); + unlock_fine_irqrestore(&mutex->lock, irqflags); } } else { @@ -1399,11 +1670,11 @@ void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, } else { TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); - raw_spin_unlock_irqrestore(&mutex->lock, irqflags); + unlock_fine_irqrestore(&mutex->lock, irqflags); } } else { - raw_spin_unlock_irqrestore(&mutex->lock, irqflags); + unlock_fine_irqrestore(&mutex->lock, irqflags); } } } @@ -1418,14 +1689,15 @@ int gsnedf_rsm_mutex_close(struct litmus_lock* l) int owner; - raw_spin_lock_irqsave(&mutex->lock, flags); - //raw_spin_lock_irqsave(&rsm_global_lock, flags); + + lock_global_irqsave(&dgl_lock, flags); + lock_fine_irqsave(&mutex->lock, flags); owner = (mutex->owner == t); - raw_spin_unlock_irqrestore(&mutex->lock, flags); - //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); - + unlock_fine_irqrestore(&mutex->lock, flags); + unlock_global_irqrestore(&dgl_lock, flags); + if (owner) gsnedf_rsm_mutex_unlock(l); @@ -1443,7 +1715,15 @@ static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = { .unlock = gsnedf_rsm_mutex_unlock, .deallocate = gsnedf_rsm_mutex_free, .propagate_increase_inheritance = gsnedf_rsm_mutex_propagate_increase_inheritance, - .propagate_decrease_inheritance = gsnedf_rsm_mutex_propagate_decrease_inheritance + .propagate_decrease_inheritance = gsnedf_rsm_mutex_propagate_decrease_inheritance, + +#ifdef CONFIG_LITMUS_DGL_SUPPORT +// .reserve = gsnedf_rsm_mutex_reserve, +// .unreserve = gsnedf_rsm_mutex_unreserve, + .dgl_lock = gsn_edf_rsm_mutex_dgl_lock, + .is_owner = gsn_edf_rsm_mutex_is_owner, + .enable_priority = gsn_edf_rsm_enable_priority, +#endif }; static struct litmus_lock* gsnedf_new_rsm_mutex(void) @@ -1928,7 +2208,7 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fif TRACE_TASK(t, "No 
change in effective priority (is %s/%d). Propagation halted.\n", new_max_eff_prio->comm, new_max_eff_prio->pid); raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - raw_spin_unlock_irqrestore(&sem->lock, flags); + unlock_fine_irqrestore(&sem->lock, flags); } } else { @@ -1936,12 +2216,12 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fif fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); TRACE_TASK(t, "no owner??\n"); - raw_spin_unlock_irqrestore(&sem->lock, flags); + unlock_fine_irqrestore(&sem->lock, flags); } } else { TRACE_TASK(t, "hp_waiter is unaffected.\n"); - raw_spin_unlock_irqrestore(&sem->lock, flags); + unlock_fine_irqrestore(&sem->lock, flags); } } @@ -1955,7 +2235,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikg if(!owner) { TRACE_CUR("No owner. Returning.\n"); - raw_spin_unlock_irqrestore(&sem->lock, flags); + unlock_fine_irqrestore(&sem->lock, flags); return; } @@ -2004,7 +2284,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikg else { TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - raw_spin_unlock_irqrestore(&sem->lock, flags); + unlock_fine_irqrestore(&sem->lock, flags); } } @@ -2049,7 +2329,7 @@ static void ikglp_remove_donation_from_owner(struct binheap_node *n, struct fifo else { TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - raw_spin_unlock_irqrestore(&sem->lock, flags); + unlock_fine_irqrestore(&sem->lock, flags); } } @@ -2103,7 +2383,7 @@ static void ikglp_get_immediate(struct task_struct* t, struct fifo_queue *fq, st sem->shortest_fifo_queue = ikglp_find_shortest(sem, sem->shortest_fifo_queue); - raw_spin_unlock_irqrestore(&sem->lock, flags); + unlock_fine_irqrestore(&sem->lock, flags); } @@ -2136,9 +2416,9 @@ static void __ikglp_enqueue_on_fq( } // update donor eligiblity list. if(likely(donee_heap_node)) { - if(binheap_is_in_heap(&donee_heap_node->node)) { - WARN_ON(1); - } +// if(binheap_is_in_heap(&donee_heap_node->node)) { +// WARN_ON(1); +// } ikglp_add_donees(sem, fq, t, donee_heap_node); } @@ -2353,7 +2633,7 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, ikglp_wait_state TRACE_TASK(t, "No change in effective priority (it is %d/%s). 
BUG?\n", new_max_eff_prio->comm, new_max_eff_prio->pid); raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); - raw_spin_unlock_irqrestore(&sem->lock, flags); + unlock_fine_irqrestore(&sem->lock, flags); } @@ -2366,7 +2646,7 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l) { struct task_struct* t = current; struct ikglp_semaphore *sem = ikglp_from_lock(l); - unsigned long flags, real_flags; + unsigned long flags = 0, real_flags; struct fifo_queue *fq = NULL; int replica = -EINVAL; @@ -2376,13 +2656,17 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l) return -EPERM; raw_spin_lock_irqsave(&sem->real_lock, real_flags); - raw_spin_lock_irqsave(&sem->lock, flags); + + lock_global_irqsave(&dgl_lock, flags); + lock_fine_irqsave(&sem->lock, flags); if(sem->shortest_fifo_queue->count == 0) { // take available resource replica = ikglp_get_idx(sem, sem->shortest_fifo_queue); ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock + + unlock_global_irqrestore(&dgl_lock, flags); raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); } else @@ -2410,7 +2694,7 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l) if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { // enqueue on PQ ikglp_enqueue_on_pq(sem, &wait); - raw_spin_unlock_irqrestore(&sem->lock, flags); + unlock_fine_irqrestore(&sem->lock, flags); } else { // enqueue as donor @@ -2418,6 +2702,7 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l) } } + unlock_global_irqrestore(&dgl_lock, flags); raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); TS_LOCK_SUSPEND; @@ -2631,12 +2916,14 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l) struct fifo_queue *to_steal = NULL; struct fifo_queue *fq; - unsigned long flags, real_flags; + unsigned long flags = 0, real_flags; int err = 0; raw_spin_lock_irqsave(&sem->real_lock, real_flags); - raw_spin_lock_irqsave(&sem->lock, flags); + + lock_global_irqsave(&dgl_lock, flags); // TODO: Push this deeper + lock_fine_irqsave(&sem->lock, flags); fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner. @@ -2781,7 +3068,7 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l) ikglp_get_idx(sem, other_fq)); ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags); - raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! + lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! } else { TRACE_TASK(t, "Donee %s/%d is an blocked in of fq %d.\n", @@ -2801,7 +3088,7 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l) (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : -1); ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it. - raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! + lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! } } } @@ -2810,7 +3097,7 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l) ikglp_get_idx(sem, to_steal)); ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it. - raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! + lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! } // check for new HP waiter. 
@@ -2930,7 +3217,8 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l) } out: - raw_spin_unlock_irqrestore(&sem->lock, flags); + unlock_fine_irqrestore(&sem->lock, flags); + unlock_global_irqrestore(&dgl_lock, flags); raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); @@ -2947,7 +3235,7 @@ static int gsnedf_ikglp_close(struct litmus_lock* l) int owner = 0; int i; - raw_spin_lock_irqsave(&sem->lock, flags); + raw_spin_lock_irqsave(&sem->real_lock, flags); for(i = 0; i < sem->nr_replicas; ++i) { if(sem->fifo_queues[i].owner == t) { @@ -2956,7 +3244,7 @@ static int gsnedf_ikglp_close(struct litmus_lock* l) } } - raw_spin_unlock_irqrestore(&sem->lock, flags); + raw_spin_unlock_irqrestore(&sem->real_lock, flags); if (owner) gsnedf_ikglp_unlock(l); @@ -3384,6 +3672,9 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { #ifdef CONFIG_LITMUS_LOCKING .allocate_lock = gsnedf_allocate_lock, #endif +#ifdef CONFIG_LITMUS_DGL_SUPPORT + .get_dgl_spinlock = gsn_edf_get_dgl_spinlock, +#endif }; @@ -3401,6 +3692,11 @@ static int __init init_gsn_edf(void) INIT_BINHEAP_NODE(&entry->hn); } + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + raw_spin_lock_init(&dgl_lock); +#endif + edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); return register_sched_plugin(&gsn_edf_plugin); } diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index 00a1900d6457..77ae3eeb3966 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c @@ -120,6 +120,17 @@ static long litmus_dummy_allocate_lock(struct litmus_lock **lock, int type, #endif +#ifdef CONFIG_LITMUS_DGL_SUPPORT + +static raw_spinlock_t* litmus_dummy_get_dgl_spinlock(struct task_struct *t) +{ + BUG(); + return NULL; +} + +#endif + + /* The default scheduler plugin. It doesn't do anything and lets Linux do its * job. @@ -138,6 +149,9 @@ struct sched_plugin linux_sched_plugin = { .deactivate_plugin = litmus_dummy_deactivate_plugin, #ifdef CONFIG_LITMUS_LOCKING .allocate_lock = litmus_dummy_allocate_lock, +#endif +#ifdef CONFIG_LITMUS_DGL_SUPPORT + .get_dgl_spinlock = litmus_dummy_get_dgl_spinlock, #endif .admit_task = litmus_dummy_admit_task }; @@ -176,6 +190,9 @@ int register_sched_plugin(struct sched_plugin* plugin) CHECK(deactivate_plugin); #ifdef CONFIG_LITMUS_LOCKING CHECK(allocate_lock); +#endif +#ifdef CONFIG_LITMUS_DGL_SUPPORT + CHECK(get_dgl_spinlock); #endif CHECK(admit_task); -- cgit v1.2.2
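Usage sketch for the two new system calls. This is not part of the patch: sys_litmus_dgl_lock() and sys_litmus_dgl_unlock() take an array of LITMUS^RT lock object descriptors plus its length, acquire the whole group atomically in FIFO order, and release it in reverse order. The wrapper names litmus_dgl_lock()/litmus_dgl_unlock() below are assumed stand-ins for whatever user-space layer forwards the descriptor array to the syscalls (added here as __LSC(12)/__LSC(13)); how the descriptors od_a and od_b were opened is likewise left out.

#include <stdio.h>

/* Assumed wrappers around __NR_litmus_dgl_lock / __NR_litmus_dgl_unlock;
 * each simply passes the descriptor array and its size to the kernel. */
extern int litmus_dgl_lock(int *dgl_ods, int dgl_size);
extern int litmus_dgl_unlock(int *dgl_ods, int dgl_size);

int use_two_resources(int od_a, int od_b)
{
	/* Group size is bounded by CONFIG_LITMUS_MAX_DGL_SIZE (default 10);
	 * sys_litmus_dgl_lock() rejects sizes outside [1, MAX_DGL_SIZE] and
	 * callers that are not LITMUS^RT real-time tasks. */
	int group[2] = { od_a, od_b };
	int ret;

	/* Enqueues on every lock in the group atomically (under the plugin's
	 * dgl_lock) and suspends at most once, resuming only when all locks
	 * in the group are held. */
	ret = litmus_dgl_lock(group, 2);
	if (ret != 0) {
		fprintf(stderr, "litmus_dgl_lock failed: %d\n", ret);
		return ret;
	}

	/* ... critical section using both resources ... */

	/* do_litmus_dgl_unlock() releases the group in reverse order. */
	return litmus_dgl_unlock(group, 2);
}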
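The coarse-/fine-grain switch from include/litmus/locking.h can also be illustrated outside the kernel. The sketch below is a minimal user-space model, assuming pthread spinlocks stand in for raw_spinlock_t and a plain compile-time flag stands in for CONFIG_LITMUS_DGL_SUPPORT; it only demonstrates that exactly one of the two lock/unlock pairs expands to a real acquisition on any given configuration (the patch defines the disabled pair as empty macros; do/while(0) is used here for safety).

#include <pthread.h>
#include <stdio.h>

#define DGL_SUPPORT 1	/* set to 0 to model CONFIG_LITMUS_DGL_SUPPORT=n */

#if DGL_SUPPORT
/* DGLs enabled: the global lock is real, per-lock locks are no-ops. */
#define lock_global(l)		pthread_spin_lock(l)
#define unlock_global(l)	pthread_spin_unlock(l)
#define lock_fine(l)		do { } while (0)
#define unlock_fine(l)		do { } while (0)
#else
/* DGLs disabled: per-lock locks are real, the global lock is a no-op. */
#define lock_global(l)		do { } while (0)
#define unlock_global(l)	do { } while (0)
#define lock_fine(l)		pthread_spin_lock(l)
#define unlock_fine(l)		pthread_spin_unlock(l)
#endif

static pthread_spinlock_t dgl_lock;	/* stands in for the plugin-wide dgl_lock */
static pthread_spinlock_t mutex_lock;	/* stands in for rsm_mutex->lock */

static void lock_path(void)
{
	/* Mirrors the ordering used in gsnedf_rsm_mutex_lock(): take the
	 * coarse group lock first, then the per-mutex lock; exactly one of
	 * the two expands to a real acquisition. */
	lock_global(&dgl_lock);
	lock_fine(&mutex_lock);

	/* ... wait-queue and priority-heap manipulation would go here ... */

	unlock_fine(&mutex_lock);
	unlock_global(&dgl_lock);
}

int main(void)
{
	pthread_spin_init(&dgl_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&mutex_lock, PTHREAD_PROCESS_PRIVATE);
	lock_path();
	puts("lock/unlock path completed");
	return 0;
}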