/* litmus/locking.c */

#include <litmus/fdso.h>

#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/sched_plugin.h>
#include <litmus/trace.h>
#include <litmus/litmus.h>

#ifdef CONFIG_LITMUS_DGL_SUPPORT
#include <linux/uaccess.h>
#endif

static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg);
static int open_generic_lock(struct od_table_entry* entry, void* __user arg);
static int close_generic_lock(struct od_table_entry* entry);
static void destroy_generic_lock(obj_type_t type, void* sem);

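/* FDSO callbacks through which generic lock objects are exposed to
 * userspace as object descriptors. */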
struct fdso_ops generic_lock_ops = {
	.create  = create_generic_lock,
	.open    = open_generic_lock,
	.close   = close_generic_lock,
	.destroy = destroy_generic_lock
};

static inline bool is_lock(struct od_table_entry* entry)
{
	return entry->class == &generic_lock_ops;
}

static inline struct litmus_lock* get_lock(struct od_table_entry* entry)
{
	BUG_ON(!is_lock(entry));
	return (struct litmus_lock*) entry->obj->obj;
}


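/* Generator of unique lock IDs; the IDs are used in tracing and
 * debug output. */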
atomic_t lock_id_gen = ATOMIC_INIT(0);


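/* Allocate a new lock of the given type via the active plugin and
 * initialize the generic (plugin-independent) lock state. */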
static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg)
{
	struct litmus_lock* lock;
	int err;

	err = litmus->allocate_lock(&lock, type, arg);
	if (err == 0) {
#ifdef CONFIG_LITMUS_NESTED_LOCKING
		lock->nest.lock = lock;
		lock->nest.hp_waiter_eff_prio = NULL;
		
		INIT_BINHEAP_NODE(&lock->nest.hp_binheap_node);
		WARN_ON(!(lock->nest.hp_waiter_ptr));

		lock->ident = atomic_inc_return(&lock_id_gen);
#endif
		*obj_ref = lock;
	}
	return err;
}

static int open_generic_lock(struct od_table_entry* entry, void* __user arg)
{
	struct litmus_lock* lock = get_lock(entry);
	if (lock->ops->open)
		return lock->ops->open(lock, arg);
	else
		return 0; /* default: any task can open it */
}

static int close_generic_lock(struct od_table_entry* entry)
{
	struct litmus_lock* lock = get_lock(entry);
	if (lock->ops->close)
		return lock->ops->close(lock);
	else
		return 0; /* default: closing succeeds */
}

static void destroy_generic_lock(obj_type_t type, void* obj)
{
	struct litmus_lock* lock = (struct litmus_lock*) obj;
	lock->ops->deallocate(lock);
}

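/* Syscall: acquire the lock referenced by the object descriptor lock_od.
 * The calling task may block (and be preempted) inside l->ops->lock(). */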
asmlinkage long sys_litmus_lock(int lock_od)
{
	long err = -EINVAL;
	struct od_table_entry* entry;
	struct litmus_lock* l;

	TS_LOCK_START;

	entry = get_entry_for_od(lock_od);
	if (entry && is_lock(entry)) {
		l = get_lock(entry);
		TRACE_CUR("attempts to lock 0x%p\n", l);
		err = l->ops->lock(l);
	}

	/* Note: the task may have been suspended or preempted in between!  Take
	 * this into account when computing overheads. */
	TS_LOCK_END;

	return err;
}

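/* Syscall: release the lock referenced by lock_od.  Fails with -EINVAL
 * if lock_od does not refer to a lock object. */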
asmlinkage long sys_litmus_unlock(int lock_od)
{
	long err = -EINVAL;
	struct od_table_entry* entry;
	struct litmus_lock* l;

	TS_UNLOCK_START;

	entry = get_entry_for_od(lock_od);
	if (entry && is_lock(entry)) {
		l = get_lock(entry);
		TRACE_CUR("attempts to unlock 0x%p\n", l);
		err = l->ops->unlock(l);
	}

	/* Note: the task may have been preempted in between!  Take this into
	 * account when computing overheads. */
	TS_UNLOCK_END;

	return err;
}

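/* Dequeue and return the first task waiting on wq, or NULL if the wait
 * queue is empty.  The caller must hold the lock protecting the queue. */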
struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
{
	wait_queue_t* q;
	struct task_struct* t = NULL;

	if (waitqueue_active(wq)) {
		q = list_entry(wq->task_list.next,
			       wait_queue_t, task_list);
		t = (struct task_struct*) q->private;
		__remove_wait_queue(wq, q);
	}
	return(t);
}


#ifdef CONFIG_LITMUS_DGL_SUPPORT

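/* Called when dgl_wait->task acquires prev_lock: advance the task's
 * "primary" wait to the next lock in the group that it does not yet hold. */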
void select_next_lock(dgl_wait_state_t* dgl_wait, struct litmus_lock* prev_lock)
{
	BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock);

	WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock);

	/* All locks at indices above last_primary are already held;
	 * scan downward for the next one we do not own and block on it. */
	for(dgl_wait->last_primary = dgl_wait->last_primary - 1;
	    dgl_wait->last_primary >= 0;
	    --(dgl_wait->last_primary)) {
		struct litmus_lock *l = dgl_wait->locks[dgl_wait->last_primary];

		if(!l->ops->is_owner(l, dgl_wait->task)) {

			tsk_rt(dgl_wait->task)->blocked_lock = l;
			mb();

			TRACE_CUR("New blocked lock is %d\n", l->ident);

			break;
		}
	}
}

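/* Wake-up callback used to mark DGL wait nodes.  It only serves to let
 * __waitqueue_dgl_remove_first() tell DGL requests apart from ordinary
 * waiters; DGL tasks are woken up by other means, so it must never run. */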
int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key)
{
	// should never be called.
	BUG();
	return 1;
}

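/* Dequeue the first waiter on wq.  If the queue is non-empty, exactly one
 * of *dgl_wait and *task is set non-NULL, depending on whether the waiter
 * is a DGL request or an ordinary task. */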
void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task)
{
	wait_queue_t *q;
	
	*dgl_wait = NULL;
	*task = NULL;
	
	if (waitqueue_active(wq)) {
		q = list_entry(wq->task_list.next,
					   wait_queue_t, task_list);
		
		if(q->func == dgl_wake_up) {
			*dgl_wait = (dgl_wait_state_t*) q->private;
		}
		else {
			*task = (struct task_struct*) q->private;
		}
		
		__remove_wait_queue(wq, q);
	}
}

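/* Initialize a wait queue node for a DGL request: the node carries the
 * dgl_wait state instead of a task pointer and uses dgl_wake_up as a
 * marker. */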
void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait)
{
	init_waitqueue_entry(wq_node, dgl_wait->task);
	wq_node->private = dgl_wait;
	wq_node->func = dgl_wake_up;
}


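/* Acquire all locks in the DGL under the global DGL spinlock.  Locks that
 * are unavailable enqueue us without blocking; if any remain unowned, we
 * enable priority inheritance on the last such lock and suspend.  By the
 * time we wake up, we hold the entire group. */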
static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
{
	int i;
	unsigned long irqflags;
	raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task);
	
	BUG_ON(dgl_wait->task != current);
	
	raw_spin_lock_irqsave(dgl_lock, irqflags);

	dgl_wait->nr_remaining = dgl_wait->size;
	
	// try to acquire each lock.  enqueue (non-blocking) if it is unavailable.
	for(i = 0; i < dgl_wait->size; ++i) {
		struct litmus_lock *l = dgl_wait->locks[i];
		
		// dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks.
		
		if(l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i])) {
			--(dgl_wait->nr_remaining);
			TRACE_CUR("Acquired lock %d immediately.\n", l->ident);
		}
	}

	if(dgl_wait->nr_remaining == 0) {
		// acquired the entire group immediately
		TRACE_CUR("Acquired all locks in DGL immediately!\n");
	}
	else {
		TRACE_CUR("As many as %d locks in DGL are pending. Suspending.\n",
				  dgl_wait->nr_remaining);
		
		for(i = dgl_wait->size - 1; i >= 0; --i) {
			struct litmus_lock *l = dgl_wait->locks[i];
			if(!l->ops->is_owner(l, dgl_wait->task)) {  // double-check to be thread safe
				
				TRACE_CUR("Activating priority inheritance on lock %d\n", l->ident);
				
				TS_DGL_LOCK_SUSPEND;
				
				l->ops->enable_priority(l, dgl_wait);
				dgl_wait->last_primary = i;
				
				TRACE_CUR("Suspending for lock %d\n", l->ident);
				
				raw_spin_unlock_irqrestore(dgl_lock, irqflags);  // free dgl_lock before suspending
				
				schedule();  // suspend!!!
				
				TS_DGL_LOCK_RESUME;
				
				TRACE_CUR("Woken up from DGL suspension.\n");
				
				goto all_acquired;  // we should hold all locks when we wake up.
			}
		}
		
		/* Unreachable: nr_remaining was nonzero, yet we own every lock. */
		TRACE_CUR("Didn't have to suspend after all? This should not happen.\n");
		BUG();
	}
	
	raw_spin_unlock_irqrestore(dgl_lock, irqflags);
	
all_acquired:
	
	// sanity check: we must now hold every lock in the group
	for(i = 0; i < dgl_wait->size; ++i) {
		struct litmus_lock *l = dgl_wait->locks[i];
		BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
	}
	
	TRACE_CUR("Acquired entire DGL\n");

	return 0;
}


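/* Syscall: atomically acquire a group of locks, given as an array of
 * dgl_size object descriptors in userspace. */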
asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
{
	struct task_struct *t = current;
	long err = -EINVAL;
	int dgl_ods[MAX_DGL_SIZE];
	int i;
	
	dgl_wait_state_t dgl_wait_state;  // lives on the stack until all resources in DGL are held.
	
	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
		goto out;
	
	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int))))
		goto out;
	
	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int))))
		goto out;
	
	if (!is_realtime(t)) {
		err = -EPERM;
		goto out;
	}
	
	for(i = 0; i < dgl_size; ++i) {
		struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]);
		if(entry && is_lock(entry)) {
			dgl_wait_state.locks[i] = get_lock(entry);
		}
		else {
			TRACE_CUR("Invalid lock identifier\n");
			goto out;
		}
	}
	
	dgl_wait_state.task = t;
	dgl_wait_state.size = dgl_size;
	
	TS_DGL_LOCK_START;
	err = do_litmus_dgl_lock(&dgl_wait_state);
	
	/* Note: the task may have been suspended or preempted in between!  Take
	 * this into account when computing overheads. */
	TS_DGL_LOCK_END;	
	
out:
	return err;
}

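/* Release all locks of a DGL in reverse acquisition order.  Returns the
 * last error encountered, if any. */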
static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
{
	int i;
	long err = 0;
	
	TRACE_CUR("Unlocking a DGL of size %d\n", dgl_size);
	
	for(i = dgl_size - 1; i >= 0; --i) {  // unlock in reverse order
		
		struct litmus_lock *l = dgl_locks[i];
		long tmp_err;
		
		TRACE_CUR("Unlocking lock %d of DGL.\n", l->ident);
		
		tmp_err = l->ops->unlock(l);
		
		if(tmp_err) {
			TRACE_CUR("There was an error unlocking %d: %ld.\n", l->ident, tmp_err);
			err = tmp_err;
		}
	}
	
	TRACE_CUR("DGL unlocked. err = %ld\n", err);
	
	return err;
}

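/* Syscall: release a group of dgl_size locks, given as an array of
 * object descriptors in userspace. */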
asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
{
	long err = -EINVAL;
	int dgl_ods[MAX_DGL_SIZE];
	struct od_table_entry* entry;
	int i;
	
	struct litmus_lock* dgl_locks[MAX_DGL_SIZE];
	
	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
		goto out;
	
	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int))))
		goto out;
	
	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int))))
		goto out;
	
	for(i = 0; i < dgl_size; ++i) {
		entry = get_entry_for_od(dgl_ods[i]);
		if(entry && is_lock(entry)) {
			dgl_locks[i] = get_lock(entry);
		}
		else {
			TRACE_CUR("Invalid lock identifier\n");
			goto out;
		}
	}
	
	TS_DGL_UNLOCK_START;
	err = do_litmus_dgl_unlock(dgl_locks, dgl_size);
	
	/* Note: the task may have been suspended or preempted in between!  Take
	 * this into account when computing overheads. */
	TS_DGL_UNLOCK_END;	
	
out:
	return err;	
}

#endif /* CONFIG_LITMUS_DGL_SUPPORT */


#else

struct fdso_ops generic_lock_ops = {};

asmlinkage long sys_litmus_lock(int sem_od)
{
	return -ENOSYS;
}

asmlinkage long sys_litmus_unlock(int sem_od)
{
	return -ENOSYS;
}

#endif /* CONFIG_LITMUS_LOCKING */