From a463f9a9e04385f0729f7435a0a6dff7d89b25de Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Sat, 26 May 2012 17:29:58 -0400
Subject: GPUSync patch for Litmus 2012.1.

---
 kernel/mutex.c | 125 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 125 insertions(+)

(limited to 'kernel/mutex.c')

diff --git a/kernel/mutex.c b/kernel/mutex.c
index d607ed5dd441..2f363b9bfc1f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -498,3 +498,128 @@ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
 	return 1;
 }
 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+
+
+
+
+void mutex_lock_sfx(struct mutex *lock,
+                    side_effect_t pre, unsigned long pre_arg,
+                    side_effect_t post, unsigned long post_arg)
+{
+        long state = TASK_UNINTERRUPTIBLE;
+
+        struct task_struct *task = current;
+        struct mutex_waiter waiter;
+        unsigned long flags;
+
+        preempt_disable();
+        mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+
+        spin_lock_mutex(&lock->wait_lock, flags);
+
+        if (pre)
+        {
+                if (unlikely(pre(pre_arg)))
+                {
+                        /* bailing out here will confuse lockdep's CONFIG_PROVE_LOCKING... */
+                        spin_unlock_mutex(&lock->wait_lock, flags);
+                        preempt_enable();
+                        return;
+                }
+        }
+
+        debug_mutex_lock_common(lock, &waiter);
+        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+
+        /* add waiting tasks to the end of the waitqueue (FIFO): */
+        list_add_tail(&waiter.list, &lock->wait_list);
+        waiter.task = task;
+
+        if (atomic_xchg(&lock->count, -1) == 1)
+                goto done;
+
+        lock_contended(&lock->dep_map, _RET_IP_);
+
+        for (;;) {
+                /*
+                 * Let's try to take the lock again - this is needed even if
+                 * we get here for the first time (shortly after failing to
+                 * acquire the lock), to make sure that we get a wakeup once
+                 * it's unlocked. Later on, if we sleep, this is the
+                 * operation that gives us the lock. We xchg it to -1, so
+                 * that when we release the lock, we properly wake up the
+                 * other waiters:
+                 */
+                if (atomic_xchg(&lock->count, -1) == 1)
+                        break;
+
+                __set_task_state(task, state);
+
+                /* didn't get the lock, go to sleep: */
+                spin_unlock_mutex(&lock->wait_lock, flags);
+                preempt_enable_no_resched();
+                schedule();
+                preempt_disable();
+                spin_lock_mutex(&lock->wait_lock, flags);
+        }
+
+done:
+        lock_acquired(&lock->dep_map, _RET_IP_);
+        /* got the lock - rejoice! */
+        mutex_remove_waiter(lock, &waiter, current_thread_info());
+        mutex_set_owner(lock);
+
+        /* set it to 0 if there are no waiters left: */
+        if (likely(list_empty(&lock->wait_list)))
+                atomic_set(&lock->count, 0);
+
+        if (post)
+                post(post_arg);
+
+        spin_unlock_mutex(&lock->wait_lock, flags);
+
+        debug_mutex_free_waiter(&waiter);
+        preempt_enable();
+}
+EXPORT_SYMBOL(mutex_lock_sfx);
+
+void mutex_unlock_sfx(struct mutex *lock,
+                      side_effect_t pre, unsigned long pre_arg,
+                      side_effect_t post, unsigned long post_arg)
+{
+        unsigned long flags;
+
+        spin_lock_mutex(&lock->wait_lock, flags);
+
+        if (pre)
+                pre(pre_arg);
+
+        /* was: mutex_release(&lock->dep_map, nested, _RET_IP_); */
+        mutex_release(&lock->dep_map, 1, _RET_IP_);
+        debug_mutex_unlock(lock);
+
+        /*
+         * some architectures leave the lock unlocked in the fastpath failure
+         * case, others need to leave it locked. In the latter case we have to
+         * unlock it here
+         */
+        if (__mutex_slowpath_needs_to_unlock())
+                atomic_set(&lock->count, 1);
+
+        if (!list_empty(&lock->wait_list)) {
+                /* get the first entry from the wait-list: */
+                struct mutex_waiter *waiter =
+                        list_entry(lock->wait_list.next,
+                                   struct mutex_waiter, list);
+
+                debug_mutex_wake_waiter(lock, waiter);
+
+                wake_up_process(waiter->task);
+        }
+
+        if (post)
+                post(post_arg);
+
+        spin_unlock_mutex(&lock->wait_lock, flags);
+}
+EXPORT_SYMBOL(mutex_unlock_sfx);
--
cgit v1.2.2
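The hunk above covers only the kernel/mutex.c half of the change; side_effect_t and the mutex_lock_sfx()/mutex_unlock_sfx() prototypes are declared elsewhere in the GPUSync patch and are not shown here. The following is a minimal caller sketch, under the assumption that side_effect_t is a callback of the form long (*)(unsigned long) and that a nonzero return from the pre-lock hook makes mutex_lock_sfx() back out without acquiring the mutex, as the unlikely(pre(pre_arg)) test above suggests. All my_* names are invented for illustration.

/*
 * Hypothetical caller sketch -- not part of the patch above.  The
 * typedef and prototypes below are assumptions; in the full GPUSync
 * patch they live in a header, not in kernel/mutex.c.
 */
#include <linux/mutex.h>

typedef long (*side_effect_t)(unsigned long);   /* assumed shape of the hook type */

void mutex_lock_sfx(struct mutex *lock,
                    side_effect_t pre, unsigned long pre_arg,
                    side_effect_t post, unsigned long post_arg);
void mutex_unlock_sfx(struct mutex *lock,
                      side_effect_t pre, unsigned long pre_arg,
                      side_effect_t post, unsigned long post_arg);

struct my_resource {
        struct mutex lock;
        int holders;            /* bookkeeping updated under lock->wait_lock */
};

/* post-lock hook: runs with lock->wait_lock held, after the mutex is acquired */
static long my_post_lock(unsigned long arg)
{
        struct my_resource *res = (struct my_resource *)arg;
        res->holders++;
        return 0;
}

/* pre-unlock hook: runs with lock->wait_lock held, before the next waiter is woken */
static long my_pre_unlock(unsigned long arg)
{
        struct my_resource *res = (struct my_resource *)arg;
        res->holders--;
        return 0;
}

static void my_use_resource(struct my_resource *res)
{
        /* no pre-lock hook here; a nonzero return from one would make
         * mutex_lock_sfx() return without acquiring the mutex */
        mutex_lock_sfx(&res->lock, NULL, 0, my_post_lock, (unsigned long)res);

        /* ... critical section ... */

        mutex_unlock_sfx(&res->lock, my_pre_unlock, (unsigned long)res, NULL, 0);
}

Because both hooks execute with lock->wait_lock held (and, on the lock side, with preemption disabled), they must not sleep. Note also that mutex_lock_sfx() returns void, so a pre hook that aborts the acquisition has to record that outcome somewhere the caller can check.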