author     Glenn Elliott <gelliott@cs.unc.edu>  2013-01-10 16:29:31 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2013-01-10 16:29:31 -0500
commit     32849f80c0c4a0c8a1802525bdecc6cd4447d545 (patch)
tree       83d88fbb96fe343a21cd507de5710a40bb528f3f
parent     8d00682ce5ddaedfb62287773d21c727f08fda69 (diff)

    remove sem/mutex hacks needed for old klmirqd

Diffstat:
-rw-r--r--  include/linux/mutex.h      10
-rw-r--r--  include/linux/semaphore.h   9
-rw-r--r--  kernel/mutex.c            125
-rw-r--r--  kernel/semaphore.c         13
4 files changed, 4 insertions(+), 153 deletions(-)
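
This commit drops the LITMUS^RT-specific "side effect" locking API
(side_effect_t, mutex_lock_sfx(), mutex_unlock_sfx()) and the exported
semaphore internals (__down(), __up(), struct semaphore_waiter) that the old
klmirqd interrupt-handling threads relied on, returning all four files to
their stock mainline form. For context, a caller of the removed API looked
roughly like the sketch below, reconstructed from the deleted declarations;
my_mutex, n_contended, note_contention, and example are invented
illustration names, not code from this tree:

    static DEFINE_MUTEX(my_mutex);            /* invented example mutex */
    static atomic_t n_contended = ATOMIC_INIT(0);

    /* pre callback: invoked under lock->wait_lock before the task queues
     * itself; returning non-zero aborts the acquisition (per the removed
     * header comment, only the pre side effect may abort). */
    static int note_contention(unsigned long arg)
    {
            atomic_inc((atomic_t *)arg);      /* some bookkeeping side effect */
            return 0;                         /* 0: proceed and take the lock */
    }

    static void example(void)
    {
            mutex_lock_sfx(&my_mutex, note_contention,
                           (unsigned long)&n_contended, NULL, 0);
            /* ... critical section ... */
            mutex_unlock_sfx(&my_mutex, NULL, 0, NULL, 0);
    }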
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index cb47debbf24d..a940fe435aca 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -126,15 +126,6 @@ static inline int mutex_is_locked(struct mutex *lock)
 	return atomic_read(&lock->count) != 1;
 }
 
-/* return non-zero to abort. only pre-side-effects may abort */
-typedef int (*side_effect_t)(unsigned long);
-extern void mutex_lock_sfx(struct mutex *lock,
-		side_effect_t pre, unsigned long pre_arg,
-		side_effect_t post, unsigned long post_arg);
-extern void mutex_unlock_sfx(struct mutex *lock,
-		side_effect_t pre, unsigned long pre_arg,
-		side_effect_t post, unsigned long post_arg);
-
 /*
  * See kernel/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/mutex-design.txt.
@@ -162,7 +153,6 @@ extern void mutex_lock(struct mutex *lock);
 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
 extern int __must_check mutex_lock_killable(struct mutex *lock);
 
-
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index c83fc2b65f01..39fa04966aa8 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -43,13 +43,4 @@ extern int __must_check down_trylock(struct semaphore *sem);
 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
 extern void up(struct semaphore *sem);
 
-extern void __down(struct semaphore *sem);
-extern void __up(struct semaphore *sem);
-
-struct semaphore_waiter {
-	struct list_head list;
-	struct task_struct *task;
-	int up;
-};
-
 #endif /* __LINUX_SEMAPHORE_H */
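
Note that struct semaphore_waiter is not deleted outright: the hunk in
kernel/semaphore.c below un-comments the same definition, so the type simply
becomes private to that file again instead of being visible kernel-wide.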
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 2f363b9bfc1f..d607ed5dd441 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -498,128 +498,3 @@ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
 	return 1;
 }
 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
-
-
-
-
-void mutex_lock_sfx(struct mutex *lock,
-		side_effect_t pre, unsigned long pre_arg,
-		side_effect_t post, unsigned long post_arg)
-{
-	long state = TASK_UNINTERRUPTIBLE;
-
-	struct task_struct *task = current;
-	struct mutex_waiter waiter;
-	unsigned long flags;
-
-	preempt_disable();
-	mutex_acquire(&lock->dep_map, subclass, 0, ip);
-
-	spin_lock_mutex(&lock->wait_lock, flags);
-
-	if(pre)
-	{
-		if(unlikely(pre(pre_arg)))
-		{
-			// this will fuck with lockdep's CONFIG_PROVE_LOCKING...
-			spin_unlock_mutex(&lock->wait_lock, flags);
-			preempt_enable();
-			return;
-		}
-	}
-
-	debug_mutex_lock_common(lock, &waiter);
-	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
-
-	/* add waiting tasks to the end of the waitqueue (FIFO): */
-	list_add_tail(&waiter.list, &lock->wait_list);
-	waiter.task = task;
-
-	if (atomic_xchg(&lock->count, -1) == 1)
-		goto done;
-
-	lock_contended(&lock->dep_map, ip);
-
-	for (;;) {
-		/*
-		 * Lets try to take the lock again - this is needed even if
-		 * we get here for the first time (shortly after failing to
-		 * acquire the lock), to make sure that we get a wakeup once
-		 * it's unlocked. Later on, if we sleep, this is the
-		 * operation that gives us the lock. We xchg it to -1, so
-		 * that when we release the lock, we properly wake up the
-		 * other waiters:
-		 */
-		if (atomic_xchg(&lock->count, -1) == 1)
-			break;
-
-		__set_task_state(task, state);
-
-		/* didnt get the lock, go to sleep: */
-		spin_unlock_mutex(&lock->wait_lock, flags);
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-		spin_lock_mutex(&lock->wait_lock, flags);
-	}
-
-done:
-	lock_acquired(&lock->dep_map, ip);
-	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, current_thread_info());
-	mutex_set_owner(lock);
-
-	/* set it to 0 if there are no waiters left: */
-	if (likely(list_empty(&lock->wait_list)))
-		atomic_set(&lock->count, 0);
-
-	if(post)
-		post(post_arg);
-
-	spin_unlock_mutex(&lock->wait_lock, flags);
-
-	debug_mutex_free_waiter(&waiter);
-	preempt_enable();
-}
-EXPORT_SYMBOL(mutex_lock_sfx);
-
-void mutex_unlock_sfx(struct mutex *lock,
-		side_effect_t pre, unsigned long pre_arg,
-		side_effect_t post, unsigned long post_arg)
-{
-	unsigned long flags;
-
-	spin_lock_mutex(&lock->wait_lock, flags);
-
-	if(pre)
-		pre(pre_arg);
-
-	//mutex_release(&lock->dep_map, nested, _RET_IP_);
-	mutex_release(&lock->dep_map, 1, _RET_IP_);
-	debug_mutex_unlock(lock);
-
-	/*
-	 * some architectures leave the lock unlocked in the fastpath failure
-	 * case, others need to leave it locked. In the later case we have to
-	 * unlock it here
-	 */
-	if (__mutex_slowpath_needs_to_unlock())
-		atomic_set(&lock->count, 1);
-
-	if (!list_empty(&lock->wait_list)) {
-		/* get the first entry from the wait-list: */
-		struct mutex_waiter *waiter =
-				list_entry(lock->wait_list.next,
-					struct mutex_waiter, list);
-
-		debug_mutex_wake_waiter(lock, waiter);
-
-		wake_up_process(waiter->task);
-	}
-
-	if(post)
-		post(post_arg);
-
-	spin_unlock_mutex(&lock->wait_lock, flags);
-}
-EXPORT_SYMBOL(mutex_unlock_sfx);
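
The deleted slowpath copy had also drifted from the surrounding kernel:
mutex_acquire(&lock->dep_map, subclass, 0, ip) and lock_contended(&lock->dep_map, ip)
reference subclass and ip, which are not parameters of mutex_lock_sfx(), so
the function presumably only compiled because those macros expand to empty
statements when lockdep and lock statistics are configured out; the in-line
comment likewise concedes that the early-abort path confuses
CONFIG_PROVE_LOCKING. With the hooks gone, any such bookkeeping has to move
into the caller, e.g. (assumed pattern, not part of this patch;
account_acquisition and my_mutex are hypothetical names):

    mutex_lock(&my_mutex);
    account_acquisition(&my_mutex);   /* hypothetical helper; runs with the
                                       * mutex held, but not under
                                       * lock->wait_lock as the pre/post
                                       * callbacks did */
    /* ... critical section ... */
    mutex_unlock(&my_mutex);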
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index c947a046a6d7..94a62c0d4ade 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -33,11 +33,11 @@
 #include <linux/spinlock.h>
 #include <linux/ftrace.h>
 
-noinline void __down(struct semaphore *sem);
+static noinline void __down(struct semaphore *sem);
 static noinline int __down_interruptible(struct semaphore *sem);
 static noinline int __down_killable(struct semaphore *sem);
 static noinline int __down_timeout(struct semaphore *sem, long jiffies);
-noinline void __up(struct semaphore *sem);
+static noinline void __up(struct semaphore *sem);
 
 /**
  * down - acquire the semaphore
@@ -190,13 +190,11 @@ EXPORT_SYMBOL(up);
 
 /* Functions for the contended case */
 
-/*
 struct semaphore_waiter {
 	struct list_head list;
 	struct task_struct *task;
 	int up;
 };
- */
 
 /*
  * Because this function is inlined, the 'state' parameter will be
@@ -235,12 +233,10 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 	return -EINTR;
 }
 
-noinline void __sched __down(struct semaphore *sem)
+static noinline void __sched __down(struct semaphore *sem)
 {
 	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
-EXPORT_SYMBOL(__down);
-
 
 static noinline int __sched __down_interruptible(struct semaphore *sem)
 {
@@ -257,7 +253,7 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
 	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
 }
 
-noinline void __sched __up(struct semaphore *sem)
+static noinline void __sched __up(struct semaphore *sem)
 {
 	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
 						struct semaphore_waiter, list);
@@ -265,4 +261,3 @@ noinline void __sched __up(struct semaphore *sem)
 	waiter->up = 1;
 	wake_up_process(waiter->task);
 }
-EXPORT_SYMBOL(__up);
\ No newline at end of file
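
With __down() and __up() static once more and their EXPORT_SYMBOL()s gone,
semaphore internals are again private to kernel/semaphore.c, and modules are
limited to the public interface. A minimal sketch (my_sem and demo are
invented names):

    #include <linux/semaphore.h>

    static DEFINE_SEMAPHORE(my_sem);    /* binary semaphore, count = 1 */

    static void demo(void)
    {
            down(&my_sem);              /* sleeps uninterruptibly if contended */
            /* ... serialized work ... */
            up(&my_sem);
    }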