diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2007-06-17 15:11:10 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-06-18 12:48:41 -0400 |
commit | bd197234b0a616c8f04f6b682326a5a24b33ca92 (patch) | |
tree | 1c31f8934b0d5472c9038c042ff27d08b52ffdc6 /kernel/rtmutex.c | |
parent | 188e1f81ba31af1b65a2f3611df4c670b092bbac (diff) |
Revert "futex_requeue_pi optimization"
This reverts commit d0aa7a70bf03b9de9e995ab272293be1f7937822.
It not only introduced user space visible changes to the futex syscall,
it is also non-functional and there is no way to fix it properly before
the 2.6.22 release.
The breakage report ( http://lkml.org/lkml/2007/5/12/17 ) went
unanswered, and unfortunately it turned out that the concept is not
feasible at all. It violates the rtmutex semantics badly by introducing
a virtual owner, which hacks around the coupling of the user-space
pi_futex and the kernel internal rt_mutex representation.
At the moment the only safe option is to remove it fully as it contains
user-space visible changes to broken kernel code, which we do not want
to expose in the 2.6.22 release.
The patch reverts the original patch mostly 1:1, but contains a couple
of trivial manual cleanups which were necessary due to patches, which
touched the same area of code later.
Verified against the glibc tests and my own PI futex tests.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Ulrich Drepper <drepper@redhat.com>
Cc: Pierre Peiffer <pierre.peiffer@bull.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/rtmutex.c')
-rw-r--r-- | kernel/rtmutex.c | 41 |
1 file changed, 32 insertions, 9 deletions
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index a6fbb4130521..17d28ce20300 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c | |||
@@ -56,7 +56,7 @@ | |||
56 | * state. | 56 | * state. |
57 | */ | 57 | */ |
58 | 58 | ||
59 | void | 59 | static void |
60 | rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner, | 60 | rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner, |
61 | unsigned long mask) | 61 | unsigned long mask) |
62 | { | 62 | { |
@@ -81,6 +81,29 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) | |||
81 | } | 81 | } |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * We can speed up the acquire/release, if the architecture | ||
85 | * supports cmpxchg and if there's no debugging state to be set up | ||
86 | */ | ||
87 | #if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES) | ||
88 | # define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c) | ||
89 | static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) | ||
90 | { | ||
91 | unsigned long owner, *p = (unsigned long *) &lock->owner; | ||
92 | |||
93 | do { | ||
94 | owner = *p; | ||
95 | } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner); | ||
96 | } | ||
97 | #else | ||
98 | # define rt_mutex_cmpxchg(l,c,n) (0) | ||
99 | static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) | ||
100 | { | ||
101 | lock->owner = (struct task_struct *) | ||
102 | ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); | ||
103 | } | ||
104 | #endif | ||
105 | |||
106 | /* | ||
84 | * Calculate task priority from the waiter list priority | 107 | * Calculate task priority from the waiter list priority |
85 | * | 108 | * |
86 | * Return task->normal_prio when the waiter list is empty or when | 109 | * Return task->normal_prio when the waiter list is empty or when |
@@ -100,7 +123,7 @@ int rt_mutex_getprio(struct task_struct *task) | |||
100 | * | 123 | * |
101 | * This can be both boosting and unboosting. task->pi_lock must be held. | 124 | * This can be both boosting and unboosting. task->pi_lock must be held. |
102 | */ | 125 | */ |
103 | void __rt_mutex_adjust_prio(struct task_struct *task) | 126 | static void __rt_mutex_adjust_prio(struct task_struct *task) |
104 | { | 127 | { |
105 | int prio = rt_mutex_getprio(task); | 128 | int prio = rt_mutex_getprio(task); |
106 | 129 | ||
@@ -136,11 +159,11 @@ int max_lock_depth = 1024; | |||
136 | * Decreases task's usage by one - may thus free the task. | 159 | * Decreases task's usage by one - may thus free the task. |
137 | * Returns 0 or -EDEADLK. | 160 | * Returns 0 or -EDEADLK. |
138 | */ | 161 | */ |
139 | int rt_mutex_adjust_prio_chain(struct task_struct *task, | 162 | static int rt_mutex_adjust_prio_chain(struct task_struct *task, |
140 | int deadlock_detect, | 163 | int deadlock_detect, |
141 | struct rt_mutex *orig_lock, | 164 | struct rt_mutex *orig_lock, |
142 | struct rt_mutex_waiter *orig_waiter, | 165 | struct rt_mutex_waiter *orig_waiter, |
143 | struct task_struct *top_task) | 166 | struct task_struct *top_task) |
144 | { | 167 | { |
145 | struct rt_mutex *lock; | 168 | struct rt_mutex *lock; |
146 | struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; | 169 | struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; |
@@ -514,8 +537,8 @@ static void wakeup_next_waiter(struct rt_mutex *lock) | |||
514 | * | 537 | * |
515 | * Must be called with lock->wait_lock held | 538 | * Must be called with lock->wait_lock held |
516 | */ | 539 | */ |
517 | void remove_waiter(struct rt_mutex *lock, | 540 | static void remove_waiter(struct rt_mutex *lock, |
518 | struct rt_mutex_waiter *waiter) | 541 | struct rt_mutex_waiter *waiter) |
519 | { | 542 | { |
520 | int first = (waiter == rt_mutex_top_waiter(lock)); | 543 | int first = (waiter == rt_mutex_top_waiter(lock)); |
521 | struct task_struct *owner = rt_mutex_owner(lock); | 544 | struct task_struct *owner = rt_mutex_owner(lock); |