-rw-r--r--  include/linux/init_task.h     1
-rw-r--r--  include/linux/rtmutex.h     104
-rw-r--r--  include/linux/sched.h        12
-rw-r--r--  include/linux/sysctl.h        1
-rw-r--r--  init/Kconfig                  5
-rw-r--r--  kernel/Makefile               1
-rw-r--r--  kernel/fork.c                16
-rw-r--r--  kernel/rtmutex.c            904
-rw-r--r--  kernel/rtmutex.h             29
-rw-r--r--  kernel/rtmutex_common.h      93
-rw-r--r--  kernel/sysctl.c              15
11 files changed, 1181 insertions, 0 deletions
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 678c1a90380d..3a256957fb56 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -124,6 +124,7 @@ extern struct group_info init_groups;
 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
 .fs_excl = ATOMIC_INIT(0), \
 .pi_lock = SPIN_LOCK_UNLOCKED, \
+ INIT_RT_MUTEXES(tsk) \
 }
 
 
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
new file mode 100644
index 000000000000..12309c916c68
--- /dev/null
+++ b/include/linux/rtmutex.h
@@ -0,0 +1,104 @@
1/*
2 * RT Mutexes: blocking mutual exclusion locks with PI support
3 *
4 * started by Ingo Molnar and Thomas Gleixner:
5 *
6 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 *
9 * This file contains the public data structure and API definitions.
10 */
11
12#ifndef __LINUX_RT_MUTEX_H
13#define __LINUX_RT_MUTEX_H
14
15#include <linux/linkage.h>
16#include <linux/plist.h>
17#include <linux/spinlock_types.h>
18
19/*
20 * The rt_mutex structure
21 *
22 * @wait_lock: spinlock to protect the structure
23 * @wait_list: plist head to enqueue waiters in priority order
24 * @owner: the mutex owner
25 */
26struct rt_mutex {
27 spinlock_t wait_lock;
28 struct plist_head wait_list;
29 struct task_struct *owner;
30#ifdef CONFIG_DEBUG_RT_MUTEXES
31 int save_state;
32 struct list_head held_list_entry;
33 unsigned long acquire_ip;
34 const char *name, *file;
35 int line;
36 void *magic;
37#endif
38};
39
40struct rt_mutex_waiter;
41struct hrtimer_sleeper;
42
43#ifdef CONFIG_DEBUG_RT_MUTEXES
44# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
45 , .name = #mutexname, .file = __FILE__, .line = __LINE__
46# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __FUNCTION__)
47 extern void rt_mutex_debug_task_free(struct task_struct *tsk);
48#else
49# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
50# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
51# define rt_mutex_debug_task_free(t) do { } while (0)
52#endif
53
54#define __RT_MUTEX_INITIALIZER(mutexname) \
55 { .wait_lock = SPIN_LOCK_UNLOCKED \
56 , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
57 , .owner = NULL \
58 __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
59
60#define DEFINE_RT_MUTEX(mutexname) \
61 struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
62
63/***
64 * rt_mutex_is_locked - is the mutex locked
65 * @lock: the mutex to be queried
66 *
67 * Returns 1 if the mutex is locked, 0 if unlocked.
68 */
69static inline int rt_mutex_is_locked(struct rt_mutex *lock)
70{
71 return lock->owner != NULL;
72}
73
74extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
75extern void rt_mutex_destroy(struct rt_mutex *lock);
76
77extern void rt_mutex_lock(struct rt_mutex *lock);
78extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
79 int detect_deadlock);
80extern int rt_mutex_timed_lock(struct rt_mutex *lock,
81 struct hrtimer_sleeper *timeout,
82 int detect_deadlock);
83
84extern int rt_mutex_trylock(struct rt_mutex *lock);
85
86extern void rt_mutex_unlock(struct rt_mutex *lock);
87
88#ifdef CONFIG_DEBUG_RT_MUTEXES
89# define INIT_RT_MUTEX_DEBUG(tsk) \
90 .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \
91 .held_list_lock = SPIN_LOCK_UNLOCKED
92#else
93# define INIT_RT_MUTEX_DEBUG(tsk)
94#endif
95
96#ifdef CONFIG_RT_MUTEXES
97# define INIT_RT_MUTEXES(tsk) \
98 .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \
99 INIT_RT_MUTEX_DEBUG(tsk)
100#else
101# define INIT_RT_MUTEXES(tsk)
102#endif
103
104#endif
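
As a rough illustration of the public API declared above (not part of the patch; the lock name and critical section are hypothetical), a kernel user could define and use an rt_mutex like this:

#include <linux/rtmutex.h>

/* statically initialized, unlocked rt_mutex (hypothetical example) */
static DEFINE_RT_MUTEX(example_lock);

static void example_critical_section(void)
{
        /* may sleep; a blocked higher-priority task boosts the owner via PI */
        rt_mutex_lock(&example_lock);

        /* ... touch data protected by example_lock ... */

        rt_mutex_unlock(&example_lock);
}

rt_mutex_lock_interruptible(), rt_mutex_timed_lock() and rt_mutex_trylock() follow the same pattern for the interruptible, timed and non-blocking variants.
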
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6f167645e7e2..6ea23c9af413 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -73,6 +73,7 @@ struct sched_param {
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/futex.h>
+#include <linux/rtmutex.h>
 
 #include <linux/time.h>
 #include <linux/param.h>
@@ -858,6 +859,17 @@ struct task_struct {
 /* Protection of the PI data structures: */
 spinlock_t pi_lock;
 
+#ifdef CONFIG_RT_MUTEXES
+ /* PI waiters blocked on a rt_mutex held by this task */
+ struct plist_head pi_waiters;
+ /* Deadlock detection and priority inheritance handling */
+ struct rt_mutex_waiter *pi_blocked_on;
+# ifdef CONFIG_DEBUG_RT_MUTEXES
+ spinlock_t held_list_lock;
+ struct list_head held_list_head;
+# endif
+#endif
+
 #ifdef CONFIG_DEBUG_MUTEXES
 /* mutex deadlock detection */
 struct mutex_waiter *blocked_on;
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index bee12a7a0576..46e4d8f2771f 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -149,6 +149,7 @@ enum
 KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */
 KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */
 KERN_COMPAT_LOG=73, /* int: print compat layer messages */
+KERN_MAX_LOCK_DEPTH=74,
 };
 
 
diff --git a/init/Kconfig b/init/Kconfig
index df55b3665601..f70f2fd273c2 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -339,9 +339,14 @@ config BASE_FULL
 kernel data structures. This saves memory on small machines,
 but may reduce performance.
 
+config RT_MUTEXES
+ boolean
+ select PLIST
+
 config FUTEX
 bool "Enable futex support" if EMBEDDED
 default y
+ select RT_MUTEXES
 help
 Disabling this option will cause the kernel to be built without
 support for "fast userspace mutexes". The resulting kernel may not
diff --git a/kernel/Makefile b/kernel/Makefile
index 752bd7d383af..21df9a338ff0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_FUTEX) += futex.o
 ifeq ($(CONFIG_COMPAT),y)
 obj-$(CONFIG_FUTEX) += futex_compat.o
 endif
+obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
diff --git a/kernel/fork.c b/kernel/fork.c
index 9b4e54ef0225..b664a081fffa 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -104,6 +104,7 @@ static kmem_cache_t *mm_cachep;
 void free_task(struct task_struct *tsk)
 {
 free_thread_info(tsk->thread_info);
+ rt_mutex_debug_task_free(tsk);
 free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -913,6 +914,19 @@ asmlinkage long sys_set_tid_address(int __user *tidptr)
 return current->pid;
 }
 
+static inline void rt_mutex_init_task(struct task_struct *p)
+{
+#ifdef CONFIG_RT_MUTEXES
+ spin_lock_init(&p->pi_lock);
+ plist_head_init(&p->pi_waiters, &p->pi_lock);
+ p->pi_blocked_on = NULL;
+# ifdef CONFIG_DEBUG_RT_MUTEXES
+ spin_lock_init(&p->held_list_lock);
+ INIT_LIST_HEAD(&p->held_list_head);
+# endif
+#endif
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -1034,6 +1048,8 @@ static task_t *copy_process(unsigned long clone_flags,
 mpol_fix_fork_child_flag(p);
 #endif
 
+ rt_mutex_init_task(p);
+
 #ifdef CONFIG_DEBUG_MUTEXES
 p->blocked_on = NULL; /* not blocked yet */
 #endif
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
new file mode 100644
index 000000000000..937a474fae94
--- /dev/null
+++ b/kernel/rtmutex.c
@@ -0,0 +1,904 @@
1/*
2 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
3 *
4 * started by Ingo Molnar and Thomas Gleixner.
5 *
6 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9 * Copyright (C) 2006 Esben Nielsen
10 */
11#include <linux/spinlock.h>
12#include <linux/module.h>
13#include <linux/sched.h>
14#include <linux/timer.h>
15
16#include "rtmutex_common.h"
17
18#ifdef CONFIG_DEBUG_RT_MUTEXES
19# include "rtmutex-debug.h"
20#else
21# include "rtmutex.h"
22#endif
23
24/*
25 * lock->owner state tracking:
26 *
27 * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
28 * are used to keep track of the "owner is pending" and "lock has
29 * waiters" state.
30 *
31 * owner bit1 bit0
32 * NULL 0 0 lock is free (fast acquire possible)
33 * NULL 0 1 invalid state
34 * NULL 1 0 Transitional State*
35 * NULL 1 1 invalid state
36 * taskpointer 0 0 lock is held (fast release possible)
37 * taskpointer 0 1 task is pending owner
38 * taskpointer 1 0 lock is held and has waiters
39 * taskpointer 1 1 task is pending owner and lock has more waiters
40 *
41 * Pending ownership is assigned to the top (highest priority)
42 * waiter of the lock, when the lock is released. The thread is woken
43 * up and can now take the lock. Until the lock is taken (bit 0
44 * cleared) a competing higher priority thread can steal the lock
45 * which puts the woken up thread back on the waiters list.
46 *
47 * The fast atomic compare exchange based acquire and release is only
48 * possible when bit 0 and 1 of lock->owner are 0.
49 *
50 * (*) There's a small time where the owner can be NULL and the
51 * "lock has waiters" bit is set. This can happen when grabbing the lock.
52 * To prevent a cmpxchg of the owner releasing the lock, we need to set this
53 * bit before looking at the lock, hence the reason this is a transitional
54 * state.
55 */
56
57static void
58rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
59 unsigned long mask)
60{
61 unsigned long val = (unsigned long)owner | mask;
62
63 if (rt_mutex_has_waiters(lock))
64 val |= RT_MUTEX_HAS_WAITERS;
65
66 lock->owner = (struct task_struct *)val;
67}
68
69static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
70{
71 lock->owner = (struct task_struct *)
72 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
73}
74
75static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
76{
77 if (!rt_mutex_has_waiters(lock))
78 clear_rt_mutex_waiters(lock);
79}
80
81/*
82 * We can speed up the acquire/release, if the architecture
83 * supports cmpxchg and if there's no debugging state to be set up
84 */
85#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
86# define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c)
87static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
88{
89 unsigned long owner, *p = (unsigned long *) &lock->owner;
90
91 do {
92 owner = *p;
93 } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
94}
95#else
96# define rt_mutex_cmpxchg(l,c,n) (0)
97static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
98{
99 lock->owner = (struct task_struct *)
100 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
101}
102#endif
103
104/*
105 * Calculate task priority from the waiter list priority
106 *
107 * Return task->normal_prio when the waiter list is empty or when
108 * the waiter is not allowed to do priority boosting
109 */
110int rt_mutex_getprio(struct task_struct *task)
111{
112 if (likely(!task_has_pi_waiters(task)))
113 return task->normal_prio;
114
115 return min(task_top_pi_waiter(task)->pi_list_entry.prio,
116 task->normal_prio);
117}
118
119/*
120 * Adjust the priority of a task, after its pi_waiters got modified.
121 *
122 * This can be both boosting and unboosting. task->pi_lock must be held.
123 */
124static void __rt_mutex_adjust_prio(struct task_struct *task)
125{
126 int prio = rt_mutex_getprio(task);
127
128 if (task->prio != prio)
129 rt_mutex_setprio(task, prio);
130}
131
132/*
133 * Adjust task priority (undo boosting). Called from the exit path of
134 * rt_mutex_slowunlock() and rt_mutex_slowlock().
135 *
136 * (Note: We do this outside of the protection of lock->wait_lock to
137 * allow the lock to be taken while or before we readjust the priority
138 * of task. We do not use the spin_xx_mutex() variants here as we are
139 * outside of the debug path.)
140 */
141static void rt_mutex_adjust_prio(struct task_struct *task)
142{
143 unsigned long flags;
144
145 spin_lock_irqsave(&task->pi_lock, flags);
146 __rt_mutex_adjust_prio(task);
147 spin_unlock_irqrestore(&task->pi_lock, flags);
148}
149
150/*
151 * Max number of times we'll walk the boosting chain:
152 */
153int max_lock_depth = 1024;
154
155/*
156 * Adjust the priority chain. Also used for deadlock detection.
157 * Decreases task's usage by one - may thus free the task.
158 * Returns 0 or -EDEADLK.
159 */
160static int rt_mutex_adjust_prio_chain(task_t *task,
161 int deadlock_detect,
162 struct rt_mutex *orig_lock,
163 struct rt_mutex_waiter *orig_waiter
164 __IP_DECL__)
165{
166 struct rt_mutex *lock;
167 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
168 int detect_deadlock, ret = 0, depth = 0;
169 unsigned long flags;
170
171 detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
172 deadlock_detect);
173
174 /*
175 * The (de)boosting is a step by step approach with a lot of
176 * pitfalls. We want this to be preemptible and we want to hold a
177 * maximum of two locks per step. So we have to check
178 * carefully whether things change under us.
179 */
180 again:
181 if (++depth > max_lock_depth) {
182 static int prev_max;
183
184 /*
185 * Print this only once. If the admin changes the limit,
186 * print a new message when reaching the limit again.
187 */
188 if (prev_max != max_lock_depth) {
189 prev_max = max_lock_depth;
190 printk(KERN_WARNING "Maximum lock depth %d reached "
191 "task: %s (%d)\n", max_lock_depth,
192 current->comm, current->pid);
193 }
194 put_task_struct(task);
195
196 return deadlock_detect ? -EDEADLK : 0;
197 }
198 retry:
199 /*
201 * The task cannot go away as we did a get_task_struct() before!
201 */
202 spin_lock_irqsave(&task->pi_lock, flags);
203
204 waiter = task->pi_blocked_on;
205 /*
206 * Check whether the end of the boosting chain has been
207 * reached or the state of the chain has changed while we
208 * dropped the locks.
209 */
210 if (!waiter || !waiter->task)
211 goto out_unlock_pi;
212
213 if (top_waiter && (!task_has_pi_waiters(task) ||
214 top_waiter != task_top_pi_waiter(task)))
215 goto out_unlock_pi;
216
217 /*
218 * When deadlock detection is off, we check whether further
219 * priority adjustment is necessary.
220 */
221 if (!detect_deadlock && waiter->list_entry.prio == task->prio)
222 goto out_unlock_pi;
223
224 lock = waiter->lock;
225 if (!spin_trylock(&lock->wait_lock)) {
226 spin_unlock_irqrestore(&task->pi_lock, flags);
227 cpu_relax();
228 goto retry;
229 }
230
231 /* Deadlock detection */
232 if (lock == orig_lock || rt_mutex_owner(lock) == current) {
233 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
234 spin_unlock(&lock->wait_lock);
235 ret = deadlock_detect ? -EDEADLK : 0;
236 goto out_unlock_pi;
237 }
238
239 top_waiter = rt_mutex_top_waiter(lock);
240
241 /* Requeue the waiter */
242 plist_del(&waiter->list_entry, &lock->wait_list);
243 waiter->list_entry.prio = task->prio;
244 plist_add(&waiter->list_entry, &lock->wait_list);
245
246 /* Release the task */
247 spin_unlock_irqrestore(&task->pi_lock, flags);
248 put_task_struct(task);
249
250 /* Grab the next task */
251 task = rt_mutex_owner(lock);
252 spin_lock_irqsave(&task->pi_lock, flags);
253
254 if (waiter == rt_mutex_top_waiter(lock)) {
255 /* Boost the owner */
256 plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
257 waiter->pi_list_entry.prio = waiter->list_entry.prio;
258 plist_add(&waiter->pi_list_entry, &task->pi_waiters);
259 __rt_mutex_adjust_prio(task);
260
261 } else if (top_waiter == waiter) {
262 /* Deboost the owner */
263 plist_del(&waiter->pi_list_entry, &task->pi_waiters);
264 waiter = rt_mutex_top_waiter(lock);
265 waiter->pi_list_entry.prio = waiter->list_entry.prio;
266 plist_add(&waiter->pi_list_entry, &task->pi_waiters);
267 __rt_mutex_adjust_prio(task);
268 }
269
270 get_task_struct(task);
271 spin_unlock_irqrestore(&task->pi_lock, flags);
272
273 top_waiter = rt_mutex_top_waiter(lock);
274 spin_unlock(&lock->wait_lock);
275
276 if (!detect_deadlock && waiter != top_waiter)
277 goto out_put_task;
278
279 goto again;
280
281 out_unlock_pi:
282 spin_unlock_irqrestore(&task->pi_lock, flags);
283 out_put_task:
284 put_task_struct(task);
285 return ret;
286}
287
288/*
289 * Optimization: check if we can steal the lock from the
290 * assigned pending owner [which might not have taken the
291 * lock yet]:
292 */
293static inline int try_to_steal_lock(struct rt_mutex *lock)
294{
295 struct task_struct *pendowner = rt_mutex_owner(lock);
296 struct rt_mutex_waiter *next;
297 unsigned long flags;
298
299 if (!rt_mutex_owner_pending(lock))
300 return 0;
301
302 if (pendowner == current)
303 return 1;
304
305 spin_lock_irqsave(&pendowner->pi_lock, flags);
306 if (current->prio >= pendowner->prio) {
307 spin_unlock_irqrestore(&pendowner->pi_lock, flags);
308 return 0;
309 }
310
311 /*
312 * Check if a waiter is enqueued on the pending owner's
313 * pi_waiters list. Remove it and readjust the pending owner's
314 * priority.
315 */
316 if (likely(!rt_mutex_has_waiters(lock))) {
317 spin_unlock_irqrestore(&pendowner->pi_lock, flags);
318 return 1;
319 }
320
321 /* No chain handling, pending owner is not blocked on anything: */
322 next = rt_mutex_top_waiter(lock);
323 plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
324 __rt_mutex_adjust_prio(pendowner);
325 spin_unlock_irqrestore(&pendowner->pi_lock, flags);
326
327 /*
328 * We are going to steal the lock and a waiter was
329 * enqueued on the pending owner's pi_waiters queue. So
330 * we have to enqueue this waiter into the
331 * current->pi_waiters list. This covers the case
332 * where current is boosted because it holds another
333 * lock and gets unboosted because the booster is
334 * interrupted, so we would delay a waiter with a higher
335 * priority than current->normal_prio.
336 *
337 * Note: in the rare case of a SCHED_OTHER task changing
338 * its priority and thus stealing the lock, next->task
339 * might be current:
340 */
341 if (likely(next->task != current)) {
342 spin_lock_irqsave(&current->pi_lock, flags);
343 plist_add(&next->pi_list_entry, &current->pi_waiters);
344 __rt_mutex_adjust_prio(current);
345 spin_unlock_irqrestore(&current->pi_lock, flags);
346 }
347 return 1;
348}
349
350/*
351 * Try to take an rt-mutex
352 *
353 * This fails
354 * - when the lock has a real owner
355 * - when a different pending owner exists and has higher priority than current
356 *
357 * Must be called with lock->wait_lock held.
358 */
359static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
360{
361 /*
362 * We have to be careful here if the atomic speedups are
363 * enabled, such that, when
364 * - no other waiter is on the lock
365 * - the lock has been released since we did the cmpxchg
366 * the lock can be released or taken while we are doing the
367 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
368 *
369 * The atomic acquire/release aware variant of
370 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
371 * the WAITERS bit, the atomic release / acquire can not
372 * happen anymore and lock->wait_lock protects us from the
373 * non-atomic case.
374 *
375 * Note, that this might set lock->owner =
376 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
377 * any more. This is fixed up when we take the ownership.
378 * This is the transitional state explained at the top of this file.
379 */
380 mark_rt_mutex_waiters(lock);
381
382 if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
383 return 0;
384
385 /* We got the lock. */
386 debug_rt_mutex_lock(lock __IP__);
387
388 rt_mutex_set_owner(lock, current, 0);
389
390 rt_mutex_deadlock_account_lock(lock, current);
391
392 return 1;
393}
394
395/*
396 * Task blocks on lock.
397 *
398 * Prepare waiter and propagate pi chain
399 *
400 * This must be called with lock->wait_lock held.
401 */
402static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
403 struct rt_mutex_waiter *waiter,
404 int detect_deadlock
405 __IP_DECL__)
406{
407 struct rt_mutex_waiter *top_waiter = waiter;
408 task_t *owner = rt_mutex_owner(lock);
409 int boost = 0, res;
410 unsigned long flags;
411
412 spin_lock_irqsave(&current->pi_lock, flags);
413 __rt_mutex_adjust_prio(current);
414 waiter->task = current;
415 waiter->lock = lock;
416 plist_node_init(&waiter->list_entry, current->prio);
417 plist_node_init(&waiter->pi_list_entry, current->prio);
418
419 /* Get the top priority waiter on the lock */
420 if (rt_mutex_has_waiters(lock))
421 top_waiter = rt_mutex_top_waiter(lock);
422 plist_add(&waiter->list_entry, &lock->wait_list);
423
424 current->pi_blocked_on = waiter;
425
426 spin_unlock_irqrestore(&current->pi_lock, flags);
427
428 if (waiter == rt_mutex_top_waiter(lock)) {
429 spin_lock_irqsave(&owner->pi_lock, flags);
430 plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
431 plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
432
433 __rt_mutex_adjust_prio(owner);
434 if (owner->pi_blocked_on) {
435 boost = 1;
436 get_task_struct(owner);
437 }
438 spin_unlock_irqrestore(&owner->pi_lock, flags);
439 }
440 else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
441 spin_lock_irqsave(&owner->pi_lock, flags);
442 if (owner->pi_blocked_on) {
443 boost = 1;
444 get_task_struct(owner);
445 }
446 spin_unlock_irqrestore(&owner->pi_lock, flags);
447 }
448 if (!boost)
449 return 0;
450
451 spin_unlock(&lock->wait_lock);
452
453 res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
454 waiter __IP__);
455
456 spin_lock(&lock->wait_lock);
457
458 return res;
459}
460
461/*
462 * Wake up the next waiter on the lock.
463 *
464 * Remove the top waiter from the current task's waiter list and from
465 * the lock waiter list. Set it as pending owner. Then wake it up.
466 *
467 * Called with lock->wait_lock held.
468 */
469static void wakeup_next_waiter(struct rt_mutex *lock)
470{
471 struct rt_mutex_waiter *waiter;
472 struct task_struct *pendowner;
473 unsigned long flags;
474
475 spin_lock_irqsave(&current->pi_lock, flags);
476
477 waiter = rt_mutex_top_waiter(lock);
478 plist_del(&waiter->list_entry, &lock->wait_list);
479
480 /*
481 * Remove it from current->pi_waiters. We do not adjust a
482 * possible priority boost right now. We execute wakeup in the
483 * boosted mode and go back to normal after releasing
484 * lock->wait_lock.
485 */
486 plist_del(&waiter->pi_list_entry, &current->pi_waiters);
487 pendowner = waiter->task;
488 waiter->task = NULL;
489
490 rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
491
492 spin_unlock_irqrestore(&current->pi_lock, flags);
493
494 /*
495 * Clear the pi_blocked_on variable and enqueue a possible
496 * waiter into the pi_waiters list of the pending owner. This
497 * prevents a waiter with a higher priority than
498 * pending-owner->normal_prio from being blocked on the
499 * unboosted (pending) owner.
500 */
501 spin_lock_irqsave(&pendowner->pi_lock, flags);
502
503 WARN_ON(!pendowner->pi_blocked_on);
504 WARN_ON(pendowner->pi_blocked_on != waiter);
505 WARN_ON(pendowner->pi_blocked_on->lock != lock);
506
507 pendowner->pi_blocked_on = NULL;
508
509 if (rt_mutex_has_waiters(lock)) {
510 struct rt_mutex_waiter *next;
511
512 next = rt_mutex_top_waiter(lock);
513 plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
514 }
515 spin_unlock_irqrestore(&pendowner->pi_lock, flags);
516
517 wake_up_process(pendowner);
518}
519
520/*
521 * Remove a waiter from a lock
522 *
523 * Must be called with lock->wait_lock held
524 */
525static void remove_waiter(struct rt_mutex *lock,
526 struct rt_mutex_waiter *waiter __IP_DECL__)
527{
528 int first = (waiter == rt_mutex_top_waiter(lock));
529 int boost = 0;
530 task_t *owner = rt_mutex_owner(lock);
531 unsigned long flags;
532
533 spin_lock_irqsave(&current->pi_lock, flags);
534 plist_del(&waiter->list_entry, &lock->wait_list);
535 waiter->task = NULL;
536 current->pi_blocked_on = NULL;
537 spin_unlock_irqrestore(&current->pi_lock, flags);
538
539 if (first && owner != current) {
540
541 spin_lock_irqsave(&owner->pi_lock, flags);
542
543 plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
544
545 if (rt_mutex_has_waiters(lock)) {
546 struct rt_mutex_waiter *next;
547
548 next = rt_mutex_top_waiter(lock);
549 plist_add(&next->pi_list_entry, &owner->pi_waiters);
550 }
551 __rt_mutex_adjust_prio(owner);
552
553 if (owner->pi_blocked_on) {
554 boost = 1;
555 get_task_struct(owner);
556 }
557 spin_unlock_irqrestore(&owner->pi_lock, flags);
558 }
559
560 WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
561
562 if (!boost)
563 return;
564
565 spin_unlock(&lock->wait_lock);
566
567 rt_mutex_adjust_prio_chain(owner, 0, lock, NULL __IP__);
568
569 spin_lock(&lock->wait_lock);
570}
571
572/*
573 * Slow path lock function:
574 */
575static int __sched
576rt_mutex_slowlock(struct rt_mutex *lock, int state,
577 struct hrtimer_sleeper *timeout,
578 int detect_deadlock __IP_DECL__)
579{
580 struct rt_mutex_waiter waiter;
581 int ret = 0;
582
583 debug_rt_mutex_init_waiter(&waiter);
584 waiter.task = NULL;
585
586 spin_lock(&lock->wait_lock);
587
588 /* Try to acquire the lock again: */
589 if (try_to_take_rt_mutex(lock __IP__)) {
590 spin_unlock(&lock->wait_lock);
591 return 0;
592 }
593
594 set_current_state(state);
595
596 /* Setup the timer, when timeout != NULL */
597 if (unlikely(timeout))
598 hrtimer_start(&timeout->timer, timeout->timer.expires,
599 HRTIMER_ABS);
600
601 for (;;) {
602 /* Try to acquire the lock: */
603 if (try_to_take_rt_mutex(lock __IP__))
604 break;
605
606 /*
607 * TASK_INTERRUPTIBLE checks for signals and
608 * timeout. Ignored otherwise.
609 */
610 if (unlikely(state == TASK_INTERRUPTIBLE)) {
611 /* Signal pending? */
612 if (signal_pending(current))
613 ret = -EINTR;
614 if (timeout && !timeout->task)
615 ret = -ETIMEDOUT;
616 if (ret)
617 break;
618 }
619
620 /*
621 * waiter.task is NULL the first time we come here and
622 * when we have been woken up by the previous owner
623 * but the lock got stolen by a higher prio task.
624 */
625 if (!waiter.task) {
626 ret = task_blocks_on_rt_mutex(lock, &waiter,
627 detect_deadlock __IP__);
628 /*
629 * If we got woken up by the owner then start loop
630 * all over without going into schedule to try
631 * to get the lock now:
632 */
633 if (unlikely(!waiter.task))
634 continue;
635
636 if (unlikely(ret))
637 break;
638 }
639 spin_unlock(&lock->wait_lock);
640
641 debug_rt_mutex_print_deadlock(&waiter);
642
643 schedule();
644
645 spin_lock(&lock->wait_lock);
646 set_current_state(state);
647 }
648
649 set_current_state(TASK_RUNNING);
650
651 if (unlikely(waiter.task))
652 remove_waiter(lock, &waiter __IP__);
653
654 /*
655 * try_to_take_rt_mutex() sets the waiter bit
656 * unconditionally. We might have to fix that up.
657 */
658 fixup_rt_mutex_waiters(lock);
659
660 spin_unlock(&lock->wait_lock);
661
662 /* Remove pending timer: */
663 if (unlikely(timeout))
664 hrtimer_cancel(&timeout->timer);
665
666 /*
667 * Readjust priority, when we did not get the lock. We might
668 * have been the pending owner and boosted. Since we did not
669 * take the lock, the PI boost has to go.
670 */
671 if (unlikely(ret))
672 rt_mutex_adjust_prio(current);
673
674 debug_rt_mutex_free_waiter(&waiter);
675
676 return ret;
677}
678
679/*
680 * Slow path try-lock function:
681 */
682static inline int
683rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
684{
685 int ret = 0;
686
687 spin_lock(&lock->wait_lock);
688
689 if (likely(rt_mutex_owner(lock) != current)) {
690
691 ret = try_to_take_rt_mutex(lock __IP__);
692 /*
693 * try_to_take_rt_mutex() sets the lock waiters
694 * bit unconditionally. Clean this up.
695 */
696 fixup_rt_mutex_waiters(lock);
697 }
698
699 spin_unlock(&lock->wait_lock);
700
701 return ret;
702}
703
704/*
705 * Slow path to release a rt-mutex:
706 */
707static void __sched
708rt_mutex_slowunlock(struct rt_mutex *lock)
709{
710 spin_lock(&lock->wait_lock);
711
712 debug_rt_mutex_unlock(lock);
713
714 rt_mutex_deadlock_account_unlock(current);
715
716 if (!rt_mutex_has_waiters(lock)) {
717 lock->owner = NULL;
718 spin_unlock(&lock->wait_lock);
719 return;
720 }
721
722 wakeup_next_waiter(lock);
723
724 spin_unlock(&lock->wait_lock);
725
726 /* Undo pi boosting if necessary: */
727 rt_mutex_adjust_prio(current);
728}
729
730/*
731 * debug aware fast / slowpath lock,trylock,unlock
732 *
733 * The atomic acquire/release ops are compiled away, when either the
734 * architecture does not support cmpxchg or when debugging is enabled.
735 */
736static inline int
737rt_mutex_fastlock(struct rt_mutex *lock, int state,
738 int detect_deadlock,
739 int (*slowfn)(struct rt_mutex *lock, int state,
740 struct hrtimer_sleeper *timeout,
741 int detect_deadlock __IP_DECL__))
742{
743 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
744 rt_mutex_deadlock_account_lock(lock, current);
745 return 0;
746 } else
747 return slowfn(lock, state, NULL, detect_deadlock __RET_IP__);
748}
749
750static inline int
751rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
752 struct hrtimer_sleeper *timeout, int detect_deadlock,
753 int (*slowfn)(struct rt_mutex *lock, int state,
754 struct hrtimer_sleeper *timeout,
755 int detect_deadlock __IP_DECL__))
756{
757 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
758 rt_mutex_deadlock_account_lock(lock, current);
759 return 0;
760 } else
761 return slowfn(lock, state, timeout, detect_deadlock __RET_IP__);
762}
763
764static inline int
765rt_mutex_fasttrylock(struct rt_mutex *lock,
766 int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
767{
768 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
769 rt_mutex_deadlock_account_lock(lock, current);
770 return 1;
771 }
772 return slowfn(lock __RET_IP__);
773}
774
775static inline void
776rt_mutex_fastunlock(struct rt_mutex *lock,
777 void (*slowfn)(struct rt_mutex *lock))
778{
779 if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
780 rt_mutex_deadlock_account_unlock(current);
781 else
782 slowfn(lock);
783}
784
785/**
786 * rt_mutex_lock - lock a rt_mutex
787 *
788 * @lock: the rt_mutex to be locked
789 */
790void __sched rt_mutex_lock(struct rt_mutex *lock)
791{
792 might_sleep();
793
794 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
795}
796EXPORT_SYMBOL_GPL(rt_mutex_lock);
797
798/**
799 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
800 *
801 * @lock: the rt_mutex to be locked
802 * @detect_deadlock: deadlock detection on/off
803 *
804 * Returns:
805 * 0 on success
806 * -EINTR when interrupted by a signal
807 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
808 */
809int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
810 int detect_deadlock)
811{
812 might_sleep();
813
814 return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
815 detect_deadlock, rt_mutex_slowlock);
816}
817EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
818
819/**
820 * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout;
821 * the timeout structure is provided
822 * by the caller
823 *
824 * @lock: the rt_mutex to be locked
825 * @timeout: timeout structure or NULL (no timeout)
826 * @detect_deadlock: deadlock detection on/off
827 *
828 * Returns:
829 * 0 on success
830 * -EINTR when interrupted by a signal
831 * -ETIMEDOUT when the timeout expired
832 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
833 */
834int
835rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
836 int detect_deadlock)
837{
838 might_sleep();
839
840 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
841 detect_deadlock, rt_mutex_slowlock);
842}
843EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
844
845/**
846 * rt_mutex_trylock - try to lock a rt_mutex
847 *
848 * @lock: the rt_mutex to be locked
849 *
850 * Returns 1 on success and 0 on contention
851 */
852int __sched rt_mutex_trylock(struct rt_mutex *lock)
853{
854 return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
855}
856EXPORT_SYMBOL_GPL(rt_mutex_trylock);
857
858/**
859 * rt_mutex_unlock - unlock a rt_mutex
860 *
861 * @lock: the rt_mutex to be unlocked
862 */
863void __sched rt_mutex_unlock(struct rt_mutex *lock)
864{
865 rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
866}
867EXPORT_SYMBOL_GPL(rt_mutex_unlock);
868
869/***
870 * rt_mutex_destroy - mark a mutex unusable
871 * @lock: the mutex to be destroyed
872 *
873 * This function marks the mutex uninitialized, and any subsequent
874 * use of the mutex is forbidden. The mutex must not be locked when
875 * this function is called.
876 */
877void rt_mutex_destroy(struct rt_mutex *lock)
878{
879 WARN_ON(rt_mutex_is_locked(lock));
880#ifdef CONFIG_DEBUG_RT_MUTEXES
881 lock->magic = NULL;
882#endif
883}
884
885EXPORT_SYMBOL_GPL(rt_mutex_destroy);
886
887/**
888 * __rt_mutex_init - initialize the rt lock
889 *
890 * @lock: the rt lock to be initialized
891 *
892 * Initialize the rt lock to unlocked state.
893 *
894 * Initializing a locked rt lock is not allowed
895 */
896void __rt_mutex_init(struct rt_mutex *lock, const char *name)
897{
898 lock->owner = NULL;
899 spin_lock_init(&lock->wait_lock);
900 plist_head_init(&lock->wait_list, &lock->wait_lock);
901
902 debug_rt_mutex_init(lock, name);
903}
904EXPORT_SYMBOL_GPL(__rt_mutex_init);
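
To make the priority arithmetic above concrete: rt_mutex_getprio() returns min(task_top_pi_waiter(task)->pi_list_entry.prio, task->normal_prio), and in the kernel a lower prio value means a higher priority. With made-up numbers, an owner whose normal_prio is 120 and whose top pi_waiter has prio 98 is raised to an effective priority of 98 by __rt_mutex_adjust_prio(); once that waiter is removed from pi_waiters, the same calculation lets rt_mutex_adjust_prio() drop the owner back to 120.
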
diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h
new file mode 100644
index 000000000000..1e0fca13ff72
--- /dev/null
+++ b/kernel/rtmutex.h
@@ -0,0 +1,29 @@
1/*
2 * RT-Mutexes: blocking mutual exclusion locks with PI support
3 *
4 * started by Ingo Molnar and Thomas Gleixner:
5 *
6 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 *
9 * This file contains macros used solely by rtmutex.c.
10 * Non-debug version.
11 */
12
13#define __IP_DECL__
14#define __IP__
15#define __RET_IP__
16#define rt_mutex_deadlock_check(l) (0)
17#define rt_mutex_deadlock_account_lock(m, t) do { } while (0)
18#define rt_mutex_deadlock_account_unlock(l) do { } while (0)
19#define debug_rt_mutex_init_waiter(w) do { } while (0)
20#define debug_rt_mutex_free_waiter(w) do { } while (0)
21#define debug_rt_mutex_lock(l) do { } while (0)
22#define debug_rt_mutex_proxy_lock(l,p) do { } while (0)
23#define debug_rt_mutex_proxy_unlock(l) do { } while (0)
24#define debug_rt_mutex_unlock(l) do { } while (0)
25#define debug_rt_mutex_init(m, n) do { } while (0)
26#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)
27#define debug_rt_mutex_print_deadlock(w) do { } while (0)
28#define debug_rt_mutex_detect_deadlock(w,d) (d)
29#define debug_rt_mutex_reset_waiter(w) do { } while (0)
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
new file mode 100644
index 000000000000..50eed60eb085
--- /dev/null
+++ b/kernel/rtmutex_common.h
@@ -0,0 +1,93 @@
1/*
2 * RT Mutexes: blocking mutual exclusion locks with PI support
3 *
4 * started by Ingo Molnar and Thomas Gleixner:
5 *
6 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 *
9 * This file contains the private data structure and API definitions.
10 */
11
12#ifndef __KERNEL_RTMUTEX_COMMON_H
13#define __KERNEL_RTMUTEX_COMMON_H
14
15#include <linux/rtmutex.h>
16
17/*
18 * This is the control structure for tasks blocked on a rt_mutex,
19 * which is allocated on the kernel stack of the blocked task.
20 *
21 * @list_entry: pi node to enqueue into the mutex waiters list
22 * @pi_list_entry: pi node to enqueue into the mutex owner waiters list
23 * @task: task reference to the blocked task
24 */
25struct rt_mutex_waiter {
26 struct plist_node list_entry;
27 struct plist_node pi_list_entry;
28 struct task_struct *task;
29 struct rt_mutex *lock;
30#ifdef CONFIG_DEBUG_RT_MUTEXES
31 unsigned long ip;
32 pid_t deadlock_task_pid;
33 struct rt_mutex *deadlock_lock;
34#endif
35};
36
37/*
38 * Various helpers to access the waiters-plist:
39 */
40static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
41{
42 return !plist_head_empty(&lock->wait_list);
43}
44
45static inline struct rt_mutex_waiter *
46rt_mutex_top_waiter(struct rt_mutex *lock)
47{
48 struct rt_mutex_waiter *w;
49
50 w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
51 list_entry);
52 BUG_ON(w->lock != lock);
53
54 return w;
55}
56
57static inline int task_has_pi_waiters(struct task_struct *p)
58{
59 return !plist_head_empty(&p->pi_waiters);
60}
61
62static inline struct rt_mutex_waiter *
63task_top_pi_waiter(struct task_struct *p)
64{
65 return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
66 pi_list_entry);
67}
68
69/*
70 * lock->owner state tracking:
71 */
72#define RT_MUTEX_OWNER_PENDING 1UL
73#define RT_MUTEX_HAS_WAITERS 2UL
74#define RT_MUTEX_OWNER_MASKALL 3UL
75
76static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
77{
78 return (struct task_struct *)
79 ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
80}
81
82static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
83{
84 return (struct task_struct *)
85 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
86}
87
88static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
89{
90 return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
91}
92
93#endif
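
The accessors above decode the owner-field encoding documented in the state table at the top of kernel/rtmutex.c: bits 0 and 1 of the (aligned) task_struct pointer double as RT_MUTEX_OWNER_PENDING and RT_MUTEX_HAS_WAITERS. The following stand-alone sketch (not part of the patch; the pointer value is made up) performs the same masking in user space:

#include <stdio.h>

#define RT_MUTEX_OWNER_PENDING 1UL
#define RT_MUTEX_HAS_WAITERS   2UL
#define RT_MUTEX_OWNER_MASKALL 3UL

int main(void)
{
        /* hypothetical, suitably aligned "struct task_struct *" value */
        unsigned long task = 0x12345600UL;
        /* a pending owner was assigned and further waiters are queued */
        unsigned long owner = task | RT_MUTEX_OWNER_PENDING | RT_MUTEX_HAS_WAITERS;

        printf("owner task    : %#lx\n", owner & ~RT_MUTEX_OWNER_MASKALL);   /* rt_mutex_owner() */
        printf("owner pending : %lu\n", owner & RT_MUTEX_OWNER_PENDING);     /* rt_mutex_owner_pending() */
        printf("has waiters   : %lu\n", (owner & RT_MUTEX_HAS_WAITERS) >> 1);
        return 0;
}
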
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f54afed8426f..93a2c5398648 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -133,6 +133,10 @@ extern int acct_parm[];
 extern int no_unaligned_warning;
 #endif
 
+#ifdef CONFIG_RT_MUTEXES
+extern int max_lock_depth;
+#endif
+
 static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
 ctl_table *, void **);
 static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
@@ -688,6 +692,17 @@ static ctl_table kern_table[] = {
 .proc_handler = &proc_dointvec,
 },
 #endif
+#ifdef CONFIG_RT_MUTEXES
+ {
+ .ctl_name = KERN_MAX_LOCK_DEPTH,
+ .procname = "max_lock_depth",
+ .data = &max_lock_depth,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
+
 { .ctl_name = 0 }
 };
 
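
A brief note on the new tunable: since the entry above is added to kern_table with procname "max_lock_depth" and mode 0644, it should appear as /proc/sys/kernel/max_lock_depth (kernel.max_lock_depth via sysctl), letting an administrator read or adjust the bound that rt_mutex_adjust_prio_chain() places on the length of the PI boosting chain (1024 by default), for example with: echo 2048 > /proc/sys/kernel/max_lock_depth.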