author     Linus Torvalds <torvalds@g5.osdl.org>  2006-01-09 20:31:38 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-01-09 20:31:38 -0500
commit     80c0531514516e43ae118ddf38424e06e5c3cb3c (patch)
tree       2eef8cf8fdf505b18f83078d1eb41167e98f5b54 /include
parent     a457aa6c2bdd743bbbffd3f9e4fdbd8c71f8af1b (diff)
parent     11b751ae8c8ca3fa24c85bd5a3e51dd9f95cda17 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/mingo/mutex-2.6
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/atomic.h        1
-rw-r--r--  include/asm-alpha/mutex.h         9
-rw-r--r--  include/asm-arm/atomic.h          2
-rw-r--r--  include/asm-arm/mutex.h           128
-rw-r--r--  include/asm-arm26/atomic.h        2
-rw-r--r--  include/asm-cris/atomic.h         2
-rw-r--r--  include/asm-cris/mutex.h          9
-rw-r--r--  include/asm-frv/atomic.h          1
-rw-r--r--  include/asm-frv/mutex.h           9
-rw-r--r--  include/asm-generic/mutex-dec.h   110
-rw-r--r--  include/asm-generic/mutex-null.h  24
-rw-r--r--  include/asm-generic/mutex-xchg.h  117
-rw-r--r--  include/asm-h8300/atomic.h        2
-rw-r--r--  include/asm-h8300/mutex.h         9
-rw-r--r--  include/asm-i386/atomic.h         1
-rw-r--r--  include/asm-i386/mutex.h          124
-rw-r--r--  include/asm-ia64/atomic.h         1
-rw-r--r--  include/asm-ia64/mutex.h          9
-rw-r--r--  include/asm-m32r/atomic.h         1
-rw-r--r--  include/asm-m32r/mutex.h          9
-rw-r--r--  include/asm-m68k/atomic.h         1
-rw-r--r--  include/asm-m68k/mutex.h          9
-rw-r--r--  include/asm-m68knommu/atomic.h    1
-rw-r--r--  include/asm-m68knommu/mutex.h     9
-rw-r--r--  include/asm-mips/atomic.h         1
-rw-r--r--  include/asm-mips/mutex.h          9
-rw-r--r--  include/asm-parisc/atomic.h       1
-rw-r--r--  include/asm-parisc/mutex.h        9
-rw-r--r--  include/asm-powerpc/atomic.h      1
-rw-r--r--  include/asm-powerpc/mutex.h       9
-rw-r--r--  include/asm-s390/atomic.h         2
-rw-r--r--  include/asm-s390/mutex.h          9
-rw-r--r--  include/asm-sh/atomic.h           2
-rw-r--r--  include/asm-sh/mutex.h            9
-rw-r--r--  include/asm-sh64/atomic.h         2
-rw-r--r--  include/asm-sh64/mutex.h          9
-rw-r--r--  include/asm-sparc/atomic.h        1
-rw-r--r--  include/asm-sparc/mutex.h         9
-rw-r--r--  include/asm-sparc64/atomic.h      1
-rw-r--r--  include/asm-sparc64/mutex.h       9
-rw-r--r--  include/asm-um/mutex.h            9
-rw-r--r--  include/asm-v850/atomic.h         2
-rw-r--r--  include/asm-v850/mutex.h          9
-rw-r--r--  include/asm-x86_64/atomic.h       1
-rw-r--r--  include/asm-x86_64/mutex.h        113
-rw-r--r--  include/asm-xtensa/atomic.h       1
-rw-r--r--  include/asm-xtensa/mutex.h        9
-rw-r--r--  include/linux/ext3_fs_i.h         2
-rw-r--r--  include/linux/fs.h                13
-rw-r--r--  include/linux/ide.h               5
-rw-r--r--  include/linux/jffs2_fs_i.h        4
-rw-r--r--  include/linux/kernel.h            9
-rw-r--r--  include/linux/loop.h              4
-rw-r--r--  include/linux/mm.h                4
-rw-r--r--  include/linux/mutex-debug.h       21
-rw-r--r--  include/linux/mutex.h             119
-rw-r--r--  include/linux/nfsd/nfsfh.h        6
-rw-r--r--  include/linux/pipe_fs_i.h         2
-rw-r--r--  include/linux/reiserfs_fs.h       2
-rw-r--r--  include/linux/sched.h             5
60 files changed, 995 insertions(+), 18 deletions(-)
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index cb03bbe92cdf..fc77f7413083 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -176,6 +176,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
diff --git a/include/asm-alpha/mutex.h b/include/asm-alpha/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-alpha/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index f72b63309bc5..3d7283d84405 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -175,6 +175,8 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 
 #endif /* __LINUX_ARM_ARCH__ */
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
         int c, old;
diff --git a/include/asm-arm/mutex.h b/include/asm-arm/mutex.h
new file mode 100644
index 000000000000..6caa59f1f595
--- /dev/null
+++ b/include/asm-arm/mutex.h
@@ -0,0 +1,128 @@
+/*
+ * include/asm-arm/mutex.h
+ *
+ * ARM optimized mutex locking primitives
+ *
+ * Please look into asm-generic/mutex-xchg.h for a formal definition.
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+#if __LINUX_ARM_ARCH__ < 6
+/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
+# include <asm-generic/mutex-xchg.h>
+#else
+
+/*
+ * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
+ * atomic decrement (it is not a reliable atomic decrement but it satisfies
+ * the defined semantics for our purpose, while being smaller and faster
+ * than a real atomic decrement or atomic swap). The idea is to attempt
+ * decrementing the lock value only once. If once decremented it isn't zero,
+ * or if its store-back fails due to a dispute on the exclusive store, we
+ * simply bail out immediately through the slow path where the lock will be
+ * reattempted until it succeeds.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+        int __ex_flag, __res; \
+ \
+        typecheck(atomic_t *, count); \
+        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+        __asm__ ( \
+                "ldrex  %0, [%2]        \n" \
+                "sub    %0, %0, #1      \n" \
+                "strex  %1, %0, [%2]    \n" \
+ \
+                : "=&r" (__res), "=&r" (__ex_flag) \
+                : "r" (&(count)->counter) \
+                : "cc", "memory"); \
+ \
+        if (unlikely(__res || __ex_flag)) \
+                fail_fn(count); \
+} while (0)
+
+#define __mutex_fastpath_lock_retval(count, fail_fn) \
+({ \
+        int __ex_flag, __res; \
+ \
+        typecheck(atomic_t *, count); \
+        typecheck_fn(fastcall int (*)(atomic_t *), fail_fn); \
+ \
+        __asm__ ( \
+                "ldrex  %0, [%2]        \n" \
+                "sub    %0, %0, #1      \n" \
+                "strex  %1, %0, [%2]    \n" \
+ \
+                : "=&r" (__res), "=&r" (__ex_flag) \
+                : "r" (&(count)->counter) \
+                : "cc", "memory"); \
+ \
+        __res |= __ex_flag; \
+        if (unlikely(__res != 0)) \
+                __res = fail_fn(count); \
+        __res; \
+})
+
+/*
+ * The same trick is used for the unlock fast path. However the original
+ * value, rather than the result, is used to test for success in order to
+ * have better generated assembly.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+        int __ex_flag, __res, __orig; \
+ \
+        typecheck(atomic_t *, count); \
+        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+        __asm__ ( \
+                "ldrex  %0, [%3]        \n" \
+                "add    %1, %0, #1      \n" \
+                "strex  %2, %1, [%3]    \n" \
+ \
+                : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) \
+                : "r" (&(count)->counter) \
+                : "cc", "memory"); \
+ \
+        if (unlikely(__orig || __ex_flag)) \
+                fail_fn(count); \
+} while (0)
+
+/*
+ * If the unlock was done on a contended lock, or if the unlock simply fails,
+ * then the mutex remains locked.
+ */
+#define __mutex_slowpath_needs_to_unlock()      1
+
+/*
+ * For __mutex_fastpath_trylock we use another construct which could be
+ * described as a "single value cmpxchg".
+ *
+ * This provides the needed trylock semantics like cmpxchg would, but it is
+ * lighter and less generic than a true cmpxchg implementation.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+        int __ex_flag, __res, __orig;
+
+        __asm__ (
+
+                "1: ldrex       %0, [%3]        \n"
+                "subs           %1, %0, #1      \n"
+                "strexeq        %2, %1, [%3]    \n"
+                "movlt          %0, #0          \n"
+                "cmpeq          %2, #0          \n"
+                "bgt            1b              \n"
+
+                : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+                : "r" (&count->counter)
+                : "cc", "memory");
+
+        return __orig;
+}
+
+#endif
+#endif
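
kernel/mutex.c is not part of this diff, but for orientation, the core code consumes these per-arch fastpath hooks roughly as follows (a sketch, not the verbatim source):

        /* Sketch: the inline fastpath runs first; only on contention is the
         * fail_fn (the out-of-line slowpath) invoked. On ARMv6+ the lock
         * fastpath above is a single ldrex/strex attempt. */
        void fastcall mutex_lock(struct mutex *lock)
        {
                might_sleep();
                __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        }

        void fastcall mutex_unlock(struct mutex *lock)
        {
                __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
        }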
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index 3074b0e76343..1552c8653990 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -76,6 +76,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
         return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
         int ret;
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
index 2df2c7aa19b7..0b51a87e5532 100644
--- a/include/asm-cris/atomic.h
+++ b/include/asm-cris/atomic.h
@@ -136,6 +136,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
         return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
         int ret;
diff --git a/include/asm-cris/mutex.h b/include/asm-cris/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-cris/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 9c9e9499cfd8..a59f684b4f33 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -328,6 +328,7 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
 #endif
 
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
diff --git a/include/asm-frv/mutex.h b/include/asm-frv/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-frv/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
new file mode 100644
index 000000000000..74b18cda169f
--- /dev/null
+++ b/include/asm-generic/mutex-dec.h
@@ -0,0 +1,110 @@
+/*
+ * asm-generic/mutex-dec.h
+ *
+ * Generic implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ */
+#ifndef _ASM_GENERIC_MUTEX_DEC_H
+#define _ASM_GENERIC_MUTEX_DEC_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ *                         from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+        if (unlikely(atomic_dec_return(count) < 0)) \
+                fail_fn(count); \
+        else \
+                smp_mb(); \
+} while (0)
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+        if (unlikely(atomic_dec_return(count) < 0))
+                return fail_fn(count);
+        else {
+                smp_mb();
+                return 0;
+        }
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, otherwise
+ * it needs to return 0.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+        smp_mb(); \
+        if (unlikely(atomic_inc_return(count) <= 0)) \
+                fail_fn(count); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock()      1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it set to 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+        /*
+         * We have two variants here. The cmpxchg based one is the best one
+         * because it never induces a false contention state. It is included
+         * here because architectures using the inc/dec algorithms over the
+         * xchg ones are much more likely to support cmpxchg natively.
+         *
+         * If not, we fall back to the spinlock based variant - that is
+         * just as efficient (and simpler) as a 'destructive' probing of
+         * the mutex state would be.
+         */
+#ifdef __HAVE_ARCH_CMPXCHG
+        if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
+                smp_mb();
+                return 1;
+        }
+        return 0;
+#else
+        return fail_fn(count);
+#endif
+}
+
+#endif
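
The counter protocol mutex-dec.h relies on (1 = unlocked, 0 = locked, negative = locked with possible waiters) can be demonstrated stand-alone with C11 atomics; a hypothetical user-space sketch, not kernel code:

        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_int count = 1;    /* 1: unlocked */

        static void slowpath_lock(void)   { /* would block on a wait list */ }
        static void slowpath_unlock(void) { /* would wake up a waiter */ }

        static void lock(void)
        {
                /* atomic_dec_return() < 0  <=>  the old value was < 1 */
                if (atomic_fetch_sub(&count, 1) < 1)
                        slowpath_lock();
        }

        static void unlock(void)
        {
                /* atomic_inc_return() <= 0  <=>  the old value was < 0,
                 * i.e. someone decremented it while we held the lock */
                if (atomic_fetch_add(&count, 1) < 0)
                        slowpath_unlock();
        }

        int main(void)
        {
                lock();         /* count: 1 -> 0 */
                unlock();       /* count: 0 -> 1 */
                printf("%d\n", atomic_load(&count));    /* prints 1 */
                return 0;
        }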
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
new file mode 100644
index 000000000000..5cf8b7ce0c45
--- /dev/null
+++ b/include/asm-generic/mutex-null.h
@@ -0,0 +1,24 @@
+/*
+ * asm-generic/mutex-null.h
+ *
+ * Generic implementation of the mutex fastpath, based on NOP :-)
+ *
+ * This is used by the mutex-debugging infrastructure, but it can also
+ * be used by architectures that (for whatever reason) want to use the
+ * spinlock based slowpath.
+ */
+#ifndef _ASM_GENERIC_MUTEX_NULL_H
+#define _ASM_GENERIC_MUTEX_NULL_H
+
+/* extra parameter only needed for mutex debugging: */
+#ifndef __IP__
+# define __IP__
+#endif
+
+#define __mutex_fastpath_lock(count, fail_fn)           fail_fn(count __RET_IP__)
+#define __mutex_fastpath_lock_retval(count, fail_fn)    fail_fn(count __RET_IP__)
+#define __mutex_fastpath_unlock(count, fail_fn)         fail_fn(count __RET_IP__)
+#define __mutex_fastpath_trylock(count, fail_fn)        fail_fn(count)
+#define __mutex_slowpath_needs_to_unlock()              1
+
+#endif
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
new file mode 100644
index 000000000000..1d24f47e6c48
--- /dev/null
+++ b/include/asm-generic/mutex-xchg.h
@@ -0,0 +1,117 @@
+/*
+ * asm-generic/mutex-xchg.h
+ *
+ * Generic implementation of the mutex fastpath, based on xchg().
+ *
+ * NOTE: An xchg based implementation is less optimal than an atomic
+ *       decrement/increment based implementation. If your architecture
+ *       has a reasonable atomic dec/inc then you should probably use
+ *       asm-generic/mutex-dec.h instead, or you could open-code an
+ *       optimized version in asm/mutex.h.
+ */
+#ifndef _ASM_GENERIC_MUTEX_XCHG_H
+#define _ASM_GENERIC_MUTEX_XCHG_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ *                         from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function MUST leave the value lower than 1
+ * even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+        if (unlikely(atomic_xchg(count, 0) != 1)) \
+                fail_fn(count); \
+        else \
+                smp_mb(); \
+} while (0)
+
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+        if (unlikely(atomic_xchg(count, 0) != 1))
+                return fail_fn(count);
+        else {
+                smp_mb();
+                return 0;
+        }
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ * If the implementation sets it to a value lower than 1, the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, otherwise
+ * it needs to return 0.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+        smp_mb(); \
+        if (unlikely(atomic_xchg(count, 1) != 0)) \
+                fail_fn(count); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock()      0
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: spinlock based trylock implementation
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it set to 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+        int prev = atomic_xchg(count, 0);
+
+        if (unlikely(prev < 0)) {
+                /*
+                 * The lock was marked contended so we must restore that
+                 * state. If while doing so we get back a prev value of 1
+                 * then we just own it.
+                 *
+                 * [ In the rare case of the mutex going to 1, to 0, to -1
+                 *   and then back to 0 in this few-instructions window,
+                 *   this has the potential to trigger the slowpath for the
+                 *   owner's unlock path needlessly, but that's not a problem
+                 *   in practice. ]
+                 */
+                prev = atomic_xchg(count, prev);
+                if (prev < 0)
+                        prev = 0;
+        }
+        smp_mb();
+
+        return prev;
+}
+
+#endif
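
The delicate case above is __mutex_fastpath_trylock(): the xchg wipes out a negative "there are waiters" marker, so a failed attempt has to write it back, and may even find it won the lock while doing so. A stand-alone C11 sketch of just that sequence (hypothetical demo, not kernel code):

        #include <stdatomic.h>

        /* 1: unlocked, 0: locked, negative: locked, possible waiters */
        static atomic_int count = -1;   /* pretend: locked and contended */

        static int trylock(void)
        {
                int prev = atomic_exchange(&count, 0);

                if (prev < 0) {
                        /* We clobbered the waiters marker; restore it. If the
                         * owner unlocked in the meantime we read back 1 and
                         * now actually own the mutex. */
                        prev = atomic_exchange(&count, prev);
                        if (prev < 0)
                                prev = 0;
                }
                return prev;    /* 1 on success, 0 on failure */
        }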
diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h
index d891541e89c3..21f54428c86b 100644
--- a/include/asm-h8300/atomic.h
+++ b/include/asm-h8300/atomic.h
@@ -95,6 +95,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
         return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
         int ret;
diff --git a/include/asm-h8300/mutex.h b/include/asm-h8300/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-h8300/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 7a5472d77091..de649d3aa2d4 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -216,6 +216,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 }
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
new file mode 100644
index 000000000000..4e5e3de1b9a6
--- /dev/null
+++ b/include/asm-i386/mutex.h
@@ -0,0 +1,124 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ *                         from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function MUST leave the value lower than 1
+ * even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+        unsigned int dummy; \
+ \
+        typecheck(atomic_t *, count); \
+        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+        __asm__ __volatile__( \
+                LOCK "  decl (%%eax)    \n" \
+                     "  js "#fail_fn"   \n" \
+ \
+                :"=a" (dummy) \
+                : "a" (count) \
+                : "memory", "ecx", "edx"); \
+} while (0)
+
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count,
+                             int fastcall (*fail_fn)(atomic_t *))
+{
+        if (unlikely(atomic_dec_return(count) < 0))
+                return fail_fn(count);
+        else
+                return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value
+ * to 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value lower than 1, the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, otherwise
+ * it needs to return 0.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+        unsigned int dummy; \
+ \
+        typecheck(atomic_t *, count); \
+        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+        __asm__ __volatile__( \
+                LOCK "  incl (%%eax)    \n" \
+                     "  jle "#fail_fn"  \n" \
+ \
+                :"=a" (dummy) \
+                : "a" (count) \
+                : "memory", "ecx", "edx"); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock()      1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it set to 0 on failure.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+        /*
+         * We have two variants here. The cmpxchg based one is the best one
+         * because it never induces a false contention state. It is included
+         * here because architectures using the inc/dec algorithms over the
+         * xchg ones are much more likely to support cmpxchg natively.
+         *
+         * If not, we fall back to the spinlock based variant - that is
+         * just as efficient (and simpler) as a 'destructive' probing of
+         * the mutex state would be.
+         */
+#ifdef __HAVE_ARCH_CMPXCHG
+        if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+                return 1;
+        return 0;
+#else
+        return fail_fn(count);
+#endif
+}
+
+#endif
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 15cf7984c48e..d3e0dfa99e1f 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -89,6 +89,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 }
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-ia64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index 70761278b6cb..3122fe106f05 100644
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -243,6 +243,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 #define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-m32r/mutex.h b/include/asm-m32r/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-m32r/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index b8a4e75d679d..a4a84d5c65d5 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -140,6 +140,7 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
diff --git a/include/asm-m68k/mutex.h b/include/asm-m68k/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-m68k/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index 1702dbe9318c..6c4e4b63e454 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -129,6 +129,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
diff --git a/include/asm-m68knommu/mutex.h b/include/asm-m68knommu/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-m68knommu/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 92256e43a938..94a95872d727 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -289,6 +289,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-mips/mutex.h b/include/asm-mips/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-mips/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 64ebd086c40d..2ca56d34aaad 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -165,6 +165,7 @@ static __inline__ int atomic_read(const atomic_t *v)
 
 /* exported interface */
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-parisc/mutex.h b/include/asm-parisc/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-parisc/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index ae395a0632a6..248f9aec959c 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -165,6 +165,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-powerpc/mutex.h b/include/asm-powerpc/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-powerpc/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index d82aedf616fe..be6fefe223d6 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -75,6 +75,8 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
         __CS_LOOP(v, mask, "or");
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
         __asm__ __volatile__("  cs   %0,%3,0(%2)\n"
diff --git a/include/asm-s390/mutex.h b/include/asm-s390/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-s390/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 618d8e0de348..fb627de217f2 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -101,6 +101,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
         return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
         int ret;
diff --git a/include/asm-sh/mutex.h b/include/asm-sh/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-sh/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
index f3ce5c0df13a..28f2ea9b567b 100644
--- a/include/asm-sh64/atomic.h
+++ b/include/asm-sh64/atomic.h
@@ -113,6 +113,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
         return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
         int ret;
diff --git a/include/asm-sh64/mutex.h b/include/asm-sh64/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-sh64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index accb4967e9d2..e1033170bd3a 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -20,6 +20,7 @@ typedef struct { volatile int counter; } atomic_t;
 
 extern int __atomic_add_return(int, atomic_t *);
 extern int atomic_cmpxchg(atomic_t *, int, int);
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 extern int atomic_add_unless(atomic_t *, int, int);
 extern void atomic_set(atomic_t *, int);
 
diff --git a/include/asm-sparc/mutex.h b/include/asm-sparc/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-sparc/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 11f5aa5d108c..25256bdc8aae 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -72,6 +72,7 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
diff --git a/include/asm-sparc64/mutex.h b/include/asm-sparc64/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-sparc64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-um/mutex.h b/include/asm-um/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-um/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h
index f5b9ab6f4e70..166df00457ea 100644
--- a/include/asm-v850/atomic.h
+++ b/include/asm-v850/atomic.h
@@ -104,6 +104,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
         return ret;
 }
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
         int ret;
diff --git a/include/asm-v850/mutex.h b/include/asm-v850/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-v850/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 72eb071488c7..6b540237a2f8 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -389,6 +389,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 #define atomic64_dec_return(v)  (atomic64_sub_return(1,v))
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
new file mode 100644
index 000000000000..818abfd262d1
--- /dev/null
+++ b/include/asm-x86_64/mutex.h
@@ -0,0 +1,113 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - decrement and call function if negative
+ * @v: pointer of type atomic_t
+ * @fail_fn: function to call if the result is negative
+ *
+ * Atomically decrements @v and calls <fail_fn> if the result is negative.
+ */
+#define __mutex_fastpath_lock(v, fail_fn) \
+do { \
+        unsigned long dummy; \
+ \
+        typecheck(atomic_t *, v); \
+        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+        __asm__ __volatile__( \
+                LOCK "  decl (%%rdi)    \n" \
+                     "  js 2f           \n" \
+                     "1:                \n" \
+ \
+                LOCK_SECTION_START("") \
+                     "2: call "#fail_fn"\n" \
+                     "   jmp 1b         \n" \
+                LOCK_SECTION_END \
+ \
+                :"=D" (dummy) \
+                : "D" (v) \
+                : "rax", "rsi", "rdx", "rcx", \
+                  "r8", "r9", "r10", "r11", "memory"); \
+} while (0)
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count,
+                             int fastcall (*fail_fn)(atomic_t *))
+{
+        if (unlikely(atomic_dec_return(count) < 0))
+                return fail_fn(count);
+        else
+                return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - increment and call function if nonpositive
+ * @v: pointer of type atomic_t
+ * @fail_fn: function to call if the result is nonpositive
+ *
+ * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
+ */
+#define __mutex_fastpath_unlock(v, fail_fn) \
+do { \
+        unsigned long dummy; \
+ \
+        typecheck(atomic_t *, v); \
+        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+        __asm__ __volatile__( \
+                LOCK "  incl (%%rdi)    \n" \
+                     "  jle 2f          \n" \
+                     "1:                \n" \
+ \
+                LOCK_SECTION_START("") \
+                     "2: call "#fail_fn"\n" \
+                     "   jmp 1b         \n" \
+                LOCK_SECTION_END \
+ \
+                :"=D" (dummy) \
+                : "D" (v) \
+                : "rax", "rsi", "rdx", "rcx", \
+                  "r8", "r9", "r10", "r11", "memory"); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock()      1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
+ * if it wasn't 1 originally. [the fallback function is never used on
+ * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+        if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+                return 1;
+        else
+                return 0;
+}
+
+#endif
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index e2ce06b101ad..fe105a123924 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -224,6 +224,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 #define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-xtensa/mutex.h b/include/asm-xtensa/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-xtensa/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index 2914f7b07156..e71dd98dbcae 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -87,7 +87,7 @@ struct ext3_inode_info {
 #ifdef CONFIG_EXT3_FS_XATTR
         /*
          * Extended attributes can be read independently of the main file
-         * data. Taking i_sem even when reading would cause contention
+         * data. Taking i_mutex even when reading would cause contention
          * between readers of EAs and writers of regular file data, so
          * instead we synchronize on xattr_sem when reading or changing
          * EAs.
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4c82219b0fae..92ae3e2067b0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -219,6 +219,7 @@ extern int dir_notify_enable;
 #include <linux/prio_tree.h>
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/mutex.h>
 
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
@@ -484,7 +485,7 @@ struct inode {
         unsigned long           i_blocks;
         unsigned short          i_bytes;
         spinlock_t              i_lock; /* i_blocks, i_bytes, maybe i_size */
-        struct semaphore        i_sem;
+        struct mutex            i_mutex;
         struct rw_semaphore     i_alloc_sem;
         struct inode_operations *i_op;
         struct file_operations  *i_fop; /* former ->i_op->default_file_ops */
@@ -820,7 +821,7 @@ struct super_block {
         unsigned long           s_magic;
         struct dentry           *s_root;
         struct rw_semaphore     s_umount;
-        struct semaphore        s_lock;
+        struct mutex            s_lock;
         int                     s_count;
         int                     s_syncing;
         int                     s_need_sync_fs;
@@ -892,13 +893,13 @@ static inline int has_fs_excl(void)
 static inline void lock_super(struct super_block * sb)
 {
         get_fs_excl();
-        down(&sb->s_lock);
+        mutex_lock(&sb->s_lock);
 }
 
 static inline void unlock_super(struct super_block * sb)
 {
         put_fs_excl();
-        up(&sb->s_lock);
+        mutex_unlock(&sb->s_lock);
 }
 
 /*
@@ -1191,7 +1192,7 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc);
  * directory. The name should be stored in the @name (with the
  * understanding that it is already pointing to a %NAME_MAX+1 sized
  * buffer. get_name() should return %0 on success, a negative error code
- * or error. @get_name will be called without @parent->i_sem held.
+ * or error. @get_name will be called without @parent->i_mutex held.
  *
  * get_parent:
  * @get_parent should find the parent directory for the given @child which
@@ -1213,7 +1214,7 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc);
  * nfsd_find_fh_dentry() in either the @obj or @parent parameters.
  *
  * Locking rules:
- *      get_parent is called with child->d_inode->i_sem down
+ *      get_parent is called with child->d_inode->i_mutex down
  *      get_name is not (which is possibly inconsistent)
  */
 
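
Call sites converted elsewhere in this merge follow directly from the hunks above: where code used to do down(&inode->i_sem) it now does mutex_lock(&inode->i_mutex). A hypothetical filesystem-style caller, for illustration only:

        static int example_create(struct inode *dir)
        {
                int err = 0;

                mutex_lock(&dir->i_mutex);      /* was: down(&dir->i_sem) */
                /* ... create the directory entry ... */
                mutex_unlock(&dir->i_mutex);    /* was: up(&dir->i_sem) */

                return err;
        }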
diff --git a/include/linux/ide.h b/include/linux/ide.h
index ef8d0cbb832f..9a8c05dbe4f3 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -18,6 +18,7 @@
 #include <linux/bio.h>
 #include <linux/device.h>
 #include <linux/pci.h>
+#include <linux/completion.h>
 #include <asm/byteorder.h>
 #include <asm/system.h>
 #include <asm/io.h>
@@ -638,7 +639,7 @@ typedef struct ide_drive_s {
         int             crc_count;      /* crc counter to reduce drive speed */
         struct list_head list;
         struct device   gendev;
-        struct semaphore gendev_rel_sem;        /* to deal with device release() */
+        struct completion gendev_rel_comp;      /* to deal with device release() */
 } ide_drive_t;
 
 #define to_ide_device(dev)      container_of(dev, ide_drive_t, gendev)
@@ -794,7 +795,7 @@ typedef struct hwif_s {
         unsigned        sg_mapped  : 1; /* sg_table and sg_nents are ready */
 
         struct device   gendev;
-        struct semaphore gendev_rel_sem;        /* To deal with device release() */
+        struct completion gendev_rel_comp;      /* To deal with device release() */
 
         void            *hwif_data;     /* extra hwif data */
 
diff --git a/include/linux/jffs2_fs_i.h b/include/linux/jffs2_fs_i.h
index ef85ab56302b..ad565bf9dcc1 100644
--- a/include/linux/jffs2_fs_i.h
+++ b/include/linux/jffs2_fs_i.h
@@ -8,11 +8,11 @@
 #include <asm/semaphore.h>
 
 struct jffs2_inode_info {
-        /* We need an internal semaphore similar to inode->i_sem.
+        /* We need an internal mutex similar to inode->i_mutex.
            Unfortunately, we can't use the existing one, because
            either the GC would deadlock, or we'd have to release it
            before letting GC proceed. Or we'd have to put ugliness
-           into the GC code so it didn't attempt to obtain the i_sem
+           into the GC code so it didn't attempt to obtain the i_mutex
            for the inode(s) which are already locked */
         struct semaphore sem;
 
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index ca7ff8fdd090..d0e6ca3b00ef 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -286,6 +286,15 @@ extern void dump_stack(void);
                 1; \
 })
 
+/*
+ * Check at compile time that 'function' is a certain type, or is a pointer
+ * to that type (needs a typedef for the function type).
+ */
+#define typecheck_fn(type,function) \
+({      typeof(type) __tmp = function; \
+        (void)__tmp; \
+})
+
 #endif /* __KERNEL__ */
 
 #define SI_LOAD_SHIFT   16
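
typecheck_fn() lets the fastpath macros above verify at compile time that the fail_fn they receive has the expected signature; assigning the function to a typed dummy makes the compiler warn on a mismatch. A hypothetical illustration:

        typedef void (*atomic_fn_t)(atomic_t *);

        static void good_fn(atomic_t *v) { }
        static void bad_fn(int v) { }

        static void example(void)
        {
                typecheck_fn(atomic_fn_t, good_fn);     /* compiles cleanly */
                /* typecheck_fn(atomic_fn_t, bad_fn);      would warn:
                 * initialization from incompatible pointer type */
        }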
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 40f63c9879d2..f96506782ebe 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -58,9 +58,9 @@ struct loop_device {
         struct bio              *lo_bio;
         struct bio              *lo_biotail;
         int                     lo_state;
-        struct semaphore        lo_sem;
+        struct completion       lo_done;
+        struct completion       lo_bh_done;
         struct semaphore        lo_ctl_mutex;
-        struct semaphore        lo_bh_mutex;
         int                     lo_pending;
 
         request_queue_t         *lo_queue;
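
lo_sem and lo_bh_mutex were really one-shot "the loop thread has started/exited" signals, which is exactly what completions express; a hypothetical sketch of the post-conversion handshake:

        /* loop thread side: signal once on exit */
        complete(&lo->lo_done);                 /* was: up(&lo->lo_sem) */

        /* control side: wait for the thread to finish */
        wait_for_completion(&lo->lo_done);      /* was: down(&lo->lo_sem) */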
diff --git a/include/linux/mm.h b/include/linux/mm.h
index df80e63903b5..3f1fafc0245e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -13,6 +13,7 @@
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
 #include <linux/fs.h>
+#include <linux/mutex.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -1024,6 +1025,9 @@ static inline void vm_stat_account(struct mm_struct *mm,
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
 {
+        if (!PageHighMem(page) && !enable)
+                mutex_debug_check_no_locks_freed(page_address(page),
+                                                 page_address(page + numpages));
 }
 #endif
 
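
This hook gives the mutex debugging code a chance to catch a class of use-after-free bugs: pages being freed (and, with DEBUG_PAGEALLOC, unmapped) while they still contain a held mutex. A hypothetical example of the bug class it flags:

        struct obj {
                struct mutex lock;
                /* ... */
        };

        static void buggy(struct obj *o)
        {
                mutex_lock(&o->lock);
                kfree(o);       /* freed memory still contains a held mutex;
                                 * the debug checks would complain */
        }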
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
new file mode 100644
index 000000000000..0ccd8f983b50
--- /dev/null
+++ b/include/linux/mutex-debug.h
@@ -0,0 +1,21 @@
+#ifndef __LINUX_MUTEX_DEBUG_H
+#define __LINUX_MUTEX_DEBUG_H
+
+/*
+ * Mutexes - debugging helpers:
+ */
+
+#define __DEBUG_MUTEX_INITIALIZER(lockname) \
+        , .held_list = LIST_HEAD_INIT(lockname.held_list), \
+          .name = #lockname , .magic = &lockname
+
+#define mutex_init(sem)         __mutex_init(sem, __FUNCTION__)
+
+extern void FASTCALL(mutex_destroy(struct mutex *lock));
+
+extern void mutex_debug_show_all_locks(void);
+extern void mutex_debug_show_held_locks(struct task_struct *filter);
+extern void mutex_debug_check_no_locks_held(struct task_struct *task);
+extern void mutex_debug_check_no_locks_freed(const void *from, const void *to);
+
+#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
new file mode 100644
index 000000000000..9bce0fee68d4
--- /dev/null
+++ b/include/linux/mutex.h
@@ -0,0 +1,119 @@
+/*
+ * Mutexes: blocking mutual exclusion locks
+ *
+ * started by Ingo Molnar:
+ *
+ *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains the main data structure and API definitions.
+ */
+#ifndef __LINUX_MUTEX_H
+#define __LINUX_MUTEX_H
+
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+
+#include <asm/atomic.h>
+
+/*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+ * - only one task can hold the mutex at a time
+ * - only the owner can unlock the mutex
+ * - multiple unlocks are not permitted
+ * - recursive locking is not permitted
+ * - a mutex object must be initialized via the API
+ * - a mutex object must not be initialized via memset or copying
+ * - task may not exit with mutex held
+ * - memory areas where held locks reside must not be freed
+ * - held mutexes must not be reinitialized
+ * - mutexes may not be used in irq contexts
+ *
+ * These semantics are fully enforced when DEBUG_MUTEXES is
+ * enabled. Furthermore, besides enforcing the above rules, the mutex
+ * debugging code also implements a number of additional features
+ * that make lock debugging easier and faster:
+ *
+ * - uses symbolic names of mutexes, whenever they are printed in debug output
+ * - point-of-acquire tracking, symbolic lookup of function names
+ * - list of all locks held in the system, printout of them
+ * - owner tracking
+ * - detects self-recursing locks and prints out all relevant info
+ * - detects multi-task circular deadlocks and prints out all affected
+ *   locks and tasks (and only those tasks)
+ */
+struct mutex {
+	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
+	atomic_t		count;
+	spinlock_t		wait_lock;
+	struct list_head	wait_list;
+#ifdef CONFIG_DEBUG_MUTEXES
+	struct thread_info	*owner;
+	struct list_head	held_list;
+	unsigned long		acquire_ip;
+	const char		*name;
+	void			*magic;
+#endif
+};
+
+/*
+ * This is the control structure for tasks blocked on mutex,
+ * which resides on the blocked task's kernel stack:
+ */
+struct mutex_waiter {
+	struct list_head	list;
+	struct task_struct	*task;
+#ifdef CONFIG_DEBUG_MUTEXES
+	struct mutex		*lock;
+	void			*magic;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_MUTEXES
+# include <linux/mutex-debug.h>
+#else
+# define __DEBUG_MUTEX_INITIALIZER(lockname)
+# define mutex_init(mutex)			__mutex_init(mutex, NULL)
+# define mutex_destroy(mutex)			do { } while (0)
+# define mutex_debug_show_all_locks()		do { } while (0)
+# define mutex_debug_show_held_locks(p)		do { } while (0)
+# define mutex_debug_check_no_locks_held(task)	do { } while (0)
+# define mutex_debug_check_no_locks_freed(from, to)	do { } while (0)
+#endif
+
+#define __MUTEX_INITIALIZER(lockname) \
+		{ .count = ATOMIC_INIT(1) \
+		, .wait_lock = SPIN_LOCK_UNLOCKED \
+		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
+		__DEBUG_MUTEX_INITIALIZER(lockname) }
+
+#define DEFINE_MUTEX(mutexname) \
+	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void fastcall __mutex_init(struct mutex *lock, const char *name);
+
+/***
+ * mutex_is_locked - is the mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline int fastcall mutex_is_locked(struct mutex *lock)
+{
+	return atomic_read(&lock->count) != 1;
+}
+
+/*
+ * See kernel/mutex.c for detailed documentation of these APIs.
+ * Also see Documentation/mutex-design.txt.
+ */
+extern void fastcall mutex_lock(struct mutex *lock);
+extern int fastcall mutex_lock_interruptible(struct mutex *lock);
+/*
+ * NOTE: mutex_trylock() follows the spin_trylock() convention,
+ *       not the down_trylock() convention!
+ */
+extern int fastcall mutex_trylock(struct mutex *lock);
+extern void fastcall mutex_unlock(struct mutex *lock);
+
+#endif
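
For orientation, a minimal usage sketch of the API declared above (the counter being protected is hypothetical). Note that mutex_trylock() returns 1 on success, per the spin_trylock() convention called out in the header:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(count_mutex);
static int count;

static void count_inc(void)
{
	mutex_lock(&count_mutex);
	count++;				/* only one task at a time here */
	mutex_unlock(&count_mutex);
}

static int count_inc_try(void)
{
	if (!mutex_trylock(&count_mutex))	/* 1 = acquired, 0 = busy */
		return -EBUSY;
	count++;
	mutex_unlock(&count_mutex);
	return 0;
}

Dynamically allocated mutexes use mutex_init() instead of DEFINE_MUTEX(); with DEBUG_MUTEXES enabled it records __FUNCTION__ as the lock's symbolic name.
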
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index bb842ea41033..0798b7781a6e 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -294,7 +294,7 @@ fill_post_wcc(struct svc_fh *fhp)
 /*
  * Lock a file handle/inode
  * NOTE: both fh_lock and fh_unlock are done "by hand" in
- * vfs.c:nfsd_rename as it needs to grab 2 i_sem's at once
+ * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once
  * so, any changes here should be reflected there.
  */
 static inline void
@@ -317,7 +317,7 @@ fh_lock(struct svc_fh *fhp)
 	}
 
 	inode = dentry->d_inode;
-	down(&inode->i_sem);
+	mutex_lock(&inode->i_mutex);
 	fill_pre_wcc(fhp);
 	fhp->fh_locked = 1;
 }
@@ -333,7 +333,7 @@ fh_unlock(struct svc_fh *fhp)
 
 	if (fhp->fh_locked) {
 		fill_post_wcc(fhp);
-		up(&fhp->fh_dentry->d_inode->i_sem);
+		mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
 		fhp->fh_locked = 0;
 	}
 }
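
The nfsd hunks follow the mechanical conversion applied throughout this merge: down()/up() on i_sem become mutex_lock()/mutex_unlock() on the new i_mutex. A hedged before/after sketch (the function is hypothetical):

static void touch_inode(struct inode *inode)
{
	mutex_lock(&inode->i_mutex);	/* was: down(&inode->i_sem); */
	/* ... update the inode ... */
	mutex_unlock(&inode->i_mutex);	/* was: up(&inode->i_sem); */
}

One trap in such conversions: down_trylock() returns 0 on success while mutex_trylock() returns 1, so trylock call sites must invert their tests.
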
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 1767073df26f..b12e59c75752 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -37,7 +37,7 @@ struct pipe_inode_info {
    memory allocation, whereas PIPE_BUF makes atomicity guarantees.  */
 #define PIPE_SIZE		PAGE_SIZE
 
-#define PIPE_SEM(inode)		(&(inode).i_sem)
+#define PIPE_MUTEX(inode)	(&(inode).i_mutex)
 #define PIPE_WAIT(inode)	(&(inode).i_pipe->wait)
 #define PIPE_READERS(inode)	((inode).i_pipe->readers)
 #define PIPE_WRITERS(inode)	((inode).i_pipe->writers)
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 001ab82df051..e276c5ba2bb7 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -1857,7 +1857,7 @@ void padd_item(char *item, int total_length, int length);
 #define GET_BLOCK_CREATE 1	/* add anything you need to find block */
 #define GET_BLOCK_NO_HOLE 2	/* return -ENOENT for file holes */
 #define GET_BLOCK_READ_DIRECT 4	/* read the tail if indirect item not found */
-#define GET_BLOCK_NO_ISEM 8	/* i_sem is not held, don't preallocate */
+#define GET_BLOCK_NO_IMUX 8	/* i_mutex is not held, don't preallocate */
 #define GET_BLOCK_NO_DANGLE 16	/* don't leave any transactions running */
 
 int restart_transaction(struct reiserfs_transaction_handle *th,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 78eb92ae4d94..85b53f87c703 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -817,6 +817,11 @@ struct task_struct {
 /* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
 	spinlock_t proc_lock;
 
+#ifdef CONFIG_DEBUG_MUTEXES
+	/* mutex deadlock detection */
+	struct mutex_waiter *blocked_on;
+#endif
+
 /* journalling filesystem info */
 	void *journal_info;
 
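
The blocked_on field added to task_struct gives the debug code a per-task record of the mutex_waiter a sleeping task is queued on, which is what makes the circular-deadlock detection advertised in mutex.h possible. A hedged, simplified sketch of how such a cycle walk could look (this is not the actual kernel/mutex-debug.c code):

/* Would 'task' blocking on 'lock' close a wait cycle? */
static int would_deadlock(struct task_struct *task, struct mutex *lock)
{
	while (lock && lock->owner) {	/* owner is tracked under DEBUG_MUTEXES */
		struct task_struct *owner = lock->owner->task;

		if (owner == task)
			return 1;	/* the wait chain leads back to us */
		if (!owner->blocked_on)
			return 0;	/* owner is runnable: no cycle */
		lock = owner->blocked_on->lock;
	}
	return 0;
}
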