path: root/include/linux
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:51:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:51:40 -0400
commit	6d5f0ebfc0be9cbfeaafdd9258d5fa24b7975a36 (patch)
tree	3b7a5851a3d9f02441e2dcbaf22785d131544544 /include/linux
parent	dbb885fecc1b1b35e93416bedd24d21bd20f60ed (diff)
parent	8acd91e8620836a56ff62028ed28ba629f2881a0 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
 "The main updates in this cycle were:

   - mutex MCS refactoring finishing touches: improve comments, refactor
     and clean up code, reduce debug data structure footprint, etc.

   - qrwlock finishing touches: remove old code, self-test updates.

   - small rwsem optimization

   - various smaller fixes/cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/lockdep: Revert qrwlock recusive stuff
  locking/rwsem: Avoid double checking before try acquiring write lock
  locking/rwsem: Move EXPORT_SYMBOL() lines to follow function definition
  locking/rwlock, x86: Delete unused asm/rwlock.h and rwlock.S
  locking/rwlock, x86: Clean up asm/spinlock*.h to remove old rwlock code
  locking/semaphore: Resolve some shadow warnings
  locking/selftest: Support queued rwlock
  locking/lockdep: Restrict the use of recursive read_lock() with qrwlock
  locking/spinlocks: Always evaluate the second argument of spin_lock_nested()
  locking/Documentation: Update locking/mutex-design.txt disadvantages
  locking/Documentation: Move locking related docs into Documentation/locking/
  locking/mutexes: Use MUTEX_SPIN_ON_OWNER when appropriate
  locking/mutexes: Refactor optimistic spinning code
  locking/mcs: Remove obsolete comment
  locking/mutexes: Document quick lock release when unlocking
  locking/mutexes: Standardize arguments in lock/unlock slowpaths
  locking: Remove deprecated smp_mb__() barriers
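One item in the series above, "Always evaluate the second argument of spin_lock_nested()", touches an annotation API whose usage looks like the following. This is a minimal sketch, not code from the merge: the struct node type and move_child() function are hypothetical; spin_lock_nested() and SINGLE_DEPTH_NESTING are the real kernel interfaces.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/*
 * Hypothetical pair of objects sharing one spinlock class; the subclass
 * tells lockdep that taking the child lock while holding the parent
 * lock is intentional nesting, not a potential deadlock.
 */
struct node {
	spinlock_t lock;
	struct node *child;
};

static void move_child(struct node *parent)
{
	spin_lock(&parent->lock);
	spin_lock_nested(&parent->child->lock, SINGLE_DEPTH_NESTING);
	/* ... operate on both nodes ... */
	spin_unlock(&parent->child->lock);
	spin_unlock(&parent->lock);
}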
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/atomic.h		36
-rw-r--r--	include/linux/bitops.h		20
-rw-r--r--	include/linux/lockdep.h		 2
-rw-r--r--	include/linux/mutex.h		 4
-rw-r--r--	include/linux/rwsem.h		 2
-rw-r--r--	include/linux/spinlock.h	 8
6 files changed, 11 insertions(+), 61 deletions(-)
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index fef3a809e7cf..5b08a8540ecf 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -3,42 +3,6 @@
 #define _LINUX_ATOMIC_H
 #include <asm/atomic.h>
 
-/*
- * Provide __deprecated wrappers for the new interface, avoid flag day changes.
- * We need the ugly external functions to break header recursion hell.
- */
-#ifndef smp_mb__before_atomic_inc
-static inline void __deprecated smp_mb__before_atomic_inc(void)
-{
-	extern void __smp_mb__before_atomic(void);
-	__smp_mb__before_atomic();
-}
-#endif
-
-#ifndef smp_mb__after_atomic_inc
-static inline void __deprecated smp_mb__after_atomic_inc(void)
-{
-	extern void __smp_mb__after_atomic(void);
-	__smp_mb__after_atomic();
-}
-#endif
-
-#ifndef smp_mb__before_atomic_dec
-static inline void __deprecated smp_mb__before_atomic_dec(void)
-{
-	extern void __smp_mb__before_atomic(void);
-	__smp_mb__before_atomic();
-}
-#endif
-
-#ifndef smp_mb__after_atomic_dec
-static inline void __deprecated smp_mb__after_atomic_dec(void)
-{
-	extern void __smp_mb__after_atomic(void);
-	__smp_mb__after_atomic();
-}
-#endif
-
 /**
  * atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
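The wrappers removed above were compatibility shims for the old barrier names; callers are expected to use smp_mb__before_atomic()/smp_mb__after_atomic() directly. A minimal sketch of the migration, assuming a hypothetical object with an atomic reference count:

#include <linux/atomic.h>

/* Hypothetical object; only the refcnt field matters here. */
struct obj {
	atomic_t refcnt;
};

static void obj_get(struct obj *o)
{
	/* Old (removed): smp_mb__after_atomic_inc(); */
	atomic_inc(&o->refcnt);
	smp_mb__after_atomic();	/* order the increment before later accesses */
}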
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index cbc5833fb221..be5fd38bd5a0 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -32,26 +32,6 @@ extern unsigned long __sw_hweight64(__u64 w);
  */
 #include <asm/bitops.h>
 
-/*
- * Provide __deprecated wrappers for the new interface, avoid flag day changes.
- * We need the ugly external functions to break header recursion hell.
- */
-#ifndef smp_mb__before_clear_bit
-static inline void __deprecated smp_mb__before_clear_bit(void)
-{
-	extern void __smp_mb__before_atomic(void);
-	__smp_mb__before_atomic();
-}
-#endif
-
-#ifndef smp_mb__after_clear_bit
-static inline void __deprecated smp_mb__after_clear_bit(void)
-{
-	extern void __smp_mb__after_atomic(void);
-	__smp_mb__after_atomic();
-}
-#endif
-
 #define for_each_set_bit(bit, addr, size) \
 	for ((bit) = find_first_bit((addr), (size)); \
 	     (bit) < (size); \
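The same rename applies to the bit-operation barriers removed above: smp_mb__before_clear_bit()/smp_mb__after_clear_bit() give way to the generic smp_mb__before_atomic()/smp_mb__after_atomic(). A minimal sketch, with a hypothetical flag bit and flags word (the extra <linux/atomic.h> include is assumed to pull in the barrier declarations):

#include <linux/bitops.h>
#include <linux/atomic.h>

#define BUSY_BIT	0	/* hypothetical flag bit */

static void mark_idle(unsigned long *flags)
{
	/* Old (removed): smp_mb__before_clear_bit(); */
	smp_mb__before_atomic();	/* order earlier stores before clearing the flag */
	clear_bit(BUSY_BIT, flags);
}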
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 72d6dea7fac1..74ab23176e9b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -4,7 +4,7 @@
  * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
- * see Documentation/lockdep-design.txt for more details.
+ * see Documentation/locking/lockdep-design.txt for more details.
  */
 #ifndef __LINUX_LOCKDEP_H
 #define __LINUX_LOCKDEP_H
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 8d5535c58cc2..cc31498fc526 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -52,7 +52,7 @@ struct mutex {
 	atomic_t		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
 	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -133,7 +133,7 @@ static inline int mutex_is_locked(struct mutex *lock)
 
 /*
  * See kernel/locking/mutex.c for detailed documentation of these APIs.
- * Also see Documentation/mutex-design.txt.
+ * Also see Documentation/locking/mutex-design.txt.
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 035d3c57fc8a..8f498cdde280 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -149,7 +149,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
  * static then another method for expressing nested locking is
  * the explicit definition of lock class keys and the use of
  * lockdep_set_class() at lock initialization time.
- * See Documentation/lockdep-design.txt for more details.)
+ * See Documentation/locking/lockdep-design.txt for more details.)
  */
 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3f2867ff0ced..262ba4ef9a8e 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -197,7 +197,13 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
 	 } while (0)
 #else
-# define raw_spin_lock_nested(lock, subclass)		_raw_spin_lock(lock)
+/*
+ * Always evaluate the 'subclass' argument to avoid that the compiler
+ * warns about set-but-not-used variables when building with
+ * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
+ */
+# define raw_spin_lock_nested(lock, subclass)		\
+	_raw_spin_lock(((void)(subclass), (lock)))
 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
 #endif
 
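The new expansion matters when CONFIG_DEBUG_LOCK_ALLOC is off: the old definition dropped the subclass argument entirely, so a caller that only computed a value for that argument tripped set-but-not-used warnings under W=1. A minimal sketch of such a caller; compute_subclass() and lock_shard() are hypothetical and stand in for any expression whose only use is the annotation:

static void lock_shard(raw_spinlock_t *lock, int shard)
{
	int subclass = compute_subclass(shard);	/* hypothetical helper */

	/*
	 * Old expansion: _raw_spin_lock(lock) -- 'subclass' never
	 * evaluated, warning with CONFIG_DEBUG_LOCK_ALLOC=n and W=1.
	 * New expansion: _raw_spin_lock(((void)(subclass), (lock))) --
	 * 'subclass' is evaluated and discarded, no warning.
	 */
	raw_spin_lock_nested(lock, subclass);
	/* ... critical section ... */
	raw_spin_unlock(lock);
}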