Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bitops/atomic.h  |  10
-rw-r--r--  include/linux/hrtimer.h              |   2
-rw-r--r--  include/linux/init_task.h            |   2
-rw-r--r--  include/linux/irq.h                  |   2
-rw-r--r--  include/linux/perf_event.h           |   2
-rw-r--r--  include/linux/plist.h                |  43
-rw-r--r--  include/linux/rtmutex.h              |   6
-rw-r--r--  include/linux/rwlock.h               | 125
-rw-r--r--  include/linux/rwlock_api_smp.h       | 282
-rw-r--r--  include/linux/rwlock_types.h         |  56
-rw-r--r--  include/linux/sched.h                |   2
-rw-r--r--  include/linux/spinlock.h             | 377
-rw-r--r--  include/linux/spinlock_api_smp.h     | 360
-rw-r--r--  include/linux/spinlock_api_up.h      |  66
-rw-r--r--  include/linux/spinlock_types.h       |  92
-rw-r--r--  include/linux/spinlock_types_up.h    |  12
-rw-r--r--  include/linux/spinlock_up.h          |  42
17 files changed, 891 insertions, 590 deletions
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index c8946465e63a..ecc44a8e2b44 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -15,19 +15,19 @@
 # define ATOMIC_HASH_SIZE 4
 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
-	__raw_spin_lock(s);			\
+	arch_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);		\
-	__raw_spin_unlock(s);				\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)
 
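For orientation: this header implements "atomic" bitops for architectures without native atomic instructions by hashing the operand's address into the small pool of arch_spinlock_t locks declared above. A minimal sketch of how a bitop sits on top of _atomic_spin_lock_irqsave(), modeled on the set_bit() found later in this same header (paraphrased for illustration, not part of the patch; BIT_MASK()/BIT_WORD() are the usual <linux/bitops.h> helpers):

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	/* hashing the word's address picks one lock from the pool,
	 * so bitops on unrelated words rarely contend */
	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}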
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index af634e95871d..5d86fb2309d2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -169,7 +169,7 @@ struct hrtimer_clock_base {
  * @max_hang_time:	Maximum time spent in hrtimer_interrupt
  */
 struct hrtimer_cpu_base {
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
 #ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t				expires_next;
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 8ed0abf06f89..5ed8b9c50355 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -170,7 +170,7 @@ extern struct cred init_cred;
 	.alloc_lock	= __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),		\
 	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
 	.fs_excl	= ATOMIC_INIT(0),				\
-	.pi_lock	= __SPIN_LOCK_UNLOCKED(tsk.pi_lock),		\
+	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),	\
 	.timer_slack_ns = 50000, /* 50 usec default slack */		\
 	.pids = {							\
 		[PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),		\
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a287cfc0b1a6..451481c082b5 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -192,7 +192,7 @@ struct irq_desc {
 	unsigned int		irq_count;	/* For detecting broken IRQs */
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
 	unsigned int		irqs_unhandled;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 #ifdef CONFIG_SMP
 	cpumask_var_t		affinity;
 	unsigned int		node;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 64a53f74c9a9..da7bdc23f279 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -681,7 +681,7 @@ struct perf_event_context {
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
 	 */
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	/*
 	 * Protect the list of events. Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 45926d77d6ac..8227f717c70f 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
 	struct list_head prio_list;
 	struct list_head node_list;
 #ifdef CONFIG_DEBUG_PI_LIST
-	spinlock_t *lock;
+	raw_spinlock_t *rawlock;
+	spinlock_t *spinlock;
 #endif
 };
 
@@ -91,9 +92,11 @@ struct plist_node {
 };
 
 #ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock)		.lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock)		.spinlock = _lock
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)	.rawlock = _lock
 #else
 # define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
 #endif
 
 #define _PLIST_HEAD_INIT(head)				\
@@ -107,11 +110,22 @@ struct plist_node {
  */
 #define PLIST_HEAD_INIT(head, _lock)			\
 {							\
 	_PLIST_HEAD_INIT(head),				\
 	PLIST_HEAD_LOCK_INIT(&(_lock))			\
 }
 
 /**
+ * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
+ * @head:	struct plist_head variable name
+ * @_lock:	lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_RAW(head, _lock)		\
+{							\
+	_PLIST_HEAD_INIT(head),				\
+	PLIST_HEAD_LOCK_INIT_RAW(&(_lock))		\
+}
+
+/**
  * PLIST_NODE_INIT - static struct plist_node initializer
  * @node:	struct plist_node variable name
  * @__prio:	initial node priority
@@ -119,13 +133,13 @@ struct plist_node {
 #define PLIST_NODE_INIT(node, __prio)			\
 {							\
 	.prio  = (__prio),				\
 	.plist = { _PLIST_HEAD_INIT((node).plist) },	\
 }
 
 /**
  * plist_head_init - dynamic struct plist_head initializer
  * @head:	&struct plist_head pointer
- * @lock:	list spinlock, remembered for debugging
+ * @lock:	spinlock protecting the list (debugging)
  */
 static inline void
 plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
 	INIT_LIST_HEAD(&head->prio_list);
 	INIT_LIST_HEAD(&head->node_list);
 #ifdef CONFIG_DEBUG_PI_LIST
-	head->lock = lock;
+	head->spinlock = lock;
+	head->rawlock = NULL;
+#endif
+}
+
+/**
+ * plist_head_init_raw - dynamic struct plist_head initializer
+ * @head:	&struct plist_head pointer
+ * @lock:	raw_spinlock protecting the list (debugging)
+ */
+static inline void
+plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
+{
+	INIT_LIST_HEAD(&head->prio_list);
+	INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+	head->rawlock = lock;
+	head->spinlock = NULL;
 #endif
 }
 
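A short usage sketch for the new raw variants (not part of the patch; the names are hypothetical, and the DEFINE_RAW_SPINLOCK() helper is assumed from the reworked spinlock_types.h, which is in the diffstat but not shown in this section):

static DEFINE_RAW_SPINLOCK(my_lock);
static struct plist_head my_head = PLIST_HEAD_INIT_RAW(my_head, my_lock);

static void example(void)
{
	struct plist_head dyn_head;

	/* dynamic counterpart; the lock pointer is only recorded
	 * when CONFIG_DEBUG_PI_LIST is enabled, per the hunk above */
	plist_head_init_raw(&dyn_head, &my_lock);
}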
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index f19b00b7d530..281d8fd775e8 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -24,7 +24,7 @@
  * @owner:	the mutex owner
  */
 struct rt_mutex {
-	spinlock_t		wait_lock;
+	raw_spinlock_t		wait_lock;
 	struct plist_head	wait_list;
 	struct task_struct	*owner;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -63,8 +63,8 @@ struct hrtimer_sleeper;
 #endif
 
 #define __RT_MUTEX_INITIALIZER(mutexname) \
-	{ .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
-	, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+	, .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
 	, .owner = NULL \
 	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
 
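Since wait_lock is now a raw_spinlock_t, static initialization must pair __RAW_SPIN_LOCK_UNLOCKED with the PLIST_HEAD_INIT_RAW variant added above, which is exactly what the initializer does. A sketch of what a user sees (my_rtm is a hypothetical name; DEFINE_RT_MUTEX() in this same header expands to this initializer):

static struct rt_mutex my_rtm = __RT_MUTEX_INITIALIZER(my_rtm);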
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
new file mode 100644
index 000000000000..71e0b00b6f2c
--- /dev/null
+++ b/include/linux/rwlock.h
@@ -0,0 +1,125 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+  extern void __rwlock_init(rwlock_t *lock, const char *name,
+			    struct lock_class_key *key);
+# define rwlock_init(lock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__rwlock_init((lock), #lock, &__key);			\
+} while (0)
+#else
+# define rwlock_init(lock)					\
+	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void do_raw_read_lock(rwlock_t *lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock);
+ extern void do_raw_write_lock(rwlock_t *lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock);
+#else
+# define do_raw_read_lock(rwlock)	arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock_flags(lock, flags) \
+		arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock)	arch_read_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_lock(rwlock)	arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_write_lock_flags(lock, flags) \
+		arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock)	arch_write_unlock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock)		arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)		arch_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define read_trylock(lock)	__cond_lock(lock, _raw_read_trylock(lock))
+#define write_trylock(lock)	__cond_lock(lock, _raw_write_trylock(lock))
+
+#define write_lock(lock)	_raw_write_lock(lock)
+#define read_lock(lock)		_raw_read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = _raw_read_lock_irqsave(lock);	\
+	} while (0)
+#define write_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = _raw_write_lock_irqsave(lock);	\
+	} while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_raw_read_lock_irqsave(lock, flags);	\
+	} while (0)
+#define write_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_raw_write_lock_irqsave(lock, flags);	\
+	} while (0)
+
+#endif
+
+#define read_lock_irq(lock)		_raw_read_lock_irq(lock)
+#define read_lock_bh(lock)		_raw_read_lock_bh(lock)
+#define write_lock_irq(lock)		_raw_write_lock_irq(lock)
+#define write_lock_bh(lock)		_raw_write_lock_bh(lock)
+#define read_unlock(lock)		_raw_read_unlock(lock)
+#define write_unlock(lock)		_raw_write_unlock(lock)
+#define read_unlock_irq(lock)		_raw_read_unlock_irq(lock)
+#define write_unlock_irq(lock)		_raw_write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_raw_read_unlock_irqrestore(lock, flags); \
+	} while (0)
+#define read_unlock_bh(lock)		_raw_read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_raw_write_unlock_irqrestore(lock, flags); \
+	} while (0)
+#define write_unlock_bh(lock)		_raw_write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+	local_irq_save(flags); \
+	write_trylock(lock) ? \
+	1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#endif /* __LINUX_RWLOCK_H */
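For reference, a usage sketch of the API this new header exposes (not part of the patch; counter_lock/counter are hypothetical names, and DEFINE_RWLOCK() comes from rwlock_types.h below):

static DEFINE_RWLOCK(counter_lock);
static unsigned long counter;

static unsigned long read_counter(void)
{
	unsigned long val;

	read_lock(&counter_lock);	/* many readers may enter at once */
	val = counter;
	read_unlock(&counter_lock);
	return val;
}

static void bump_counter(void)
{
	unsigned long flags;

	/* a writer excludes readers and other writers; the _irqsave
	 * form also guards against IRQ handlers taking the lock on
	 * this CPU */
	write_lock_irqsave(&counter_lock, flags);
	counter++;
	write_unlock_irqrestore(&counter_lock, flags);
}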
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
new file mode 100644
index 000000000000..9c9f0495d37c
--- /dev/null
+++ b/include/linux/rwlock_api_smp.h
@@ -0,0 +1,282 @@
+#ifndef __LINUX_RWLOCK_API_SMP_H
+#define __LINUX_RWLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/rwlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+void __lockfunc _raw_read_lock(rwlock_t *lock)		__acquires(lock);
+void __lockfunc _raw_write_lock(rwlock_t *lock)		__acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock)	__acquires(lock);
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock)	__acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock)	__acquires(lock);
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock)	__acquires(lock);
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
+							__acquires(lock);
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
+							__acquires(lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock)	__releases(lock);
+void __lockfunc _raw_write_unlock(rwlock_t *lock)	__releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)	__releases(lock);
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)	__releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)	__releases(lock);
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)	__releases(lock);
+void __lockfunc
+_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+							__releases(lock);
+void __lockfunc
+_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+							__releases(lock);
+
+#ifdef CONFIG_INLINE_READ_LOCK
+#define _raw_read_lock(lock) __raw_read_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK
+#define _raw_write_lock(lock) __raw_write_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_BH
+#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_BH
+#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQ
+#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
+#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
+#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_TRYLOCK
+#define _raw_read_trylock(lock) __raw_read_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_TRYLOCK
+#define _raw_write_trylock(lock) __raw_write_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK
+#define _raw_read_unlock(lock) __raw_read_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK
+#define _raw_write_unlock(lock) __raw_write_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_BH
+#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
+#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
+#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+#define _raw_read_unlock_irqrestore(lock, flags) \
+	__raw_read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+#define _raw_write_unlock_irqrestore(lock, flags) \
+	__raw_write_unlock_irqrestore(lock, flags)
+#endif
+
+static inline int __raw_read_trylock(rwlock_t *lock)
+{
+	preempt_disable();
+	if (do_raw_read_trylock(lock)) {
+		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable();
+	return 0;
+}
+
+static inline int __raw_write_trylock(rwlock_t *lock)
+{
+	preempt_disable();
+	if (do_raw_write_trylock(lock)) {
+		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable();
+	return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __raw_read_lock(rwlock_t *lock)
+{
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
+			     do_raw_read_lock_flags, &flags);
+	return flags;
+}
+
+static inline void __raw_read_lock_irq(rwlock_t *lock)
+{
+	local_irq_disable();
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline void __raw_read_lock_bh(rwlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
+			     do_raw_write_lock_flags, &flags);
+	return flags;
+}
+
+static inline void __raw_write_lock_irq(rwlock_t *lock)
+{
+	local_irq_disable();
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock_bh(rwlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock(rwlock_t *lock)
+{
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+#endif /* CONFIG_PREEMPT */
+
+static inline void __raw_write_unlock(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_write_unlock(lock);
+	preempt_enable();
+}
+
+static inline void __raw_read_unlock(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_read_unlock(lock);
+	preempt_enable();
+}
+
+static inline void
+__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_read_unlock(lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+static inline void __raw_read_unlock_irq(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_read_unlock(lock);
+	local_irq_enable();
+	preempt_enable();
+}
+
+static inline void __raw_read_unlock_bh(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_read_unlock(lock);
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
+					     unsigned long flags)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_write_unlock(lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+static inline void __raw_write_unlock_irq(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_write_unlock(lock);
+	local_irq_enable();
+	preempt_enable();
+}
+
+static inline void __raw_write_unlock_bh(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_write_unlock(lock);
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+#endif /* __LINUX_RWLOCK_API_SMP_H */
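The LOCK_CONTENDED() helper used throughout the lock slow paths above is defined in <linux/lockdep.h>; simplified to its CONFIG_LOCK_STAT shape it is roughly (a sketch for orientation, not the verbatim definition):

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		/* fast path failed: record the contention event */ \
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

i.e. contention is only recorded when the trylock fast path fails, so the statistics cost nothing on the uncontended path.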
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
new file mode 100644
index 000000000000..bd31808c7d8e
--- /dev/null
+++ b/include/linux/rwlock_types.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_RWLOCK_TYPES_H
+#define __LINUX_RWLOCK_TYPES_H
+
+/*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ *				  and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+typedef struct {
+	arch_rwlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+	unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned int magic, owner_cpu;
+	void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC		0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define __RW_LOCK_UNLOCKED(lockname)					\
+	(rwlock_t)	{	.raw_lock = __ARCH_RW_LOCK_UNLOCKED,	\
+				.magic = RWLOCK_MAGIC,			\
+				.owner = SPINLOCK_OWNER_INIT,		\
+				.owner_cpu = -1,			\
+				RW_DEP_MAP_INIT(lockname) }
+#else
+#define __RW_LOCK_UNLOCKED(lockname)					\
+	(rwlock_t)	{	.raw_lock = __ARCH_RW_LOCK_UNLOCKED,	\
+				RW_DEP_MAP_INIT(lockname) }
+#endif
+
+/*
+ * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ *
+ * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
+ */
+#define RW_LOCK_UNLOCKED	__RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_RWLOCK_TYPES_H */
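A sketch of the two initialization styles this header supports (hypothetical lock names; per the comment above, bare RW_LOCK_UNLOCKED assignment is deprecated because it defeats lockdep state tracking):

static DEFINE_RWLOCK(good_lock);			/* preferred */
static rwlock_t also_ok = __RW_LOCK_UNLOCKED(also_ok);	/* explicit form */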
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7d388494f45d..5c858f38e81a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1409,7 +1409,7 @@ struct task_struct {
 #endif
 
 	/* Protection of the PI data structures: */
-	spinlock_t		pi_lock;
+	raw_spinlock_t		pi_lock;
 
 #ifdef CONFIG_RT_MUTEXES
 	/* PI waiters blocked on a rt_mutex held by this task */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 71dccfeb0d88..86088213334a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,13 +8,13 @@
  *
  * on SMP builds:
  *
- *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
  *                        initializers
  *
  *  linux/spinlock_types.h:
  *                        defines the generic type and initializers
  *
- *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
+ *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
  *                        implementations, mostly inline assembly code
  *
  *   (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
  *                        defines the generic type and initializers
  *
  *  linux/spinlock_up.h:
- *                        contains the __raw_spin_*()/etc. version of UP
+ *                        contains the arch_spin_*()/etc. version of UP
  *                        builds. (which are NOPs on non-debug, non-preempt
  *                        builds)
  *
@@ -75,12 +75,12 @@
 #define __lockfunc __attribute__((section(".spinlock.text")))
 
 /*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
  */
 #include <linux/spinlock_types.h>
 
 /*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
  */
 #ifdef CONFIG_SMP
 # include <asm/spinlock.h>
@@ -89,45 +89,31 @@
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-  extern void __spin_lock_init(spinlock_t *lock, const char *name,
+  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 			       struct lock_class_key *key);
-# define spin_lock_init(lock)					\
+# define raw_spin_lock_init(lock)				\
 do {								\
 	static struct lock_class_key __key;			\
 								\
-	__spin_lock_init((lock), #lock, &__key);		\
+	__raw_spin_lock_init((lock), #lock, &__key);		\
 } while (0)
 
 #else
-# define spin_lock_init(lock)					\
-	do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock)				\
+	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-  extern void __rwlock_init(rwlock_t *lock, const char *name,
-			    struct lock_class_key *key);
-# define rwlock_init(lock)					\
-do {								\
-	static struct lock_class_key __key;			\
-								\
-	__rwlock_init((lock), #lock, &__key);			\
-} while (0)
-#else
-# define rwlock_init(lock)					\
-	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
-#endif
-
-#define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
 
 #ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
 #else
 
-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
 #else
-#define spin_is_contended(lock)	(((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#define raw_spin_is_contended(lock)	(((void)(lock), 0))
+#endif /*arch_spin_is_contended*/
 #endif
 
 /* The lock does not imply full memory barrier. */
@@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #endif
 
 /**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
  */
-#define spin_unlock_wait(lock)	__raw_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock);
 #else
-# define _raw_spin_lock(lock)		__raw_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
-		__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_read_lock(rwlock)		__raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
-		__raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock)	__raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
-		__raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock)	__raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+{
+	arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+	arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+{
+	arch_spin_unlock(&lock->raw_lock);
+}
 #endif
 
-#define read_can_lock(rwlock)		__raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock)		__raw_write_can_lock(&(rwlock)->raw_lock)
-
 /*
- * Define the various spin_lock and rw_lock methods. Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
- * methods are defined as nops in the case they are not required.
+ * Define the various spin_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops in the case they are not
+ * required.
  */
-#define spin_trylock(lock)		__cond_lock(lock, _spin_trylock(lock))
-#define read_trylock(lock)		__cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock)		__cond_lock(lock, _write_trylock(lock))
+#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
 
-#define spin_lock(lock)			_spin_lock(lock)
+#define raw_spin_lock(lock)	_raw_spin_lock(lock)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock)				\
+# define raw_spin_lock_nested(lock, subclass) \
+	_raw_spin_lock_nested(lock, subclass)
+
+# define raw_spin_lock_nest_lock(lock, nest_lock)			\
 	 do {								\
 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
-		 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
+		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
 	 } while (0)
 #else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass)		_raw_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
 #endif
 
-#define write_lock(lock)		_write_lock(lock)
-#define read_lock(lock)			_read_lock(lock)
-
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
-#define spin_lock_irqsave(lock, flags)			\
+#define raw_spin_lock_irqsave(lock, flags)		\
 	do {						\
 		typecheck(unsigned long, flags);	\
-		flags = _spin_lock_irqsave(lock);	\
-	} while (0)
-#define read_lock_irqsave(lock, flags)			\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		flags = _read_lock_irqsave(lock);	\
-	} while (0)
-#define write_lock_irqsave(lock, flags)			\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		flags = _write_lock_irqsave(lock);	\
+		flags = _raw_spin_lock_irqsave(lock);	\
 	} while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
 	do {								\
 		typecheck(unsigned long, flags);			\
-		flags = _spin_lock_irqsave_nested(lock, subclass);	\
+		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
 	} while (0)
 #else
-#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
 	do {								\
 		typecheck(unsigned long, flags);			\
-		flags = _spin_lock_irqsave(lock);			\
+		flags = _raw_spin_lock_irqsave(lock);			\
 	} while (0)
 #endif
 
 #else
 
-#define spin_lock_irqsave(lock, flags)			\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		_spin_lock_irqsave(lock, flags);	\
-	} while (0)
-#define read_lock_irqsave(lock, flags)			\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		_read_lock_irqsave(lock, flags);	\
-	} while (0)
-#define write_lock_irqsave(lock, flags)			\
+#define raw_spin_lock_irqsave(lock, flags)		\
 	do {						\
 		typecheck(unsigned long, flags);	\
-		_write_lock_irqsave(lock, flags);	\
+		_raw_spin_lock_irqsave(lock, flags);	\
 	} while (0)
-#define spin_lock_irqsave_nested(lock, flags, subclass)	\
-	spin_lock_irqsave(lock, flags)
 
-#endif
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
+	raw_spin_lock_irqsave(lock, flags)
 
-#define spin_lock_irq(lock)		_spin_lock_irq(lock)
-#define spin_lock_bh(lock)		_spin_lock_bh(lock)
-#define read_lock_irq(lock)		_read_lock_irq(lock)
-#define read_lock_bh(lock)		_read_lock_bh(lock)
-#define write_lock_irq(lock)		_write_lock_irq(lock)
-#define write_lock_bh(lock)		_write_lock_bh(lock)
-#define spin_unlock(lock)		_spin_unlock(lock)
-#define read_unlock(lock)		_read_unlock(lock)
-#define write_unlock(lock)		_write_unlock(lock)
-#define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
-#define read_unlock_irq(lock)		_read_unlock_irq(lock)
-#define write_unlock_irq(lock)		_write_unlock_irq(lock)
-
-#define spin_unlock_irqrestore(lock, flags)		\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		_spin_unlock_irqrestore(lock, flags);	\
-	} while (0)
-#define spin_unlock_bh(lock)		_spin_unlock_bh(lock)
+#endif
 
-#define read_unlock_irqrestore(lock, flags)		\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		_read_unlock_irqrestore(lock, flags);	\
-	} while (0)
-#define read_unlock_bh(lock)		_read_unlock_bh(lock)
+#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
+#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
 
-#define write_unlock_irqrestore(lock, flags)		\
+#define raw_spin_unlock_irqrestore(lock, flags)		\
 	do {							\
 		typecheck(unsigned long, flags);		\
-		_write_unlock_irqrestore(lock, flags);		\
+		_raw_spin_unlock_irqrestore(lock, flags);	\
 	} while (0)
-#define write_unlock_bh(lock)		_write_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
 
-#define spin_trylock_bh(lock)	__cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) \
+	__cond_lock(lock, _raw_spin_trylock_bh(lock))
 
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
 ({ \
 	local_irq_disable(); \
-	spin_trylock(lock) ? \
+	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_enable(); 0;  }); \
 })
 
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
 ({ \
 	local_irq_save(flags); \
-	spin_trylock(lock) ? \
+	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-#define write_trylock_irqsave(lock, flags) \
-({ \
-	local_irq_save(flags); \
-	write_trylock(lock) ? \
-	1 : ({ local_irq_restore(flags); 0; }); \
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+	return &lock->rlock;
+}
+
+#define spin_lock_init(_lock)				\
+do {							\
+	spinlock_check(_lock);				\
+	raw_spin_lock_init(&(_lock)->rlock);		\
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+	raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+	return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass)			\
+do {								\
+	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock)				\
+do {									\
+	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+	raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags)				\
+do {								\
+	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+do {									\
+	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+	raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+	raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+	return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+	return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags)			\
+({ \
+	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+	raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+	return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+	return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+	return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+	assert_raw_spin_locked(&lock->rlock);
+}
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
@@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock)	(!spin_is_locked(lock))
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
 #endif /* __LINUX_SPINLOCK_H */
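To summarize the layering this rework establishes (a sketch, not part of the patch; dev_lock/hw_lock are hypothetical, and DEFINE_RAW_SPINLOCK() is assumed from the reworked spinlock_types.h): raw_spinlock_t is always a true spinning lock, while spinlock_t wraps one in its ->rlock member (see spinlock_check() above) so that a PREEMPT_RT tree can later substitute a sleeping lock without touching callers.

static DEFINE_SPINLOCK(dev_lock);	/* ordinary code keeps using spinlock_t */
static DEFINE_RAW_SPINLOCK(hw_lock);	/* core/low-level code that must spin */

static void example(void)
{
	unsigned long flags;

	spin_lock(&dev_lock);		/* forwards to raw_spin_lock(&dev_lock.rlock) */
	spin_unlock(&dev_lock);

	raw_spin_lock_irqsave(&hw_lock, flags);
	raw_spin_unlock_irqrestore(&hw_lock, flags);
}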
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8264a7f459bc..e253ccd7a604 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,165 +17,76 @@
 
 int in_lock_functions(unsigned long addr);
 
-#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
+#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))
 
-void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
 							__acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
-							__acquires(lock);
-void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+void __lockfunc
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
+							__acquires(lock);
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
+							__acquires(lock);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+							__acquires(lock);
+unsigned long __lockfunc
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
 							__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
-							__acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
-							__acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
-							__acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
-void __lockfunc _read_unlock(rwlock_t *lock)		__releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock)		__releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
-							__releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-							__releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-							__releases(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+							__releases(lock);
 
 #ifdef CONFIG_INLINE_SPIN_LOCK
-#define _spin_lock(lock) __spin_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_LOCK_BH
-#define _spin_lock_bh(lock) __spin_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
-#define _spin_lock_irq(lock) __spin_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_TRYLOCK
-#define _spin_trylock(lock) __spin_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
-#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK
-#define _spin_unlock(lock) __spin_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
-#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
 #endif
 
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	preempt_disable();
-	if (_raw_spin_trylock(lock)) {
+	if (do_raw_spin_trylock(lock)) {
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock)
 	return 0;
 }
 
-static inline int __read_trylock(rwlock_t *lock)
-{
-	preempt_disable();
-	if (_raw_read_trylock(lock)) {
-		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-	preempt_enable();
-	return 0;
-}
-
-static inline int __write_trylock(rwlock_t *lock)
-{
-	preempt_disable();
-	if (_raw_write_trylock(lock)) {
-		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-	preempt_enable();
-	return 0;
-}
-
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock)
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
-static inline void __read_lock(rwlock_t *lock)
-{
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
 {
 	unsigned long flags;
 
@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
228 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 110 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
229 /* 111 /*
230 * On lockdep we dont want the hand-coded irq-enable of 112 * On lockdep we dont want the hand-coded irq-enable of
231 * _raw_spin_lock_flags() code, because lockdep assumes 113 * do_raw_spin_lock_flags() code, because lockdep assumes
232 * that interrupts are not re-enabled during lock-acquire: 114 * that interrupts are not re-enabled during lock-acquire:
233 */ 115 */
234#ifdef CONFIG_LOCKDEP 116#ifdef CONFIG_LOCKDEP
235 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 117 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
236#else 118#else
237 _raw_spin_lock_flags(lock, &flags); 119 do_raw_spin_lock_flags(lock, &flags);
238#endif 120#endif
239 return flags; 121 return flags;
240} 122}
241 123
242static inline void __spin_lock_irq(spinlock_t *lock) 124static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
243{ 125{
244 local_irq_disable(); 126 local_irq_disable();
245 preempt_disable(); 127 preempt_disable();
246 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 128 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
247 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 129 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
248} 130}
249 131
250static inline void __spin_lock_bh(spinlock_t *lock) 132static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
251{ 133{
252 local_bh_disable(); 134 local_bh_disable();
253 preempt_disable(); 135 preempt_disable();
254 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 136 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
255 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 137 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
256}
257
258static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
259{
260 unsigned long flags;
261
262 local_irq_save(flags);
263 preempt_disable();
264 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
265 LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
266 _raw_read_lock_flags, &flags);
267 return flags;
268}
269
270static inline void __read_lock_irq(rwlock_t *lock)
271{
272 local_irq_disable();
273 preempt_disable();
274 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
275 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
276}
277
278static inline void __read_lock_bh(rwlock_t *lock)
279{
280 local_bh_disable();
281 preempt_disable();
282 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
283 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
284}
285
286static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
287{
288 unsigned long flags;
289
290 local_irq_save(flags);
291 preempt_disable();
292 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
293 LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
294 _raw_write_lock_flags, &flags);
295 return flags;
296}
297
298static inline void __write_lock_irq(rwlock_t *lock)
299{
300 local_irq_disable();
301 preempt_disable();
302 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
303 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
304} 138}
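
The _bh variants exist so a lock can be shared with softirq context: bottom halves are fenced off on this CPU before the lock is taken, otherwise a softirq firing mid-critical-section could spin on the same lock forever. A loose userspace analogue of that ordering, with a blocked signal standing in for softirqs:

        #include <signal.h>
        #include <stdio.h>

        int main(void)
        {
                sigset_t block, old;

                sigemptyset(&block);
                sigaddset(&block, SIGALRM);

                sigprocmask(SIG_BLOCK, &block, &old);   /* plays local_bh_disable() */
                /* ... critical section also reachable from the SIGALRM handler ... */
                sigprocmask(SIG_SETMASK, &old, NULL);   /* plays local_bh_enable() */

                puts("critical section done");
                return 0;
        }
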
305 139
306static inline void __write_lock_bh(rwlock_t *lock) 140static inline void __raw_spin_lock(raw_spinlock_t *lock)
307{
308 local_bh_disable();
309 preempt_disable();
310 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
311 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
312}
313
314static inline void __spin_lock(spinlock_t *lock)
315{ 141{
316 preempt_disable(); 142 preempt_disable();
317 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 143 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
318 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 144 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
319}
320
321static inline void __write_lock(rwlock_t *lock)
322{
323 preempt_disable();
324 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
325 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
326} 145}
327 146
328#endif /* CONFIG_PREEMPT */ 147#endif /* CONFIG_PREEMPT */
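
(The closing comment above is stale in the source: this #endif matches the !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC guard opened earlier, not a CONFIG_PREEMPT one.) All the lock-side functions funnel through LOCK_CONTENDED(), which expands to roughly a trylock fast path with the blocking acquire as fallback, so that lockstat can attribute contention. A simplified model of that shape, with hypothetical do_trylock()/do_lock() stubs and the accounting hooks shown only as comments:

        #include <stdio.h>

        static int lock_word;                   /* 0 = unlocked, 1 = held */

        static int do_trylock(void)
        {
                if (lock_word)
                        return 0;
                lock_word = 1;
                return 1;
        }

        static void do_lock(void)
        {
                while (!do_trylock())
                        ;                       /* spin; a real lock would relax the CPU */
        }

        /* Roughly the shape LOCK_CONTENDED(lock, do_raw_spin_trylock,
         * do_raw_spin_lock) takes when contention accounting is built in. */
        static void lock_contended_model(void)
        {
                if (!do_trylock()) {
                        /* lock_contended(&lock->dep_map, ...) recorded here */
                        do_lock();
                        /* lock_acquired(&lock->dep_map, ...) recorded here */
                }
        }

        int main(void)
        {
                lock_contended_model();
                printf("lock_word = %d\n", lock_word);
                return 0;
        }
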
329 148
330static inline void __spin_unlock(spinlock_t *lock) 149static inline void __raw_spin_unlock(raw_spinlock_t *lock)
331{ 150{
332 spin_release(&lock->dep_map, 1, _RET_IP_); 151 spin_release(&lock->dep_map, 1, _RET_IP_);
333 _raw_spin_unlock(lock); 152 do_raw_spin_unlock(lock);
334 preempt_enable();
335}
336
337static inline void __write_unlock(rwlock_t *lock)
338{
339 rwlock_release(&lock->dep_map, 1, _RET_IP_);
340 _raw_write_unlock(lock);
341 preempt_enable();
342}
343
344static inline void __read_unlock(rwlock_t *lock)
345{
346 rwlock_release(&lock->dep_map, 1, _RET_IP_);
347 _raw_read_unlock(lock);
348 preempt_enable(); 153 preempt_enable();
349} 154}
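
The unlock order is deliberate: the lockdep release happens while the lock is still held, the lock word is dropped next, and preemption is re-enabled last, since a preempt count falling to zero is precisely where the scheduler may switch away. A toy, single-threaded model of that counting:

        #include <assert.h>
        #include <stdio.h>

        static int preempt_count;               /* models the per-CPU preempt counter */

        static void preempt_disable_model(void)
        {
                preempt_count++;
        }

        static void preempt_enable_model(void)
        {
                assert(preempt_count > 0);
                if (--preempt_count == 0) {
                        /* the kernel checks need_resched() here and may schedule() */
                }
        }

        int main(void)
        {
                preempt_disable_model();        /* taken in __raw_spin_lock() */
                /* ... critical section; the lock is dropped before enabling ... */
                preempt_enable_model();         /* last step of __raw_spin_unlock() */
                printf("preempt_count = %d\n", preempt_count);
                return 0;
        }
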
350 155
351static inline void __spin_unlock_irqrestore(spinlock_t *lock, 156static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
352 unsigned long flags) 157 unsigned long flags)
353{ 158{
354 spin_release(&lock->dep_map, 1, _RET_IP_); 159 spin_release(&lock->dep_map, 1, _RET_IP_);
355 _raw_spin_unlock(lock); 160 do_raw_spin_unlock(lock);
356 local_irq_restore(flags); 161 local_irq_restore(flags);
357 preempt_enable(); 162 preempt_enable();
358} 163}
359 164
360static inline void __spin_unlock_irq(spinlock_t *lock) 165static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
361{ 166{
362 spin_release(&lock->dep_map, 1, _RET_IP_); 167 spin_release(&lock->dep_map, 1, _RET_IP_);
363 _raw_spin_unlock(lock); 168 do_raw_spin_unlock(lock);
364 local_irq_enable(); 169 local_irq_enable();
365 preempt_enable(); 170 preempt_enable();
366} 171}
367 172
368static inline void __spin_unlock_bh(spinlock_t *lock) 173static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
369{ 174{
370 spin_release(&lock->dep_map, 1, _RET_IP_); 175 spin_release(&lock->dep_map, 1, _RET_IP_);
371 _raw_spin_unlock(lock); 176 do_raw_spin_unlock(lock);
372 preempt_enable_no_resched();
373 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
374}
375
376static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
377{
378 rwlock_release(&lock->dep_map, 1, _RET_IP_);
379 _raw_read_unlock(lock);
380 local_irq_restore(flags);
381 preempt_enable();
382}
383
384static inline void __read_unlock_irq(rwlock_t *lock)
385{
386 rwlock_release(&lock->dep_map, 1, _RET_IP_);
387 _raw_read_unlock(lock);
388 local_irq_enable();
389 preempt_enable();
390}
391
392static inline void __read_unlock_bh(rwlock_t *lock)
393{
394 rwlock_release(&lock->dep_map, 1, _RET_IP_);
395 _raw_read_unlock(lock);
396 preempt_enable_no_resched(); 177 preempt_enable_no_resched();
397 local_bh_enable_ip((unsigned long)__builtin_return_address(0)); 178 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
398} 179}
399 180
400static inline void __write_unlock_irqrestore(rwlock_t *lock, 181static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
401 unsigned long flags)
402{
403 rwlock_release(&lock->dep_map, 1, _RET_IP_);
404 _raw_write_unlock(lock);
405 local_irq_restore(flags);
406 preempt_enable();
407}
408
409static inline void __write_unlock_irq(rwlock_t *lock)
410{
411 rwlock_release(&lock->dep_map, 1, _RET_IP_);
412 _raw_write_unlock(lock);
413 local_irq_enable();
414 preempt_enable();
415}
416
417static inline void __write_unlock_bh(rwlock_t *lock)
418{
419 rwlock_release(&lock->dep_map, 1, _RET_IP_);
420 _raw_write_unlock(lock);
421 preempt_enable_no_resched();
422 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
423}
424
425static inline int __spin_trylock_bh(spinlock_t *lock)
426{ 182{
427 local_bh_disable(); 183 local_bh_disable();
428 preempt_disable(); 184 preempt_disable();
429 if (_raw_spin_trylock(lock)) { 185 if (do_raw_spin_trylock(lock)) {
430 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); 186 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
431 return 1; 187 return 1;
432 } 188 }
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock)
435 return 0; 191 return 0;
436} 192}
437 193
194#include <linux/rwlock_api_smp.h>
195
438#endif /* __LINUX_SPINLOCK_API_SMP_H */ 196#endif /* __LINUX_SPINLOCK_API_SMP_H */
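
A trylock imposes a contract on its callers: the matching unlock may run only on the success path, because the failure path has already re-enabled bottom halves and preemption itself. The same discipline in portable form, using pthread_mutex_trylock() as a stand-in (note its return convention is inverted: 0 means success, where the kernel helper returns 1):

        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

        int main(void)
        {
                if (pthread_mutex_trylock(&m) == 0) {
                        printf("fast path: lock taken\n");
                        pthread_mutex_unlock(&m);  /* unlock only on this path */
                } else {
                        printf("contended: defer or fall back\n");
                }
                return 0;
        }
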
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 04e1d3164576..af1f47229e70 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
16 16
17#define in_lock_functions(ADDR) 0 17#define in_lock_functions(ADDR) 0
18 18
19#define assert_spin_locked(lock) do { (void)(lock); } while (0) 19#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
20 20
21/* 21/*
22 * In the UP-nondebug case there's no real locking going on, so the 22 * In the UP-nondebug case there's no real locking going on, so the
@@ -40,7 +40,8 @@
40 do { preempt_enable(); __release(lock); (void)(lock); } while (0) 40 do { preempt_enable(); __release(lock); (void)(lock); } while (0)
41 41
42#define __UNLOCK_BH(lock) \ 42#define __UNLOCK_BH(lock) \
43 do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) 43 do { preempt_enable_no_resched(); local_bh_enable(); \
44 __release(lock); (void)(lock); } while (0)
44 45
45#define __UNLOCK_IRQ(lock) \ 46#define __UNLOCK_IRQ(lock) \
46 do { local_irq_enable(); __UNLOCK(lock); } while (0) 47 do { local_irq_enable(); __UNLOCK(lock); } while (0)
@@ -48,34 +49,37 @@
48#define __UNLOCK_IRQRESTORE(lock, flags) \ 49#define __UNLOCK_IRQRESTORE(lock, flags) \
49 do { local_irq_restore(flags); __UNLOCK(lock); } while (0) 50 do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
50 51
51#define _spin_lock(lock) __LOCK(lock) 52#define _raw_spin_lock(lock) __LOCK(lock)
52#define _spin_lock_nested(lock, subclass) __LOCK(lock) 53#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
53#define _read_lock(lock) __LOCK(lock) 54#define _raw_read_lock(lock) __LOCK(lock)
54#define _write_lock(lock) __LOCK(lock) 55#define _raw_write_lock(lock) __LOCK(lock)
55#define _spin_lock_bh(lock) __LOCK_BH(lock) 56#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
56#define _read_lock_bh(lock) __LOCK_BH(lock) 57#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
57#define _write_lock_bh(lock) __LOCK_BH(lock) 58#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
58#define _spin_lock_irq(lock) __LOCK_IRQ(lock) 59#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
59#define _read_lock_irq(lock) __LOCK_IRQ(lock) 60#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
60#define _write_lock_irq(lock) __LOCK_IRQ(lock) 61#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
61#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 62#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
62#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 63#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
63#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 64#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
64#define _spin_trylock(lock) ({ __LOCK(lock); 1; }) 65#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
65#define _read_trylock(lock) ({ __LOCK(lock); 1; }) 66#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
66#define _write_trylock(lock) ({ __LOCK(lock); 1; }) 67#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
67#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) 68#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
68#define _spin_unlock(lock) __UNLOCK(lock) 69#define _raw_spin_unlock(lock) __UNLOCK(lock)
69#define _read_unlock(lock) __UNLOCK(lock) 70#define _raw_read_unlock(lock) __UNLOCK(lock)
70#define _write_unlock(lock) __UNLOCK(lock) 71#define _raw_write_unlock(lock) __UNLOCK(lock)
71#define _spin_unlock_bh(lock) __UNLOCK_BH(lock) 72#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
72#define _write_unlock_bh(lock) __UNLOCK_BH(lock) 73#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
73#define _read_unlock_bh(lock) __UNLOCK_BH(lock) 74#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
74#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) 75#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
75#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) 76#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
76#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) 77#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
77#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 78#define _raw_spin_unlock_irqrestore(lock, flags) \
78#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 79 __UNLOCK_IRQRESTORE(lock, flags)
79#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 80#define _raw_read_unlock_irqrestore(lock, flags) \
81 __UNLOCK_IRQRESTORE(lock, flags)
82#define _raw_write_unlock_irqrestore(lock, flags) \
83 __UNLOCK_IRQRESTORE(lock, flags)
80 84
81#endif /* __LINUX_SPINLOCK_API_UP_H */ 85#endif /* __LINUX_SPINLOCK_API_UP_H */
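
On UP every operation in this table reduces to preempt/irq/bh toggles; the lock argument survives only inside (void)(lock), which keeps it evaluated and type-checked while generating no code, and the trylock forms use a GCC statement expression to yield a constant 1. A minimal standalone rendition of the idiom (GCC/Clang only; TRYLOCK_UP is an illustrative name, not the kernel macro):

        #include <stdio.h>

        /* (void)(lock) evaluates and type-checks the argument without storing it;
         * the ({ ...; 1; }) statement expression makes the "trylock" a constant. */
        #define TRYLOCK_UP(lock)  ({ (void)(lock); 1; })

        int main(void)
        {
                int fake_lock = 0;

                if (TRYLOCK_UP(&fake_lock))
                        printf("UP trylock always succeeds\n");
                return 0;
        }
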
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 68d88f71f1a2..851b7783720d 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -17,8 +17,8 @@
17 17
18#include <linux/lockdep.h> 18#include <linux/lockdep.h>
19 19
20typedef struct { 20typedef struct raw_spinlock {
21 raw_spinlock_t raw_lock; 21 arch_spinlock_t raw_lock;
22#ifdef CONFIG_GENERIC_LOCKBREAK 22#ifdef CONFIG_GENERIC_LOCKBREAK
23 unsigned int break_lock; 23 unsigned int break_lock;
24#endif 24#endif
@@ -29,26 +29,10 @@ typedef struct {
29#ifdef CONFIG_DEBUG_LOCK_ALLOC 29#ifdef CONFIG_DEBUG_LOCK_ALLOC
30 struct lockdep_map dep_map; 30 struct lockdep_map dep_map;
31#endif 31#endif
32} spinlock_t; 32} raw_spinlock_t;
33 33
34#define SPINLOCK_MAGIC 0xdead4ead 34#define SPINLOCK_MAGIC 0xdead4ead
35 35
36typedef struct {
37 raw_rwlock_t raw_lock;
38#ifdef CONFIG_GENERIC_LOCKBREAK
39 unsigned int break_lock;
40#endif
41#ifdef CONFIG_DEBUG_SPINLOCK
42 unsigned int magic, owner_cpu;
43 void *owner;
44#endif
45#ifdef CONFIG_DEBUG_LOCK_ALLOC
46 struct lockdep_map dep_map;
47#endif
48} rwlock_t;
49
50#define RWLOCK_MAGIC 0xdeaf1eed
51
52#define SPINLOCK_OWNER_INIT ((void *)-1L) 36#define SPINLOCK_OWNER_INIT ((void *)-1L)
53 37
54#ifdef CONFIG_DEBUG_LOCK_ALLOC 38#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,44 +41,56 @@ typedef struct {
57# define SPIN_DEP_MAP_INIT(lockname) 41# define SPIN_DEP_MAP_INIT(lockname)
58#endif 42#endif
59 43
60#ifdef CONFIG_DEBUG_LOCK_ALLOC 44#ifdef CONFIG_DEBUG_SPINLOCK
61# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } 45# define SPIN_DEBUG_INIT(lockname) \
46 .magic = SPINLOCK_MAGIC, \
47 .owner_cpu = -1, \
48 .owner = SPINLOCK_OWNER_INIT,
62#else 49#else
63# define RW_DEP_MAP_INIT(lockname) 50# define SPIN_DEBUG_INIT(lockname)
64#endif 51#endif
65 52
66#ifdef CONFIG_DEBUG_SPINLOCK 53#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
67# define __SPIN_LOCK_UNLOCKED(lockname) \ 54 { \
68 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ 55 .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
69 .magic = SPINLOCK_MAGIC, \ 56 SPIN_DEBUG_INIT(lockname) \
70 .owner = SPINLOCK_OWNER_INIT, \ 57 SPIN_DEP_MAP_INIT(lockname) }
71 .owner_cpu = -1, \ 58
72 SPIN_DEP_MAP_INIT(lockname) } 59#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
73#define __RW_LOCK_UNLOCKED(lockname) \ 60 (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
74 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ 61
75 .magic = RWLOCK_MAGIC, \ 62#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
76 .owner = SPINLOCK_OWNER_INIT, \ 63
77 .owner_cpu = -1, \ 64typedef struct spinlock {
78 RW_DEP_MAP_INIT(lockname) } 65 union {
79#else 66 struct raw_spinlock rlock;
80# define __SPIN_LOCK_UNLOCKED(lockname) \ 67
81 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ 68#ifdef CONFIG_DEBUG_LOCK_ALLOC
82 SPIN_DEP_MAP_INIT(lockname) } 69# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
83#define __RW_LOCK_UNLOCKED(lockname) \ 70 struct {
84 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ 71 u8 __padding[LOCK_PADSIZE];
85 RW_DEP_MAP_INIT(lockname) } 72 struct lockdep_map dep_map;
73 };
86#endif 74#endif
75 };
76} spinlock_t;
77
78#define __SPIN_LOCK_INITIALIZER(lockname) \
79 { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
80
81#define __SPIN_LOCK_UNLOCKED(lockname) \
82 (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
87 83
88/* 84/*
89 * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and 85 * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
90 * are hence deprecated. 86 * deprecated.
91 * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or 87 * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
92 * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. 88 * appropriate.
93 */ 89 */
94#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) 90#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
95#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
96 91
97#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) 92#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
98#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) 93
94#include <linux/rwlock_types.h>
99 95
100#endif /* __LINUX_SPINLOCK_TYPES_H */ 96#endif /* __LINUX_SPINLOCK_TYPES_H */
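
The union in the new spinlock_t is load-bearing: under CONFIG_DEBUG_LOCK_ALLOC the anonymous struct pads out to LOCK_PADSIZE, the offset of dep_map inside struct raw_spinlock, so lock->dep_map and lock->rlock.dep_map name the same storage and existing lockdep annotations keep working. A standalone sketch of that invariant with simplified stand-in types (the real arch_spinlock_t and lockdep_map carry more state; compile as C11 for _Static_assert and anonymous members):

        #include <stddef.h>

        struct lockdep_map { const char *name; };       /* simplified stand-in */

        struct raw_spinlock {
                unsigned int raw_lock;                  /* stands in for arch_spinlock_t */
                struct lockdep_map dep_map;
        };

        #define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))

        typedef struct spinlock {
                union {
                        struct raw_spinlock rlock;
                        struct {
                                char __padding[LOCK_PADSIZE];
                                struct lockdep_map dep_map;
                        };
                };
        } spinlock_t;

        /* The two names must overlay exactly for the trick to be sound. */
        _Static_assert(offsetof(spinlock_t, dep_map) ==
                       offsetof(spinlock_t, rlock.dep_map),
                       "dep_map must alias rlock.dep_map");

        int main(void) { return 0; }
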
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198e..c09b6407ae1b 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -16,22 +16,22 @@
16 16
17typedef struct { 17typedef struct {
18 volatile unsigned int slock; 18 volatile unsigned int slock;
19} raw_spinlock_t; 19} arch_spinlock_t;
20 20
21#define __RAW_SPIN_LOCK_UNLOCKED { 1 } 21#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
22 22
23#else 23#else
24 24
25typedef struct { } raw_spinlock_t; 25typedef struct { } arch_spinlock_t;
26 26
27#define __RAW_SPIN_LOCK_UNLOCKED { } 27#define __ARCH_SPIN_LOCK_UNLOCKED { }
28 28
29#endif 29#endif
30 30
31typedef struct { 31typedef struct {
32 /* no debug version on UP */ 32 /* no debug version on UP */
33} raw_rwlock_t; 33} arch_rwlock_t;
34 34
35#define __RAW_RW_LOCK_UNLOCKED { } 35#define __ARCH_RW_LOCK_UNLOCKED { }
36 36
37#endif /* __LINUX_SPINLOCK_TYPES_UP_H */ 37#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
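
In the non-debug UP configuration arch_spinlock_t is an empty struct, a GCC extension that has size zero in C, so the lock fields genuinely vanish from any containing structure. A quick, non-portable way to observe this:

        #include <stdio.h>

        typedef struct { } arch_spinlock_t;     /* empty struct: GCC extension, sizeof == 0 */

        int main(void)
        {
                printf("sizeof(arch_spinlock_t) = %zu\n", sizeof(arch_spinlock_t));
                return 0;
        }
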
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index d4841ed8215b..b14f6a91e19f 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
18 */ 18 */
19 19
20#ifdef CONFIG_DEBUG_SPINLOCK 20#ifdef CONFIG_DEBUG_SPINLOCK
21#define __raw_spin_is_locked(x) ((x)->slock == 0) 21#define arch_spin_is_locked(x) ((x)->slock == 0)
22 22
23static inline void __raw_spin_lock(raw_spinlock_t *lock) 23static inline void arch_spin_lock(arch_spinlock_t *lock)
24{ 24{
25 lock->slock = 0; 25 lock->slock = 0;
26} 26}
27 27
28static inline void 28static inline void
29__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 29arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
30{ 30{
31 local_irq_save(flags); 31 local_irq_save(flags);
32 lock->slock = 0; 32 lock->slock = 0;
33} 33}
34 34
35static inline int __raw_spin_trylock(raw_spinlock_t *lock) 35static inline int arch_spin_trylock(arch_spinlock_t *lock)
36{ 36{
37 char oldval = lock->slock; 37 char oldval = lock->slock;
38 38
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
41 return oldval > 0; 41 return oldval > 0;
42} 42}
43 43
44static inline void __raw_spin_unlock(raw_spinlock_t *lock) 44static inline void arch_spin_unlock(arch_spinlock_t *lock)
45{ 45{
46 lock->slock = 1; 46 lock->slock = 1;
47} 47}
@@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
49/* 49/*
50 * Read-write spinlocks. No debug version. 50 * Read-write spinlocks. No debug version.
51 */ 51 */
52#define __raw_read_lock(lock) do { (void)(lock); } while (0) 52#define arch_read_lock(lock) do { (void)(lock); } while (0)
53#define __raw_write_lock(lock) do { (void)(lock); } while (0) 53#define arch_write_lock(lock) do { (void)(lock); } while (0)
54#define __raw_read_trylock(lock) ({ (void)(lock); 1; }) 54#define arch_read_trylock(lock) ({ (void)(lock); 1; })
55#define __raw_write_trylock(lock) ({ (void)(lock); 1; }) 55#define arch_write_trylock(lock) ({ (void)(lock); 1; })
56#define __raw_read_unlock(lock) do { (void)(lock); } while (0) 56#define arch_read_unlock(lock) do { (void)(lock); } while (0)
57#define __raw_write_unlock(lock) do { (void)(lock); } while (0) 57#define arch_write_unlock(lock) do { (void)(lock); } while (0)
58 58
59#else /* DEBUG_SPINLOCK */ 59#else /* DEBUG_SPINLOCK */
60#define __raw_spin_is_locked(lock) ((void)(lock), 0) 60#define arch_spin_is_locked(lock) ((void)(lock), 0)
61/* for sched.c and kernel_lock.c: */ 61/* for sched.c and kernel_lock.c: */
62# define __raw_spin_lock(lock) do { (void)(lock); } while (0) 62# define arch_spin_lock(lock) do { (void)(lock); } while (0)
63# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) 63# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
64# define __raw_spin_unlock(lock) do { (void)(lock); } while (0) 64# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
65# define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) 65# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
66#endif /* DEBUG_SPINLOCK */ 66#endif /* DEBUG_SPINLOCK */
67 67
68#define __raw_spin_is_contended(lock) (((void)(lock), 0)) 68#define arch_spin_is_contended(lock) (((void)(lock), 0))
69 69
70#define __raw_read_can_lock(lock) (((void)(lock), 1)) 70#define arch_read_can_lock(lock) (((void)(lock), 1))
71#define __raw_write_can_lock(lock) (((void)(lock), 1)) 71#define arch_write_can_lock(lock) (((void)(lock), 1))
72 72
73#define __raw_spin_unlock_wait(lock) \ 73#define arch_spin_unlock_wait(lock) \
74 do { cpu_relax(); } while (__raw_spin_is_locked(lock)) 74 do { cpu_relax(); } while (arch_spin_is_locked(lock))
75 75
76#endif /* __LINUX_SPINLOCK_UP_H */ 76#endif /* __LINUX_SPINLOCK_UP_H */
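
The debug-build UP semantics are easy to exercise in isolation: slock holds 1 when unlocked and 0 when held, and arch_spin_trylock() reports success exactly when the old value was nonzero; no atomicity is required with a single CPU. A standalone reconstruction built from the same definitions:

        #include <stdio.h>

        typedef struct { volatile unsigned int slock; } arch_spinlock_t;

        #define __ARCH_SPIN_LOCK_UNLOCKED { 1 }

        static int arch_spin_trylock(arch_spinlock_t *lock)
        {
                char oldval = lock->slock;

                lock->slock = 0;
                return oldval > 0;      /* succeeded iff it was unlocked before */
        }

        static void arch_spin_unlock(arch_spinlock_t *lock)
        {
                lock->slock = 1;
        }

        int main(void)
        {
                arch_spinlock_t l = __ARCH_SPIN_LOCK_UNLOCKED;

                printf("first trylock:  %d\n", arch_spin_trylock(&l));  /* 1 */
                printf("second trylock: %d\n", arch_spin_trylock(&l));  /* 0 */
                arch_spin_unlock(&l);
                return 0;
        }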