Diffstat (limited to 'arch/arm/include/asm')
 -rw-r--r--  arch/arm/include/asm/atomic.h          |   2
 -rw-r--r--  arch/arm/include/asm/domain.h          |  18
 -rw-r--r--  arch/arm/include/asm/futex.h           |   1
 -rw-r--r--  arch/arm/include/asm/hardware/sp810.h  |   2
 -rw-r--r--  arch/arm/include/asm/locks.h           | 274
 -rw-r--r--  arch/arm/include/asm/memory.h          |   2
 -rw-r--r--  arch/arm/include/asm/perf_event.h      |  17
 -rw-r--r--  arch/arm/include/asm/pmu.h             |   3
 -rw-r--r--  arch/arm/include/asm/spinlock.h        |  76
 -rw-r--r--  arch/arm/include/asm/spinlock_types.h  |  17
 -rw-r--r--  arch/arm/include/asm/thread_info.h     |   5
 -rw-r--r--  arch/arm/include/asm/uaccess.h         |  27
 -rw-r--r--  arch/arm/include/asm/word-at-a-time.h  |  96
 13 files changed, 183 insertions, 357 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 68374ba6a943..c79f61faa3a5 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -243,7 +243,7 @@ typedef struct {
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-static inline u64 atomic64_read(atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
 {
 	u64 result;
 
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 3d2220498abc..6ddbe446425e 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -60,13 +60,13 @@
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_USE_DOMAINS
-#define set_domain(x)					\
-	do {						\
-	__asm__ __volatile__(				\
-	"mcr	p15, 0, %0, c3, c0	@ set domain"	\
-	  : : "r" (x));					\
-	isb();						\
-	} while (0)
+static inline void set_domain(unsigned val)
+{
+	asm volatile(
+	"mcr	p15, 0, %0, c3, c0	@ set domain"
+	  : : "r" (val));
+	isb();
+}
 
 #define modify_domain(dom,type)					\
 	do {							\
@@ -78,8 +78,8 @@
 	} while (0)
 
 #else
-#define set_domain(x)		do { } while (0)
-#define modify_domain(dom,type)	do { } while (0)
+static inline void set_domain(unsigned val) { }
+static inline void modify_domain(unsigned dom, unsigned type) { }
 #endif
 
 /*
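A note on the change above: the old do { } while (0) stubs discarded their arguments at preprocessing time, so a caller could pass a misspelled or stale symbol and still compile when CONFIG_CPU_USE_DOMAINS was disabled; the empty static inline functions parse and type-check their arguments in both configurations. A minimal sketch of the difference (hypothetical names, not kernel code):

/* Old style: the macro argument is swallowed by the preprocessor. */
#define set_domain_old(x)	do { } while (0)

/* New style: the argument must be a valid expression of the right type. */
static inline void set_domain_new(unsigned val) { }

void caller(void)
{
	set_domain_old(no_such_symbol);	/* compiles anyway: x is never evaluated */
	set_domain_new(3);		/* parsed and type-checked like any call */
}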
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 7be54690aeec..e42cf597f6e6 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -19,6 +19,7 @@
 	"	.long	1b, 4f, 2b, 4f\n"		\
 	"	.popsection\n"				\
 	"	.pushsection .fixup,\"ax\"\n"		\
+	"	.align	2\n"				\
 	"4:	mov	%0, " err_reg "\n"		\
 	"	b	3b\n"				\
 	"	.popsection"
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index e0d1c0cfa548..6b9b077d86b3 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -4,7 +4,7 @@
  * ARM PrimeXsys System Controller SP810 header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/include/asm/locks.h b/arch/arm/include/asm/locks.h
deleted file mode 100644
index ef4c897772d1..000000000000
--- a/arch/arm/include/asm/locks.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * arch/arm/include/asm/locks.h
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Interrupt safe locking assembler.
- */
-#ifndef __ASM_PROC_LOCKS_H
-#define __ASM_PROC_LOCKS_H
-
-#if __LINUX_ARM_ARCH__ >= 6
-
-#define __down_op(ptr,fail)			\
-	({					\
-	__asm__ __volatile__(			\
-	"@ down_op\n"				\
-"1:	ldrex	lr, [%0]\n"			\
-"	sub	lr, lr, %1\n"			\
-"	strex	ip, lr, [%0]\n"			\
-"	teq	ip, #0\n"			\
-"	bne	1b\n"				\
-"	teq	lr, #0\n"			\
-"	movmi	ip, %0\n"			\
-"	blmi	" #fail				\
-	:					\
-	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc");			\
-	smp_mb();				\
-	})
-
-#define __down_op_ret(ptr,fail)			\
-	({					\
-		unsigned int ret;		\
-	__asm__ __volatile__(			\
-	"@ down_op_ret\n"			\
-"1:	ldrex	lr, [%1]\n"			\
-"	sub	lr, lr, %2\n"			\
-"	strex	ip, lr, [%1]\n"			\
-"	teq	ip, #0\n"			\
-"	bne	1b\n"				\
-"	teq	lr, #0\n"			\
-"	movmi	ip, %1\n"			\
-"	movpl	ip, #0\n"			\
-"	blmi	" #fail "\n"			\
-"	mov	%0, ip"				\
-	: "=&r" (ret)				\
-	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc");			\
-	smp_mb();				\
-	ret;					\
-	})
-
-#define __up_op(ptr,wake)			\
-	({					\
-	smp_mb();				\
-	__asm__ __volatile__(			\
-	"@ up_op\n"				\
-"1:	ldrex	lr, [%0]\n"			\
-"	add	lr, lr, %1\n"			\
-"	strex	ip, lr, [%0]\n"			\
-"	teq	ip, #0\n"			\
-"	bne	1b\n"				\
-"	cmp	lr, #0\n"			\
-"	movle	ip, %0\n"			\
-"	blle	" #wake				\
-	:					\
-	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc");			\
-	})
-
-/*
- * The value 0x01000000 supports up to 128 processors and
- * lots of processes.  BIAS must be chosen such that sub'ing
- * BIAS once per CPU will result in the long remaining
- * negative.
- */
-#define RW_LOCK_BIAS      0x01000000
-#define RW_LOCK_BIAS_STR "0x01000000"
-
-#define __down_op_write(ptr,fail)		\
-	({					\
-	__asm__ __volatile__(			\
-	"@ down_op_write\n"			\
-"1:	ldrex	lr, [%0]\n"			\
-"	sub	lr, lr, %1\n"			\
-"	strex	ip, lr, [%0]\n"			\
-"	teq	ip, #0\n"			\
-"	bne	1b\n"				\
-"	teq	lr, #0\n"			\
-"	movne	ip, %0\n"			\
-"	blne	" #fail				\
-	:					\
-	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
-	: "ip", "lr", "cc");			\
-	smp_mb();				\
-	})
-
-#define __up_op_write(ptr,wake)			\
-	({					\
-	smp_mb();				\
-	__asm__ __volatile__(			\
-	"@ up_op_write\n"			\
-"1:	ldrex	lr, [%0]\n"			\
-"	adds	lr, lr, %1\n"			\
-"	strex	ip, lr, [%0]\n"			\
-"	teq	ip, #0\n"			\
-"	bne	1b\n"				\
-"	movcs	ip, %0\n"			\
-"	blcs	" #wake				\
-	:					\
-	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
-	: "ip", "lr", "cc");			\
-	})
-
-#define __down_op_read(ptr,fail)		\
-	__down_op(ptr, fail)
-
-#define __up_op_read(ptr,wake)			\
-	({					\
-	smp_mb();				\
-	__asm__ __volatile__(			\
-	"@ up_op_read\n"			\
-"1:	ldrex	lr, [%0]\n"			\
-"	add	lr, lr, %1\n"			\
-"	strex	ip, lr, [%0]\n"			\
-"	teq	ip, #0\n"			\
-"	bne	1b\n"				\
-"	teq	lr, #0\n"			\
-"	moveq	ip, %0\n"			\
-"	bleq	" #wake				\
-	:					\
-	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc");			\
-	})
-
-#else
-
-#define __down_op(ptr,fail)			\
-	({					\
-	__asm__ __volatile__(			\
-	"@ down_op\n"				\
-"	mrs	ip, cpsr\n"			\
-"	orr	lr, ip, #128\n"			\
-"	msr	cpsr_c, lr\n"			\
-"	ldr	lr, [%0]\n"			\
-"	subs	lr, lr, %1\n"			\
-"	str	lr, [%0]\n"			\
-"	msr	cpsr_c, ip\n"			\
-"	movmi	ip, %0\n"			\
-"	blmi	" #fail				\
-	:					\
-	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc");			\
-	smp_mb();				\
-	})
-
-#define __down_op_ret(ptr,fail)			\
-	({					\
-		unsigned int ret;		\
-	__asm__ __volatile__(			\
-	"@ down_op_ret\n"			\
-"	mrs	ip, cpsr\n"			\
-"	orr	lr, ip, #128\n"			\
-"	msr	cpsr_c, lr\n"			\
-"	ldr	lr, [%1]\n"			\
-"	subs	lr, lr, %2\n"			\
-"	str	lr, [%1]\n"			\
-"	msr	cpsr_c, ip\n"			\
-"	movmi	ip, %1\n"			\
-"	movpl	ip, #0\n"			\
-"	blmi	" #fail "\n"			\
-"	mov	%0, ip"				\
-	: "=&r" (ret)				\
-	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc");			\
-	smp_mb();				\
-	ret;					\
-	})
-
-#define __up_op(ptr,wake)			\
-	({					\
-	smp_mb();				\
-	__asm__ __volatile__(			\
-	"@ up_op\n"				\
-"	mrs	ip, cpsr\n"			\
-"	orr	lr, ip, #128\n"			\
-"	msr	cpsr_c, lr\n"			\
-"	ldr	lr, [%0]\n"			\
-"	adds	lr, lr, %1\n"			\
-"	str	lr, [%0]\n"			\
-"	msr	cpsr_c, ip\n"			\
-"	movle	ip, %0\n"			\
-"	blle	" #wake				\
-	:					\
-	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc");			\
-	})
-
-/*
- * The value 0x01000000 supports up to 128 processors and
- * lots of processes.  BIAS must be chosen such that sub'ing
- * BIAS once per CPU will result in the long remaining
- * negative.
- */
-#define RW_LOCK_BIAS      0x01000000
-#define RW_LOCK_BIAS_STR "0x01000000"
-
-#define __down_op_write(ptr,fail)		\
-	({					\
-	__asm__ __volatile__(			\
-	"@ down_op_write\n"			\
-"	mrs	ip, cpsr\n"			\
-"	orr	lr, ip, #128\n"			\
-"	msr	cpsr_c, lr\n"			\
-"	ldr	lr, [%0]\n"			\
-"	subs	lr, lr, %1\n"			\
-"	str	lr, [%0]\n"			\
-"	msr	cpsr_c, ip\n"			\
-"	movne	ip, %0\n"			\
-"	blne	" #fail				\
-	:					\
-	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
-	: "ip", "lr", "cc");			\
-	smp_mb();				\
-	})
-
-#define __up_op_write(ptr,wake)			\
-	({					\
-	__asm__ __volatile__(			\
-	"@ up_op_write\n"			\
-"	mrs	ip, cpsr\n"			\
-"	orr	lr, ip, #128\n"			\
-"	msr	cpsr_c, lr\n"			\
-"	ldr	lr, [%0]\n"			\
-"	adds	lr, lr, %1\n"			\
-"	str	lr, [%0]\n"			\
-"	msr	cpsr_c, ip\n"			\
-"	movcs	ip, %0\n"			\
-"	blcs	" #wake				\
-	:					\
-	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
-	: "ip", "lr", "cc");			\
-	smp_mb();				\
-	})
-
-#define __down_op_read(ptr,fail)		\
-	__down_op(ptr, fail)
-
-#define __up_op_read(ptr,wake)			\
-	({					\
-	smp_mb();				\
-	__asm__ __volatile__(			\
-	"@ up_op_read\n"			\
-"	mrs	ip, cpsr\n"			\
-"	orr	lr, ip, #128\n"			\
-"	msr	cpsr_c, lr\n"			\
-"	ldr	lr, [%0]\n"			\
-"	adds	lr, lr, %1\n"			\
-"	str	lr, [%0]\n"			\
-"	msr	cpsr_c, ip\n"			\
-"	moveq	ip, %0\n"			\
-"	bleq	" #wake				\
-	:					\
-	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc");			\
-	})
-
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index fcb575747e5e..e965f1b560f1 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -16,7 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/const.h>
 #include <linux/types.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
 
 #ifdef CONFIG_NEED_MACH_MEMORY_H
 #include <mach/memory.h>
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 00cbe10a50e3..e074948d8143 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,21 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/* ARM perf PMU IDs for use by internal perf clients. */
-enum arm_perf_pmu_ids {
-	ARM_PERF_PMU_ID_XSCALE1	= 0,
-	ARM_PERF_PMU_ID_XSCALE2,
-	ARM_PERF_PMU_ID_V6,
-	ARM_PERF_PMU_ID_V6MP,
-	ARM_PERF_PMU_ID_CA8,
-	ARM_PERF_PMU_ID_CA9,
-	ARM_PERF_PMU_ID_CA5,
-	ARM_PERF_PMU_ID_CA15,
-	ARM_PERF_PMU_ID_CA7,
-	ARM_NUM_PMU_IDS,
-};
-
-extern enum arm_perf_pmu_ids
-armpmu_get_pmu_id(void);
+/* Nothing to see here... */
 
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 90114faa9f3c..4432305f4a2a 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -103,10 +103,9 @@ struct pmu_hw_events {
 
 struct arm_pmu {
 	struct pmu	pmu;
-	enum arm_perf_pmu_ids id;
 	enum arm_pmu_type type;
 	cpumask_t	active_irqs;
-	const char	*name;
+	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
 	void		(*enable)(struct hw_perf_event *evt, int idx);
 	void		(*disable)(struct hw_perf_event *evt, int idx);
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 65fa3c88095c..b4ca707d0a69 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -59,18 +59,13 @@ static inline void dsb_sev(void)
 }
 
 /*
- * ARMv6 Spin-locking.
+ * ARMv6 ticket-based spin-locking.
  *
- * We exclusively read the old value.  If it is zero, we may have
- * won the lock, so we try exclusively storing it.  A memory barrier
- * is required after we get a lock, and before we release it, because
- * V6 CPUs are assumed to have weakly ordered memory.
- *
- * Unlocked value: 0
- * Locked value: 1
+ * A memory barrier is required after we get a lock, and before we
+ * release it, because V6 CPUs are assumed to have weakly ordered
+ * memory.
  */
 
-#define arch_spin_is_locked(x)		((x)->lock != 0)
 #define arch_spin_unlock_wait(lock) \
 	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
@@ -79,31 +74,39 @@ static inline void dsb_sev(void)
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 newval;
+	arch_spinlock_t lockval;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-	WFE("ne")
-"	strexeq	%0, %2, [%1]\n"
-"	teqeq	%0, #0\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%1, %0, %4\n"
+"	strex	%2, %1, [%3]\n"
+"	teq	%2, #0\n"
 "	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
+	while (lockval.tickets.next != lockval.tickets.owner) {
+		wfe();
+		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+	}
+
 	smp_mb();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 slock;
 
 	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+"	ldrex	%0, [%2]\n"
+"	subs	%1, %0, %0, ror #16\n"
+"	addeq	%0, %0, %3\n"
+"	strexeq	%1, %0, [%2]"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
 	if (tmp == 0) {
@@ -116,17 +119,38 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	unsigned long tmp;
+	u32 slock;
+
 	smp_mb();
 
 	__asm__ __volatile__(
-"	str	%1, [%0]\n"
-	:
-	: "r" (&lock->lock), "r" (0)
+"	mov	%1, #1\n"
+"1:	ldrex	%0, [%2]\n"
+"	uadd16	%0, %0, %1\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock)
 	: "cc");
 
 	dsb_sev();
 }
 
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return tickets.owner != tickets.next;
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended	arch_spin_is_contended
+
 /*
  * RWLOCKS
  *
@@ -158,7 +182,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	unsigned long tmp;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
+"	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
 "	strexeq	%0, %2, [%1]"
 	: "=&r" (tmp)
@@ -244,7 +268,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	unsigned long tmp, tmp2 = 1;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%2]\n"
+"	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
 	: "=&r" (tmp), "+r" (tmp2)
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index d14d197ae04a..b262d2f8b478 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -5,11 +5,24 @@
 # error "please don't include this file directly"
 #endif
 
+#define TICKET_SHIFT	16
+
 typedef struct {
-	volatile unsigned int lock;
+	union {
+		u32 slock;
+		struct __raw_tickets {
+#ifdef __ARMEB__
+			u16 next;
+			u16 owner;
+#else
+			u16 owner;
+			u16 next;
+#endif
+		} tickets;
+	};
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
 typedef struct {
 	volatile unsigned int lock;
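Taken together, the two files above replace the old test-and-set lock with a ticket lock: a CPU taking the lock atomically reads the current next value as its ticket while adding 1 << TICKET_SHIFT to bump next for the following arrival, then spins (in wfe()) until owner equals its ticket; unlock increments owner, and uadd16 is used because its parallel halfword add cannot carry out of owner into next. A portable sketch of the same algorithm in C11 atomics (illustrative only; the kernel relies on ldrex/strex, wfe/sev and its own barriers, not C11):

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
	_Atomic uint16_t owner;	/* ticket currently being served */
	_Atomic uint16_t next;	/* ticket handed to the next arrival */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
	/* Take a ticket; mirrors the ldrex/add/strex loop in arch_spin_lock(). */
	uint16_t ticket = atomic_fetch_add(&l->next, 1);

	/* Spin until served; the kernel waits in wfe() and is woken by
	 * the sev issued from dsb_sev() on unlock. */
	while (atomic_load(&l->owner) != ticket)
		;
}

static void ticket_lock_release(struct ticket_lock *l)
{
	/* Pass the lock to the next ticket; mirrors the uadd16 increment. */
	atomic_fetch_add(&l->owner, 1);
}

Fairness falls out of the design: waiters are served strictly in arrival order, which the old unfair test-and-set lock could not guarantee.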
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index b79f8e97f775..af7b0bda3355 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
-#define TIF_SYSCALL_RESTARTSYS	10
 #define TIF_POLLING_NRFLAG	16
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
@@ -164,11 +163,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
-#define _TIF_SYSCALL_RESTARTSYS	(1 << TIF_SYSCALL_RESTARTSYS)
 
 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-			   _TIF_SYSCALL_RESTARTSYS)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
 
 /*
  * Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 71f6536d17ac..479a6352e0b5 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -189,6 +189,9 @@ static inline void set_fs(mm_segment_t fs)
 
 #define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
 
+#define user_addr_max() \
+	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+
 /*
  * The "__xxx" versions of the user access functions do not verify the
  * address space - it must have been done previously with a separate
@@ -398,9 +401,6 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
 #define __clear_user(addr,n)		(memset((void __force *)addr, 0, n), 0)
 #endif
 
-extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
-extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
-
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n))
@@ -427,24 +427,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
 	return n;
 }
 
-static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		res = __strncpy_from_user(dst, src, count);
-	return res;
-}
-
-#define strlen_user(s)	strnlen_user(s, ~0UL >> 1)
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
-static inline long __must_check strnlen_user(const char __user *s, long n)
-{
-	unsigned long res = 0;
-
-	if (__addr_ok(s))
-		res = __strnlen_user(s, n);
-
-	return res;
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
 #endif /* _ASMARM_UACCESS_H */
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..4d52f92967a6
--- /dev/null
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -0,0 +1,96 @@
+#ifndef __ASM_ARM_WORD_AT_A_TIME_H
+#define __ASM_ARM_WORD_AT_A_TIME_H
+
+#ifndef __ARMEB__
+
+/*
+ * Little-endian word-at-a-time zero byte handling.
+ * Heavily based on the x86 algorithm.
+ */
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+	const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
+				     const struct word_at_a_time *c)
+{
+	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+	*bits = mask;
+	return mask;
+}
+
+#define prep_zero_mask(a, bits, c) (bits)
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+	bits = (bits - 1) & ~bits;
+	return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+	unsigned long ret;
+
+#if __LINUX_ARM_ARCH__ >= 5
+	/* We have clz available. */
+	ret = fls(mask) >> 3;
+#else
+	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+	ret = (0x0ff0001 + mask) >> 23;
+	/* Fix the 1 for 00 case */
+	ret &= mask;
+#endif
+
+	return ret;
+}
+
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+#define zero_bytemask(mask) (mask)
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+	unsigned long ret, offset;
+
+	/* Load word from unaligned pointer addr */
+	asm(
+	"1:	ldr	%0, [%2]\n"
+	"2:\n"
+	"	.pushsection .fixup,\"ax\"\n"
+	"	.align 2\n"
+	"3:	and	%1, %2, #0x3\n"
+	"	bic	%2, %2, #0x3\n"
+	"	ldr	%0, [%2]\n"
+	"	lsl	%1, %1, #0x3\n"
+	"	lsr	%0, %0, %1\n"
+	"	b	2b\n"
+	"	.popsection\n"
+	"	.pushsection __ex_table,\"a\"\n"
+	"	.align	3\n"
+	"	.long	1b, 3b\n"
+	"	.popsection"
+	: "=&r" (ret), "=&r" (offset)
+	: "r" (addr), "Qo" (*(unsigned long *)addr));
+
+	return ret;
+}
+
+
+#endif	/* DCACHE_WORD_ACCESS */
+
+#else	/* __ARMEB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+#endif /* __ASM_ARM_WORD_AT_A_TIME_H */
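The has_zero()/find_zero() helpers in the new file implement the classic little-endian zero-byte trick: word - REPEAT_BYTE(0x01) borrows into bit 7 of every byte that was zero, and masking with ~word & REPEAT_BYTE(0x80) discards bytes that merely had their top bit set. A standalone demonstration in plain C (illustrative, 64-bit constants; not the kernel interface):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101ULL	/* REPEAT_BYTE(0x01) on a 64-bit word */
#define HIGHS 0x8080808080808080ULL	/* REPEAT_BYTE(0x80) on a 64-bit word */

/* Index of the first zero byte in 'word' (little-endian byte order).
 * Assumes a zero byte exists, as the kernel's callers guarantee. */
static unsigned find_first_zero_byte(uint64_t word)
{
	/* Markers above the first zero byte can be spurious (a borrow may
	 * propagate through them), but the lowest marker never is. */
	uint64_t mask = (word - ONES) & ~word & HIGHS;
	unsigned idx = 0;

	while (!(mask & 0x80)) {	/* walk to the lowest 0x80 marker */
		mask >>= 8;
		idx++;
	}
	return idx;
}

int main(void)
{
	uint64_t w;

	memcpy(&w, "abc\0defg", sizeof(w));	/* 8 bytes, NUL at index 3 */
	printf("first zero byte at index %u\n", find_first_zero_byte(w));
	return 0;
}

This word-at-a-time scan is what lets the generic strncpy_from_user()/strnlen_user() (which uaccess.h above now declares instead of the old ARM-private versions) walk strings a word at a time instead of byte by byte, with load_unaligned_zeropad() covering the case where the final word read crosses into an unmapped page.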