author     Linus Torvalds <torvalds@linux-foundation.org>   2012-07-27 18:14:26 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-07-27 18:14:26 -0400
commit     cea8f46c36c3f82860b038aa23a46e16757666ba
tree       e09dc37d2b6880d86dac09afbc0c686139d86df0 /arch/arm/include
parent     c1e7179a38919f02dd950801529176b72f5e5a8a
parent     91b006def384d8f07f9f324ab211fefe2b085c90
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King:
"First ARM push of this merge window, post me coming back from holiday.
This is what has been in linux-next for the last few weeks. Not much
to say which isn't described by the commit summaries."
* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (32 commits)
ARM: 7463/1: topology: Update cpu_power according to DT information
ARM: 7462/1: topology: factorize the update of sibling masks
ARM: 7461/1: topology: Add arch_scale_freq_power function
ARM: 7456/1: ptrace: provide separate functions for tracing syscall {entry,exit}
ARM: 7455/1: audit: move syscall auditing until after ptrace SIGTRAP handling
ARM: 7454/1: entry: don't bother with syscall tracing on ret_from_fork path
ARM: 7453/1: audit: only allow syscall auditing for pure EABI userspace
ARM: 7452/1: delay: allow timer-based delay implementation to be selected
ARM: 7451/1: arch timer: implement read_current_timer and get_cycles
ARM: 7450/1: dcache: select DCACHE_WORD_ACCESS for little-endian ARMv6+ CPUs
ARM: 7449/1: use generic strnlen_user and strncpy_from_user functions
ARM: 7448/1: perf: remove arm_perf_pmu_ids global enumeration
ARM: 7447/1: rwlocks: remove unused branch labels from trylock routines
ARM: 7446/1: spinlock: use ticket algorithm for ARMv6+ locking implementation
ARM: 7445/1: mm: update CONTEXTIDR register to contain PID of current process
ARM: 7444/1: kernel: add arch-timer C3STOP feature
ARM: 7460/1: remove asm/locks.h
ARM: 7439/1: head.S: simplify initial page table mapping
ARM: 7437/1: zImage: Allow DTB command line concatenation with ATAG_CMDLINE
ARM: 7436/1: Do not map the vectors page as write-through on UP systems
...
Diffstat (limited to 'arch/arm/include')
 arch/arm/include/asm/arch_timer.h      |   3
 arch/arm/include/asm/delay.h           |  32
 arch/arm/include/asm/locks.h           | 274
 arch/arm/include/asm/memory.h          |   2
 arch/arm/include/asm/perf_event.h      |  17
 arch/arm/include/asm/pmu.h             |   3
 arch/arm/include/asm/spinlock.h        |  76
 arch/arm/include/asm/spinlock_types.h  |  17
 arch/arm/include/asm/timex.h           |  10
 arch/arm/include/asm/uaccess.h         |  27
 arch/arm/include/asm/word-at-a-time.h  |  96
 11 files changed, 204 insertions, 353 deletions
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index ed2e95d46e29..62e75475e57e 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -1,7 +1,10 @@
 #ifndef __ASMARM_ARCH_TIMER_H
 #define __ASMARM_ARCH_TIMER_H
 
+#include <asm/errno.h>
+
 #ifdef CONFIG_ARM_ARCH_TIMER
+#define ARCH_HAS_READ_CURRENT_TIMER
 int arch_timer_of_register(void);
 int arch_timer_sched_clock_init(void);
 #else
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b2deda181549..dc6145120de3 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -6,9 +6,22 @@
 #ifndef __ASM_ARM_DELAY_H
 #define __ASM_ARM_DELAY_H
 
+#include <asm/memory.h>
 #include <asm/param.h>	/* HZ */
 
-extern void __delay(int loops);
+#define MAX_UDELAY_MS	2
+#define UDELAY_MULT	((UL(2199023) * HZ) >> 11)
+#define UDELAY_SHIFT	30
+
+#ifndef __ASSEMBLY__
+
+extern struct arm_delay_ops {
+	void (*delay)(unsigned long);
+	void (*const_udelay)(unsigned long);
+	void (*udelay)(unsigned long);
+} arm_delay_ops;
+
+#define __delay(n)		arm_delay_ops.delay(n)
 
 /*
  * This function intentionally does not exist; if you see references to
@@ -23,22 +36,27 @@ extern void __bad_udelay(void);
  * division by multiplication: you don't have to worry about
  * loss of precision.
  *
- * Use only for very small delays ( < 1 msec).  Should probably use a
+ * Use only for very small delays ( < 2 msec).  Should probably use a
  * lookup table, really, as the multiplications take much too long with
  * short delays.  This is a "reasonable" implementation, though (and the
  * first constant multiplications gets optimized away if the delay is
  * a constant)
  */
-extern void __udelay(unsigned long usecs);
-extern void __const_udelay(unsigned long);
-
-#define MAX_UDELAY_MS	2
+#define __udelay(n)		arm_delay_ops.udelay(n)
+#define __const_udelay(n)	arm_delay_ops.const_udelay(n)
 
 #define udelay(n)							\
 	(__builtin_constant_p(n) ?					\
 	((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() :		\
-	__const_udelay((n) * ((2199023U*HZ)>>11))) :			\
+	__const_udelay((n) * UDELAY_MULT)) :				\
 	__udelay(n))
 
+/* Loop-based definitions for assembly code. */
+extern void __loop_delay(unsigned long loops);
+extern void __loop_udelay(unsigned long usecs);
+extern void __loop_const_udelay(unsigned long);
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* defined(_ARM_DELAY_H) */
 
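The arm_delay_ops indirection introduced above is what lets a platform swap the traditional loop-calibrated delay for a timer-backed one at run time. A minimal sketch of such an override follows; the function names are hypothetical and the actual switch-over logic lives in arch/arm/lib/delay.c, which is outside this diffstat.

/*
 * Hypothetical sketch only: repoints the new arm_delay_ops hooks at a
 * timer-backed implementation.  Names are illustrative, not the real
 * arch/arm/lib/delay.c code.
 */
#include <linux/init.h>
#include <asm/delay.h>

static void timer_delay(unsigned long cycles)
{
	/* busy-wait until 'cycles' ticks of the platform timer have elapsed */
}

static void timer_const_udelay(unsigned long xloops)
{
	/* scale the pre-multiplied argument to timer cycles, then delay */
}

static void timer_udelay(unsigned long usecs)
{
	timer_const_udelay(usecs * UDELAY_MULT);
}

static int __init timer_delay_init(void)
{
	/* replace the default loop-based ops with the timer-based ones */
	arm_delay_ops.delay        = timer_delay;
	arm_delay_ops.const_udelay = timer_const_udelay;
	arm_delay_ops.udelay       = timer_udelay;
	return 0;
}
core_initcall(timer_delay_init);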
diff --git a/arch/arm/include/asm/locks.h b/arch/arm/include/asm/locks.h
deleted file mode 100644
index ef4c897772d1..000000000000
--- a/arch/arm/include/asm/locks.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * arch/arm/include/asm/locks.h
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Interrupt safe locking assembler.
- */
-#ifndef __ASM_PROC_LOCKS_H
-#define __ASM_PROC_LOCKS_H
-
-#if __LINUX_ARM_ARCH__ >= 6
-
-#define __down_op(ptr,fail) \
-	({ \
-	__asm__ __volatile__( \
-	"@ down_op\n" \
-"1:	ldrex	lr, [%0]\n" \
-"	sub	lr, lr, %1\n" \
-"	strex	ip, lr, [%0]\n" \
-"	teq	ip, #0\n" \
-"	bne	1b\n" \
-"	teq	lr, #0\n" \
-"	movmi	ip, %0\n" \
-"	blmi	" #fail \
-	: \
-	: "r" (ptr), "I" (1) \
-	: "ip", "lr", "cc"); \
-	smp_mb(); \
-	})
-
-#define __down_op_ret(ptr,fail) \
-	({ \
-		unsigned int ret; \
-	__asm__ __volatile__( \
-	"@ down_op_ret\n" \
-"1:	ldrex	lr, [%1]\n" \
-"	sub	lr, lr, %2\n" \
-"	strex	ip, lr, [%1]\n" \
-"	teq	ip, #0\n" \
-"	bne	1b\n" \
-"	teq	lr, #0\n" \
-"	movmi	ip, %1\n" \
-"	movpl	ip, #0\n" \
-"	blmi	" #fail "\n" \
-"	mov	%0, ip" \
-	: "=&r" (ret) \
-	: "r" (ptr), "I" (1) \
-	: "ip", "lr", "cc"); \
-	smp_mb(); \
-	ret; \
-	})
-
-#define __up_op(ptr,wake) \
-	({ \
-	smp_mb(); \
-	__asm__ __volatile__( \
-	"@ up_op\n" \
-"1:	ldrex	lr, [%0]\n" \
-"	add	lr, lr, %1\n" \
-"	strex	ip, lr, [%0]\n" \
-"	teq	ip, #0\n" \
-"	bne	1b\n" \
-"	cmp	lr, #0\n" \
-"	movle	ip, %0\n" \
-"	blle	" #wake \
-	: \
-	: "r" (ptr), "I" (1) \
-	: "ip", "lr", "cc"); \
-	})
-
-/*
- * The value 0x01000000 supports up to 128 processors and
- * lots of processes.  BIAS must be chosen such that sub'ing
- * BIAS once per CPU will result in the long remaining
- * negative.
- */
-#define RW_LOCK_BIAS      0x01000000
-#define RW_LOCK_BIAS_STR "0x01000000"
-
-#define __down_op_write(ptr,fail) \
-	({ \
-	__asm__ __volatile__( \
-	"@ down_op_write\n" \
-"1:	ldrex	lr, [%0]\n" \
-"	sub	lr, lr, %1\n" \
-"	strex	ip, lr, [%0]\n" \
-"	teq	ip, #0\n" \
-"	bne	1b\n" \
-"	teq	lr, #0\n" \
-"	movne	ip, %0\n" \
-"	blne	" #fail \
-	: \
-	: "r" (ptr), "I" (RW_LOCK_BIAS) \
-	: "ip", "lr", "cc"); \
-	smp_mb(); \
-	})
-
-#define __up_op_write(ptr,wake) \
-	({ \
-	smp_mb(); \
-	__asm__ __volatile__( \
-	"@ up_op_write\n" \
-"1:	ldrex	lr, [%0]\n" \
-"	adds	lr, lr, %1\n" \
-"	strex	ip, lr, [%0]\n" \
-"	teq	ip, #0\n" \
-"	bne	1b\n" \
-"	movcs	ip, %0\n" \
-"	blcs	" #wake \
-	: \
-	: "r" (ptr), "I" (RW_LOCK_BIAS) \
-	: "ip", "lr", "cc"); \
-	})
-
-#define __down_op_read(ptr,fail) \
-	__down_op(ptr, fail)
-
-#define __up_op_read(ptr,wake) \
-	({ \
-	smp_mb(); \
-	__asm__ __volatile__( \
-	"@ up_op_read\n" \
-"1:	ldrex	lr, [%0]\n" \
-"	add	lr, lr, %1\n" \
-"	strex	ip, lr, [%0]\n" \
-"	teq	ip, #0\n" \
-"	bne	1b\n" \
-"	teq	lr, #0\n" \
-"	moveq	ip, %0\n" \
-"	bleq	" #wake \
-	: \
-	: "r" (ptr), "I" (1) \
-	: "ip", "lr", "cc"); \
-	})
-
-#else
-
-#define __down_op(ptr,fail) \
-	({ \
-	__asm__ __volatile__( \
-	"@ down_op\n" \
-"	mrs	ip, cpsr\n" \
-"	orr	lr, ip, #128\n" \
-"	msr	cpsr_c, lr\n" \
-"	ldr	lr, [%0]\n" \
-"	subs	lr, lr, %1\n" \
-"	str	lr, [%0]\n" \
-"	msr	cpsr_c, ip\n" \
-"	movmi	ip, %0\n" \
-"	blmi	" #fail \
-	: \
-	: "r" (ptr), "I" (1) \
-	: "ip", "lr", "cc"); \
-	smp_mb(); \
-	})
-
-#define __down_op_ret(ptr,fail) \
-	({ \
-		unsigned int ret; \
-	__asm__ __volatile__( \
-	"@ down_op_ret\n" \
-"	mrs	ip, cpsr\n" \
-"	orr	lr, ip, #128\n" \
-"	msr	cpsr_c, lr\n" \
-"	ldr	lr, [%1]\n" \
-"	subs	lr, lr, %2\n" \
-"	str	lr, [%1]\n" \
-"	msr	cpsr_c, ip\n" \
-"	movmi	ip, %1\n" \
-"	movpl	ip, #0\n" \
-"	blmi	" #fail "\n" \
-"	mov	%0, ip" \
-	: "=&r" (ret) \
-	: "r" (ptr), "I" (1) \
-	: "ip", "lr", "cc"); \
-	smp_mb(); \
-	ret; \
-	})
-
-#define __up_op(ptr,wake) \
-	({ \
-	smp_mb(); \
-	__asm__ __volatile__( \
-	"@ up_op\n" \
-"	mrs	ip, cpsr\n" \
-"	orr	lr, ip, #128\n" \
-"	msr	cpsr_c, lr\n" \
-"	ldr	lr, [%0]\n" \
-"	adds	lr, lr, %1\n" \
-"	str	lr, [%0]\n" \
-"	msr	cpsr_c, ip\n" \
-"	movle	ip, %0\n" \
-"	blle	" #wake \
-	: \
-	: "r" (ptr), "I" (1) \
-	: "ip", "lr", "cc"); \
-	})
-
-/*
- * The value 0x01000000 supports up to 128 processors and
- * lots of processes.  BIAS must be chosen such that sub'ing
- * BIAS once per CPU will result in the long remaining
- * negative.
- */
-#define RW_LOCK_BIAS      0x01000000
-#define RW_LOCK_BIAS_STR "0x01000000"
-
-#define __down_op_write(ptr,fail) \
-	({ \
-	__asm__ __volatile__( \
-	"@ down_op_write\n" \
-"	mrs	ip, cpsr\n" \
-"	orr	lr, ip, #128\n" \
-"	msr	cpsr_c, lr\n" \
-"	ldr	lr, [%0]\n" \
-"	subs	lr, lr, %1\n" \
-"	str	lr, [%0]\n" \
-"	msr	cpsr_c, ip\n" \
-"	movne	ip, %0\n" \
-"	blne	" #fail \
-	: \
-	: "r" (ptr), "I" (RW_LOCK_BIAS) \
-	: "ip", "lr", "cc"); \
-	smp_mb(); \
-	})
-
-#define __up_op_write(ptr,wake) \
-	({ \
-	__asm__ __volatile__( \
-	"@ up_op_write\n" \
-"	mrs	ip, cpsr\n" \
-"	orr	lr, ip, #128\n" \
-"	msr	cpsr_c, lr\n" \
-"	ldr	lr, [%0]\n" \
-"	adds	lr, lr, %1\n" \
-"	str	lr, [%0]\n" \
-"	msr	cpsr_c, ip\n" \
-"	movcs	ip, %0\n" \
-"	blcs	" #wake \
-	: \
-	: "r" (ptr), "I" (RW_LOCK_BIAS) \
-	: "ip", "lr", "cc"); \
-	smp_mb(); \
-	})
-
-#define __down_op_read(ptr,fail) \
-	__down_op(ptr, fail)
-
-#define __up_op_read(ptr,wake) \
-	({ \
-	smp_mb(); \
-	__asm__ __volatile__( \
-	"@ up_op_read\n" \
-"	mrs	ip, cpsr\n" \
-"	orr	lr, ip, #128\n" \
-"	msr	cpsr_c, lr\n" \
-"	ldr	lr, [%0]\n" \
-"	adds	lr, lr, %1\n" \
-"	str	lr, [%0]\n" \
-"	msr	cpsr_c, ip\n" \
-"	moveq	ip, %0\n" \
-"	bleq	" #wake \
-	: \
-	: "r" (ptr), "I" (1) \
-	: "ip", "lr", "cc"); \
-	})
-
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index fcb575747e5e..e965f1b560f1 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -16,7 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/const.h>
 #include <linux/types.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
 
 #ifdef CONFIG_NEED_MACH_MEMORY_H
 #include <mach/memory.h>
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 00cbe10a50e3..e074948d8143 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,21 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/* ARM perf PMU IDs for use by internal perf clients. */
-enum arm_perf_pmu_ids {
-	ARM_PERF_PMU_ID_XSCALE1 = 0,
-	ARM_PERF_PMU_ID_XSCALE2,
-	ARM_PERF_PMU_ID_V6,
-	ARM_PERF_PMU_ID_V6MP,
-	ARM_PERF_PMU_ID_CA8,
-	ARM_PERF_PMU_ID_CA9,
-	ARM_PERF_PMU_ID_CA5,
-	ARM_PERF_PMU_ID_CA15,
-	ARM_PERF_PMU_ID_CA7,
-	ARM_NUM_PMU_IDS,
-};
-
-extern enum arm_perf_pmu_ids
-armpmu_get_pmu_id(void);
+/* Nothing to see here... */
 
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 90114faa9f3c..4432305f4a2a 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -103,10 +103,9 @@ struct pmu_hw_events {
 
 struct arm_pmu {
 	struct pmu	pmu;
-	enum arm_perf_pmu_ids id;
 	enum arm_pmu_type type;
 	cpumask_t	active_irqs;
-	const char	*name;
+	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
 	void		(*enable)(struct hw_perf_event *evt, int idx);
 	void		(*disable)(struct hw_perf_event *evt, int idx);
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 65fa3c88095c..b4ca707d0a69 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -59,18 +59,13 @@ static inline void dsb_sev(void)
 }
 
 /*
- * ARMv6 Spin-locking.
+ * ARMv6 ticket-based spin-locking.
  *
- * We exclusively read the old value.  If it is zero, we may have
- * won the lock, so we try exclusively storing it.  A memory barrier
- * is required after we get a lock, and before we release it, because
- * V6 CPUs are assumed to have weakly ordered memory.
- *
- * Unlocked value: 0
- * Locked value: 1
+ * A memory barrier is required after we get a lock, and before we
+ * release it, because V6 CPUs are assumed to have weakly ordered
+ * memory.
  */
 
-#define arch_spin_is_locked(x)		((x)->lock != 0)
 #define arch_spin_unlock_wait(lock) \
 	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
@@ -79,31 +74,39 @@ static inline void dsb_sev(void)
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 newval;
+	arch_spinlock_t lockval;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-	WFE("ne")
-"	strexeq	%0, %2, [%1]\n"
-"	teqeq	%0, #0\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%1, %0, %4\n"
+"	strex	%2, %1, [%3]\n"
+"	teq	%2, #0\n"
 "	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
+	while (lockval.tickets.next != lockval.tickets.owner) {
+		wfe();
+		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+	}
+
 	smp_mb();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 slock;
 
 	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+"	ldrex	%0, [%2]\n"
+"	subs	%1, %0, %0, ror #16\n"
+"	addeq	%0, %0, %3\n"
+"	strexeq	%1, %0, [%2]"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
 	if (tmp == 0) {
@@ -116,17 +119,38 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	unsigned long tmp;
+	u32 slock;
+
 	smp_mb();
 
 	__asm__ __volatile__(
-"	str	%1, [%0]\n"
-	:
-	: "r" (&lock->lock), "r" (0)
+"	mov	%1, #1\n"
+"1:	ldrex	%0, [%2]\n"
+"	uadd16	%0, %0, %1\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock)
 	: "cc");
 
 	dsb_sev();
 }
 
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return tickets.owner != tickets.next;
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended	arch_spin_is_contended
+
 /*
  * RWLOCKS
  *
@@ -158,7 +182,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	unsigned long tmp;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
+"	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
 "	strexeq	%0, %2, [%1]"
 	: "=&r" (tmp)
@@ -244,7 +268,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	unsigned long tmp, tmp2 = 1;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%2]\n"
+"	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
 	: "=&r" (tmp), "+r" (tmp2)
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index d14d197ae04a..b262d2f8b478 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -5,11 +5,24 @@
 # error "please don't include this file directly"
 #endif
 
+#define TICKET_SHIFT	16
+
 typedef struct {
-	volatile unsigned int lock;
+	union {
+		u32 slock;
+		struct __raw_tickets {
+#ifdef __ARMEB__
+			u16 next;
+			u16 owner;
+#else
+			u16 owner;
+			u16 next;
+#endif
+		} tickets;
+	};
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
 typedef struct {
 	volatile unsigned int lock;
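The union keeps the whole lock in one 32-bit word, so an unlocked lock is simply slock == 0 and the asm can update both counters with a single word access. A quick userspace sketch of the layout (hypothetical test code, assuming a little-endian host like the non-__ARMEB__ case above):

#include <assert.h>
#include <stdint.h>

/* Mirrors the little-endian __raw_tickets layout: owner in the low half. */
struct raw_tickets { uint16_t owner, next; };
union slock { uint32_t slock; struct raw_tickets tickets; };

int main(void)
{
	union slock l = { .slock = 0 };		/* __ARCH_SPIN_LOCK_UNLOCKED */

	l.tickets.next++;			/* a CPU takes ticket 0 */
	assert(l.slock == 0x00010000);		/* next=1, owner=0 -> held */
	l.tickets.owner++;			/* unlock: owner catches up */
	assert(l.tickets.owner == l.tickets.next);	/* lock is free again */
	return 0;
}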
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index 3be8de3adaba..ce119442277c 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -12,13 +12,15 @@
 #ifndef _ASMARM_TIMEX_H
 #define _ASMARM_TIMEX_H
 
+#include <asm/arch_timer.h>
 #include <mach/timex.h>
 
 typedef unsigned long cycles_t;
 
-static inline cycles_t get_cycles (void)
-{
-	return 0;
-}
+#ifdef ARCH_HAS_READ_CURRENT_TIMER
+#define get_cycles()	({ cycles_t c; read_current_timer(&c) ? 0 : c; })
+#else
+#define get_cycles()	(0)
+#endif
 
 #endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 71f6536d17ac..479a6352e0b5 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -189,6 +189,9 @@ static inline void set_fs(mm_segment_t fs)
 
 #define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
 
+#define user_addr_max() \
+	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+
 /*
  * The "__xxx" versions of the user access functions do not verify the
  * address space - it must have been done previously with a separate
@@ -398,9 +401,6 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
 #define __clear_user(addr,n)		(memset((void __force *)addr, 0, n), 0)
 #endif
 
-extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
-extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
-
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n))
@@ -427,24 +427,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
 	return n;
 }
 
-static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		res = __strncpy_from_user(dst, src, count);
-	return res;
-}
-
-#define strlen_user(s)	strnlen_user(s, ~0UL >> 1)
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
-static inline long __must_check strnlen_user(const char __user *s, long n)
-{
-	unsigned long res = 0;
-
-	if (__addr_ok(s))
-		res = __strnlen_user(s, n);
-
-	return res;
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
 #endif /* _ASMARM_UACCESS_H */
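With the ARM-private copies gone, callers see the return convention of the generic word-at-a-time helpers in lib/. A hedged sketch of a typical caller (the function below is hypothetical, not from this commit):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical caller: copies a NUL-terminated name out of user space. */
static long copy_name_from_user(char *buf, size_t bufsz,
				const char __user *uname)
{
	long len = strncpy_from_user(buf, uname, bufsz);

	if (len < 0)
		return len;		/* -EFAULT: faulted on user memory */
	if (len == bufsz)
		return -ENAMETOOLONG;	/* source longer than the buffer */
	return len;			/* string length; buf is NUL-terminated */
}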
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..4d52f92967a6
--- /dev/null
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -0,0 +1,96 @@
+#ifndef __ASM_ARM_WORD_AT_A_TIME_H
+#define __ASM_ARM_WORD_AT_A_TIME_H
+
+#ifndef __ARMEB__
+
+/*
+ * Little-endian word-at-a-time zero byte handling.
+ * Heavily based on the x86 algorithm.
+ */
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+	const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
+				     const struct word_at_a_time *c)
+{
+	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+	*bits = mask;
+	return mask;
+}
+
+#define prep_zero_mask(a, bits, c) (bits)
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+	bits = (bits - 1) & ~bits;
+	return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+	unsigned long ret;
+
+#if __LINUX_ARM_ARCH__ >= 5
+	/* We have clz available. */
+	ret = fls(mask) >> 3;
+#else
+	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+	ret = (0x0ff0001 + mask) >> 23;
+	/* Fix the 1 for 00 case */
+	ret &= mask;
+#endif
+
+	return ret;
+}
+
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+#define zero_bytemask(mask) (mask)
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+	unsigned long ret, offset;
+
+	/* Load word from unaligned pointer addr */
+	asm(
+	"1:	ldr	%0, [%2]\n"
+	"2:\n"
+	"	.pushsection .fixup,\"ax\"\n"
+	"	.align 2\n"
+	"3:	and	%1, %2, #0x3\n"
+	"	bic	%2, %2, #0x3\n"
+	"	ldr	%0, [%2]\n"
+	"	lsl	%1, %1, #0x3\n"
+	"	lsr	%0, %0, %1\n"
+	"	b	2b\n"
+	"	.popsection\n"
+	"	.pushsection __ex_table,\"a\"\n"
+	"	.align	3\n"
+	"	.long	1b, 3b\n"
+	"	.popsection"
+	: "=&r" (ret), "=&r" (offset)
+	: "r" (addr), "Qo" (*(unsigned long *)addr));
+
+	return ret;
+}
+
+
+#endif	/* DCACHE_WORD_ACCESS */
+
+#else	/* __ARMEB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+#endif /* __ASM_ARM_WORD_AT_A_TIME_H */
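To see what the little-endian helpers in this new header compute, here is a self-contained worked example (plain userspace C, hypothetical test harness, using a 32-bit word as on ARM) for a word holding "ab" followed by NUL bytes:

#include <assert.h>
#include <stdint.h>

/* 32-bit analogue of REPEAT_BYTE() from linux/kernel.h */
#define REPEAT_BYTE32(x)	((0xffffffffu / 0xffu) * (x))

int main(void)
{
	uint32_t a = 0x00006261;			/* bytes "ab\0\0", little-endian */
	const uint32_t one_bits  = REPEAT_BYTE32(0x01);
	const uint32_t high_bits = REPEAT_BYTE32(0x80);

	/* has_zero(): 0x80 ends up in every byte lane that held zero */
	uint32_t bits = ((a - one_bits) & ~a) & high_bits;
	assert(bits == 0x80800000u);

	/* create_zero_mask(): all-ones over the bytes before the first zero */
	bits = (bits - 1) & ~bits;
	bits >>= 7;
	assert(bits == 0x0000ffffu);

	/* find_zero() via fls(): the terminating NUL is byte index 2 */
	assert((32 - __builtin_clz(bits)) >> 3 == 2);
	return 0;
}

This is exactly the sequence the generic strnlen_user/strncpy_from_user routines run on each word, which is why selecting DCACHE_WORD_ACCESS and this header lets ARM use them.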