Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/atomic.h           |   4
-rw-r--r--  arch/arm/include/asm/barrier.h          |  69
-rw-r--r--  arch/arm/include/asm/bitops.h           |   2
-rw-r--r--  arch/arm/include/asm/bug.h              |  30
-rw-r--r--  arch/arm/include/asm/cmpxchg.h          | 295
-rw-r--r--  arch/arm/include/asm/compiler.h         |  15
-rw-r--r--  arch/arm/include/asm/cp15.h             |  87
-rw-r--r--  arch/arm/include/asm/div64.h            |   2
-rw-r--r--  arch/arm/include/asm/dma.h              |   1
-rw-r--r--  arch/arm/include/asm/domain.h           |   4
-rw-r--r--  arch/arm/include/asm/exec.h             |   6
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h  |   3
-rw-r--r--  arch/arm/include/asm/io.h               |   2
-rw-r--r--  arch/arm/include/asm/mmu.h              |   7
-rw-r--r--  arch/arm/include/asm/processor.h        |   3
-rw-r--r--  arch/arm/include/asm/switch_to.h        |  18
-rw-r--r--  arch/arm/include/asm/system.h           | 552
-rw-r--r--  arch/arm/include/asm/system_info.h      |  27
-rw-r--r--  arch/arm/include/asm/system_misc.h      |  29
-rw-r--r--  arch/arm/include/asm/uaccess.h          |   2
20 files changed, 607 insertions(+), 551 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 86976d034382..68374ba6a943 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -13,7 +13,9 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/system.h>
+#include <linux/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)	{ (i) }
 
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
new file mode 100644
index 000000000000..44f4a09ff37b
--- /dev/null
+++ b/arch/arm/include/asm/barrier.h
@@ -0,0 +1,69 @@
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
+
+#if __LINUX_ARM_ARCH__ >= 7 || \
+	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
+#define sev()	__asm__ __volatile__ ("sev" : : : "memory")
+#define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
+#define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
+#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+				    : : "r" (0) : "memory")
+#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+				    : : "r" (0) : "memory")
+#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+				    : : "r" (0) : "memory")
+#elif defined(CONFIG_CPU_FA526)
+#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+				    : : "r" (0) : "memory")
+#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+				    : : "r" (0) : "memory")
+#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#else
+#define isb() __asm__ __volatile__ ("" : : : "memory")
+#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+				    : : "r" (0) : "memory")
+#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#endif
+
+#ifdef CONFIG_ARCH_HAS_BARRIERS
+#include <mach/barriers.h>
+#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
+#include <asm/outercache.h>
+#define mb()		do { dsb(); outer_sync(); } while (0)
+#define rmb()		dsb()
+#define wmb()		mb()
+#else
+#include <asm/memory.h>
+#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#endif
+
+#ifndef CONFIG_SMP
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#else
+#define smp_mb()	dmb()
+#define smp_rmb()	dmb()
+#define smp_wmb()	dmb()
+#endif
+
+#define read_barrier_depends()		do { } while(0)
+#define smp_read_barrier_depends()	do { } while(0)
+
+#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
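Taken together, barrier.h picks the strongest primitive the CPU offers (DMB/DSB on v7, CP15 operations on v6/XSC3, plain compiler barriers elsewhere) and maps the kernel-wide mb()/smp_mb() family onto it. For orientation, here is a minimal sketch of the message-passing idiom the smp_wmb()/smp_rmb() pair exists to order; this is illustrative only and not part of the patch:

    /* Illustrative sketch, not code from this patch: flag/data
     * publication ordered by the barriers defined above. */
    static int payload;
    static int ready;

    static void producer(void)		/* runs on CPU 0 */
    {
    	payload = 42;
    	smp_wmb();			/* order payload store before ready store */
    	ready = 1;
    }

    static int consumer(void)		/* runs on CPU 1 */
    {
    	while (!ready)
    		cpu_relax();
    	smp_rmb();			/* order ready load before payload load */
    	return payload;			/* guaranteed to observe 42 */
    }

On a UP kernel both smp_* barriers compile down to barrier(), which is exactly why they exist as a family separate from mb()/rmb()/wmb().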
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index f7419ef9c8f9..e691ec91e4d3 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -24,7 +24,7 @@
 #endif
 
 #include <linux/compiler.h>
-#include <asm/system.h>
+#include <linux/irqflags.h>
 
 #define smp_mb__before_clear_bit()	smp_mb()
 #define smp_mb__after_clear_bit()	smp_mb()
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index fac79dceb736..7af5c6c3653a 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -1,6 +1,7 @@
 #ifndef _ASMARM_BUG_H
 #define _ASMARM_BUG_H
 
+#include <linux/linkage.h>
 
 #ifdef CONFIG_BUG
 
@@ -57,4 +58,33 @@ do { \
 
 #include <asm-generic/bug.h>
 
+struct pt_regs;
+void die(const char *msg, struct pt_regs *regs, int err);
+
+struct siginfo;
+void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+		unsigned long err, unsigned long trap);
+
+#ifdef CONFIG_ARM_LPAE
+#define FAULT_CODE_ALIGNMENT	33
+#define FAULT_CODE_DEBUG	34
+#else
+#define FAULT_CODE_ALIGNMENT	1
+#define FAULT_CODE_DEBUG	2
+#endif
+
+void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
+				       struct pt_regs *),
+		     int sig, int code, const char *name);
+
+void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
+				       struct pt_regs *),
+		     int sig, int code, const char *name);
+
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+struct mm_struct;
+extern void show_pte(struct mm_struct *mm, unsigned long addr);
+extern void __show_regs(struct pt_regs *);
+
 #endif
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..d41d7cbf0ada
--- /dev/null
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -0,0 +1,295 @@
+#ifndef __ASM_ARM_CMPXCHG_H
+#define __ASM_ARM_CMPXCHG_H
+
+#include <linux/irqflags.h>
+#include <asm/barrier.h>
+
+#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
+/*
+ * On the StrongARM, "swp" is terminally broken since it bypasses the
+ * cache totally.  This means that the cache becomes inconsistent, and,
+ * since we use normal loads/stores as well, this is really bad.
+ * Typically, this causes oopsen in filp_close, but could have other,
+ * more disastrous effects.  There are two work-arounds:
+ *	1. Disable interrupts and emulate the atomic swap
+ *	2. Clean the cache, perform atomic swap, flush the cache
+ *
+ * We choose (1) since it's the "easiest" to achieve here and is not
+ * dependent on the processor type.
+ *
+ * NOTE that this solution won't work on an SMP system, so explicitly
+ * forbid it here.
+ */
+#define swp_is_buggy
+#endif
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+	extern void __bad_xchg(volatile void *, int);
+	unsigned long ret;
+#ifdef swp_is_buggy
+	unsigned long flags;
+#endif
+#if __LINUX_ARM_ARCH__ >= 6
+	unsigned int tmp;
+#endif
+
+	smp_mb();
+
+	switch (size) {
+#if __LINUX_ARM_ARCH__ >= 6
+	case 1:
+		asm volatile("@	__xchg1\n"
+		"1:	ldrexb	%0, [%3]\n"
+		"	strexb	%1, %2, [%3]\n"
+		"	teq	%1, #0\n"
+		"	bne	1b"
+			: "=&r" (ret), "=&r" (tmp)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+	case 4:
+		asm volatile("@	__xchg4\n"
+		"1:	ldrex	%0, [%3]\n"
+		"	strex	%1, %2, [%3]\n"
+		"	teq	%1, #0\n"
+		"	bne	1b"
+			: "=&r" (ret), "=&r" (tmp)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+#elif defined(swp_is_buggy)
+#ifdef CONFIG_SMP
+#error SMP is not supported on this platform
+#endif
+	case 1:
+		raw_local_irq_save(flags);
+		ret = *(volatile unsigned char *)ptr;
+		*(volatile unsigned char *)ptr = x;
+		raw_local_irq_restore(flags);
+		break;
+
+	case 4:
+		raw_local_irq_save(flags);
+		ret = *(volatile unsigned long *)ptr;
+		*(volatile unsigned long *)ptr = x;
+		raw_local_irq_restore(flags);
+		break;
+#else
+	case 1:
+		asm volatile("@	__xchg1\n"
+		"	swpb	%0, %1, [%2]"
+			: "=&r" (ret)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+	case 4:
+		asm volatile("@	__xchg4\n"
+		"	swp	%0, %1, [%2]"
+			: "=&r" (ret)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+#endif
+	default:
+		__bad_xchg(ptr, size), ret = 0;
+		break;
+	}
+	smp_mb();
+
+	return ret;
+}
+
+#define xchg(ptr,x) \
+	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+#include <asm-generic/cmpxchg-local.h>
+
+#if __LINUX_ARM_ARCH__ < 6
+/* min ARCH < ARMv6 */
+
+#ifdef CONFIG_SMP
+#error "SMP is not supported on this platform"
+#endif
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#ifndef CONFIG_SMP
+#include <asm-generic/cmpxchg.h>
+#endif
+
+#else	/* min ARCH >= ARMv6 */
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+/*
+ * cmpxchg only supports 32-bit operands on ARMv6.
+ */
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long oldval, res;
+
+	switch (size) {
+#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
+	case 1:
+		do {
+			asm volatile("@ __cmpxchg1\n"
+			"	ldrexb	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexbeq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+	case 2:
+		do {
+			asm volatile("@ __cmpxchg2\n"
+			"	ldrexh	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexheq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+#endif
+	case 4:
+		do {
+			asm volatile("@ __cmpxchg4\n"
+			"	ldrex	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexeq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+	default:
+		__bad_cmpxchg(ptr, size);
+		oldval = 0;
+	}
+
+	return oldval;
+}
+
+static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+					 unsigned long new, int size)
+{
+	unsigned long ret;
+
+	smp_mb();
+	ret = __cmpxchg(ptr, old, new, size);
+	smp_mb();
+
+	return ret;
+}
+
+#define cmpxchg(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
+					  (unsigned long)(o),		\
+					  (unsigned long)(n),		\
+					  sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	unsigned long ret;
+
+	switch (size) {
+#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
+	case 1:
+	case 2:
+		ret = __cmpxchg_local_generic(ptr, old, new, size);
+		break;
+#endif
+	default:
+		ret = __cmpxchg(ptr, old, new, size);
+	}
+
+	return ret;
+}
+
+#define cmpxchg_local(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
+					     (unsigned long)(o),	\
+					     (unsigned long)(n),	\
+					     sizeof(*(ptr))))
+
+#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
+
+/*
+ * Note: ARMv7-M (currently unsupported by Linux) does not support
+ * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
+ * not be allowed to use __cmpxchg64.
+ */
+static inline unsigned long long __cmpxchg64(volatile void *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	register unsigned long long oldval asm("r0");
+	register unsigned long long __old asm("r2") = old;
+	register unsigned long long __new asm("r4") = new;
+	unsigned long res;
+
+	do {
+		asm volatile(
+		"	@ __cmpxchg8\n"
+		"	ldrexd	%1, %H1, [%2]\n"
+		"	mov	%0, #0\n"
+		"	teq	%1, %3\n"
+		"	teqeq	%H1, %H3\n"
+		"	strexdeq %0, %4, %H4, [%2]\n"
+			: "=&r" (res), "=&r" (oldval)
+			: "r" (ptr), "Ir" (__old), "r" (__new)
+			: "memory", "cc");
+	} while (res);
+
+	return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
+						unsigned long long old,
+						unsigned long long new)
+{
+	unsigned long long ret;
+
+	smp_mb();
+	ret = __cmpxchg64(ptr, old, new);
+	smp_mb();
+
+	return ret;
+}
+
+#define cmpxchg64(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
+					    (unsigned long long)(o),	\
+					    (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr,o,n)					\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
+					 (unsigned long long)(o),	\
+					 (unsigned long long)(n)))
+
+#else /* min ARCH = ARMv6 */
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#endif
+
+#endif	/* __LINUX_ARM_ARCH__ >= 6 */
+
+#endif /* __ASM_ARM_CMPXCHG_H */
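Since cmpxchg() returns the value it found at the pointer, the conventional caller retries until the compare succeeds. A hypothetical example, built only on the macro defined above:

    /* Hypothetical caller: lock-free saturating counter increment. */
    static unsigned long saturating_inc(unsigned long *ctr, unsigned long limit)
    {
    	unsigned long old, new;

    	do {
    		old = *ctr;
    		if (old == limit)
    			return old;		/* already saturated */
    		new = old + 1;
    	} while (cmpxchg(ctr, old, new) != old);	/* retry on interference */

    	return new;
    }

The full-barrier semantics come from the smp_mb() pair in __cmpxchg_mb(); callers that only need CPU-local atomicity can use cmpxchg_local() instead.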
diff --git a/arch/arm/include/asm/compiler.h b/arch/arm/include/asm/compiler.h
new file mode 100644
index 000000000000..8155db2f7fa1
--- /dev/null
+++ b/arch/arm/include/asm/compiler.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_ARM_COMPILER_H
+#define __ASM_ARM_COMPILER_H
+
+/*
+ * This is used to ensure the compiler did actually allocate the register we
+ * asked it for in some inline assembly sequences.  Apparently we can't trust
+ * the compiler from one version to another so a bit of paranoia won't hurt.
+ * This string is meant to be concatenated with the inline asm string and
+ * will cause compilation to stop on mismatch.
+ * (for details, see gcc PR 15089)
+ */
+#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
+
+
+#endif /* __ASM_ARM_COMPILER_H */
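Because the macro expands to assembler directives, the check happens after GCC substitutes the asm operands, i.e. at assembly time. A sketch of the intended pattern; the function name and the arithmetic are invented for illustration:

    /* Hypothetical user: fail the build unless %0 really is r0. */
    static inline unsigned long increment_in_r0(unsigned long x)
    {
    	register unsigned long r0v asm("r0") = x;

    	asm(__asmeq("%0", "r0")		/* expands to: .ifnc %0,r0 ; .err ; .endif */
    	    "add	%0, %0, #1"
    	    : "+r" (r0v));
    	return r0v;
    }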
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
new file mode 100644
index 000000000000..5ef4d8015a60
--- /dev/null
+++ b/arch/arm/include/asm/cp15.h
@@ -0,0 +1,87 @@
+#ifndef __ASM_ARM_CP15_H
+#define __ASM_ARM_CP15_H
+
+#include <asm/barrier.h>
+
+/*
+ * CR1 bits (CP#15 CR1)
+ */
+#define CR_M	(1 << 0)	/* MMU enable				*/
+#define CR_A	(1 << 1)	/* Alignment abort enable		*/
+#define CR_C	(1 << 2)	/* Dcache enable			*/
+#define CR_W	(1 << 3)	/* Write buffer enable			*/
+#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
+#define CR_D	(1 << 5)	/* 32-bit data address range		*/
+#define CR_L	(1 << 6)	/* Implementation defined		*/
+#define CR_B	(1 << 7)	/* Big endian				*/
+#define CR_S	(1 << 8)	/* System MMU protection		*/
+#define CR_R	(1 << 9)	/* ROM MMU protection			*/
+#define CR_F	(1 << 10)	/* Implementation defined		*/
+#define CR_Z	(1 << 11)	/* Implementation defined		*/
+#define CR_I	(1 << 12)	/* Icache enable			*/
+#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
+#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
+#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
+#define CR_DT	(1 << 16)
+#define CR_IT	(1 << 18)
+#define CR_ST	(1 << 19)
+#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
+#define CR_U	(1 << 22)	/* Unaligned access operation		*/
+#define CR_XP	(1 << 23)	/* Extended page tables			*/
+#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
+#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
+#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
+#define CR_AFE	(1 << 29)	/* Access flag enable			*/
+#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
+
+#ifndef __ASSEMBLY__
+
+#if __LINUX_ARM_ARCH__ >= 4
+#define vectors_high()	(cr_alignment & CR_V)
+#else
+#define vectors_high()	(0)
+#endif
+
+extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
+extern unsigned long cr_alignment;	/* defined in entry-armv.S */
+
+static inline unsigned int get_cr(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
+	return val;
+}
+
+static inline void set_cr(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
+	  : : "r" (val) : "cc");
+	isb();
+}
+
+#ifndef CONFIG_SMP
+extern void adjust_cr(unsigned long mask, unsigned long set);
+#endif
+
+#define CPACC_FULL(n)		(3 << (n * 2))
+#define CPACC_SVC(n)		(1 << (n * 2))
+#define CPACC_DISABLE(n)	(0 << (n * 2))
+
+static inline unsigned int get_copro_access(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
+	  : "=r" (val) : : "cc");
+	return val;
+}
+
+static inline void set_copro_access(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
+	  : : "r" (val) : "cc");
+	isb();
+}
+
+#endif
+
+#endif
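get_cr()/set_cr() are intended as a read-modify-write pair, with set_cr() supplying the isb() so the new control bits take effect before the next instruction. An illustrative (hypothetical) caller using the CR_* masks above:

    /* Hypothetical sketch: turn on alignment fault checking. */
    static inline void enable_alignment_faults(void)
    {
    	unsigned int cr = get_cr();	/* read CP15 c1 control register */

    	cr |= CR_A;			/* trap unaligned accesses */
    	set_cr(cr);			/* write back; set_cr() issues the isb() */
    }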
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index d3f0a9eee9f6..fe92ccf1d0b0 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -1,8 +1,8 @@
 #ifndef __ASM_ARM_DIV64
 #define __ASM_ARM_DIV64
 
-#include <asm/system.h>
 #include <linux/types.h>
+#include <asm/compiler.h>
 
 /*
  * The semantics of do_div() are:
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 69a5b0b6455c..5694a0d6576b 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -19,7 +19,6 @@
  * It should not be re-used except for that purpose.
  */
 #include <linux/spinlock.h>
-#include <asm/system.h>
 #include <asm/scatterlist.h>
 
 #include <mach/isa-dma.h>
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index b5dc173d336f..3d2220498abc 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -10,6 +10,10 @@
 #ifndef __ASM_PROC_DOMAIN_H
 #define __ASM_PROC_DOMAIN_H
 
+#ifndef __ASSEMBLY__
+#include <asm/barrier.h>
+#endif
+
 /*
  * Domain numbers
  *
diff --git a/arch/arm/include/asm/exec.h b/arch/arm/include/asm/exec.h
new file mode 100644
index 000000000000..7c4fbef72b3a
--- /dev/null
+++ b/arch/arm/include/asm/exec.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_ARM_EXEC_H
+#define __ASM_ARM_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* __ASM_ARM_EXEC_H */
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 077c32326c63..2ff2c75a4639 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -231,6 +231,9 @@ extern int iop3xx_get_init_atu(void);
 
 
 #ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
 void iop3xx_map_io(void);
 void iop_init_cp6_handler(void);
 void iop_init_time(unsigned long tickrate);
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 9275828feb3d..bae7eb6011d2 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -26,7 +26,6 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
-#include <asm/system.h>
 #include <asm-generic/pci_iomap.h>
 
 /*
@@ -99,6 +98,7 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
 
 /* IO barriers */
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#include <asm/barrier.h>
 #define __iormb()		rmb()
 #define __iowmb()		wmb()
 #else
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 14965658a923..b8e580a297e4 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -34,4 +34,11 @@ typedef struct {
 
 #endif
 
+/*
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
+ */
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
+
 #endif
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index cb8d638924fd..f4d7f56ee51f 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -22,7 +22,6 @@
 #include <asm/hw_breakpoint.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
-#include <asm/system.h>
 
 #ifdef __KERNEL__
 #define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -90,6 +89,8 @@ unsigned long get_wchan(struct task_struct *p);
 #define cpu_relax()			barrier()
 #endif
 
+void cpu_idle_wait(void);
+
 /*
  * Create a new kernel thread
  */
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
new file mode 100644
index 000000000000..fa09e6b49bf1
--- /dev/null
+++ b/arch/arm/include/asm/switch_to.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_ARM_SWITCH_TO_H
+#define __ASM_ARM_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+
+/*
+ * switch_to(prev, next) should switch from task `prev' to `next'
+ * `prev' will never be the same as `next'.  schedule() itself
+ * contains the memory barrier to tell GCC not to cache `current'.
+ */
+extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
+
+#define switch_to(prev,next,last)					\
+do {									\
+	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
+} while (0)
+
+#endif /* __ASM_ARM_SWITCH_TO_H */
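The three-argument form matters on SMP: by the time a task resumes after switch_to(), the task it originally switched away from may itself have been migrated, so the third argument reports which task actually ran last on this CPU. A rough sketch of the expected call site in the scheduler core, for orientation only and not code from this patch:

    /* Hypothetical sketch of the scheduler-side call site. */
    static void context_switch_sketch(struct task_struct *prev,
    				      struct task_struct *next)
    {
    	struct task_struct *last;

    	switch_to(prev, next, last);
    	/* When "prev" is eventually rescheduled, execution resumes
    	 * here, and "last" names the task that ran just before it. */
    }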
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 424aa458c487..74542c52f9be 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -1,544 +1,8 @@
-#ifndef __ASM_ARM_SYSTEM_H
-#define __ASM_ARM_SYSTEM_H
-
-#ifdef __KERNEL__
-
-#define CPU_ARCH_UNKNOWN	0
-#define CPU_ARCH_ARMv3		1
-#define CPU_ARCH_ARMv4		2
-#define CPU_ARCH_ARMv4T		3
-#define CPU_ARCH_ARMv5		4
-#define CPU_ARCH_ARMv5T		5
-#define CPU_ARCH_ARMv5TE	6
-#define CPU_ARCH_ARMv5TEJ	7
-#define CPU_ARCH_ARMv6		8
-#define CPU_ARCH_ARMv7		9
-
-/*
- * CR1 bits (CP#15 CR1)
- */
-#define CR_M	(1 << 0)	/* MMU enable				*/
-#define CR_A	(1 << 1)	/* Alignment abort enable		*/
-#define CR_C	(1 << 2)	/* Dcache enable			*/
-#define CR_W	(1 << 3)	/* Write buffer enable			*/
-#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
-#define CR_D	(1 << 5)	/* 32-bit data address range		*/
-#define CR_L	(1 << 6)	/* Implementation defined		*/
-#define CR_B	(1 << 7)	/* Big endian				*/
-#define CR_S	(1 << 8)	/* System MMU protection		*/
-#define CR_R	(1 << 9)	/* ROM MMU protection			*/
-#define CR_F	(1 << 10)	/* Implementation defined		*/
-#define CR_Z	(1 << 11)	/* Implementation defined		*/
-#define CR_I	(1 << 12)	/* Icache enable			*/
-#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
-#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
-#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
-#define CR_DT	(1 << 16)
-#define CR_IT	(1 << 18)
-#define CR_ST	(1 << 19)
-#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
-#define CR_U	(1 << 22)	/* Unaligned access operation		*/
-#define CR_XP	(1 << 23)	/* Extended page tables			*/
-#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
-#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
-#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
-#define CR_AFE	(1 << 29)	/* Access flag enable			*/
-#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
-
-/*
- * This is used to ensure the compiler did actually allocate the register we
- * asked it for in some inline assembly sequences.  Apparently we can't trust
- * the compiler from one version to another so a bit of paranoia won't hurt.
- * This string is meant to be concatenated with the inline asm string and
- * will cause compilation to stop on mismatch.
- * (for details, see gcc PR 15089)
- */
-#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
-
-#ifndef __ASSEMBLY__
-
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-#include <linux/irqflags.h>
-
-#include <asm/outercache.h>
-
-struct thread_info;
-struct task_struct;
-
-/* information about the system we're running on */
-extern unsigned int system_rev;
-extern unsigned int system_serial_low;
-extern unsigned int system_serial_high;
-extern unsigned int mem_fclk_21285;
-
-struct pt_regs;
-
-void die(const char *msg, struct pt_regs *regs, int err);
-
-struct siginfo;
-void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
-		unsigned long err, unsigned long trap);
-
-#ifdef CONFIG_ARM_LPAE
-#define FAULT_CODE_ALIGNMENT	33
-#define FAULT_CODE_DEBUG	34
-#else
-#define FAULT_CODE_ALIGNMENT	1
-#define FAULT_CODE_DEBUG	2
-#endif
-
-void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
-				       struct pt_regs *),
-		     int sig, int code, const char *name);
-
-void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
-				       struct pt_regs *),
-		     int sig, int code, const char *name);
-
-#define xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
-
-struct mm_struct;
-extern void show_pte(struct mm_struct *mm, unsigned long addr);
-extern void __show_regs(struct pt_regs *);
-
-extern int __pure cpu_architecture(void);
-extern void cpu_init(void);
-
-void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(char str, const char *cmd);
-extern void (*arm_pm_idle)(void);
-
-#define UDBG_UNDEFINED	(1 << 0)
-#define UDBG_SYSCALL	(1 << 1)
-#define UDBG_BADABORT	(1 << 2)
-#define UDBG_SEGV	(1 << 3)
-#define UDBG_BUS	(1 << 4)
-
-extern unsigned int user_debug;
-
-#if __LINUX_ARM_ARCH__ >= 4
-#define vectors_high()	(cr_alignment & CR_V)
-#else
-#define vectors_high()	(0)
-#endif
-
-#if __LINUX_ARM_ARCH__ >= 7 || \
-	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
-#define sev()	__asm__ __volatile__ ("sev" : : : "memory")
-#define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
-#define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
-#endif
-
-#if __LINUX_ARM_ARCH__ >= 7
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
-#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
-				    : : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
-				    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
-				    : : "r" (0) : "memory")
-#elif defined(CONFIG_CPU_FA526)
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
-				    : : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
-				    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
-#else
-#define isb() __asm__ __volatile__ ("" : : : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
-				    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
-#endif
-
-#ifdef CONFIG_ARCH_HAS_BARRIERS
-#include <mach/barriers.h>
-#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb()		do { dsb(); outer_sync(); } while (0)
-#define rmb()		dsb()
-#define wmb()		mb()
-#else
-#include <asm/memory.h>
-#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#endif
-
-#ifndef CONFIG_SMP
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#else
-#define smp_mb()	dmb()
-#define smp_rmb()	dmb()
-#define smp_wmb()	dmb()
-#endif
-
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
-#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
-
-extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
-extern unsigned long cr_alignment;	/* defined in entry-armv.S */
-
-static inline unsigned int get_cr(void)
-{
-	unsigned int val;
-	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
-	return val;
-}
-
-static inline void set_cr(unsigned int val)
-{
-	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
-	  : : "r" (val) : "cc");
-	isb();
-}
-
-#ifndef CONFIG_SMP
-extern void adjust_cr(unsigned long mask, unsigned long set);
-#endif
-
-#define CPACC_FULL(n)		(3 << (n * 2))
-#define CPACC_SVC(n)		(1 << (n * 2))
-#define CPACC_DISABLE(n)	(0 << (n * 2))
-
-static inline unsigned int get_copro_access(void)
-{
-	unsigned int val;
-	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
-	  : "=r" (val) : : "cc");
-	return val;
-}
-
-static inline void set_copro_access(unsigned int val)
-{
-	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
-	  : : "r" (val) : "cc");
-	isb();
-}
-
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-
-/*
- * switch_to(prev, next) should switch from task `prev' to `next'
- * `prev' will never be the same as `next'.  schedule() itself
- * contains the memory barrier to tell GCC not to cache `current'.
- */
-extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
-
-#define switch_to(prev,next,last)					\
-do {									\
-	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
-} while (0)
-
-#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
-/*
- * On the StrongARM, "swp" is terminally broken since it bypasses the
- * cache totally.  This means that the cache becomes inconsistent, and,
- * since we use normal loads/stores as well, this is really bad.
- * Typically, this causes oopsen in filp_close, but could have other,
- * more disastrous effects.  There are two work-arounds:
- *	1. Disable interrupts and emulate the atomic swap
- *	2. Clean the cache, perform atomic swap, flush the cache
- *
- * We choose (1) since it's the "easiest" to achieve here and is not
- * dependent on the processor type.
- *
- * NOTE that this solution won't work on an SMP system, so explicitly
- * forbid it here.
- */
-#define swp_is_buggy
-#endif
-
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-{
-	extern void __bad_xchg(volatile void *, int);
-	unsigned long ret;
-#ifdef swp_is_buggy
-	unsigned long flags;
-#endif
-#if __LINUX_ARM_ARCH__ >= 6
-	unsigned int tmp;
-#endif
-
-	smp_mb();
-
-	switch (size) {
-#if __LINUX_ARM_ARCH__ >= 6
-	case 1:
-		asm volatile("@	__xchg1\n"
-		"1:	ldrexb	%0, [%3]\n"
-		"	strexb	%1, %2, [%3]\n"
-		"	teq	%1, #0\n"
-		"	bne	1b"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
-		break;
-	case 4:
-		asm volatile("@	__xchg4\n"
-		"1:	ldrex	%0, [%3]\n"
-		"	strex	%1, %2, [%3]\n"
-		"	teq	%1, #0\n"
-		"	bne	1b"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
-		break;
-#elif defined(swp_is_buggy)
-#ifdef CONFIG_SMP
-#error SMP is not supported on this platform
-#endif
-	case 1:
-		raw_local_irq_save(flags);
-		ret = *(volatile unsigned char *)ptr;
-		*(volatile unsigned char *)ptr = x;
-		raw_local_irq_restore(flags);
-		break;
-
-	case 4:
-		raw_local_irq_save(flags);
-		ret = *(volatile unsigned long *)ptr;
-		*(volatile unsigned long *)ptr = x;
-		raw_local_irq_restore(flags);
-		break;
-#else
-	case 1:
-		asm volatile("@	__xchg1\n"
-		"	swpb	%0, %1, [%2]"
-			: "=&r" (ret)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
-		break;
-	case 4:
-		asm volatile("@	__xchg4\n"
-		"	swp	%0, %1, [%2]"
-			: "=&r" (ret)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
-		break;
-#endif
-	default:
-		__bad_xchg(ptr, size), ret = 0;
-		break;
-	}
-	smp_mb();
-
-	return ret;
-}
-
-extern void disable_hlt(void);
-extern void enable_hlt(void);
-
-void cpu_idle_wait(void);
-
-#include <asm-generic/cmpxchg-local.h>
-
-#if __LINUX_ARM_ARCH__ < 6
-/* min ARCH < ARMv6 */
-
-#ifdef CONFIG_SMP
-#error "SMP is not supported on this platform"
-#endif
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#ifndef CONFIG_SMP
-#include <asm-generic/cmpxchg.h>
-#endif
-
-#else	/* min ARCH >= ARMv6 */
-
-extern void __bad_cmpxchg(volatile void *ptr, int size);
-
-/*
- * cmpxchg only supports 32-bit operands on ARMv6.
- */
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long oldval, res;
-
-	switch (size) {
-#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
-	case 1:
-		do {
-			asm volatile("@ __cmpxchg1\n"
-			"	ldrexb	%1, [%2]\n"
-			"	mov	%0, #0\n"
-			"	teq	%1, %3\n"
-			"	strexbeq %0, %4, [%2]\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
-				: "memory", "cc");
-		} while (res);
-		break;
-	case 2:
-		do {
-			asm volatile("@ __cmpxchg2\n"
-			"	ldrexh	%1, [%2]\n"
-			"	mov	%0, #0\n"
-			"	teq	%1, %3\n"
-			"	strexheq %0, %4, [%2]\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
-				: "memory", "cc");
-		} while (res);
-		break;
-#endif
-	case 4:
-		do {
-			asm volatile("@ __cmpxchg4\n"
-			"	ldrex	%1, [%2]\n"
-			"	mov	%0, #0\n"
-			"	teq	%1, %3\n"
-			"	strexeq %0, %4, [%2]\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
-				: "memory", "cc");
-		} while (res);
-		break;
-	default:
-		__bad_cmpxchg(ptr, size);
-		oldval = 0;
-	}
-
-	return oldval;
-}
-
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-					 unsigned long new, int size)
-{
-	unsigned long ret;
-
-	smp_mb();
-	ret = __cmpxchg(ptr, old, new, size);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
-					  (unsigned long)(o),		\
-					  (unsigned long)(n),		\
-					  sizeof(*(ptr))))
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
-{
-	unsigned long ret;
-
-	switch (size) {
-#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
-	case 1:
-	case 2:
-		ret = __cmpxchg_local_generic(ptr, old, new, size);
-		break;
-#endif
-	default:
-		ret = __cmpxchg(ptr, old, new, size);
-	}
-
-	return ret;
-}
-
-#define cmpxchg_local(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
-					     (unsigned long)(o),	\
-					     (unsigned long)(n),	\
-					     sizeof(*(ptr))))
-
-#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
-
-/*
- * Note: ARMv7-M (currently unsupported by Linux) does not support
- * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
- * not be allowed to use __cmpxchg64.
- */
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
-{
-	register unsigned long long oldval asm("r0");
-	register unsigned long long __old asm("r2") = old;
-	register unsigned long long __new asm("r4") = new;
-	unsigned long res;
-
-	do {
-		asm volatile(
-		"	@ __cmpxchg8\n"
-		"	ldrexd	%1, %H1, [%2]\n"
-		"	mov	%0, #0\n"
-		"	teq	%1, %3\n"
-		"	teqeq	%H1, %H3\n"
-		"	strexdeq %0, %4, %H4, [%2]\n"
-			: "=&r" (res), "=&r" (oldval)
-			: "r" (ptr), "Ir" (__old), "r" (__new)
-			: "memory", "cc");
-	} while (res);
-
-	return oldval;
-}
-
-static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					    (unsigned long long)(o),	\
-					    (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr,o,n)					\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
-					 (unsigned long long)(o),	\
-					 (unsigned long long)(n)))
-
-#else /* min ARCH = ARMv6 */
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#endif
-
-#endif	/* __LINUX_ARM_ARCH__ >= 6 */
-
-#endif /* __ASSEMBLY__ */
-
-#define arch_align_stack(x) (x)
-
-#endif /* __KERNEL__ */
-
-#endif
+/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
+#include <asm/barrier.h>
+#include <asm/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/exec.h>
+#include <asm/switch_to.h>
+#include <asm/system_info.h>
+#include <asm/system_misc.h>
diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h
new file mode 100644
index 000000000000..dfd386d0c022
--- /dev/null
+++ b/arch/arm/include/asm/system_info.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_ARM_SYSTEM_INFO_H
+#define __ASM_ARM_SYSTEM_INFO_H
+
+#define CPU_ARCH_UNKNOWN	0
+#define CPU_ARCH_ARMv3		1
+#define CPU_ARCH_ARMv4		2
+#define CPU_ARCH_ARMv4T		3
+#define CPU_ARCH_ARMv5		4
+#define CPU_ARCH_ARMv5T		5
+#define CPU_ARCH_ARMv5TE	6
+#define CPU_ARCH_ARMv5TEJ	7
+#define CPU_ARCH_ARMv6		8
+#define CPU_ARCH_ARMv7		9
+
+#ifndef __ASSEMBLY__
+
+/* information about the system we're running on */
+extern unsigned int system_rev;
+extern unsigned int system_serial_low;
+extern unsigned int system_serial_high;
+extern unsigned int mem_fclk_21285;
+
+extern int __pure cpu_architecture(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_ARM_SYSTEM_INFO_H */
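cpu_architecture() reports one of the CPU_ARCH_* values above, so callers can gate code paths on the architecture revision at runtime. A hypothetical consumer, for illustration only:

    /* Hypothetical sketch: runtime check against the constants above. */
    static int cpu_is_v6_or_later(void)
    {
    	return cpu_architecture() >= CPU_ARCH_ARMv6;
    }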
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
new file mode 100644
index 000000000000..5a85f148b607
--- /dev/null
+++ b/arch/arm/include/asm/system_misc.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_ARM_SYSTEM_MISC_H
+#define __ASM_ARM_SYSTEM_MISC_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/irqflags.h>
+
+extern void cpu_init(void);
+
+void soft_restart(unsigned long);
+extern void (*arm_pm_restart)(char str, const char *cmd);
+extern void (*arm_pm_idle)(void);
+
+#define UDBG_UNDEFINED	(1 << 0)
+#define UDBG_SYSCALL	(1 << 1)
+#define UDBG_BADABORT	(1 << 2)
+#define UDBG_SEGV	(1 << 3)
+#define UDBG_BUS	(1 << 4)
+
+extern unsigned int user_debug;
+
+extern void disable_hlt(void);
+extern void enable_hlt(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_ARM_SYSTEM_MISC_H */
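arm_pm_restart and arm_pm_idle are hooks that a machine port fills in at init time. A sketch of how a board file might wire up the restart hook; the board name is invented, and falling back to soft_restart() is an assumption for illustration, not something this header mandates:

    /* Hypothetical board code: install a machine restart handler. */
    static void myboard_restart(char mode, const char *cmd)
    {
    	/* no board-specific reset register: fall back to a soft restart */
    	soft_restart(0);
    }

    static void myboard_init_machine(void)
    {
    	arm_pm_restart = myboard_restart;
    }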
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 2958976d867b..71f6536d17ac 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -16,8 +16,8 @@
 #include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/domain.h>
-#include <asm/system.h>
 #include <asm/unified.h>
+#include <asm/compiler.h>
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1