author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d  /include/asm-arm/system.h
tag       v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-arm/system.h')
-rw-r--r--  include/asm-arm/system.h  390
1 files changed, 390 insertions, 0 deletions
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
new file mode 100644
index 000000000000..b5731290b4e5
--- /dev/null
+++ b/include/asm-arm/system.h
@@ -0,0 +1,390 @@
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/config.h>

#define CPU_ARCH_UNKNOWN 0
#define CPU_ARCH_ARMv3 1
#define CPU_ARCH_ARMv4 2
#define CPU_ARCH_ARMv4T 3
#define CPU_ARCH_ARMv5 4
#define CPU_ARCH_ARMv5T 5
#define CPU_ARCH_ARMv5TE 6
#define CPU_ARCH_ARMv5TEJ 7
#define CPU_ARCH_ARMv6 8

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M (1 << 0) /* MMU enable */
#define CR_A (1 << 1) /* Alignment abort enable */
#define CR_C (1 << 2) /* Dcache enable */
#define CR_W (1 << 3) /* Write buffer enable */
#define CR_P (1 << 4) /* 32-bit exception handler */
#define CR_D (1 << 5) /* 32-bit data address range */
#define CR_L (1 << 6) /* Implementation defined */
#define CR_B (1 << 7) /* Big endian */
#define CR_S (1 << 8) /* System MMU protection */
#define CR_R (1 << 9) /* ROM MMU protection */
#define CR_F (1 << 10) /* Implementation defined */
#define CR_Z (1 << 11) /* Implementation defined */
#define CR_I (1 << 12) /* Icache enable */
#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
#define CR_RR (1 << 14) /* Round Robin cache replacement */
#define CR_L4 (1 << 15) /* LDR pc can set T bit */
#define CR_DT (1 << 16)
#define CR_IT (1 << 18)
#define CR_ST (1 << 19)
#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */
#define CR_U (1 << 22) /* Unaligned access operation */
#define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */

#define CPUID_ID 0
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3

#define read_cpuid(reg) \
 ({ \
 unsigned int __val; \
 asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
 : "=r" (__val) \
 : \
 : "cc"); \
 __val; \
 })

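/*
 * Illustrative usage sketch (not part of the original header): reading the
 * main ID register through read_cpuid().  The helper name is hypothetical;
 * it is guarded because this part of the header is also seen by assembly.
 */
#ifndef __ASSEMBLY__
static inline unsigned int __example_read_main_id(void)
{
	return read_cpuid(CPUID_ID);	/* mrc p15, 0, <Rd>, c0, c0, 0 */
}
#endif
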
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences. Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"

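/*
 * Illustrative usage sketch (not part of the original header): __asmeq()
 * is pasted into an asm template to verify that an operand pinned to a
 * register really ended up there.  The helper name is hypothetical and
 * the block is guarded since this point is still visible to assembly.
 */
#ifndef __ASSEMBLY__
static inline unsigned long __example_asmeq_add_one(unsigned long val)
{
	register unsigned long __r0 asm("r0") = val;

	__asm__ __volatile__(
		__asmeq("%0", "r0")	/* assembles to ".ifnc r0,r0 ..." */
		"add %0, %0, #1"
		: "+r" (__r0));
	return __r0;
}
#endif
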
#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
 __attribute__((noreturn));

void die_if_kernel(const char *str, struct pt_regs *regs, int err);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
 struct pt_regs *),
 int sig, const char *name);

#include <asm/proc-fns.h>

#define xchg(ptr,x) \
 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

extern asmlinkage void __backtrace(void);

extern int cpu_architecture(void);

#define set_cr(x) \
 __asm__ __volatile__( \
 "mcr p15, 0, %0, c1, c0, 0 @ set CR" \
 : : "r" (x) : "cc")

#define get_cr() \
 ({ \
 unsigned int __val; \
 __asm__ __volatile__( \
 "mrc p15, 0, %0, c1, c0, 0 @ get CR" \
 : "=r" (__val) : : "cc"); \
 __val; \
 })

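/*
 * Illustrative usage sketch (not part of the original header): control
 * register bits are normally updated read-modify-write with the CR_*
 * masks defined above.  The helper name is hypothetical.
 */
static inline void __example_enable_icache(void)
{
	set_cr(get_cr() | CR_I);	/* switch the instruction cache on */
}
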
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */

#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
#define UDBG_SEGV (1 << 3)
#define UDBG_BUS (1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high() (cr_alignment & CR_V)
#else
#define vectors_high() (0)
#endif

#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

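/*
 * Illustrative usage sketch (not part of the original header): mb()/wmb()
 * are compiler barriers here, so the classic use is keeping a data store
 * ordered before the flag that publishes it.  Names are hypothetical.
 */
static inline void __example_publish(unsigned int *data, unsigned int *flag)
{
	*data = 0xbeef;
	wmb();		/* do not let the compiler reorder the flag store */
	*flag = 1;
}
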
#ifdef CONFIG_SMP
/*
 * Define our own context switch locking. This allows us to enable
 * interrupts over the context switch, otherwise we end up with high
 * interrupt latency. The real problem area is switch_mm() which may
 * do a full cache flush.
 */
#define prepare_arch_switch(rq,next) \
do { \
 spin_lock(&(next)->switch_lock); \
 spin_unlock_irq(&(rq)->lock); \
} while (0)

#define finish_arch_switch(rq,prev) \
 spin_unlock(&(prev)->switch_lock)

#define task_running(rq,p) \
 ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
#else
/*
 * Our UP case is simpler, but we assume knowledge of how
 * spin_unlock_irq() and friends are implemented. This avoids
 * needlessly decrementing and incrementing the preempt count.
 */
#define prepare_arch_switch(rq,next) local_irq_enable()
#define finish_arch_switch(rq,prev) spin_unlock(&(rq)->lock)
#define task_running(rq,p) ((rq)->curr == (p))
#endif

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'. schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last) \
do { \
 last = __switch_to(prev,prev->thread_info,next->thread_info); \
} while (0)

/*
 * CPU interrupt mask handling.
 */
#if __LINUX_ARM_ARCH__ >= 6

#define local_irq_save(x) \
 ({ \
 __asm__ __volatile__( \
 "mrs %0, cpsr @ local_irq_save\n" \
 "cpsid i" \
 : "=r" (x) : : "memory", "cc"); \
 })

#define local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc")
#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")

#else

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x) \
 ({ \
 unsigned long temp; \
 (void) (&temp == &x); \
 __asm__ __volatile__( \
 "mrs %0, cpsr @ local_irq_save\n" \
" orr %1, %0, #128\n" \
" msr cpsr_c, %1" \
 : "=r" (x), "=r" (temp) \
 : \
 : "memory", "cc"); \
 })

/*
 * Enable IRQs
 */
#define local_irq_enable() \
 ({ \
 unsigned long temp; \
 __asm__ __volatile__( \
 "mrs %0, cpsr @ local_irq_enable\n" \
" bic %0, %0, #128\n" \
" msr cpsr_c, %0" \
 : "=r" (temp) \
 : \
 : "memory", "cc"); \
 })

/*
 * Disable IRQs
 */
#define local_irq_disable() \
 ({ \
 unsigned long temp; \
 __asm__ __volatile__( \
 "mrs %0, cpsr @ local_irq_disable\n" \
" orr %0, %0, #128\n" \
" msr cpsr_c, %0" \
 : "=r" (temp) \
 : \
 : "memory", "cc"); \
 })

/*
 * Enable FIQs
 */
#define local_fiq_enable() \
 ({ \
 unsigned long temp; \
 __asm__ __volatile__( \
 "mrs %0, cpsr @ stf\n" \
" bic %0, %0, #64\n" \
" msr cpsr_c, %0" \
 : "=r" (temp) \
 : \
 : "memory", "cc"); \
 })

/*
 * Disable FIQs
 */
#define local_fiq_disable() \
 ({ \
 unsigned long temp; \
 __asm__ __volatile__( \
 "mrs %0, cpsr @ clf\n" \
" orr %0, %0, #64\n" \
" msr cpsr_c, %0" \
 : "=r" (temp) \
 : \
 : "memory", "cc"); \
 })

#endif

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x) \
 ({ \
 __asm__ __volatile__( \
 "mrs %0, cpsr @ local_save_flags" \
 : "=r" (x) : : "memory", "cc"); \
 })

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x) \
 __asm__ __volatile__( \
 "msr cpsr_c, %0 @ local_irq_restore\n" \
 : \
 : "r" (x) \
 : "memory", "cc")

#define irqs_disabled() \
({ \
 unsigned long flags; \
 local_save_flags(flags); \
 flags & PSR_I_BIT; \
})

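/*
 * Illustrative usage sketch (not part of the original header): the flags
 * word filled in by local_irq_save() must be handed back to
 * local_irq_restore(), so nested sections restore whatever state they
 * found.  The helper name is hypothetical.
 */
static inline void __example_irq_safe_increment(unsigned int *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask IRQs, remember the old CPSR */
	(*counter)++;			/* cannot be interrupted on this CPU */
	local_irq_restore(flags);	/* put the I (and F) state back */
}
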
#ifdef CONFIG_SMP
#error SMP not supported

#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()

#else

#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally. This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects. There are two work-arounds:
 * 1. Disable interrupts and emulate the atomic swap
 * 2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
 extern void __bad_xchg(volatile void *, int);
 unsigned long ret;
#ifdef swp_is_buggy
 unsigned long flags;
#endif

 switch (size) {
#ifdef swp_is_buggy
 case 1:
 local_irq_save(flags);
 ret = *(volatile unsigned char *)ptr;
 *(volatile unsigned char *)ptr = x;
 local_irq_restore(flags);
 break;

 case 4:
 local_irq_save(flags);
 ret = *(volatile unsigned long *)ptr;
 *(volatile unsigned long *)ptr = x;
 local_irq_restore(flags);
 break;
#else
 case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
 : "=&r" (ret)
 : "r" (x), "r" (ptr)
 : "memory", "cc");
 break;
 case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
 : "=&r" (ret)
 : "r" (x), "r" (ptr)
 : "memory", "cc");
 break;
#endif
 default: __bad_xchg(ptr, size), ret = 0;
 }

 return ret;
}

#endif /* CONFIG_SMP */

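/*
 * Illustrative usage sketch (not part of the original header): xchg()
 * returns the previous value, so a trivial test-and-set lock can be built
 * on top of tas().  Helper and lock word names are hypothetical.
 */
static inline int __example_trylock(volatile unsigned int *lock)
{
	/* tas(lock) is xchg(lock, 1); an old value of 0 means we got it */
	return tas(lock) == 0;
}
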
#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif