author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-arm26/system.h
Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-arm26/system.h')
-rw-r--r--	include/asm-arm26/system.h	252
1 files changed, 252 insertions, 0 deletions
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
new file mode 100644
index 000000000000..f23fac1938f3
--- /dev/null
+++ b/include/asm-arm26/system.h
@@ -0,0 +1,252 @@
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/config.h>

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch. (From ARM32 - may come in handy)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

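#if 0
/*
 * Hypothetical usage sketch of __asmeq() -- not part of the original header.
 * When an operand must live in a particular register, __asmeq() is pasted in
 * front of the asm body so assembly fails if the compiler picked a register
 * other than the one requested by the register-asm binding.  The function
 * name below is invented for illustration.
 */
static inline unsigned long __example_keep_in_r0(unsigned long val)
{
	register unsigned long __r0 __asm__("r0") = val;

	__asm__ __volatile__(
		__asmeq("%0", "r0")
		"	mov	%0, %0	@ placeholder instruction"
		: "+r" (__r0));
	return __r0;
}
#endif
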
#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

#if 0
/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

FIXME - sort this
/*
 * We need to turn the caches off before calling the reset vector - RiscOS
 * messes up if we don't
 */
#define proc_hard_reset()	cpu_proc_fin()

#endif

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

void die_if_kernel(const char *str, struct pt_regs *regs, int err);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
					struct pt_regs *),
		     int sig, const char *name);

#include <asm/proc-fns.h>

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

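#if 0
/*
 * Hypothetical usage sketch of xchg()/tas() -- not part of the original
 * header.  xchg() atomically swaps a new value into *ptr and returns the old
 * value; tas() is the test-and-set special case that stores 1.  The lock word
 * and the two helpers below are invented names.
 */
static unsigned long __example_lock;

static inline int __example_trylock(void)
{
	/* A non-zero return means the lock was already held. */
	return tas(&__example_lock) != 0;
}

static inline void __example_unlock(void)
{
	xchg(&__example_lock, 0);
}
#endif
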
extern asmlinkage void __backtrace(void);

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})

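#if 0
/*
 * Hypothetical usage sketch of get_cr()/set_cr() -- not part of the original
 * header.  Control register updates are read-modify-write sequences on the
 * CP15 control register.  __EXAMPLE_CR_BIT is an invented mask; real callers
 * would use a named control-register bit.
 */
#define __EXAMPLE_CR_BIT	(1 << 2)

static inline void __example_enable_cr_bit(void)
{
	unsigned int cr = get_cr();	/* read the current control register */

	set_cr(cr | __EXAMPLE_CR_BIT);	/* write it back with one extra bit set */
}
#endif
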
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#define vectors_base()	(0)

#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

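#if 0
/*
 * Hypothetical usage sketch of the barrier macros -- not part of the original
 * header.  On this uniprocessor port mb()/wmb() are compiler barriers only:
 * they stop the compiler reordering the accesses, e.g. against an interrupt
 * handler on the same CPU.  The two variables below are invented names.
 */
static int __example_data;
static int __example_ready;

static inline void __example_publish(int value)
{
	__example_data = value;
	set_mb(__example_ready, 1);	/* store the flag, then a compiler barrier */
}
#endif
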
/*
 * We assume knowledge of how
 * spin_unlock_irq() and friends are implemented.  This avoids
 * us needlessly decrementing and incrementing the preempt count.
 */
#define prepare_arch_switch(rq,next)	local_irq_enable()
#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
#define task_running(rq,p)		((rq)->curr == (p))

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
	do {								\
		last = __switch_to(prev,prev->thread_info,next->thread_info);	\
	} while (0)

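#if 0
/*
 * Hypothetical sketch of the call shape of switch_to() -- not part of the
 * original header and heavily simplified.  In the real kernel only the
 * scheduler core calls this; `prev' and `next' are the outgoing and incoming
 * tasks it selected.
 */
static inline void __example_context_switch(struct task_struct *prev,
					    struct task_struct *next)
{
	struct task_struct *last;

	switch_to(prev, next, last);
	/* When `prev' runs here again, `last' is the task it switched from. */
	(void)last;
}
#endif
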
/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)				\
	do {						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
	"	mov	%0, pc		@ save_flags_cli\n"	\
	"	orr	%1, %0, #0x08000000\n"		\
	"	and	%0, %0, #0x0c000000\n"		\
	"	teqp	%1, #0\n"			\
		: "=r" (x), "=r" (temp)			\
		:					\
		: "memory");				\
	} while (0)

/*
 * Enable IRQs  (sti)
 */
#define local_irq_enable()				\
	do {						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
	"	mov	%0, pc		@ sti\n"	\
	"	bic	%0, %0, #0x08000000\n"		\
	"	teqp	%0, #0\n"			\
		: "=r" (temp)				\
		:					\
		: "memory");				\
	} while(0)

/*
 * Disable IRQs (cli)
 */
#define local_irq_disable()				\
	do {						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
	"	mov	%0, pc		@ cli\n"	\
	"	orr	%0, %0, #0x08000000\n"		\
	"	teqp	%0, #0\n"			\
		: "=r" (temp)				\
		:					\
		: "memory");				\
	} while(0)

/* Enable FIQs (stf) */

#define __stf() do {					\
	unsigned long temp;				\
	__asm__ __volatile__(				\
	"	mov	%0, pc		@ stf\n"	\
	"	bic	%0, %0, #0x04000000\n"		\
	"	teqp	%0, #0\n"			\
	: "=r" (temp));					\
	} while(0)

/* Disable FIQs (clf) */

#define __clf() do {					\
	unsigned long temp;				\
	__asm__ __volatile__(				\
	"	mov	%0, pc		@ clf\n"	\
	"	orr	%0, %0, #0x04000000\n"		\
	"	teqp	%0, #0\n"			\
	: "=r" (temp));					\
	} while(0)


/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)				\
	do {						\
	__asm__ __volatile__(				\
	"	mov	%0, pc		@ save_flags\n"	\
	"	and	%0, %0, #0x0c000000\n"		\
	: "=r" (x));					\
	} while (0)


/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)				\
	do {						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
	"	mov	%0, pc		@ restore_flags\n"	\
	"	bic	%0, %0, #0x0c000000\n"		\
	"	orr	%0, %0, %1\n"			\
	"	teqp	%0, #0\n"			\
		: "=&r" (temp)				\
		: "r" (x)				\
		: "memory");				\
	} while (0)


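#if 0
/*
 * Hypothetical usage sketch of the IRQ flag macros -- not part of the original
 * header: the usual save/disable ... restore pattern around a short critical
 * section.  The counter below is an invented variable.
 */
static unsigned long __example_counter;

static inline void __example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* save the I/F state and disable IRQs */
	__example_counter++;		/* not interrupted by an IRQ handler here */
	local_irq_restore(flags);	/* put the saved state back */
}
#endif
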
#ifdef CONFIG_SMP
#error SMP not supported
#endif

#define smp_mb()		barrier()
#define smp_rmb()		barrier()
#define smp_wmb()		barrier()
#define smp_read_barrier_depends()	do { } while(0)

#define clf()			__clf()
#define stf()			__stf()

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	flags & PSR_I_BIT;		\
})

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);

	switch (size) {
		case 1:	return cpu_xchg_1(x, ptr);
		case 4:	return cpu_xchg_4(x, ptr);
		default: __bad_xchg(ptr, size);
	}
	return 0;
}

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif