author		Adrian Bunk <bunk@stusta.de>	2007-07-31 03:38:19 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-31 18:39:39 -0400
commit		99eb8a550dbccc0e1f6c7e866fe421810e0585f6 (patch)
tree		130c6e3338a0655ba74355eba83afab9261e1ed0 /include/asm-arm26/system.h
parent		0d0ed42e5ca2e22465c591341839c18025748fe8 (diff)
Remove the arm26 port
The arm26 port has been in a state where it was far from even compiling
for quite some time.
Ian Molton agreed with the removal.
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Ian Molton <spyro@f2s.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-arm26/system.h')
-rw-r--r--	include/asm-arm26/system.h	247
1 file changed, 0 insertions, 247 deletions
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
deleted file mode 100644
index e09da5ff1f54..000000000000
--- a/include/asm-arm26/system.h
+++ /dev/null
@@ -1,247 +0,0 @@
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__


/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch. (From ARM32 - may come in handy)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
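/*
 * A minimal usage sketch (the helper name and operands below are
 * hypothetical): __asmeq() is pasted in front of an inline asm template so
 * the assembler itself verifies that the compiler really bound the operand
 * to the register the template hard-codes.
 *
 *	register unsigned long r0 asm("r0") = arg;
 *	__asm__ __volatile__(
 *		__asmeq("%0", "r0")
 *		"bl	some_helper"
 *		: "+r" (r0) : : "lr", "cc", "memory");
 *
 * If %0 ended up in any register other than r0, the expanded
 * ".ifnc ... ; .err ; .endif" directive would stop the build.
 */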

#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

#if 0
/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

FIXME - sort this
/*
 * We need to turn the caches off before calling the reset vector - RiscOS
 * messes up if we don't
 */
#define proc_hard_reset()	cpu_proc_fin()

#endif

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

void die_if_kernel(const char *str, struct pt_regs *regs, int err);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#include <asm/proc-fns.h>

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void __backtrace(void);

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */
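/*
 * Presumably mirroring the ARM32 port: cr_alignment caches the CP15 control
 * register value with alignment fault checking enabled and cr_no_alignment
 * the same value with it switched off, so callers would flip between the two
 * with set_cr(), e.g. set_cr(cr_no_alignment) around an access known to be
 * misaligned and set_cr(cr_alignment) afterwards.  The authoritative
 * definitions live in entry-armv.S.
 */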

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#define vectors_base()	(0)

#define mb()  __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)

/*
 * We assume knowledge of how
 * spin_unlock_irq() and friends are implemented.  This avoids
 * us needlessly decrementing and incrementing the preempt count.
 */
#define prepare_arch_switch(next)	local_irq_enable()
#define finish_arch_switch(prev)	spin_unlock(&(rq)->lock)
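/*
 * Note that finish_arch_switch() expands to spin_unlock(&(rq)->lock), so it
 * can only compile where the scheduler expands it with a local runqueue
 * variable `rq' in scope - the same "we assume knowledge of how the core
 * code is implemented" caveat as the comment above.
 */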

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev),task_thread_info(next));	\
} while (0)

/*
 * Save the current interrupt enable state & disable IRQs
 */
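/*
 * On 26-bit ARM the PSR lives in the top bits of r15 (pc): bit 27
 * (0x08000000) is the I flag masking IRQs and bit 26 (0x04000000) is the F
 * flag masking FIQs, which is why the macros below read "pc" and poke those
 * constants.  "teqp" is TEQ with the P suffix, which writes the result back
 * into the PSR bits and therefore actually changes the interrupt state.
 */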
#define local_irq_save(x)				\
	do {						\
	unsigned long temp;				\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ save_flags_cli\n"	\
"	orr	%1, %0, #0x08000000\n"			\
"	and	%0, %0, #0x0c000000\n"			\
"	teqp	%1, #0\n"				\
	: "=r" (x), "=r" (temp)			\
	:						\
	: "memory");					\
	} while (0)

/*
 * Enable IRQs (sti)
 */
#define local_irq_enable()				\
	do {						\
	unsigned long temp;				\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ sti\n"		\
"	bic	%0, %0, #0x08000000\n"			\
"	teqp	%0, #0\n"				\
	: "=r" (temp)					\
	:						\
	: "memory");					\
	} while(0)

/*
 * Disable IRQs (cli)
 */
#define local_irq_disable()				\
	do {						\
	unsigned long temp;				\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ cli\n"		\
"	orr	%0, %0, #0x08000000\n"			\
"	teqp	%0, #0\n"				\
	: "=r" (temp)					\
	:						\
	: "memory");					\
	} while(0)

/* Enable FIQs (stf) */

#define __stf() do {					\
	unsigned long temp;				\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ stf\n"		\
"	bic	%0, %0, #0x04000000\n"			\
"	teqp	%0, #0\n"				\
	: "=r" (temp));					\
	} while(0)

/* Disable FIQs (clf) */

#define __clf() do {					\
	unsigned long temp;				\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ clf\n"		\
"	orr	%0, %0, #0x04000000\n"			\
"	teqp	%0, #0\n"				\
	: "=r" (temp));					\
	} while(0)


/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)				\
	do {						\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ save_flags\n"		\
"	and	%0, %0, #0x0c000000\n"			\
	: "=r" (x));					\
	} while (0)


/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)				\
	do {						\
	unsigned long temp;				\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ restore_flags\n"	\
"	bic	%0, %0, #0x0c000000\n"			\
"	orr	%0, %0, %1\n"				\
"	teqp	%0, #0\n"				\
	: "=&r" (temp)					\
	: "r" (x)					\
	: "memory");					\
	} while (0)


#ifdef CONFIG_SMP
#error SMP not supported
#endif

#define smp_mb()		barrier()
#define smp_rmb()		barrier()
#define smp_wmb()		barrier()
#define smp_read_barrier_depends()	do { } while(0)

#define clf()			__clf()
#define stf()			__stf()

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	flags & PSR_I_BIT;		\
})

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);

	switch (size) {
	case 1:		return cpu_xchg_1(x, ptr);
	case 4:		return cpu_xchg_4(x, ptr);
	default:	__bad_xchg(ptr, size);
	}
	return 0;
}
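/*
 * Usage sketch with hypothetical names: xchg() picks the matching
 * cpu_xchg_*() helper from <asm/proc-fns.h> based on the operand size and
 * returns the previous value, e.g. a 4-byte swap
 *
 *	static volatile unsigned long lock_word;
 *	unsigned long old = xchg(&lock_word, 1);
 *
 * ends up in cpu_xchg_4().  Any other operand size is routed to __bad_xchg()
 * so the mistake is caught rather than silently truncated.
 */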

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif