author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-m68knommu/system.h
Linux-2.6.12-rc2 v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-m68knommu/system.h')
-rw-r--r--	include/asm-m68knommu/system.h	286
1 file changed, 286 insertions(+), 0 deletions(-)
diff --git a/include/asm-m68knommu/system.h b/include/asm-m68knommu/system.h
new file mode 100644
index 000000000000..c341b66c147b
--- /dev/null
+++ b/include/asm-m68knommu/system.h
@@ -0,0 +1,286 @@
#ifndef _M68KNOMMU_SYSTEM_H
#define _M68KNOMMU_SYSTEM_H

#include <linux/config.h> /* get configuration macros */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/entry.h>

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing.  This
 * also clears the TS-flag if the task we switched to used the math
 * co-processor most recently.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
 * a0-a1.  Some of these are used by schedule() and its predecessors,
 * and so we might see unexpected behavior when a task returns
 * with unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 */
asmlinkage void resume(void);
#define switch_to(prev,next,last)				\
{								\
	void *_last;						\
	__asm__ __volatile__(					\
		"movel %1, %%a0\n\t"				\
		"movel %2, %%a1\n\t"				\
		"jbsr resume\n\t"				\
		"movel %%d1, %0\n\t"				\
		: "=d" (_last)					\
		: "d" (prev), "d" (next)			\
		: "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
	(last) = _last;						\
}
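/*
 * Editor's sketch (not part of the original header): scheduler core
 * code would invoke switch_to() roughly as below. The surrounding
 * function and pick_next_task() are hypothetical placeholders.
 *
 *	struct task_struct *prev = current, *next = pick_next_task();
 *	struct task_struct *last;
 *
 *	if (prev != next)
 *		switch_to(prev, next, last);
 *	// "last" is the task we actually switched away from, as
 *	// handed back by resume() in d1.
 */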

#ifdef CONFIG_COLDFIRE
#define local_irq_enable() __asm__ __volatile__ (	\
	"move %/sr,%%d0\n\t"				\
	"andi.l #0xf8ff,%%d0\n\t"			\
	"move %%d0,%/sr\n"				\
	: /* no outputs */				\
	:						\
	: "cc", "%d0", "memory")
#define local_irq_disable() __asm__ __volatile__ (	\
	"move %/sr,%%d0\n\t"				\
	"ori.l #0x0700,%%d0\n\t"			\
	"move %%d0,%/sr\n"				\
	: /* no outputs */				\
	:						\
	: "cc", "%d0", "memory")
#else

/* portable version */ /* FIXME - see entry.h */
#define ALLOWINT 0xf8ff

#define local_irq_enable() asm volatile ("andiw %0,%%sr" : : "i" (ALLOWINT) : "memory")
#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr" : : : "memory")
#endif

#define local_save_flags(x) asm volatile ("movew %%sr,%0" : "=d" (x) : : "memory")
#define local_irq_restore(x) asm volatile ("movew %0,%%sr" : : "d" (x) : "memory")

/* For spinlocks etc */
#define local_irq_save(x) do { local_save_flags(x); local_irq_disable(); } while (0)

#define irqs_disabled()				\
({						\
	unsigned long flags;			\
	local_save_flags(flags);		\
	((flags & 0x0700) == 0x0700);		\
})
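/*
 * Editor's sketch (not in the original header): the usual shape of a
 * critical section guarded by these macros; do_something_atomic() is
 * a hypothetical placeholder.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// save SR, then mask interrupts
 *	do_something_atomic();		// runs with interrupts off ...
 *	BUG_ON(!irqs_disabled());	// ... which we can assert
 *	local_irq_restore(flags);	// restore the saved SR
 */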

#define iret() __asm__ __volatile__ ("rte" : : : "memory", "sp", "cc")

/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop()  asm volatile ("nop"::)
#define mb()   asm volatile ("" : : : "memory")
#define rmb()  asm volatile ("" : : : "memory")
#define wmb()  asm volatile ("" : : : "memory")
#define set_rmb(var, value) do { xchg(&var, value); } while (0)
#define set_mb(var, value) set_rmb(var, value)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif
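/*
 * Editor's sketch (not in the original header): the classic use of
 * wmb() is to order a payload write before the flag that publishes
 * it; "data" and "ready" are hypothetical shared variables.
 *
 *	data = 42;	// producer: write the payload first
 *	wmb();		// order the payload before the flag
 *	ready = 1;	// then publish it to the consumer
 */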

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
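/*
 * Editor's sketch (not in the original header): tas() atomically sets
 * *ptr to 1 and returns the previous value, which is the primitive
 * behind a simple test-and-set lock; "lock" is a hypothetical int.
 *
 *	while (tas(&lock))	// spin until the old value was 0,
 *		;		// i.e. until we took the lock
 *	... critical section ...
 *	lock = 0;		// release
 */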

struct __xchg_dummy { unsigned long a[100]; };
/*
 * __xg() hands the "m" constraints below a pointer to a large dummy
 * struct, so gcc treats the whole exchanged object as the memory
 * operand rather than just its first word.
 */
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long tmp, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "moveb %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "movew %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "movel %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	local_irq_restore(flags);
	return tmp;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
cmpxchg(volatile int *p, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	if ((prev = *p) == old)
		*p = new;
	local_irq_restore(flags);
	return prev;
}
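/*
 * Editor's sketch (not in the original header): the usual cmpxchg()
 * retry loop, here atomically incrementing a hypothetical counter
 * behind "volatile int *v".  As the comment above says, success is
 * detected by comparing the returned value with "old".
 *
 *	int old, new;
 *
 *	do {
 *		old = *v;			// snapshot current value
 *		new = old + 1;			// compute the update
 *	} while (cmpxchg(v, old, new) != old);	// retry if we raced
 */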


#ifdef CONFIG_M68332
#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("				\
	movew #0x0000, 0xfffa6a;	\
	reset;				\
	/*movew #0x1557, 0xfffa44;*/	\
	/*movew #0x0155, 0xfffa46;*/	\
	moveal #0, %a0;			\
	movec %a0, %vbr;		\
	moveal 0, %sp;			\
	moveal 4, %a0;			\
	jmp (%a0);			\
	");				\
})
#endif

#if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
	defined(CONFIG_M68360) || defined(CONFIG_M68VZ328)
#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("				\
	moveal #0x10c00000, %a0;	\
	moveb #0, 0xFFFFF300;		\
	moveal 0(%a0), %sp;		\
	moveal 4(%a0), %a0;		\
	jmp (%a0);			\
	");				\
})
#endif

#ifdef CONFIG_COLDFIRE
#if defined(CONFIG_M5272) && defined(CONFIG_NETtel)
/*
 * Need to account for the broken early mask of 5272 silicon. So don't
 * jump through the original start address. Jump straight into the
 * known start of the FLASH code.
 */
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	jmp 0xf0000400;			\
	");				\
})
#elif defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
	defined(CONFIG_DISKtel) || defined(CONFIG_SECUREEDGEMP3) || \
	defined(CONFIG_CLEOPATRA)
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	moveal #0x10000044, %a0;	\
	movel #0xffffffff, (%a0);	\
	moveal #0x10000001, %a0;	\
	moveb #0x00, (%a0);		\
	moveal #0xf0000004, %a0;	\
	moveal (%a0), %a0;		\
	jmp (%a0);			\
	");				\
})
#elif defined(CONFIG_M528x)
/*
 * The MCF528x has a bit (SOFTRST) in memory (Reset Control Register RCR)
 * that, when set, resets the MCF528x.
 */
#define HARD_RESET_NOW()		\
({					\
	volatile unsigned char *reset;	\
	asm("move.w #0x2700, %sr");	\
	reset = (volatile unsigned char *)(MCF_IPSBAR + 0x110000); \
	while (1)			\
		*reset |= (0x01 << 7);	\
})
#else
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	moveal #0x4, %a0;		\
	moveal (%a0), %a0;		\
	jmp (%a0);			\
	");				\
})
#endif
#endif
#define arch_align_stack(x) (x)

#endif /* _M68KNOMMU_SYSTEM_H */