Diffstat (limited to 'arch/m68k/include'):

 -rw-r--r--  arch/m68k/include/asm/bitops.h      | 531
 -rw-r--r--  arch/m68k/include/asm/atomic.h      |   6
 -rw-r--r--  arch/m68k/include/asm/bitops_mm.h   | 501
 -rw-r--r--  arch/m68k/include/asm/bitops_no.h   | 333
 -rw-r--r--  arch/m68k/include/asm/delay.h       |  97
 -rw-r--r--  arch/m68k/include/asm/delay_mm.h    |  57
 -rw-r--r--  arch/m68k/include/asm/delay_no.h    |  76
 -rw-r--r--  arch/m68k/include/asm/entry_no.h    |  12
 -rw-r--r--  arch/m68k/include/asm/hardirq.h     |  35
 -rw-r--r--  arch/m68k/include/asm/hardirq_mm.h  |  16
 -rw-r--r--  arch/m68k/include/asm/hardirq_no.h  |  19
 -rw-r--r--  arch/m68k/include/asm/irq.h         |  11
 -rw-r--r--  arch/m68k/include/asm/machdep.h     |   1
 -rw-r--r--  arch/m68k/include/asm/module.h      |  31
 -rw-r--r--  arch/m68k/include/asm/signal.h      |  15
 -rw-r--r--  arch/m68k/include/asm/system.h      | 194
 -rw-r--r--  arch/m68k/include/asm/system_mm.h   | 193
 -rw-r--r--  arch/m68k/include/asm/system_no.h   | 153
 -rw-r--r--  arch/m68k/include/asm/traps.h       |   1

 19 files changed, 876 insertions, 1406 deletions
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 03ae3d14cd4a..307a573881ad 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -169,18 +169,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 	char c;
 	__asm__ __volatile__("addl %2,%1; smi %0"
 			     : "=d" (c), "+m" (*v)
-			     : "id" (i));
+			     : ASM_DI (i));
 	return c != 0;
 }
 
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
 {
-	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
+	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
 }
 
 static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 {
-	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
+	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
 }
 
 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
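
The "id" constraint told gcc it could place the operand in either a data register or an immediate. ColdFire cores only accept the data-register form for these read-modify-write instructions on memory, so the hunk above switches to an ASM_DI macro that can expand differently per family. A minimal sketch of what such a constraint macro could look like, assuming a CONFIG_COLDFIRE split; the real definition sits elsewhere in atomic.h and is not part of this hunk:

	/* Sketch only: per-family inline-asm constraint (assumed, not quoted from the patch) */
	#ifdef CONFIG_COLDFIRE
	#define ASM_DI	"d"	/* ColdFire: data register operand only */
	#else
	#define ASM_DI	"di"	/* classic 68k: data register or immediate */
	#endif
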
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index ce163abddaba..c6baa913592a 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -1,5 +1,530 @@
-#ifdef __uClinux__
-#include "bitops_no.h"
-#else
-#include "bitops_mm.h"
-#endif
+#ifndef _M68K_BITOPS_H
+#define _M68K_BITOPS_H
+/*
+ * Copyright 1992, Linus Torvalds.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+
+/*
+ * Bit access functions vary across the ColdFire and 68k families.
+ * So we will break them out here, and then macro in the ones we want.
+ *
+ * ColdFire  - supports standard bset/bclr/bchg with register operand only
+ * 68000     - supports standard bset/bclr/bchg with memory operand
+ * >= 68020  - also supports the bfset/bfclr/bfchg instructions
+ *
+ * Although it is possible to use only the bset/bclr/bchg with register
+ * operands on all platforms you end up with larger generated code.
+ * So we use the best form possible on a given platform.
+ */
+
+static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bset %1,(%0)"
+		:
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+}
+
+static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bset %1,%0"
+		: "+m" (*p)
+		: "di" (nr & 7));
+}
+
+static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
+{
+	__asm__ __volatile__ ("bfset %1{%0:#1}"
+		:
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
+#else
+#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+				bset_mem_set_bit(nr, vaddr) : \
+				bfset_mem_set_bit(nr, vaddr))
+#endif
+
+#define __set_bit(nr, vaddr)	set_bit(nr, vaddr)
+
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit()	barrier()
+#define smp_mb__after_clear_bit()	barrier()
+
+static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bclr %1,(%0)"
+		:
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+}
+
+static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bclr %1,%0"
+		: "+m" (*p)
+		: "di" (nr & 7));
+}
+
+static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+	__asm__ __volatile__ ("bfclr %1{%0:#1}"
+		:
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
+#else
+#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+				bclr_mem_clear_bit(nr, vaddr) : \
+				bfclr_mem_clear_bit(nr, vaddr))
+#endif
+
+#define __clear_bit(nr, vaddr)	clear_bit(nr, vaddr)
+
+
+static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bchg %1,(%0)"
+		:
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+}
+
+static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bchg %1,%0"
+		: "+m" (*p)
+		: "di" (nr & 7));
+}
+
+static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
+{
+	__asm__ __volatile__ ("bfchg %1{%0:#1}"
+		:
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
+#else
+#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+				bchg_mem_change_bit(nr, vaddr) : \
+				bfchg_mem_change_bit(nr, vaddr))
+#endif
+
+#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)
+
+
+static inline int test_bit(int nr, const unsigned long *vaddr)
+{
+	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+}
+
+
+static inline int bset_reg_test_and_set_bit(int nr,
+					    volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bset %2,(%1); sne %0"
+		: "=d" (retval)
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+	return retval;
+}
+
+static inline int bset_mem_test_and_set_bit(int nr,
+					    volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bset %2,%1; sne %0"
+		: "=d" (retval), "+m" (*p)
+		: "di" (nr & 7));
+	return retval;
+}
+
+static inline int bfset_mem_test_and_set_bit(int nr,
+					     volatile unsigned long *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
+		: "=d" (retval)
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+	return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
+#else
+#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+					bset_mem_test_and_set_bit(nr, vaddr) : \
+					bfset_mem_test_and_set_bit(nr, vaddr))
+#endif
+
+#define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)
+
+
+static inline int bclr_reg_test_and_clear_bit(int nr,
+					      volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
+		: "=d" (retval)
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+	return retval;
+}
+
+static inline int bclr_mem_test_and_clear_bit(int nr,
+					      volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bclr %2,%1; sne %0"
+		: "=d" (retval), "+m" (*p)
+		: "di" (nr & 7));
+	return retval;
+}
+
+static inline int bfclr_mem_test_and_clear_bit(int nr,
+					       volatile unsigned long *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
+		: "=d" (retval)
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+	return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
+#else
+#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+					bclr_mem_test_and_clear_bit(nr, vaddr) : \
+					bfclr_mem_test_and_clear_bit(nr, vaddr))
+#endif
+
+#define __test_and_clear_bit(nr, vaddr)	test_and_clear_bit(nr, vaddr)
+
+
+static inline int bchg_reg_test_and_change_bit(int nr,
+					       volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
+		: "=d" (retval)
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+	return retval;
+}
+
+static inline int bchg_mem_test_and_change_bit(int nr,
+					       volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bchg %2,%1; sne %0"
+		: "=d" (retval), "+m" (*p)
+		: "di" (nr & 7));
+	return retval;
+}
+
+static inline int bfchg_mem_test_and_change_bit(int nr,
+						volatile unsigned long *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
+		: "=d" (retval)
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+	return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
+#else
+#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+					bchg_mem_test_and_change_bit(nr, vaddr) : \
+					bfchg_mem_test_and_change_bit(nr, vaddr))
+#endif
+
+#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
+
+
+/*
+ * The true 68020 and more advanced processors support the "bfffo"
+ * instruction for finding bits. ColdFire and simple 68000 parts
+ * (including CPU32) do not support this. They simply use the generic
+ * functions.
+ */
+#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ffz.h>
+#else
+
+static inline int find_first_zero_bit(const unsigned long *vaddr,
+				      unsigned size)
+{
+	const unsigned long *p = vaddr;
+	int res = 32;
+	unsigned int words;
+	unsigned long num;
+
+	if (!size)
+		return 0;
+
+	words = (size + 31) >> 5;
+	while (!(num = ~*p++)) {
+		if (!--words)
+			goto out;
+	}
+
+	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+			      : "=d" (res) : "d" (num & -num));
+	res ^= 31;
+out:
+	res += ((long)p - (long)vaddr - 4) * 8;
+	return res < size ? res : size;
+}
+#define find_first_zero_bit find_first_zero_bit
+
+static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
+				     int offset)
+{
+	const unsigned long *p = vaddr + (offset >> 5);
+	int bit = offset & 31UL, res;
+
+	if (offset >= size)
+		return size;
+
+	if (bit) {
+		unsigned long num = ~*p++ & (~0UL << bit);
+		offset -= bit;
+
+		/* Look for zero in first longword */
+		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+				      : "=d" (res) : "d" (num & -num));
+		if (res < 32) {
+			offset += res ^ 31;
+			return offset < size ? offset : size;
+		}
+		offset += 32;
+
+		if (offset >= size)
+			return size;
+	}
+	/* No zero yet, search remaining full bytes for a zero */
+	return offset + find_first_zero_bit(p, size - offset);
+}
+#define find_next_zero_bit find_next_zero_bit
+
+static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
+{
+	const unsigned long *p = vaddr;
+	int res = 32;
+	unsigned int words;
+	unsigned long num;
+
+	if (!size)
+		return 0;
+
+	words = (size + 31) >> 5;
+	while (!(num = *p++)) {
+		if (!--words)
+			goto out;
+	}
+
+	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+			      : "=d" (res) : "d" (num & -num));
+	res ^= 31;
+out:
+	res += ((long)p - (long)vaddr - 4) * 8;
+	return res < size ? res : size;
+}
+#define find_first_bit find_first_bit
+
+static inline int find_next_bit(const unsigned long *vaddr, int size,
+				int offset)
+{
+	const unsigned long *p = vaddr + (offset >> 5);
+	int bit = offset & 31UL, res;
+
+	if (offset >= size)
+		return size;
+
+	if (bit) {
+		unsigned long num = *p++ & (~0UL << bit);
+		offset -= bit;
+
+		/* Look for one in first longword */
+		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+				      : "=d" (res) : "d" (num & -num));
+		if (res < 32) {
+			offset += res ^ 31;
+			return offset < size ? offset : size;
+		}
+		offset += 32;
+
+		if (offset >= size)
+			return size;
+	}
+	/* No one yet, search remaining full bytes for a one */
+	return offset + find_first_bit(p, size - offset);
+}
+#define find_next_bit find_next_bit
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+	int res;
+
+	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+			      : "=d" (res) : "d" (~word & -~word));
+	return res ^ 31;
+}
+
+#endif
+
+#ifdef __KERNEL__
+
+#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+
+/*
+ * The newer ColdFire family members support a "bitrev" instruction
+ * and we can use that to implement a fast ffs. Older Coldfire parts,
+ * and normal 68000 parts don't have anything special, so we use the
+ * generic functions for those.
+ */
+#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
+	!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
+static inline int __ffs(int x)
+{
+	__asm__ __volatile__ ("bitrev %0; ff1 %0"
+		: "=d" (x)
+		: "0" (x));
+	return x;
+}
+
+static inline int ffs(int x)
+{
+	if (!x)
+		return 0;
+	return __ffs(x) + 1;
+}
+
+#else
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#endif
+
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+
+#else
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static inline int ffs(int x)
+{
+	int cnt;
+
+	__asm__ ("bfffo %1{#0:#0},%0"
+		: "=d" (cnt)
+		: "dm" (x & -x));
+	return 32 - cnt;
+}
+#define __ffs(x) (ffs(x) - 1)
+
+/*
+ * fls: find last bit set.
+ */
+static inline int fls(int x)
+{
+	int cnt;
+
+	__asm__ ("bfffo %1{#0,#0},%0"
+		: "=d" (cnt)
+		: "dm" (x));
+	return 32 - cnt;
+}
+
+static inline int __fls(int x)
+{
+	return fls(x) - 1;
+}
+
+#endif
+
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#endif /* __KERNEL__ */
+
+#endif /* _M68K_BITOPS_H */
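
All the new set/clear/change helpers above compute the target byte as (nr ^ 31) / 8 and the bit within that byte as nr & 7: m68k bitops number bits big-endian within each 32-bit word, so logical bit 0 lives in the last byte of the word. A standalone illustration of that mapping (not part of the patch, plain user-space C):

	#include <stdio.h>

	/* Shows which byte of a big-endian 32-bit word holds logical bit nr. */
	int main(void)
	{
		for (int nr = 0; nr < 32; nr += 8)
			printf("bit %2d -> byte offset %d, bit-in-byte %d\n",
			       nr, (nr ^ 31) / 8, nr & 7);
		return 0;
	}

Bit 0 maps to byte offset 3 and bits 24-31 to byte offset 0, matching the bset/bclr/bchg single-byte addressing the helpers rely on.
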
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h
deleted file mode 100644
index 89cf5b814a4d..000000000000
--- a/arch/m68k/include/asm/bitops_mm.h
+++ /dev/null
@@ -1,501 +0,0 @@
-#ifndef _M68K_BITOPS_H
-#define _M68K_BITOPS_H
-/*
- * Copyright 1992, Linus Torvalds.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#include <linux/compiler.h>
-
-/*
- * Require 68020 or better.
- *
- * They use the standard big-endian m680x0 bit ordering.
- */
-
-#define test_and_set_bit(nr,vaddr) \
-	(__builtin_constant_p(nr) ? \
-	 __constant_test_and_set_bit(nr, vaddr) : \
-	 __generic_test_and_set_bit(nr, vaddr))
-
-#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)
-
-static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	char retval;
-
-	__asm__ __volatile__ ("bset %2,%1; sne %0"
-			: "=d" (retval), "+m" (*p)
-			: "di" (nr & 7));
-
-	return retval;
-}
-
-static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
-{
-	char retval;
-
-	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
-			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
-	return retval;
-}
-
-#define set_bit(nr,vaddr) \
-	(__builtin_constant_p(nr) ? \
-	 __constant_set_bit(nr, vaddr) : \
-	 __generic_set_bit(nr, vaddr))
-
-#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
-
-static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	__asm__ __volatile__ ("bset %1,%0"
-			: "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
-{
-	__asm__ __volatile__ ("bfset %1{%0:#1}"
-			: : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-#define test_and_clear_bit(nr,vaddr) \
-	(__builtin_constant_p(nr) ? \
-	 __constant_test_and_clear_bit(nr, vaddr) : \
-	 __generic_test_and_clear_bit(nr, vaddr))
-
-#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)
-
-static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	char retval;
-
-	__asm__ __volatile__ ("bclr %2,%1; sne %0"
-			: "=d" (retval), "+m" (*p)
-			: "di" (nr & 7));
-
-	return retval;
-}
-
-static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
-{
-	char retval;
-
-	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
-			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
-	return retval;
-}
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
-#define clear_bit(nr,vaddr) \
-	(__builtin_constant_p(nr) ? \
-	 __constant_clear_bit(nr, vaddr) : \
-	 __generic_clear_bit(nr, vaddr))
-#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
-
-static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	__asm__ __volatile__ ("bclr %1,%0"
-			: "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
-{
-	__asm__ __volatile__ ("bfclr %1{%0:#1}"
-			: : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-#define test_and_change_bit(nr,vaddr) \
-	(__builtin_constant_p(nr) ? \
-	 __constant_test_and_change_bit(nr, vaddr) : \
-	 __generic_test_and_change_bit(nr, vaddr))
-
-#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
-#define __change_bit(nr,vaddr) change_bit(nr,vaddr)
-
-static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	char retval;
-
-	__asm__ __volatile__ ("bchg %2,%1; sne %0"
-			: "=d" (retval), "+m" (*p)
-			: "di" (nr & 7));
-
-	return retval;
-}
-
-static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
-{
-	char retval;
-
-	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
-			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
-	return retval;
-}
-
-#define change_bit(nr,vaddr) \
-	(__builtin_constant_p(nr) ? \
-	 __constant_change_bit(nr, vaddr) : \
-	 __generic_change_bit(nr, vaddr))
-
-static inline void __constant_change_bit(int nr, unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	__asm__ __volatile__ ("bchg %1,%0"
-			: "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_change_bit(int nr, unsigned long *vaddr)
-{
-	__asm__ __volatile__ ("bfchg %1{%0:#1}"
-			: : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-static inline int test_bit(int nr, const unsigned long *vaddr)
-{
-	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
-}
-
-static inline int find_first_zero_bit(const unsigned long *vaddr,
-				      unsigned size)
-{
-	const unsigned long *p = vaddr;
-	int res = 32;
-	unsigned int words;
-	unsigned long num;
-
-	if (!size)
-		return 0;
-
-	words = (size + 31) >> 5;
-	while (!(num = ~*p++)) {
-		if (!--words)
-			goto out;
-	}
-
-	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
-			      : "=d" (res) : "d" (num & -num));
-	res ^= 31;
-out:
-	res += ((long)p - (long)vaddr - 4) * 8;
-	return res < size ? res : size;
-}
-#define find_first_zero_bit find_first_zero_bit
-
-static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
-				     int offset)
-{
-	const unsigned long *p = vaddr + (offset >> 5);
-	int bit = offset & 31UL, res;
-
-	if (offset >= size)
-		return size;
-
-	if (bit) {
-		unsigned long num = ~*p++ & (~0UL << bit);
-		offset -= bit;
-
-		/* Look for zero in first longword */
-		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
-				      : "=d" (res) : "d" (num & -num));
-		if (res < 32) {
-			offset += res ^ 31;
-			return offset < size ? offset : size;
-		}
-		offset += 32;
-
-		if (offset >= size)
-			return size;
-	}
-	/* No zero yet, search remaining full bytes for a zero */
-	return offset + find_first_zero_bit(p, size - offset);
-}
-#define find_next_zero_bit find_next_zero_bit
-
-static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
-{
-	const unsigned long *p = vaddr;
-	int res = 32;
-	unsigned int words;
-	unsigned long num;
-
-	if (!size)
-		return 0;
-
-	words = (size + 31) >> 5;
-	while (!(num = *p++)) {
-		if (!--words)
-			goto out;
-	}
-
-	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
-			      : "=d" (res) : "d" (num & -num));
-	res ^= 31;
-out:
-	res += ((long)p - (long)vaddr - 4) * 8;
-	return res < size ? res : size;
-}
-#define find_first_bit find_first_bit
-
-static inline int find_next_bit(const unsigned long *vaddr, int size,
-				int offset)
-{
-	const unsigned long *p = vaddr + (offset >> 5);
-	int bit = offset & 31UL, res;
-
-	if (offset >= size)
-		return size;
-
-	if (bit) {
-		unsigned long num = *p++ & (~0UL << bit);
-		offset -= bit;
-
-		/* Look for one in first longword */
-		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
-				      : "=d" (res) : "d" (num & -num));
-		if (res < 32) {
-			offset += res ^ 31;
-			return offset < size ? offset : size;
-		}
-		offset += 32;
-
-		if (offset >= size)
-			return size;
-	}
-	/* No one yet, search remaining full bytes for a one */
-	return offset + find_first_bit(p, size - offset);
-}
-#define find_next_bit find_next_bit
-
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long ffz(unsigned long word)
-{
-	int res;
-
-	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
-			      : "=d" (res) : "d" (~word & -~word));
-	return res ^ 31;
-}
-
-#ifdef __KERNEL__
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-
-static inline int ffs(int x)
-{
-	int cnt;
-
-	asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));
-
-	return 32 - cnt;
-}
-#define __ffs(x) (ffs(x) - 1)
-
-/*
- * fls: find last bit set.
- */
-
-static inline int fls(int x)
-{
-	int cnt;
-
-	asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));
-
-	return 32 - cnt;
-}
-
-static inline int __fls(int x)
-{
-	return fls(x) - 1;
-}
-
-#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-
-/* Bitmap functions for the little endian bitmap. */
-
-static inline void __set_bit_le(int nr, void *addr)
-{
-	__set_bit(nr ^ 24, addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
-	__clear_bit(nr ^ 24, addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, void *addr)
-{
-	return __test_and_set_bit(nr ^ 24, addr);
-}
-
-static inline int test_and_set_bit_le(int nr, void *addr)
-{
-	return test_and_set_bit(nr ^ 24, addr);
-}
-
-static inline int __test_and_clear_bit_le(int nr, void *addr)
-{
-	return __test_and_clear_bit(nr ^ 24, addr);
-}
-
-static inline int test_and_clear_bit_le(int nr, void *addr)
-{
-	return test_and_clear_bit(nr ^ 24, addr);
-}
-
-static inline int test_bit_le(int nr, const void *vaddr)
-{
-	const unsigned char *p = vaddr;
-	return (p[nr >> 3] & (1U << (nr & 7))) != 0;
-}
-
-static inline int find_first_zero_bit_le(const void *vaddr, unsigned size)
-{
-	const unsigned long *p = vaddr, *addr = vaddr;
-	int res = 0;
-	unsigned int words;
-
-	if (!size)
-		return 0;
-
-	words = (size >> 5) + ((size & 31) > 0);
-	while (*p++ == ~0UL) {
-		if (--words == 0)
-			goto out;
-	}
-
-	--p;
-	for (res = 0; res < 32; res++)
-		if (!test_bit_le(res, p))
-			break;
-out:
-	res += (p - addr) * 32;
-	return res < size ? res : size;
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline unsigned long find_next_zero_bit_le(const void *addr,
-		unsigned long size, unsigned long offset)
-{
-	const unsigned long *p = addr;
-	int bit = offset & 31UL, res;
-
-	if (offset >= size)
-		return size;
-
-	p += offset >> 5;
-
-	if (bit) {
-		offset -= bit;
-		/* Look for zero in first longword */
-		for (res = bit; res < 32; res++)
-			if (!test_bit_le(res, p)) {
-				offset += res;
-				return offset < size ? offset : size;
-			}
-		p++;
-		offset += 32;
-
-		if (offset >= size)
-			return size;
-	}
-	/* No zero yet, search remaining full bytes for a zero */
-	return offset + find_first_zero_bit_le(p, size - offset);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline int find_first_bit_le(const void *vaddr, unsigned size)
-{
-	const unsigned long *p = vaddr, *addr = vaddr;
-	int res = 0;
-	unsigned int words;
-
-	if (!size)
-		return 0;
-
-	words = (size >> 5) + ((size & 31) > 0);
-	while (*p++ == 0UL) {
-		if (--words == 0)
-			goto out;
-	}
-
-	--p;
-	for (res = 0; res < 32; res++)
-		if (test_bit_le(res, p))
-			break;
-out:
-	res += (p - addr) * 32;
-	return res < size ? res : size;
-}
-#define find_first_bit_le find_first_bit_le
-
-static inline unsigned long find_next_bit_le(const void *addr,
-		unsigned long size, unsigned long offset)
-{
-	const unsigned long *p = addr;
-	int bit = offset & 31UL, res;
-
-	if (offset >= size)
-		return size;
-
-	p += offset >> 5;
-
-	if (bit) {
-		offset -= bit;
-		/* Look for one in first longword */
-		for (res = bit; res < 32; res++)
-			if (test_bit_le(res, p)) {
-				offset += res;
-				return offset < size ? offset : size;
-			}
-		p++;
-		offset += 32;
-
-		if (offset >= size)
-			return size;
-	}
-	/* No set bit yet, search remaining full bytes for a set bit */
-	return offset + find_first_bit_le(p, size - offset);
-}
-#define find_next_bit_le find_next_bit_le
-
-/* Bitmap functions for the ext2 filesystem. */
-
-#define ext2_set_bit_atomic(lock, nr, addr)	\
-	test_and_set_bit_le(nr, addr)
-#define ext2_clear_bit_atomic(lock, nr, addr)	\
-	test_and_clear_bit_le(nr, addr)
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_BITOPS_H */
diff --git a/arch/m68k/include/asm/bitops_no.h b/arch/m68k/include/asm/bitops_no.h
deleted file mode 100644
index 72e85acdd7bd..000000000000
--- a/arch/m68k/include/asm/bitops_no.h
+++ /dev/null
@@ -1,333 +0,0 @@
-#ifndef _M68KNOMMU_BITOPS_H
-#define _M68KNOMMU_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-#include <linux/compiler.h>
-#include <asm/byteorder.h>	/* swab32 */
-
-#ifdef __KERNEL__
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#if defined (__mcfisaaplus__) || defined (__mcfisac__)
-static inline int ffs(unsigned int val)
-{
-	if (!val)
-		return 0;
-
-	asm volatile(
-			"bitrev %0\n\t"
-			"ff1 %0\n\t"
-			: "=d" (val)
-			: "0" (val)
-		    );
-	val++;
-	return val;
-}
-
-static inline int __ffs(unsigned int val)
-{
-	asm volatile(
-			"bitrev %0\n\t"
-			"ff1 %0\n\t"
-			: "=d" (val)
-			: "0" (val)
-		    );
-	return val;
-}
-
-#else
-#include <asm-generic/bitops/ffs.h>
-#include <asm-generic/bitops/__ffs.h>
-#endif
-
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/ffz.h>
-
-static __inline__ void set_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0", "cc");
-#else
-	__asm__ __volatile__ ("bset %1,%0"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		: "cc");
-#endif
-}
-
-#define __set_bit(nr, addr) set_bit(nr, addr)
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
-static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0", "cc");
-#else
-	__asm__ __volatile__ ("bclr %1,%0"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		: "cc");
-#endif
-}
-
-#define __clear_bit(nr, addr) clear_bit(nr, addr)
-
-static __inline__ void change_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0", "cc");
-#else
-	__asm__ __volatile__ ("bchg %1,%0"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		: "cc");
-#endif
-}
-
-#define __change_bit(nr, addr) change_bit(nr, addr)
-
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("bset %2,%1; sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
-
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("bclr %2,%1; sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
-
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("bchg %2,%1; sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
-
-/*
- * This routine doesn't need to be atomic.
- */
-static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
-{
-	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
-{
-	int * a = (int *) addr;
-	int mask;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	return ((mask & *a) != 0);
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)))
-
-#include <asm-generic/bitops/find.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-
-#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
-
-static inline void __set_bit_le(int nr, void *addr)
-{
-	__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
-	__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, volatile void *addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
-		: "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("bset %2,%1; sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
-		: "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
-		: "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("bclr %2,%1; sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
-		: "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-#include <asm-generic/bitops/ext2-atomic.h>
-
-static inline int test_bit_le(int nr, const volatile void *addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
-		: "=d" (retval)
-		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("btst %2,%1; sne %0"
-		: "=d" (retval)
-		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-#define find_first_zero_bit_le(addr, size)	\
-	find_next_zero_bit_le((addr), (size), 0)
-
-static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset)
-{
-	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
-	unsigned long result = offset & ~31UL;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 31UL;
-	if(offset) {
-		/* We hold the little endian value in tmp, but then the
-		 * shift is illegal. So we could keep a big endian value
-		 * in tmp, like this:
-		 *
-		 * tmp = __swab32(*(p++));
-		 * tmp |= ~0UL >> (32-offset);
-		 *
-		 * but this would decrease performance, so we change the
-		 * shift:
-		 */
-		tmp = *(p++);
-		tmp |= __swab32(~0UL >> (32-offset));
-		if(size < 32)
-			goto found_first;
-		if(~tmp)
-			goto found_middle;
-		size -= 32;
-		result += 32;
-	}
-	while(size & ~31UL) {
-		if(~(tmp = *(p++)))
-			goto found_middle;
-		result += 32;
-		size -= 32;
-	}
-	if(!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	/* tmp is little endian, so we would have to swab the shift,
-	 * see above. But then we have to swab tmp below for ffz, so
-	 * we might as well do this here.
-	 */
-	return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
-	return result + ffz(__swab32(tmp));
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-extern unsigned long find_next_bit_le(const void *addr,
-		unsigned long size, unsigned long offset);
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/__fls.h>
-#include <asm-generic/bitops/fls64.h>
-
-#endif /* _M68KNOMMU_BITOPS_H */
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index d2598e3dd7b2..9c09becfd4c9 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -1,5 +1,96 @@
-#ifdef __uClinux__
-#include "delay_no.h"
-#else
-#include "delay_mm.h"
-#endif
+#ifndef _M68K_DELAY_H
+#define _M68K_DELAY_H
+
+#include <asm/param.h>
+
+/*
+ * Copyright (C) 1994 Hamish Macdonald
+ * Copyright (C) 2004 Greg Ungerer <gerg@uclinux.com>
+ *
+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
+ */
+
+#if defined(CONFIG_COLDFIRE)
+/*
+ * The ColdFire runs the delay loop at significantly different speeds
+ * depending upon long word alignment or not.  We'll pad it to
+ * long word alignment which is the faster version.
+ * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
+ * than using a NOP (0x4e71) instruction because it executes in one
+ * cycle not three and doesn't allow for an arbitrary delay waiting
+ * for bus cycles to finish. Also fp/a6 isn't likely to cause a
+ * stall waiting for the register to become valid if such is added
+ * to the coldfire at some stage.
+ */
+#define	DELAY_ALIGN	".balignw 4, 0x4a8e\n\t"
+#else
+/*
+ * No instruction alignment required for other m68k types.
+ */
+#define	DELAY_ALIGN
+#endif
+
+static inline void __delay(unsigned long loops)
+{
+	__asm__ __volatile__ (
+		DELAY_ALIGN
+		"1: subql #1,%0\n\t"
+		"jcc 1b"
+		: "=d" (loops)
+		: "0" (loops));
+}
+
+extern void __bad_udelay(void);
+
+
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+/*
+ * The simpler m68k and ColdFire processors do not have a 32*32->64
+ * multiply instruction. So we need to handle them a little differently.
+ * We use a bit of shifting and a single 32*32->32 multiply to get close.
+ * This is a macro so that the const version can factor out the first
+ * multiply and shift.
+ */
+#define	HZSCALE		(268435456 / (1000000 / HZ))
+
+#define	__const_udelay(u) \
+	__delay(((((u) * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6)
+
+#else
+
+static inline void __xdelay(unsigned long xloops)
+{
+	unsigned long tmp;
+
+	__asm__ ("mulul %2,%0:%1"
+		: "=d" (xloops), "=d" (tmp)
+		: "d" (xloops), "1" (loops_per_jiffy));
+	__delay(xloops * HZ);
+}
+
+/*
+ * The definition of __const_udelay is specifically made a macro so that
+ * the const factor (4295 = 2**32 / 1000000) can be optimized out when
+ * the delay is a const.
+ */
+#define	__const_udelay(n)	(__xdelay((n) * 4295))
+
+#endif
+
+static inline void __udelay(unsigned long usecs)
+{
+	__const_udelay(usecs);
+}
+
+/*
+ * Use only for very small delays ( < 1 msec).  Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays.  This is a "reasonable" implementation, though (and the
+ * first constant multiplications gets optimized away if the delay is
+ * a constant)
+ */
+#define udelay(n) (__builtin_constant_p(n) ? \
+	((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n))
+
+
+#endif /* defined(_M68K_DELAY_H) */
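
For the no-64-bit-multiply path, __const_udelay approximates loops = usecs * HZ * loops_per_jiffy / 10^6 using HZSCALE = 2^28 * HZ / 10^6; the three shifts (11 + 11 + 6 = 28) cancel the 2^28 while keeping each intermediate product inside 32 bits. A quick standalone check of that arithmetic, assuming HZ = 100 and a made-up loops_per_jiffy value (illustration only):

	#include <stdio.h>

	#define HZ	100
	#define HZSCALE	(268435456 / (1000000 / HZ))	/* 2^28 * HZ / 10^6 */

	int main(void)
	{
		unsigned long lpj = 500000;	/* hypothetical loops_per_jiffy */
		unsigned long usecs = 100;
		/* the shift-based approximation from the macro above */
		unsigned long approx = (((usecs * HZSCALE) >> 11) * (lpj >> 11)) >> 6;
		/* the exact value it is standing in for */
		double exact = (double)usecs * HZ * lpj / 1000000.0;
		printf("approx=%lu exact=%.0f\n", approx, exact);
		return 0;
	}

With these numbers the approximation yields 4994 loops against an exact 5000, i.e. the truncation error is well under the accuracy a busy-wait delay needs.
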
diff --git a/arch/m68k/include/asm/delay_mm.h b/arch/m68k/include/asm/delay_mm.h
deleted file mode 100644
index 5ed92851bc6f..000000000000
--- a/arch/m68k/include/asm/delay_mm.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef _M68K_DELAY_H
-#define _M68K_DELAY_H
-
-#include <asm/param.h>
-
-/*
- * Copyright (C) 1994 Hamish Macdonald
- *
- * Delay routines, using a pre-computed "loops_per_jiffy" value.
- */
-
-static inline void __delay(unsigned long loops)
-{
-	__asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
-		: "=d" (loops) : "0" (loops));
-}
-
-extern void __bad_udelay(void);
-
-/*
- * Use only for very small delays ( < 1 msec).  Should probably use a
- * lookup table, really, as the multiplications take much too long with
- * short delays.  This is a "reasonable" implementation, though (and the
- * first constant multiplications gets optimized away if the delay is
- * a constant)
- */
-static inline void __const_udelay(unsigned long xloops)
-{
-	unsigned long tmp;
-
-	__asm__ ("mulul %2,%0:%1"
-		: "=d" (xloops), "=d" (tmp)
-		: "d" (xloops), "1" (loops_per_jiffy));
-	__delay(xloops * HZ);
-}
-
-static inline void __udelay(unsigned long usecs)
-{
-	__const_udelay(usecs * 4295);	/* 2**32 / 1000000 */
-}
-
-#define udelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 4295)) : \
-	__udelay(n))
-
-static inline unsigned long muldiv(unsigned long a, unsigned long b,
-				   unsigned long c)
-{
-	unsigned long tmp;
-
-	__asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
-		: "=d" (tmp), "=d" (a)
-		: "d" (b), "d" (c), "1" (a));
-	return a;
-}
-
-#endif /* defined(_M68K_DELAY_H) */
diff --git a/arch/m68k/include/asm/delay_no.h b/arch/m68k/include/asm/delay_no.h
deleted file mode 100644
index c3a0edc90f21..000000000000
--- a/arch/m68k/include/asm/delay_no.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef _M68KNOMMU_DELAY_H
-#define _M68KNOMMU_DELAY_H
-
-/*
- * Copyright (C) 1994 Hamish Macdonald
- * Copyright (C) 2004 Greg Ungerer <gerg@snapgear.com>
- */
-
-#include <asm/param.h>
-
-static inline void __delay(unsigned long loops)
-{
-#if defined(CONFIG_COLDFIRE)
-	/* The coldfire runs this loop at significantly different speeds
-	 * depending upon long word alignment or not.  We'll pad it to
-	 * long word alignment which is the faster version.
-	 * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
-	 * than using a NOP (0x4e71) instruction because it executes in one
-	 * cycle not three and doesn't allow for an arbitrary delay waiting
-	 * for bus cycles to finish. Also fp/a6 isn't likely to cause a
-	 * stall waiting for the register to become valid if such is added
-	 * to the coldfire at some stage.
-	 */
-	__asm__ __volatile__ (	".balignw 4, 0x4a8e\n\t"
-				"1: subql #1, %0\n\t"
-				"jcc 1b"
-		: "=d" (loops) : "0" (loops));
-#else
-	__asm__ __volatile__ (	"1: subql #1, %0\n\t"
-				"jcc 1b"
-		: "=d" (loops) : "0" (loops));
-#endif
-}
-
-/*
- * Ideally we use a 32*32->64 multiply to calculate the number of
- * loop iterations, but the older standard 68k and ColdFire do not
- * have this instruction. So for them we have a clsoe approximation
- * loop using 32*32->32 multiplies only. This calculation based on
- * the ARM version of delay.
- *
- * We want to implement:
- *
- * loops = (usecs * 0x10c6 * HZ * loops_per_jiffy) / 2^32
- */
-
-#define	HZSCALE		(268435456 / (1000000/HZ))
-
-extern unsigned long loops_per_jiffy;
-
-static inline void _udelay(unsigned long usecs)
-{
-#if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
-    defined(CONFIG_M68VZ328) || defined(CONFIG_M68360) || \
-    defined(CONFIG_COLDFIRE)
-	__delay((((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6);
-#else
-	unsigned long tmp;
-
-	usecs *= 4295;		/* 2**32 / 1000000 */
-	__asm__ ("mulul %2,%0:%1"
-		: "=d" (usecs), "=d" (tmp)
-		: "d" (usecs), "1" (loops_per_jiffy*HZ));
-	__delay(usecs);
-#endif
-}
-
-/*
- * Moved the udelay() function into library code, no longer inlined.
- * I had to change the algorithm because we are overflowing now on
- * the faster ColdFire parts. The code is a little bigger, so it makes
- * sense to library it.
- */
-extern void udelay(unsigned long usecs);
-
-#endif /* defined(_M68KNOMMU_DELAY_H) */
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index 627d69bacc58..68611e3dbb1d 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -96,11 +96,11 @@
 .endm
 
 .macro RDUSP
-	movel	sw_usp,%a2
+	movel	sw_usp,%a3
 .endm
 
 .macro WRUSP
-	movel	%a0,sw_usp
+	movel	%a3,sw_usp
 .endm
 
 #else /* !CONFIG_COLDFIRE_SW_A7 */
@@ -127,13 +127,13 @@
 .endm
 
 .macro RDUSP
-	/*move	%usp,%a2*/
-	.word	0x4e6a
+	/*move	%usp,%a3*/
+	.word	0x4e6b
 .endm
 
 .macro WRUSP
-	/*move	%a0,%usp*/
-	.word	0x4e60
+	/*move	%a3,%usp*/
+	.word	0x4e63
 .endm
 
 #endif /* !CONFIG_COLDFIRE_SW_A7 */
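
The .word values here hand-assemble the privileged MOVE USP instruction, presumably because the assembler will not emit it for this target. Its 16-bit encoding is 0100 1110 0110 drrr, where bit 3 selects the direction (1 = USP to An, 0 = An to USP) and the low three bits name the address register, which is why retargeting the macros from %a2/%a0 to %a3 turns 0x4e6a into 0x4e6b and 0x4e60 into 0x4e63. A standalone check of that encoding (illustration only):

	#include <stdio.h>

	int main(void)
	{
		unsigned reg = 3;	/* %a3 */
		/* 0x4e68 | n encodes "move %usp,%an"; 0x4e60 | n encodes "move %an,%usp" */
		printf("move %%usp,%%a%u -> 0x%04x\n", reg, 0x4e68 | reg);
		printf("move %%a%u,%%usp -> 0x%04x\n", reg, 0x4e60 | reg);
		return 0;
	}
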
diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h
index 56d0d5db231c..870e5347155b 100644
--- a/arch/m68k/include/asm/hardirq.h
+++ b/arch/m68k/include/asm/hardirq.h
@@ -1,5 +1,34 @@
1#ifdef __uClinux__ 1#ifndef __M68K_HARDIRQ_H
2#include "hardirq_no.h" 2#define __M68K_HARDIRQ_H
3
4#include <linux/threads.h>
5#include <linux/cache.h>
6#include <asm/irq.h>
7
8#define HARDIRQ_BITS 8
9
10/*
11 * The hardirq mask has to be large enough to have
12 * space for potentially all IRQ sources in the system
13 * nesting on a single CPU:
14 */
15#if (1 << HARDIRQ_BITS) < NR_IRQS
16# error HARDIRQ_BITS is too low!
17#endif
18
19#ifdef CONFIG_MMU
20
21/* entry.S is sensitive to the offsets of these fields */
22typedef struct {
23 unsigned int __softirq_pending;
24} ____cacheline_aligned irq_cpustat_t;
25
26#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
27
3#else 28#else
4#include "hardirq_mm.h" 29
30#include <asm-generic/hardirq.h>
31
32#endif /* !CONFIG_MMU */
33
5#endif 34#endif
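
As a worked check of the sanity test carried over into the merged header: with HARDIRQ_BITS = 8 the hardirq nesting count can represent 1 << 8 = 256 simultaneously outstanding interrupt sources on one CPU, so the #error only fires on a configuration where NR_IRQS exceeds 256.
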
diff --git a/arch/m68k/include/asm/hardirq_mm.h b/arch/m68k/include/asm/hardirq_mm.h
deleted file mode 100644
index 394ee946015c..000000000000
--- a/arch/m68k/include/asm/hardirq_mm.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __M68K_HARDIRQ_H
-#define __M68K_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */
-typedef struct {
-	unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-
-#define HARDIRQ_BITS	8
-
-#endif
diff --git a/arch/m68k/include/asm/hardirq_no.h b/arch/m68k/include/asm/hardirq_no.h
deleted file mode 100644
index b44b14be87d9..000000000000
--- a/arch/m68k/include/asm/hardirq_no.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __M68K_HARDIRQ_H
-#define __M68K_HARDIRQ_H
-
-#include <asm/irq.h>
-
-#define HARDIRQ_BITS	8
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-#include <asm-generic/hardirq.h>
-
-#endif /* __M68K_HARDIRQ_H */
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 907eff1edd2f..69ed0d74d532 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -33,15 +33,6 @@
 #include <linux/spinlock_types.h>
 
 /*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-/*
  * Interrupt source definitions
  * General interrupt sources are the level 1-7.
  * Adding an interrupt service routine for one of these sources
@@ -131,4 +122,6 @@ asmlinkage void __m68k_handle_int(unsigned int, struct pt_regs *);
 #define irq_canonicalize(irq)	(irq)
 #endif /* CONFIG_MMU */
 
+asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
+
 #endif /* _M68K_IRQ_H_ */
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 415d5484916c..789f3b2de0e9 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -40,6 +40,5 @@ extern unsigned long hw_timer_offset(void);
 extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
 
 extern void config_BSP(char *command, int len);
-extern void do_IRQ(int irq, struct pt_regs *fp);
 
 #endif /* _M68K_MACHDEP_H */
diff --git a/arch/m68k/include/asm/module.h b/arch/m68k/include/asm/module.h
index 5f21e11071bd..edffe66b7f49 100644
--- a/arch/m68k/include/asm/module.h
+++ b/arch/m68k/include/asm/module.h
@@ -1,46 +1,41 @@
 #ifndef _ASM_M68K_MODULE_H
 #define _ASM_M68K_MODULE_H
 
-#ifdef CONFIG_MMU
+enum m68k_fixup_type {
+	m68k_fixup_memoffset,
+	m68k_fixup_vnode_shift,
+};
+
+struct m68k_fixup_info {
+	enum m68k_fixup_type type;
+	void *addr;
+};
 
 struct mod_arch_specific {
 	struct m68k_fixup_info *fixup_start, *fixup_end;
 };
 
+#ifdef CONFIG_MMU
+
 #define MODULE_ARCH_INIT { \
 	.fixup_start = __start_fixup, \
 	.fixup_end = __stop_fixup, \
 }
 
 
-enum m68k_fixup_type {
-	m68k_fixup_memoffset,
-	m68k_fixup_vnode_shift,
-};
-
-struct m68k_fixup_info {
-	enum m68k_fixup_type type;
-	void *addr;
-};
-
 #define m68k_fixup(type, addr)				\
 	"	.section \".m68k_fixup\",\"aw\"\n"	\
 	"	.long " #type "," #addr "\n"		\
 	"	.previous\n"
 
+#endif /* CONFIG_MMU */
+
 extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];
 
 struct module;
 extern void module_fixup(struct module *mod, struct m68k_fixup_info *start,
 			 struct m68k_fixup_info *end);
 
-#else
-
-struct mod_arch_specific {
-};
-
-#endif /* CONFIG_MMU */
-
 #define Elf_Shdr Elf32_Shdr
 #define Elf_Sym Elf32_Sym
 #define Elf_Ehdr Elf32_Ehdr
diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
index 5bc09c787a11..60e88660169c 100644
--- a/arch/m68k/include/asm/signal.h
+++ b/arch/m68k/include/asm/signal.h
@@ -150,7 +150,7 @@ typedef struct sigaltstack {
 #ifdef __KERNEL__
 #include <asm/sigcontext.h>
 
-#ifndef __uClinux__
+#ifndef CONFIG_CPU_HAS_NO_BITFIELDS
 #define __HAVE_ARCH_SIG_BITOPS
 
 static inline void sigaddset(sigset_t *set, int _sig)
@@ -199,15 +199,14 @@ static inline int sigfindinword(unsigned long word)
 	return word ^ 31;
 }
 
-struct pt_regs;
-extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
+#endif /* !CONFIG_CPU_HAS_NO_BITFIELDS */
 
-#else
-
-#undef __HAVE_ARCH_SIG_BITOPS
+#ifdef __uClinux__
 #define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
+#else
+struct pt_regs;
+extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
 #endif /* __uClinux__ */
-#endif /* __KERNEL__ */
 
+#endif /* __KERNEL__ */
 #endif /* _M68K_SIGNAL_H */
diff --git a/arch/m68k/include/asm/system.h b/arch/m68k/include/asm/system.h
index ccea925ff4f5..47b01f4726bc 100644
--- a/arch/m68k/include/asm/system.h
+++ b/arch/m68k/include/asm/system.h
@@ -1,5 +1,193 @@
-#ifdef __uClinux__
-#include "system_no.h"
-#else
-#include "system_mm.h"
-#endif
+#ifndef _M68K_SYSTEM_H
+#define _M68K_SYSTEM_H
+
+#include <linux/linkage.h>
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+#include <asm/segment.h>
+#include <asm/entry.h>
+
+#ifdef __KERNEL__
+
+/*
+ * switch_to(n) should switch tasks to task ptr, first checking that
+ * ptr isn't the current task, in which case it does nothing.  This
+ * also clears the TS-flag if the task we switched to has used the
+ * math co-processor latest.
+ */
+/*
+ * switch_to() saves the extra registers, that are not saved
+ * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
+ * a0-a1. Some of these are used by schedule() and its predecessors
+ * and so we might see unexpected behaviors when a task returns
+ * with unexpected register values.
+ *
+ * syscall stores these registers itself and none of them are used
+ * by syscall after the function in the syscall has been called.
+ *
+ * Beware that resume now expects *next to be in d1 and the offset of
+ * tss to be in a1. This saves a few instructions as we no longer have
+ * to push them onto the stack and read them back right after.
+ *
+ * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
+ *
+ * Changed 96/09/19 by Andreas Schwab
+ * pass prev in a0, next in a1
+ */
+asmlinkage void resume(void);
+#define switch_to(prev,next,last) do { \
+	register void *_prev __asm__ ("a0") = (prev); \
+	register void *_next __asm__ ("a1") = (next); \
+	register void *_last __asm__ ("d1"); \
+	__asm__ __volatile__("jbsr resume" \
+			     : "=a" (_prev), "=a" (_next), "=d" (_last) \
+			     : "0" (_prev), "1" (_next) \
+			     : "d0", "d2", "d3", "d4", "d5"); \
+	(last) = _last; \
+} while (0)
+
+
+/*
+ * Force strict CPU ordering.
+ * Not really required on m68k...
+ */
+#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
+#define mb()		barrier()
+#define rmb()		barrier()
+#define wmb()		barrier()
+#define read_barrier_depends()	((void)0)
+#define set_mb(var, value)	({ (var) = (value); wmb(); })
+
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	((void)0)
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+
+#ifndef CONFIG_RMW_INSNS
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	unsigned long flags, tmp;
+
+	local_irq_save(flags);
+
+	switch (size) {
+	case 1:
+		tmp = *(u8 *)ptr;
+		*(u8 *)ptr = x;
+		x = tmp;
+		break;
+	case 2:
+		tmp = *(u16 *)ptr;
+		*(u16 *)ptr = x;
+		x = tmp;
+		break;
+	case 4:
+		tmp = *(u32 *)ptr;
+		*(u32 *)ptr = x;
+		x = tmp;
+		break;
+	default:
+		BUG();
+	}
+
+	local_irq_restore(flags);
+	return x;
+}
+#else
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__
+			("moveb %2,%0\n\t"
+			 "1:\n\t"
+			 "casb %0,%1,%2\n\t"
+			 "jne 1b"
+			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	case 2:
+		__asm__ __volatile__
+			("movew %2,%0\n\t"
+			 "1:\n\t"
+			 "casw %0,%1,%2\n\t"
+			 "jne 1b"
+			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	case 4:
+		__asm__ __volatile__
+			("movel %2,%0\n\t"
+			 "1:\n\t"
+			 "casl %0,%1,%2\n\t"
+			 "jne 1b"
+			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	}
+	return x;
+}
+#endif
+
+#include <asm-generic/cmpxchg-local.h>
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#ifdef CONFIG_RMW_INSNS
+#define __HAVE_ARCH_CMPXCHG	1
+
+static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
+				      unsigned long new, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__ ("casb %0,%2,%1"
+				      : "=d" (old), "=m" (*(char *)p)
+				      : "d" (new), "0" (old), "m" (*(char *)p));
+		break;
+	case 2:
+		__asm__ __volatile__ ("casw %0,%2,%1"
+				      : "=d" (old), "=m" (*(short *)p)
+				      : "d" (new), "0" (old), "m" (*(short *)p));
+		break;
+	case 4:
+		__asm__ __volatile__ ("casl %0,%2,%1"
+				      : "=d" (old), "=m" (*(int *)p)
+				      : "d" (new), "0" (old), "m" (*(int *)p));
+		break;
+	}
+	return old;
+}
+
+#define cmpxchg(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+			(unsigned long)(n), sizeof(*(ptr))))
+#else
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))))
+
+#include <asm-generic/cmpxchg.h>
+
+#endif
+
+#define arch_align_stack(x) (x)
+
+#endif /* __KERNEL__ */
+
+#endif /* _M68K_SYSTEM_H */
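
A minimal usage sketch of the cmpxchg() defined above, assuming a kernel
context with this header included; atomic_track_max() and its callers are
hypothetical and not part of this patch:

/* Hypothetical helper -- illustrates the usual cmpxchg() retry loop. */
static inline void atomic_track_max(unsigned long *max, unsigned long val)
{
	unsigned long old = *max;

	/*
	 * Retry until cmpxchg() confirms it replaced the exact value we
	 * compared against; it returns the value it actually observed.
	 */
	while (val > old) {
		unsigned long seen = cmpxchg(max, old, val);
		if (seen == old)
			break;		/* our store won the race */
		old = seen;		/* someone else updated; re-check */
	}
}

On CONFIG_RMW_INSNS builds this compiles down to a cas retry loop; otherwise
the asm-generic fallback masks interrupts, which on this UP-oriented port
gives the same guarantee.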
diff --git a/arch/m68k/include/asm/system_mm.h b/arch/m68k/include/asm/system_mm.h
deleted file mode 100644
index 47b01f4726bc..000000000000
--- a/arch/m68k/include/asm/system_mm.h
+++ /dev/null
@@ -1,193 +0,0 @@
-#ifndef _M68K_SYSTEM_H
-#define _M68K_SYSTEM_H
-
-#include <linux/linkage.h>
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-#include <asm/segment.h>
-#include <asm/entry.h>
-
-#ifdef __KERNEL__
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing.  This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors
- * and so we might get see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1
- */
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) do { \
-	register void *_prev __asm__ ("a0") = (prev); \
-	register void *_next __asm__ ("a1") = (next); \
-	register void *_last __asm__ ("d1"); \
-	__asm__ __volatile__("jbsr resume" \
-			     : "=a" (_prev), "=a" (_next), "=d" (_last) \
-			     : "0" (_prev), "1" (_next) \
-			     : "d0", "d2", "d3", "d4", "d5"); \
-	(last) = _last; \
-} while (0)
-
-
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
-#define mb()		barrier()
-#define rmb()		barrier()
-#define wmb()		barrier()
-#define read_barrier_depends()	((void)0)
-#define set_mb(var, value)	({ (var) = (value); wmb(); })
-
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	((void)0)
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-#ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	unsigned long flags, tmp;
-
-	local_irq_save(flags);
-
-	switch (size) {
-	case 1:
-		tmp = *(u8 *)ptr;
-		*(u8 *)ptr = x;
-		x = tmp;
-		break;
-	case 2:
-		tmp = *(u16 *)ptr;
-		*(u16 *)ptr = x;
-		x = tmp;
-		break;
-	case 4:
-		tmp = *(u32 *)ptr;
-		*(u32 *)ptr = x;
-		x = tmp;
-		break;
-	default:
-		BUG();
-	}
-
-	local_irq_restore(flags);
-	return x;
-}
-#else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-	case 1:
-		__asm__ __volatile__
-			("moveb %2,%0\n\t"
-			 "1:\n\t"
-			 "casb %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 2:
-		__asm__ __volatile__
-			("movew %2,%0\n\t"
-			 "1:\n\t"
-			 "casw %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 4:
-		__asm__ __volatile__
-			("movel %2,%0\n\t"
-			 "1:\n\t"
-			 "casl %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	}
-	return x;
-}
-#endif
-
-#include <asm-generic/cmpxchg-local.h>
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-#ifdef CONFIG_RMW_INSNS
-#define __HAVE_ARCH_CMPXCHG	1
-
-static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
-				      unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-		__asm__ __volatile__ ("casb %0,%2,%1"
-				      : "=d" (old), "=m" (*(char *)p)
-				      : "d" (new), "0" (old), "m" (*(char *)p));
-		break;
-	case 2:
-		__asm__ __volatile__ ("casw %0,%2,%1"
-				      : "=d" (old), "=m" (*(short *)p)
-				      : "d" (new), "0" (old), "m" (*(short *)p));
-		break;
-	case 4:
-		__asm__ __volatile__ ("casl %0,%2,%1"
-				      : "=d" (old), "=m" (*(int *)p)
-				      : "d" (new), "0" (old), "m" (*(int *)p));
-		break;
-	}
-	return old;
-}
-
-#define cmpxchg(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
-			(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
-			(unsigned long)(n), sizeof(*(ptr))))
-#else
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-
-#include <asm-generic/cmpxchg.h>
-
-#endif
-
-#define arch_align_stack(x) (x)
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_SYSTEM_H */
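
Both switch_to() variants in this commit depend on GCC's explicit-register
variables to hand arguments to resume() in fixed registers. A stand-alone
sketch of that idiom follows; add_pinned() is a hypothetical example, not
kernel code:

/*
 * Hypothetical demonstration of explicit-register variables: each
 * variable is guaranteed to occupy the named register when the asm
 * statement executes, so the asm body can assume a private calling
 * convention (as resume() does with prev in a0 and next in a1).
 */
static inline long add_pinned(long x, long y)
{
	register long acc __asm__ ("d0") = x;	/* pin x to d0 */
	register long inc __asm__ ("d1") = y;	/* pin y to d1 */

	__asm__ ("addl %1,%0" : "+d" (acc) : "d" (inc));
	return acc;
}

The retained mm flavor exposes the pinned registers to the compiler through
operand constraints, while the deleted nommu flavor below moves the values
into a0/a1 inside the asm body and lists those registers as clobbered.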
diff --git a/arch/m68k/include/asm/system_no.h b/arch/m68k/include/asm/system_no.h
deleted file mode 100644
index 6fe9f93bc3ff..000000000000
--- a/arch/m68k/include/asm/system_no.h
+++ /dev/null
@@ -1,153 +0,0 @@
-#ifndef _M68KNOMMU_SYSTEM_H
-#define _M68KNOMMU_SYSTEM_H
-
-#include <linux/linkage.h>
-#include <linux/irqflags.h>
-#include <asm/segment.h>
-#include <asm/entry.h>
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing.  This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors
- * and so we might get see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1, offset of tss in d1, and whether
- * the mm structures are shared in d2 (to avoid atc flushing).
- */
-asmlinkage void resume(void);
-#define switch_to(prev,next,last)				\
-{								\
-	void *_last;						\
-	__asm__ __volatile__(					\
-		"movel	%1, %%a0\n\t"				\
-		"movel	%2, %%a1\n\t"				\
-		"jbsr resume\n\t"				\
-		"movel	%%d1, %0\n\t"				\
-		: "=d" (_last)					\
-		: "d" (prev), "d" (next)			\
-		: "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1");	\
-	(last) = _last;						\
-}
-
-#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
-
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop()	asm volatile ("nop"::)
-#define mb()	asm volatile (""   : : :"memory")
-#define rmb()	asm volatile (""   : : :"memory")
-#define wmb()	asm volatile (""   : : :"memory")
-#define set_mb(var, value)	({ (var) = (value); wmb(); })
-
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define read_barrier_depends()	((void)0)
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-#ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	unsigned long tmp, flags;
-
-	local_irq_save(flags);
-
-	switch (size) {
-	case 1:
-		__asm__ __volatile__
-			("moveb %2,%0\n\t"
-			 "moveb %1,%2"
-			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 2:
-		__asm__ __volatile__
-			("movew %2,%0\n\t"
-			 "movew %1,%2"
-			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 4:
-		__asm__ __volatile__
-			("movel %2,%0\n\t"
-			 "movel %1,%2"
-			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	}
-	local_irq_restore(flags);
-	return tmp;
-}
-#else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-	case 1:
-		__asm__ __volatile__
-			("moveb %2,%0\n\t"
-			 "1:\n\t"
-			 "casb %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 2:
-		__asm__ __volatile__
-			("movew %2,%0\n\t"
-			 "1:\n\t"
-			 "casw %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 4:
-		__asm__ __volatile__
-			("movel %2,%0\n\t"
-			 "1:\n\t"
-			 "casl %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	}
-	return x;
-}
-#endif
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#include <asm-generic/cmpxchg.h>
-
-#define arch_align_stack(x) (x)
-
-
-#endif /* _M68KNOMMU_SYSTEM_H */
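
As context for xchg(), which the deleted headers and the merged one define
identically: its textbook use is a test-and-set lock. A minimal sketch,
assuming a kernel context where cpu_relax() (<asm/processor.h>) is
available; tas_lock()/tas_unlock() are hypothetical, and real code should
use the spinlock API instead:

/* Hypothetical test-and-set lock built on xchg() -- illustration only. */
static inline void tas_lock(volatile unsigned long *lock)
{
	/* xchg() returns the old value: reading 0 means we took the lock. */
	while (xchg(lock, 1UL) != 0)
		cpu_relax();		/* spin until the holder releases */
}

static inline void tas_unlock(volatile unsigned long *lock)
{
	*lock = 0UL;			/* plain store suffices on this UP port */
}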
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index 0bffb17d5db7..151068f64f44 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -22,7 +22,6 @@ extern e_vector vectors[];
 asmlinkage void auto_inthandler(void);
 asmlinkage void user_inthandler(void);
 asmlinkage void bad_inthandler(void);
-extern void init_vectors(void);
 
 #endif
 