author     Linus Torvalds <torvalds@linux-foundation.org>  2011-07-26 01:50:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-07-26 01:50:54 -0400
commit     3b76eefe0f970c2e19f165d4a1650abc523d10bc (patch)
tree       1987bc1b2b61ea70170094e3cb1204f5b0a0401e /arch
parent     91d44d99992ff2587104df5760bfffbb3564b3c2 (diff)
parent     8c9f08f9de38c9af3a946faf0cccd7fc46978443 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu:
  m68k: Revive reporting of spurious interrupts
  m68knommu: Move forward declaration of do_IRQ() from machdep.h to irq.h
  m68k: fix some atomic operation asm address modes for ColdFire
  m68k: use CPU_HAS_NO_BITFIELDS for signal functions
  m68k: merge and clean up delay.h files
  m68knommu: correctly use trap_init
  m68knommu: merge ColdFire 5206 and 5206e platform code
  m68k: merge mmu and non-mmu bitops.h
  m68k: merge MMU and non MMU versions of system.h
  m68k: merge MMU and non-MMU versions of asm/hardirq.h
  m68k: merge the non-mmu and mmu versions of module.c
  m68knommu: Fix printk() format in free_initrd_mem()
  m68knommu: Make empty_zero_page "void *", like on m68k
Diffstat (limited to 'arch')
-rw-r--r--  arch/m68k/Kconfig                        |   4
-rw-r--r--  arch/m68k/Kconfig.nommu                  |   3
-rw-r--r--  arch/m68k/Makefile_no                    |   2
-rw-r--r--  arch/m68k/include/asm/atomic.h           |   6
-rw-r--r--  arch/m68k/include/asm/bitops.h           | 531
-rw-r--r--  arch/m68k/include/asm/bitops_mm.h        | 501
-rw-r--r--  arch/m68k/include/asm/bitops_no.h        | 333
-rw-r--r--  arch/m68k/include/asm/delay.h            |  97
-rw-r--r--  arch/m68k/include/asm/delay_mm.h         |  57
-rw-r--r--  arch/m68k/include/asm/delay_no.h         |  76
-rw-r--r--  arch/m68k/include/asm/entry_no.h         |  12
-rw-r--r--  arch/m68k/include/asm/hardirq.h          |  35
-rw-r--r--  arch/m68k/include/asm/hardirq_mm.h       |  16
-rw-r--r--  arch/m68k/include/asm/hardirq_no.h       |  19
-rw-r--r--  arch/m68k/include/asm/irq.h              |  11
-rw-r--r--  arch/m68k/include/asm/machdep.h          |   1
-rw-r--r--  arch/m68k/include/asm/module.h           |  31
-rw-r--r--  arch/m68k/include/asm/signal.h           |  15
-rw-r--r--  arch/m68k/include/asm/system.h           | 194
-rw-r--r--  arch/m68k/include/asm/system_mm.h        | 193
-rw-r--r--  arch/m68k/include/asm/system_no.h        | 153
-rw-r--r--  arch/m68k/include/asm/traps.h            |   1
-rw-r--r--  arch/m68k/kernel/irq.c                   |  10
-rw-r--r--  arch/m68k/kernel/module.c                | 130
-rw-r--r--  arch/m68k/kernel/module_mm.c             | 128
-rw-r--r--  arch/m68k/kernel/module_no.c             |  92
-rw-r--r--  arch/m68k/kernel/traps_no.c              |   4
-rw-r--r--  arch/m68k/lib/Makefile                   |   2
-rw-r--r--  arch/m68k/lib/delay.c                    |  21
-rw-r--r--  arch/m68k/mm/init_no.c                   |  11
-rw-r--r--  arch/m68k/platform/5206/config.c         |   6
-rw-r--r--  arch/m68k/platform/5206e/Makefile        |  18
-rw-r--r--  arch/m68k/platform/5206e/config.c        | 127
-rw-r--r--  arch/m68k/platform/5206e/gpio.c          |  49
-rw-r--r--  arch/m68k/platform/5272/intc.c           |   2
-rw-r--r--  arch/m68k/platform/68328/entry.S         |  15
-rw-r--r--  arch/m68k/platform/68328/ints.c          |  10
-rw-r--r--  arch/m68k/platform/68360/entry.S         |  15
-rw-r--r--  arch/m68k/platform/68360/ints.c          |  11
-rw-r--r--  arch/m68k/platform/coldfire/entry.S      |  24
-rw-r--r--  arch/m68k/platform/coldfire/intc-2.c     |   2
-rw-r--r--  arch/m68k/platform/coldfire/intc-simr.c  |   2
-rw-r--r--  arch/m68k/platform/coldfire/intc.c       |   1
-rw-r--r--  arch/m68k/platform/coldfire/vectors.c    |  10
44 files changed, 1074 insertions(+), 1907 deletions(-)
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index d66e34c718d0..284cd3771eaa 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -41,6 +41,10 @@ config NO_DMA
 config ZONE_DMA
 	bool
 	default y
+
+config CPU_HAS_NO_BITFIELDS
+	bool
+
 config HZ
 	int
 	default 1000 if CLEOPATRA
diff --git a/arch/m68k/Kconfig.nommu b/arch/m68k/Kconfig.nommu
index b004dc1b1710..ff46383112a4 100644
--- a/arch/m68k/Kconfig.nommu
+++ b/arch/m68k/Kconfig.nommu
@@ -16,6 +16,7 @@ config GENERIC_CLOCKEVENTS
 
 config M68000
 	bool
+	select CPU_HAS_NO_BITFIELDS
 	help
 	  The Freescale (was Motorola) 68000 CPU is the first generation of
 	  the well known M68K family of processors. The CPU core as well as
@@ -25,6 +26,7 @@ config M68000
 
 config MCPU32
 	bool
+	select CPU_HAS_NO_BITFIELDS
 	help
 	  The Freescale (was then Motorola) CPU32 is a CPU core that is
 	  based on the 68020 processor. For the most part it is used in
@@ -34,6 +36,7 @@ config COLDFIRE
 	bool
 	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
+	select CPU_HAS_NO_BITFIELDS
 	help
 	  The Freescale ColdFire family of processors is a modern derivitive
 	  of the 68000 processor family. They are mainly targeted at embedded
diff --git a/arch/m68k/Makefile_no b/arch/m68k/Makefile_no
index 81652ab893e1..844d3f172264 100644
--- a/arch/m68k/Makefile_no
+++ b/arch/m68k/Makefile_no
@@ -13,7 +13,7 @@ platform-$(CONFIG_M68EZ328) := 68EZ328
 platform-$(CONFIG_M68VZ328)	:= 68VZ328
 platform-$(CONFIG_M68360)	:= 68360
 platform-$(CONFIG_M5206)	:= 5206
-platform-$(CONFIG_M5206e)	:= 5206e
+platform-$(CONFIG_M5206e)	:= 5206
 platform-$(CONFIG_M520x)	:= 520x
 platform-$(CONFIG_M523x)	:= 523x
 platform-$(CONFIG_M5249)	:= 5249
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 03ae3d14cd4a..307a573881ad 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -169,18 +169,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 	char c;
 	__asm__ __volatile__("addl %2,%1; smi %0"
 			     : "=d" (c), "+m" (*v)
-			     : "id" (i));
+			     : ASM_DI (i));
 	return c != 0;
 }
 
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
 {
-	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
+	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
 }
 
 static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 {
-	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
+	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
 }
 
 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index ce163abddaba..c6baa913592a 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -1,5 +1,530 @@
-#ifdef __uClinux__
-#include "bitops_no.h"
+#ifndef _M68K_BITOPS_H
+#define _M68K_BITOPS_H
+/*
+ * Copyright 1992, Linus Torvalds.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+
+/*
+ * Bit access functions vary across the ColdFire and 68k families.
+ * So we will break them out here, and then macro in the ones we want.
+ *
+ * ColdFire - supports standard bset/bclr/bchg with register operand only
+ * 68000    - supports standard bset/bclr/bchg with memory operand
+ * >= 68020 - also supports the bfset/bfclr/bfchg instructions
+ *
+ * Although it is possible to use only the bset/bclr/bchg with register
+ * operands on all platforms you end up with larger generated code.
+ * So we use the best form possible on a given platform.
+ */
+
+static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bset %1,(%0)"
+		:
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+}
+
+static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bset %1,%0"
+		: "+m" (*p)
+		: "di" (nr & 7));
+}
+
+static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
+{
+	__asm__ __volatile__ ("bfset %1{%0:#1}"
+		:
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
+#else
+#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+				bset_mem_set_bit(nr, vaddr) : \
+				bfset_mem_set_bit(nr, vaddr))
+#endif
+
+#define __set_bit(nr, vaddr)	set_bit(nr, vaddr)
+
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit()	barrier()
+#define smp_mb__after_clear_bit()	barrier()
+
+static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bclr %1,(%0)"
+		:
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+}
+
+static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bclr %1,%0"
+		: "+m" (*p)
+		: "di" (nr & 7));
+}
+
+static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+	__asm__ __volatile__ ("bfclr %1{%0:#1}"
+		:
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
+#else
+#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+				bclr_mem_clear_bit(nr, vaddr) : \
+				bfclr_mem_clear_bit(nr, vaddr))
+#endif
+
+#define __clear_bit(nr, vaddr)	clear_bit(nr, vaddr)
+
+
+static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bchg %1,(%0)"
+		:
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+}
+
+static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bchg %1,%0"
+		: "+m" (*p)
+		: "di" (nr & 7));
+}
+
+static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
+{
+	__asm__ __volatile__ ("bfchg %1{%0:#1}"
+		:
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
+#else
+#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+				bchg_mem_change_bit(nr, vaddr) : \
+				bfchg_mem_change_bit(nr, vaddr))
+#endif
+
+#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)
+
+
+static inline int test_bit(int nr, const unsigned long *vaddr)
+{
+	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+}
+
+
+static inline int bset_reg_test_and_set_bit(int nr,
+					    volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bset %2,(%1); sne %0"
+		: "=d" (retval)
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+	return retval;
+}
+
+static inline int bset_mem_test_and_set_bit(int nr,
+					    volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bset %2,%1; sne %0"
+		: "=d" (retval), "+m" (*p)
+		: "di" (nr & 7));
+	return retval;
+}
+
+static inline int bfset_mem_test_and_set_bit(int nr,
+					     volatile unsigned long *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
+		: "=d" (retval)
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+	return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
+#else
+#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+					bset_mem_test_and_set_bit(nr, vaddr) : \
+					bfset_mem_test_and_set_bit(nr, vaddr))
+#endif
+
+#define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)
+
+
+static inline int bclr_reg_test_and_clear_bit(int nr,
+					      volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
+		: "=d" (retval)
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+	return retval;
+}
+
+static inline int bclr_mem_test_and_clear_bit(int nr,
+					      volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bclr %2,%1; sne %0"
+		: "=d" (retval), "+m" (*p)
+		: "di" (nr & 7));
+	return retval;
+}
+
+static inline int bfclr_mem_test_and_clear_bit(int nr,
+					       volatile unsigned long *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
+		: "=d" (retval)
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+	return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
+#else
+#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+					bclr_mem_test_and_clear_bit(nr, vaddr) : \
+					bfclr_mem_test_and_clear_bit(nr, vaddr))
+#endif
+
+#define __test_and_clear_bit(nr, vaddr)	test_and_clear_bit(nr, vaddr)
+
+
+static inline int bchg_reg_test_and_change_bit(int nr,
+					       volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
+		: "=d" (retval)
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+	return retval;
+}
+
+static inline int bchg_mem_test_and_change_bit(int nr,
+					       volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bchg %2,%1; sne %0"
+		: "=d" (retval), "+m" (*p)
+		: "di" (nr & 7));
+	return retval;
+}
+
+static inline int bfchg_mem_test_and_change_bit(int nr,
+						volatile unsigned long *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
+		: "=d" (retval)
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+	return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define	test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define	test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
+#else
+#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+					bchg_mem_test_and_change_bit(nr, vaddr) : \
+					bfchg_mem_test_and_change_bit(nr, vaddr))
+#endif
+
+#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
+
+
+/*
+ * The true 68020 and more advanced processors support the "bfffo"
+ * instruction for finding bits. ColdFire and simple 68000 parts
+ * (including CPU32) do not support this. They simply use the generic
+ * functions.
+ */
+#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ffz.h>
+#else
+
+static inline int find_first_zero_bit(const unsigned long *vaddr,
+				      unsigned size)
+{
+	const unsigned long *p = vaddr;
+	int res = 32;
+	unsigned int words;
+	unsigned long num;
+
+	if (!size)
+		return 0;
+
+	words = (size + 31) >> 5;
+	while (!(num = ~*p++)) {
+		if (!--words)
+			goto out;
+	}
+
+	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+			      : "=d" (res) : "d" (num & -num));
+	res ^= 31;
+out:
+	res += ((long)p - (long)vaddr - 4) * 8;
+	return res < size ? res : size;
+}
+#define find_first_zero_bit find_first_zero_bit
+
+static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
+				     int offset)
+{
+	const unsigned long *p = vaddr + (offset >> 5);
+	int bit = offset & 31UL, res;
+
+	if (offset >= size)
+		return size;
+
+	if (bit) {
+		unsigned long num = ~*p++ & (~0UL << bit);
+		offset -= bit;
+
+		/* Look for zero in first longword */
+		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+				      : "=d" (res) : "d" (num & -num));
+		if (res < 32) {
+			offset += res ^ 31;
+			return offset < size ? offset : size;
+		}
+		offset += 32;
+
+		if (offset >= size)
+			return size;
+	}
+	/* No zero yet, search remaining full bytes for a zero */
+	return offset + find_first_zero_bit(p, size - offset);
+}
+#define find_next_zero_bit find_next_zero_bit
+
+static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
+{
+	const unsigned long *p = vaddr;
+	int res = 32;
+	unsigned int words;
+	unsigned long num;
+
+	if (!size)
+		return 0;
+
+	words = (size + 31) >> 5;
+	while (!(num = *p++)) {
+		if (!--words)
+			goto out;
+	}
+
+	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+			      : "=d" (res) : "d" (num & -num));
+	res ^= 31;
+out:
+	res += ((long)p - (long)vaddr - 4) * 8;
+	return res < size ? res : size;
+}
+#define find_first_bit find_first_bit
+
+static inline int find_next_bit(const unsigned long *vaddr, int size,
+				int offset)
+{
+	const unsigned long *p = vaddr + (offset >> 5);
+	int bit = offset & 31UL, res;
+
+	if (offset >= size)
+		return size;
+
+	if (bit) {
+		unsigned long num = *p++ & (~0UL << bit);
+		offset -= bit;
+
+		/* Look for one in first longword */
+		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+				      : "=d" (res) : "d" (num & -num));
+		if (res < 32) {
+			offset += res ^ 31;
+			return offset < size ? offset : size;
+		}
+		offset += 32;
+
+		if (offset >= size)
+			return size;
+	}
+	/* No one yet, search remaining full bytes for a one */
+	return offset + find_first_bit(p, size - offset);
+}
+#define find_next_bit find_next_bit
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+	int res;
+
+	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+			      : "=d" (res) : "d" (~word & -~word));
+	return res ^ 31;
+}
+
+#endif
+
+#ifdef __KERNEL__
+
+#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+
+/*
+ * The newer ColdFire family members support a "bitrev" instruction
+ * and we can use that to implement a fast ffs. Older Coldfire parts,
+ * and normal 68000 parts don't have anything special, so we use the
+ * generic functions for those.
+ */
+#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
+	!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
+static inline int __ffs(int x)
+{
+	__asm__ __volatile__ ("bitrev %0; ff1 %0"
+		: "=d" (x)
+		: "0" (x));
+	return x;
+}
+
+static inline int ffs(int x)
+{
+	if (!x)
+		return 0;
+	return __ffs(x) + 1;
+}
+
+#else
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#endif
+
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+
 #else
-#include "bitops_mm.h"
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static inline int ffs(int x)
+{
+	int cnt;
+
+	__asm__ ("bfffo %1{#0:#0},%0"
+		: "=d" (cnt)
+		: "dm" (x & -x));
+	return 32 - cnt;
+}
+#define __ffs(x) (ffs(x) - 1)
+
+/*
+ * fls: find last bit set.
+ */
+static inline int fls(int x)
+{
+	int cnt;
+
+	__asm__ ("bfffo %1{#0,#0},%0"
+		: "=d" (cnt)
+		: "dm" (x));
+	return 32 - cnt;
+}
+
+static inline int __fls(int x)
+{
+	return fls(x) - 1;
+}
+
 #endif
+
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#endif /* __KERNEL__ */
+
+#endif /* _M68K_BITOPS_H */
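
Editorial note: every helper in the merged header above addresses bits with (nr ^ 31) / 8, because m68k numbers bits big-endian within each 32-bit long word: bit nr of the bitmap lives in byte (nr ^ 31) / 8 of the long word, at bit position nr & 7. A small standalone sketch of that arithmetic (plain C, illustrative only, no inline asm):

#include <stdio.h>

/* Illustrative only: where set_bit(nr, vaddr) lands on m68k.  Bit nr
 * of a long-word bitmap maps to byte (nr ^ 31) / 8 of the long word
 * (big-endian byte order) and to bit (nr & 7) within that byte. */
static void locate_bit(int nr)
{
	printf("bit %2d -> byte offset %d, bit %d in that byte\n",
	       nr, (nr ^ 31) / 8, nr & 7);
}

int main(void)
{
	locate_bit(0);	/* byte 3, bit 0: LSB of the long word */
	locate_bit(7);	/* byte 3, bit 7 */
	locate_bit(31);	/* byte 0, bit 7: MSB of the long word */
	return 0;
}
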
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h
deleted file mode 100644
index 89cf5b814a4d..000000000000
--- a/arch/m68k/include/asm/bitops_mm.h
+++ /dev/null
@@ -1,501 +0,0 @@
-#ifndef _M68K_BITOPS_H
-#define _M68K_BITOPS_H
-/*
- * Copyright 1992, Linus Torvalds.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#include <linux/compiler.h>
-
-/*
- * Require 68020 or better.
- *
- * They use the standard big-endian m680x0 bit ordering.
- */
-
-#define test_and_set_bit(nr,vaddr) \
-  (__builtin_constant_p(nr) ? \
-   __constant_test_and_set_bit(nr, vaddr) : \
-   __generic_test_and_set_bit(nr, vaddr))
-
-#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)
-
-static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	char retval;
-
-	__asm__ __volatile__ ("bset %2,%1; sne %0"
-			: "=d" (retval), "+m" (*p)
-			: "di" (nr & 7));
-
-	return retval;
-}
-
-static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
-{
-	char retval;
-
-	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
-			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
-	return retval;
-}
-
-#define set_bit(nr,vaddr) \
-  (__builtin_constant_p(nr) ? \
-   __constant_set_bit(nr, vaddr) : \
-   __generic_set_bit(nr, vaddr))
-
-#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
-
-static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	__asm__ __volatile__ ("bset %1,%0"
-			: "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
-{
-	__asm__ __volatile__ ("bfset %1{%0:#1}"
-			: : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-#define test_and_clear_bit(nr,vaddr) \
-  (__builtin_constant_p(nr) ? \
-   __constant_test_and_clear_bit(nr, vaddr) : \
-   __generic_test_and_clear_bit(nr, vaddr))
-
-#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)
-
-static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	char retval;
-
-	__asm__ __volatile__ ("bclr %2,%1; sne %0"
-			: "=d" (retval), "+m" (*p)
-			: "di" (nr & 7));
-
-	return retval;
-}
-
-static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
-{
-	char retval;
-
-	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
-			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
-	return retval;
-}
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
-#define clear_bit(nr,vaddr) \
-  (__builtin_constant_p(nr) ? \
-   __constant_clear_bit(nr, vaddr) : \
-   __generic_clear_bit(nr, vaddr))
-#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
-
-static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	__asm__ __volatile__ ("bclr %1,%0"
-			: "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
-{
-	__asm__ __volatile__ ("bfclr %1{%0:#1}"
-			: : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-#define test_and_change_bit(nr,vaddr) \
-  (__builtin_constant_p(nr) ? \
-   __constant_test_and_change_bit(nr, vaddr) : \
-   __generic_test_and_change_bit(nr, vaddr))
-
-#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
-#define __change_bit(nr,vaddr) change_bit(nr,vaddr)
-
-static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	char retval;
-
-	__asm__ __volatile__ ("bchg %2,%1; sne %0"
-			: "=d" (retval), "+m" (*p)
-			: "di" (nr & 7));
-
-	return retval;
-}
-
-static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
-{
-	char retval;
-
-	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
-			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
-	return retval;
-}
-
-#define change_bit(nr,vaddr) \
-  (__builtin_constant_p(nr) ? \
-   __constant_change_bit(nr, vaddr) : \
-   __generic_change_bit(nr, vaddr))
-
-static inline void __constant_change_bit(int nr, unsigned long *vaddr)
-{
-	char *p = (char *)vaddr + (nr ^ 31) / 8;
-	__asm__ __volatile__ ("bchg %1,%0"
-			: "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_change_bit(int nr, unsigned long *vaddr)
-{
-	__asm__ __volatile__ ("bfchg %1{%0:#1}"
-			: : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-static inline int test_bit(int nr, const unsigned long *vaddr)
-{
-	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
-}
-
-static inline int find_first_zero_bit(const unsigned long *vaddr,
-				      unsigned size)
-{
-	const unsigned long *p = vaddr;
-	int res = 32;
-	unsigned int words;
-	unsigned long num;
-
-	if (!size)
-		return 0;
-
-	words = (size + 31) >> 5;
-	while (!(num = ~*p++)) {
-		if (!--words)
-			goto out;
-	}
-
-	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
-			      : "=d" (res) : "d" (num & -num));
-	res ^= 31;
-out:
-	res += ((long)p - (long)vaddr - 4) * 8;
-	return res < size ? res : size;
-}
-#define find_first_zero_bit find_first_zero_bit
-
-static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
-				     int offset)
-{
-	const unsigned long *p = vaddr + (offset >> 5);
-	int bit = offset & 31UL, res;
-
-	if (offset >= size)
-		return size;
-
-	if (bit) {
-		unsigned long num = ~*p++ & (~0UL << bit);
-		offset -= bit;
-
-		/* Look for zero in first longword */
-		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
-				      : "=d" (res) : "d" (num & -num));
-		if (res < 32) {
-			offset += res ^ 31;
-			return offset < size ? offset : size;
-		}
-		offset += 32;
-
-		if (offset >= size)
-			return size;
-	}
-	/* No zero yet, search remaining full bytes for a zero */
-	return offset + find_first_zero_bit(p, size - offset);
-}
-#define find_next_zero_bit find_next_zero_bit
-
-static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
-{
-	const unsigned long *p = vaddr;
-	int res = 32;
-	unsigned int words;
-	unsigned long num;
-
-	if (!size)
-		return 0;
-
-	words = (size + 31) >> 5;
-	while (!(num = *p++)) {
-		if (!--words)
-			goto out;
-	}
-
-	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
-			      : "=d" (res) : "d" (num & -num));
-	res ^= 31;
-out:
-	res += ((long)p - (long)vaddr - 4) * 8;
-	return res < size ? res : size;
-}
-#define find_first_bit find_first_bit
-
-static inline int find_next_bit(const unsigned long *vaddr, int size,
-				int offset)
-{
-	const unsigned long *p = vaddr + (offset >> 5);
-	int bit = offset & 31UL, res;
-
-	if (offset >= size)
-		return size;
-
-	if (bit) {
-		unsigned long num = *p++ & (~0UL << bit);
-		offset -= bit;
-
-		/* Look for one in first longword */
-		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
-				      : "=d" (res) : "d" (num & -num));
-		if (res < 32) {
-			offset += res ^ 31;
-			return offset < size ? offset : size;
-		}
-		offset += 32;
-
-		if (offset >= size)
-			return size;
-	}
-	/* No one yet, search remaining full bytes for a one */
-	return offset + find_first_bit(p, size - offset);
-}
-#define find_next_bit find_next_bit
-
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long ffz(unsigned long word)
-{
-	int res;
-
-	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
-			      : "=d" (res) : "d" (~word & -~word));
-	return res ^ 31;
-}
-
-#ifdef __KERNEL__
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-
-static inline int ffs(int x)
-{
-	int cnt;
-
-	asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));
-
-	return 32 - cnt;
-}
-#define __ffs(x) (ffs(x) - 1)
-
-/*
- * fls: find last bit set.
- */
-
-static inline int fls(int x)
-{
-	int cnt;
-
-	asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));
-
-	return 32 - cnt;
-}
-
-static inline int __fls(int x)
-{
-	return fls(x) - 1;
-}
-
-#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-
-/* Bitmap functions for the little endian bitmap. */
-
-static inline void __set_bit_le(int nr, void *addr)
-{
-	__set_bit(nr ^ 24, addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
-	__clear_bit(nr ^ 24, addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, void *addr)
-{
-	return __test_and_set_bit(nr ^ 24, addr);
-}
-
-static inline int test_and_set_bit_le(int nr, void *addr)
-{
-	return test_and_set_bit(nr ^ 24, addr);
-}
-
-static inline int __test_and_clear_bit_le(int nr, void *addr)
-{
-	return __test_and_clear_bit(nr ^ 24, addr);
-}
-
-static inline int test_and_clear_bit_le(int nr, void *addr)
-{
-	return test_and_clear_bit(nr ^ 24, addr);
-}
-
-static inline int test_bit_le(int nr, const void *vaddr)
-{
-	const unsigned char *p = vaddr;
-	return (p[nr >> 3] & (1U << (nr & 7))) != 0;
-}
-
-static inline int find_first_zero_bit_le(const void *vaddr, unsigned size)
-{
-	const unsigned long *p = vaddr, *addr = vaddr;
-	int res = 0;
-	unsigned int words;
-
-	if (!size)
-		return 0;
-
-	words = (size >> 5) + ((size & 31) > 0);
-	while (*p++ == ~0UL) {
-		if (--words == 0)
-			goto out;
-	}
-
-	--p;
-	for (res = 0; res < 32; res++)
-		if (!test_bit_le(res, p))
-			break;
-out:
-	res += (p - addr) * 32;
-	return res < size ? res : size;
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline unsigned long find_next_zero_bit_le(const void *addr,
-		unsigned long size, unsigned long offset)
-{
-	const unsigned long *p = addr;
-	int bit = offset & 31UL, res;
-
-	if (offset >= size)
-		return size;
-
-	p += offset >> 5;
-
-	if (bit) {
-		offset -= bit;
-		/* Look for zero in first longword */
-		for (res = bit; res < 32; res++)
-			if (!test_bit_le(res, p)) {
-				offset += res;
-				return offset < size ? offset : size;
-			}
-		p++;
-		offset += 32;
-
-		if (offset >= size)
-			return size;
-	}
-	/* No zero yet, search remaining full bytes for a zero */
-	return offset + find_first_zero_bit_le(p, size - offset);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline int find_first_bit_le(const void *vaddr, unsigned size)
-{
-	const unsigned long *p = vaddr, *addr = vaddr;
-	int res = 0;
-	unsigned int words;
-
-	if (!size)
-		return 0;
-
-	words = (size >> 5) + ((size & 31) > 0);
-	while (*p++ == 0UL) {
-		if (--words == 0)
-			goto out;
-	}
-
-	--p;
-	for (res = 0; res < 32; res++)
-		if (test_bit_le(res, p))
-			break;
-out:
-	res += (p - addr) * 32;
-	return res < size ? res : size;
-}
-#define find_first_bit_le find_first_bit_le
-
-static inline unsigned long find_next_bit_le(const void *addr,
-		unsigned long size, unsigned long offset)
-{
-	const unsigned long *p = addr;
-	int bit = offset & 31UL, res;
-
-	if (offset >= size)
-		return size;
-
-	p += offset >> 5;
-
-	if (bit) {
-		offset -= bit;
-		/* Look for one in first longword */
-		for (res = bit; res < 32; res++)
-			if (test_bit_le(res, p)) {
-				offset += res;
-				return offset < size ? offset : size;
-			}
-		p++;
-		offset += 32;
-
-		if (offset >= size)
-			return size;
-	}
-	/* No set bit yet, search remaining full bytes for a set bit */
-	return offset + find_first_bit_le(p, size - offset);
-}
-#define find_next_bit_le find_next_bit_le
-
-/* Bitmap functions for the ext2 filesystem. */
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
-	test_and_set_bit_le(nr, addr)
-#define ext2_clear_bit_atomic(lock, nr, addr) \
-	test_and_clear_bit_le(nr, addr)
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_BITOPS_H */
diff --git a/arch/m68k/include/asm/bitops_no.h b/arch/m68k/include/asm/bitops_no.h
deleted file mode 100644
index 72e85acdd7bd..000000000000
--- a/arch/m68k/include/asm/bitops_no.h
+++ /dev/null
@@ -1,333 +0,0 @@
-#ifndef _M68KNOMMU_BITOPS_H
-#define _M68KNOMMU_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-#include <linux/compiler.h>
-#include <asm/byteorder.h>	/* swab32 */
-
-#ifdef __KERNEL__
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#if defined (__mcfisaaplus__) || defined (__mcfisac__)
-static inline int ffs(unsigned int val)
-{
-	if (!val)
-		return 0;
-
-	asm volatile(
-		"bitrev %0\n\t"
-		"ff1 %0\n\t"
-		: "=d" (val)
-		: "0" (val)
-	);
-	val++;
-	return val;
-}
-
-static inline int __ffs(unsigned int val)
-{
-	asm volatile(
-		"bitrev %0\n\t"
-		"ff1 %0\n\t"
-		: "=d" (val)
-		: "0" (val)
-	);
-	return val;
-}
-
-#else
-#include <asm-generic/bitops/ffs.h>
-#include <asm-generic/bitops/__ffs.h>
-#endif
-
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/ffz.h>
-
-static __inline__ void set_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0", "cc");
-#else
-	__asm__ __volatile__ ("bset %1,%0"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		: "cc");
-#endif
-}
-
-#define __set_bit(nr, addr) set_bit(nr, addr)
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
-static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0", "cc");
-#else
-	__asm__ __volatile__ ("bclr %1,%0"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		: "cc");
-#endif
-}
-
-#define __clear_bit(nr, addr) clear_bit(nr, addr)
-
-static __inline__ void change_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0", "cc");
-#else
-	__asm__ __volatile__ ("bchg %1,%0"
-		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		: "cc");
-#endif
-}
-
-#define __change_bit(nr, addr) change_bit(nr, addr)
-
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("bset %2,%1; sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
-
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("bclr %2,%1; sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
-
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("bchg %2,%1; sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
-		: "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
-
-/*
- * This routine doesn't need to be atomic.
- */
-static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
-{
-	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
-{
-	int * a = (int *) addr;
-	int mask;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	return ((mask & *a) != 0);
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)))
-
-#include <asm-generic/bitops/find.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-
-#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
-
-static inline void __set_bit_le(int nr, void *addr)
-{
-	__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
-	__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, volatile void *addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
-		: "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("bset %2,%1; sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
-		: "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
-		: "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("bclr %2,%1; sne %0"
-		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
-		: "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-#include <asm-generic/bitops/ext2-atomic.h>
-
-static inline int test_bit_le(int nr, const volatile void *addr)
-{
-	char retval;
-
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
-		: "=d" (retval)
-		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
-		: "%a0");
-#else
-	__asm__ __volatile__ ("btst %2,%1; sne %0"
-		: "=d" (retval)
-		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
-		/* No clobber */);
-#endif
-
-	return retval;
-}
-
-#define find_first_zero_bit_le(addr, size)	\
-	find_next_zero_bit_le((addr), (size), 0)
-
-static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset)
-{
-	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
-	unsigned long result = offset & ~31UL;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 31UL;
-	if(offset) {
-		/* We hold the little endian value in tmp, but then the
-		 * shift is illegal. So we could keep a big endian value
-		 * in tmp, like this:
-		 *
-		 * tmp = __swab32(*(p++));
-		 * tmp |= ~0UL >> (32-offset);
-		 *
-		 * but this would decrease performance, so we change the
-		 * shift:
-		 */
-		tmp = *(p++);
-		tmp |= __swab32(~0UL >> (32-offset));
-		if(size < 32)
-			goto found_first;
-		if(~tmp)
-			goto found_middle;
-		size -= 32;
-		result += 32;
-	}
-	while(size & ~31UL) {
-		if(~(tmp = *(p++)))
-			goto found_middle;
-		result += 32;
-		size -= 32;
-	}
-	if(!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	/* tmp is little endian, so we would have to swab the shift,
-	 * see above. But then we have to swab tmp below for ffz, so
-	 * we might as well do this here.
-	 */
-	return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
-	return result + ffz(__swab32(tmp));
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-extern unsigned long find_next_bit_le(const void *addr,
-		unsigned long size, unsigned long offset);
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/__fls.h>
-#include <asm-generic/bitops/fls64.h>
-
-#endif /* _M68KNOMMU_BITOPS_H */
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index d2598e3dd7b2..9c09becfd4c9 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -1,5 +1,96 @@
-#ifdef __uClinux__
-#include "delay_no.h"
+#ifndef _M68K_DELAY_H
+#define _M68K_DELAY_H
+
+#include <asm/param.h>
+
+/*
+ * Copyright (C) 1994 Hamish Macdonald
+ * Copyright (C) 2004 Greg Ungerer <gerg@uclinux.com>
+ *
+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
+ */
+
+#if defined(CONFIG_COLDFIRE)
+/*
+ * The ColdFire runs the delay loop at significantly different speeds
+ * depending upon long word alignment or not.  We'll pad it to
+ * long word alignment which is the faster version.
+ * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
+ * than using a NOP (0x4e71) instruction because it executes in one
+ * cycle not three and doesn't allow for an arbitrary delay waiting
+ * for bus cycles to finish. Also fp/a6 isn't likely to cause a
+ * stall waiting for the register to become valid if such is added
+ * to the coldfire at some stage.
+ */
+#define	DELAY_ALIGN	".balignw 4, 0x4a8e\n\t"
 #else
-#include "delay_mm.h"
+/*
+ * No instruction alignment required for other m68k types.
+ */
+#define	DELAY_ALIGN
 #endif
+
+static inline void __delay(unsigned long loops)
+{
+	__asm__ __volatile__ (
+		DELAY_ALIGN
+		"1: subql #1,%0\n\t"
+		"jcc 1b"
+		: "=d" (loops)
+		: "0" (loops));
+}
+
+extern void __bad_udelay(void);
+
+
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+/*
+ * The simpler m68k and ColdFire processors do not have a 32*32->64
+ * multiply instruction. So we need to handle them a little differently.
+ * We use a bit of shifting and a single 32*32->32 multiply to get close.
+ * This is a macro so that the const version can factor out the first
+ * multiply and shift.
+ */
+#define	HZSCALE		(268435456 / (1000000 / HZ))
+
+#define	__const_udelay(u) \
+	__delay(((((u) * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6)
+
+#else
+
+static inline void __xdelay(unsigned long xloops)
+{
+	unsigned long tmp;
+
+	__asm__ ("mulul %2,%0:%1"
+		: "=d" (xloops), "=d" (tmp)
+		: "d" (xloops), "1" (loops_per_jiffy));
+	__delay(xloops * HZ);
+}
+
+/*
+ * The definition of __const_udelay is specifically made a macro so that
+ * the const factor (4295 = 2**32 / 1000000) can be optimized out when
+ * the delay is a const.
+ */
+#define	__const_udelay(n)	(__xdelay((n) * 4295))
+
+#endif
+
+static inline void __udelay(unsigned long usecs)
+{
+	__const_udelay(usecs);
+}
+
+/*
+ * Use only for very small delays ( < 1 msec).  Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays.  This is a "reasonable" implementation, though (and the
+ * first constant multiplications gets optimized away if the delay is
+ * a constant)
+ */
+#define udelay(n) (__builtin_constant_p(n) ? \
+	((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n))
+
+
+#endif /* defined(_M68K_DELAY_H) */
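
Editorial note: the CONFIG_M68000/CONFIG_COLDFIRE branch above approximates loops = usecs * loops_per_jiffy * HZ / 10^6 without a 32*32->64 multiply: HZSCALE folds 2^28/(10^6/HZ) into one constant, and the >>11, >>11, >>6 shifts rebalance the remaining 2^28 factor. A quick standalone check of that arithmetic (plain C; the HZ and loops_per_jiffy values below are illustrative assumptions, not kernel values):

#include <stdio.h>

#define HZ	100
/* HZSCALE = 2^28 / (1000000 / HZ), as in the header above */
#define HZSCALE	(268435456 / (1000000 / HZ))

int main(void)
{
	unsigned long lpj = 500000;	/* assumed loops_per_jiffy */
	unsigned long usecs = 100;

	/* 32-bit-only approximation used by __const_udelay() above */
	unsigned long approx = (((usecs * HZSCALE) >> 11) * (lpj >> 11)) >> 6;
	/* the exact value it approximates, computed in 64 bits */
	unsigned long exact =
		(unsigned long)((unsigned long long)usecs * lpj * HZ / 1000000);

	printf("approx=%lu exact=%lu\n", approx, exact);	/* ~4994 vs 5000 */
	return 0;
}
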
diff --git a/arch/m68k/include/asm/delay_mm.h b/arch/m68k/include/asm/delay_mm.h
deleted file mode 100644
index 5ed92851bc6f..000000000000
--- a/arch/m68k/include/asm/delay_mm.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef _M68K_DELAY_H
-#define _M68K_DELAY_H
-
-#include <asm/param.h>
-
-/*
- * Copyright (C) 1994 Hamish Macdonald
- *
- * Delay routines, using a pre-computed "loops_per_jiffy" value.
- */
-
-static inline void __delay(unsigned long loops)
-{
-	__asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
-		: "=d" (loops) : "0" (loops));
-}
-
-extern void __bad_udelay(void);
-
-/*
- * Use only for very small delays ( < 1 msec).  Should probably use a
- * lookup table, really, as the multiplications take much too long with
- * short delays.  This is a "reasonable" implementation, though (and the
- * first constant multiplications gets optimized away if the delay is
- * a constant)
- */
-static inline void __const_udelay(unsigned long xloops)
-{
-	unsigned long tmp;
-
-	__asm__ ("mulul %2,%0:%1"
-		: "=d" (xloops), "=d" (tmp)
-		: "d" (xloops), "1" (loops_per_jiffy));
-	__delay(xloops * HZ);
-}
-
-static inline void __udelay(unsigned long usecs)
-{
-	__const_udelay(usecs * 4295);	/* 2**32 / 1000000 */
-}
-
-#define udelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 4295)) : \
-	__udelay(n))
-
-static inline unsigned long muldiv(unsigned long a, unsigned long b,
-				   unsigned long c)
-{
-	unsigned long tmp;
-
-	__asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
-		: "=d" (tmp), "=d" (a)
-		: "d" (b), "d" (c), "1" (a));
-	return a;
-}
-
-#endif /* defined(_M68K_DELAY_H) */
diff --git a/arch/m68k/include/asm/delay_no.h b/arch/m68k/include/asm/delay_no.h
deleted file mode 100644
index c3a0edc90f21..000000000000
--- a/arch/m68k/include/asm/delay_no.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef _M68KNOMMU_DELAY_H
-#define _M68KNOMMU_DELAY_H
-
-/*
- * Copyright (C) 1994 Hamish Macdonald
- * Copyright (C) 2004 Greg Ungerer <gerg@snapgear.com>
- */
-
-#include <asm/param.h>
-
-static inline void __delay(unsigned long loops)
-{
-#if defined(CONFIG_COLDFIRE)
-	/* The coldfire runs this loop at significantly different speeds
-	 * depending upon long word alignment or not.  We'll pad it to
-	 * long word alignment which is the faster version.
-	 * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
-	 * than using a NOP (0x4e71) instruction because it executes in one
-	 * cycle not three and doesn't allow for an arbitrary delay waiting
-	 * for bus cycles to finish. Also fp/a6 isn't likely to cause a
-	 * stall waiting for the register to become valid if such is added
-	 * to the coldfire at some stage.
-	 */
-	__asm__ __volatile__ (	".balignw 4, 0x4a8e\n\t"
-			"1: subql #1, %0\n\t"
-			"jcc 1b"
-		: "=d" (loops) : "0" (loops));
-#else
-	__asm__ __volatile__ (	"1: subql #1, %0\n\t"
-			"jcc 1b"
-		: "=d" (loops) : "0" (loops));
-#endif
-}
-
-/*
- * Ideally we use a 32*32->64 multiply to calculate the number of
- * loop iterations, but the older standard 68k and ColdFire do not
- * have this instruction. So for them we have a clsoe approximation
- * loop using 32*32->32 multiplies only. This calculation based on
- * the ARM version of delay.
- *
- * We want to implement:
- *
- * loops = (usecs * 0x10c6 * HZ * loops_per_jiffy) / 2^32
- */
-
-#define HZSCALE		(268435456 / (1000000/HZ))
-
-extern unsigned long loops_per_jiffy;
-
-static inline void _udelay(unsigned long usecs)
-{
-#if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
-	defined(CONFIG_M68VZ328) || defined(CONFIG_M68360) || \
-	defined(CONFIG_COLDFIRE)
-	__delay((((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6);
-#else
-	unsigned long tmp;
-
-	usecs *= 4295;		/* 2**32 / 1000000 */
-	__asm__ ("mulul %2,%0:%1"
-		: "=d" (usecs), "=d" (tmp)
-		: "d" (usecs), "1" (loops_per_jiffy*HZ));
-	__delay(usecs);
-#endif
-}
-
-/*
- * Moved the udelay() function into library code, no longer inlined.
- * I had to change the algorithm because we are overflowing now on
- * the faster ColdFire parts. The code is a little bigger, so it makes
- * sense to library it.
- */
-extern void udelay(unsigned long usecs);
-
-#endif /* defined(_M68KNOMMU_DELAY_H) */
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index 627d69bacc58..68611e3dbb1d 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -96,11 +96,11 @@
 .endm
 
 .macro RDUSP
-	movel	sw_usp,%a2
+	movel	sw_usp,%a3
 .endm
 
 .macro WRUSP
-	movel	%a0,sw_usp
+	movel	%a3,sw_usp
 .endm
 
 #else /* !CONFIG_COLDFIRE_SW_A7 */
@@ -127,13 +127,13 @@
 .endm
 
 .macro RDUSP
-	/*move	%usp,%a2*/
-	.word	0x4e6a
+	/*move	%usp,%a3*/
+	.word	0x4e6b
 .endm
 
 .macro WRUSP
-	/*move	%a0,%usp*/
-	.word	0x4e60
+	/*move	%a3,%usp*/
+	.word	0x4e63
 .endm
 
 #endif /* !CONFIG_COLDFIRE_SW_A7 */
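
Editorial note: the hand-assembled .word values above change in lockstep with the scratch register, because MOVE USP encodes the address register in the low opcode bits: 0x4e68 | N is "move %usp,%aN" and 0x4e60 | N is "move %aN,%usp". A tiny standalone check of that opcode arithmetic (plain C, illustrative only):

#include <stdio.h>

/* Illustrative only: MOVE USP opcode arithmetic on the 68k.  Switching
 * the scratch register from %a2/%a0 to %a3 is what turns 0x4e6a into
 * 0x4e6b and 0x4e60 into 0x4e63 in the hunk above. */
int main(void)
{
	printf("move %%usp,%%a3 -> 0x%04x\n", 0x4e68 | 3);	/* 0x4e6b */
	printf("move %%a3,%%usp -> 0x%04x\n", 0x4e60 | 3);	/* 0x4e63 */
	return 0;
}
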
diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h
index 56d0d5db231c..870e5347155b 100644
--- a/arch/m68k/include/asm/hardirq.h
+++ b/arch/m68k/include/asm/hardirq.h
@@ -1,5 +1,34 @@
-#ifdef __uClinux__
-#include "hardirq_no.h"
+#ifndef __M68K_HARDIRQ_H
+#define __M68K_HARDIRQ_H
+
+#include <linux/threads.h>
+#include <linux/cache.h>
+#include <asm/irq.h>
+
+#define HARDIRQ_BITS	8
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+#ifdef CONFIG_MMU
+
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+	unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+
 #else
-#include "hardirq_mm.h"
+
+#include <asm-generic/hardirq.h>
+
+#endif /* !CONFIG_MMU */
+
 #endif
diff --git a/arch/m68k/include/asm/hardirq_mm.h b/arch/m68k/include/asm/hardirq_mm.h
deleted file mode 100644
index 394ee946015c..000000000000
--- a/arch/m68k/include/asm/hardirq_mm.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __M68K_HARDIRQ_H
-#define __M68K_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */
-typedef struct {
-	unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-
-#define HARDIRQ_BITS	8
-
-#endif
diff --git a/arch/m68k/include/asm/hardirq_no.h b/arch/m68k/include/asm/hardirq_no.h
deleted file mode 100644
index b44b14be87d9..000000000000
--- a/arch/m68k/include/asm/hardirq_no.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __M68K_HARDIRQ_H
-#define __M68K_HARDIRQ_H
-
-#include <asm/irq.h>
-
-#define HARDIRQ_BITS	8
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-#include <asm-generic/hardirq.h>
-
-#endif /* __M68K_HARDIRQ_H */
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 907eff1edd2f..69ed0d74d532 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -33,15 +33,6 @@
 #include <linux/spinlock_types.h>
 
 /*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-/*
  * Interrupt source definitions
  * General interrupt sources are the level 1-7.
  * Adding an interrupt service routine for one of these sources
@@ -131,4 +122,6 @@ asmlinkage void __m68k_handle_int(unsigned int, struct pt_regs *);
 #define irq_canonicalize(irq)	(irq)
 #endif /* CONFIG_MMU */
 
+asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
+
 #endif /* _M68K_IRQ_H_ */
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 415d5484916c..789f3b2de0e9 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -40,6 +40,5 @@ extern unsigned long hw_timer_offset(void);
 extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
 
 extern void config_BSP(char *command, int len);
-extern void do_IRQ(int irq, struct pt_regs *fp);
 
 #endif /* _M68K_MACHDEP_H */
diff --git a/arch/m68k/include/asm/module.h b/arch/m68k/include/asm/module.h
index 5f21e11071bd..edffe66b7f49 100644
--- a/arch/m68k/include/asm/module.h
+++ b/arch/m68k/include/asm/module.h
@@ -1,46 +1,41 @@
 #ifndef _ASM_M68K_MODULE_H
 #define _ASM_M68K_MODULE_H
 
-#ifdef CONFIG_MMU
+enum m68k_fixup_type {
+	m68k_fixup_memoffset,
+	m68k_fixup_vnode_shift,
+};
+
+struct m68k_fixup_info {
+	enum m68k_fixup_type type;
+	void *addr;
+};
 
 struct mod_arch_specific {
 	struct m68k_fixup_info *fixup_start, *fixup_end;
 };
 
+#ifdef CONFIG_MMU
+
 #define MODULE_ARCH_INIT { \
 	.fixup_start = __start_fixup, \
 	.fixup_end = __stop_fixup, \
 }
 
 
-enum m68k_fixup_type {
-	m68k_fixup_memoffset,
-	m68k_fixup_vnode_shift,
-};
-
-struct m68k_fixup_info {
-	enum m68k_fixup_type type;
-	void *addr;
-};
-
 #define m68k_fixup(type, addr)			\
 	"	.section \".m68k_fixup\",\"aw\"\n"	\
 	"	.long " #type "," #addr "\n"	\
 	"	.previous\n"
 
+#endif /* CONFIG_MMU */
+
 extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];
 
 struct module;
 extern void module_fixup(struct module *mod, struct m68k_fixup_info *start,
 			 struct m68k_fixup_info *end);
 
-#else
-
-struct mod_arch_specific {
-};
-
-#endif /* CONFIG_MMU */
-
 #define Elf_Shdr Elf32_Shdr
 #define Elf_Sym Elf32_Sym
 #define Elf_Ehdr Elf32_Ehdr
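
Editorial note: the records emitted through m68k_fixup() are (type, address) pairs collected in the ".m68k_fixup" section; at module load the arch code walks the table between fixup_start and fixup_end and patches each recorded location. A rough, self-contained sketch of such a walk follows — the patch values and the exact action per type are assumptions for illustration, not copied from arch/m68k/kernel/module.c:

enum m68k_fixup_type { m68k_fixup_memoffset, m68k_fixup_vnode_shift };

struct m68k_fixup_info {
	enum m68k_fixup_type type;
	void *addr;		/* location in the module to patch */
};

/* Sketch only: conceptual fixup walk with assumed patch values. */
static void fixup_walk_sketch(struct m68k_fixup_info *start,
			      struct m68k_fixup_info *end,
			      unsigned long memoffset, int vnode_shift)
{
	struct m68k_fixup_info *fixup;

	for (fixup = start; fixup < end; fixup++) {
		switch (fixup->type) {
		case m68k_fixup_memoffset:
			/* patch a 32-bit constant with the mem offset */
			*(unsigned long *)fixup->addr = memoffset;
			break;
		case m68k_fixup_vnode_shift:
			/* adjust a shift count embedded in an instruction */
			*(unsigned char *)fixup->addr += vnode_shift;
			break;
		}
	}
}
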
diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
index 5bc09c787a11..60e88660169c 100644
--- a/arch/m68k/include/asm/signal.h
+++ b/arch/m68k/include/asm/signal.h
@@ -150,7 +150,7 @@ typedef struct sigaltstack {
150#ifdef __KERNEL__ 150#ifdef __KERNEL__
151#include <asm/sigcontext.h> 151#include <asm/sigcontext.h>
152 152
153#ifndef __uClinux__ 153#ifndef CONFIG_CPU_HAS_NO_BITFIELDS
154#define __HAVE_ARCH_SIG_BITOPS 154#define __HAVE_ARCH_SIG_BITOPS
155 155
156static inline void sigaddset(sigset_t *set, int _sig) 156static inline void sigaddset(sigset_t *set, int _sig)
@@ -199,15 +199,14 @@ static inline int sigfindinword(unsigned long word)
199 return word ^ 31; 199 return word ^ 31;
200} 200}
201 201
202struct pt_regs; 202#endif /* !CONFIG_CPU_HAS_NO_BITFIELDS */
203extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
204 203
205#else 204#ifdef __uClinux__
206
207#undef __HAVE_ARCH_SIG_BITOPS
208#define ptrace_signal_deliver(regs, cookie) do { } while (0) 205#define ptrace_signal_deliver(regs, cookie) do { } while (0)
209 206#else
207struct pt_regs;
208extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
210#endif /* __uClinux__ */ 209#endif /* __uClinux__ */
211#endif /* __KERNEL__ */
212 210
211#endif /* __KERNEL__ */
213#endif /* _M68K_SIGNAL_H */ 212#endif /* _M68K_SIGNAL_H */
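
The new guard states the real dependency: these sig bitops use 680x0 bitfield instructions (bfset and friends) that ColdFire cores do not implement, which is exactly what CPU_HAS_NO_BITFIELDS encodes. Roughly what the guarded helpers look like (abridged sketch from memory, renamed to avoid the real helper; only the sigaddset shape is shown):

    #include <asm/signal.h>

    /* bfset sets a one-bit field; (_sig - 1) ^ 31 maps the signal
     * number to the bitfield offset, counted from the msb. */
    static inline void demo_sigaddset(sigset_t *set, int _sig)
    {
            asm ("bfset %0{%1,#1}"
                    : "+od" (*set)
                    : "id" ((_sig - 1) ^ 31)
                    : "cc");
    }
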
diff --git a/arch/m68k/include/asm/system.h b/arch/m68k/include/asm/system.h
index ccea925ff4f5..47b01f4726bc 100644
--- a/arch/m68k/include/asm/system.h
+++ b/arch/m68k/include/asm/system.h
@@ -1,5 +1,193 @@
1#ifdef __uClinux__ 1#ifndef _M68K_SYSTEM_H
2#include "system_no.h" 2#define _M68K_SYSTEM_H
3
4#include <linux/linkage.h>
5#include <linux/kernel.h>
6#include <linux/irqflags.h>
7#include <asm/segment.h>
8#include <asm/entry.h>
9
10#ifdef __KERNEL__
11
12/*
13 * switch_to(n) should switch tasks to task ptr, first checking that
14 * ptr isn't the current task, in which case it does nothing. This
15 * also clears the TS-flag if the task we switched to has used the
16 * math co-processor latest.
17 */
18/*
19 * switch_to() saves the extra registers, that are not saved
20 * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
21 * a0-a1. Some of these are used by schedule() and its predecessors
22 * and so we might get see unexpected behaviors when a task returns
23 * with unexpected register values.
24 *
25 * syscall stores these registers itself and none of them are used
26 * by syscall after the function in the syscall has been called.
27 *
28 * Beware that resume now expects *next to be in d1 and the offset of
29 * tss to be in a1. This saves a few instructions as we no longer have
30 * to push them onto the stack and read them back right after.
31 *
32 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
33 *
34 * Changed 96/09/19 by Andreas Schwab
35 * pass prev in a0, next in a1
36 */
37asmlinkage void resume(void);
38#define switch_to(prev,next,last) do { \
39 register void *_prev __asm__ ("a0") = (prev); \
40 register void *_next __asm__ ("a1") = (next); \
41 register void *_last __asm__ ("d1"); \
42 __asm__ __volatile__("jbsr resume" \
43 : "=a" (_prev), "=a" (_next), "=d" (_last) \
44 : "0" (_prev), "1" (_next) \
45 : "d0", "d2", "d3", "d4", "d5"); \
46 (last) = _last; \
47} while (0)
48
49
50/*
51 * Force strict CPU ordering.
52 * Not really required on m68k...
53 */
54#define nop() do { asm volatile ("nop"); barrier(); } while (0)
55#define mb() barrier()
56#define rmb() barrier()
57#define wmb() barrier()
58#define read_barrier_depends() ((void)0)
59#define set_mb(var, value) ({ (var) = (value); wmb(); })
60
61#define smp_mb() barrier()
62#define smp_rmb() barrier()
63#define smp_wmb() barrier()
64#define smp_read_barrier_depends() ((void)0)
65
66#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
67
68struct __xchg_dummy { unsigned long a[100]; };
69#define __xg(x) ((volatile struct __xchg_dummy *)(x))
70
71#ifndef CONFIG_RMW_INSNS
72static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
73{
74 unsigned long flags, tmp;
75
76 local_irq_save(flags);
77
78 switch (size) {
79 case 1:
80 tmp = *(u8 *)ptr;
81 *(u8 *)ptr = x;
82 x = tmp;
83 break;
84 case 2:
85 tmp = *(u16 *)ptr;
86 *(u16 *)ptr = x;
87 x = tmp;
88 break;
89 case 4:
90 tmp = *(u32 *)ptr;
91 *(u32 *)ptr = x;
92 x = tmp;
93 break;
94 default:
95 BUG();
96 }
97
98 local_irq_restore(flags);
99 return x;
100}
3#else 101#else
4#include "system_mm.h" 102static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
103{
104 switch (size) {
105 case 1:
106 __asm__ __volatile__
107 ("moveb %2,%0\n\t"
108 "1:\n\t"
109 "casb %0,%1,%2\n\t"
110 "jne 1b"
111 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
112 break;
113 case 2:
114 __asm__ __volatile__
115 ("movew %2,%0\n\t"
116 "1:\n\t"
117 "casw %0,%1,%2\n\t"
118 "jne 1b"
119 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
120 break;
121 case 4:
122 __asm__ __volatile__
123 ("movel %2,%0\n\t"
124 "1:\n\t"
125 "casl %0,%1,%2\n\t"
126 "jne 1b"
127 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
128 break;
129 }
130 return x;
131}
5#endif 132#endif
133
134#include <asm-generic/cmpxchg-local.h>
135
136#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
137
138/*
139 * Atomic compare and exchange. Compare OLD with MEM, if identical,
140 * store NEW in MEM. Return the initial value in MEM. Success is
141 * indicated by comparing RETURN with OLD.
142 */
143#ifdef CONFIG_RMW_INSNS
144#define __HAVE_ARCH_CMPXCHG 1
145
146static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
147 unsigned long new, int size)
148{
149 switch (size) {
150 case 1:
151 __asm__ __volatile__ ("casb %0,%2,%1"
152 : "=d" (old), "=m" (*(char *)p)
153 : "d" (new), "0" (old), "m" (*(char *)p));
154 break;
155 case 2:
156 __asm__ __volatile__ ("casw %0,%2,%1"
157 : "=d" (old), "=m" (*(short *)p)
158 : "d" (new), "0" (old), "m" (*(short *)p));
159 break;
160 case 4:
161 __asm__ __volatile__ ("casl %0,%2,%1"
162 : "=d" (old), "=m" (*(int *)p)
163 : "d" (new), "0" (old), "m" (*(int *)p));
164 break;
165 }
166 return old;
167}
168
169#define cmpxchg(ptr, o, n) \
170 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
171 (unsigned long)(n), sizeof(*(ptr))))
172#define cmpxchg_local(ptr, o, n) \
173 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
174 (unsigned long)(n), sizeof(*(ptr))))
175#else
176
177/*
178 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
179 * them available.
180 */
181#define cmpxchg_local(ptr, o, n) \
182 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
183 (unsigned long)(n), sizeof(*(ptr))))
184
185#include <asm-generic/cmpxchg.h>
186
187#endif
188
189#define arch_align_stack(x) (x)
190
191#endif /* __KERNEL__ */
192
193#endif /* _M68K_SYSTEM_H */
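
The unified header gives both kernel flavours identical xchg()/cmpxchg() semantics: cmpxchg() returns the value previously in memory, so success is detected by comparing against the expected old value. An illustrative use (not from the tree; real code would use <linux/spinlock.h>):

    #include <asm/system.h>

    /* Illustrative busy-wait lock built on the primitives above. */
    static int demo_trylock(volatile int *lock)
    {
            return cmpxchg(lock, 0, 1) == 0;  /* old value 0 => acquired */
    }

    static void demo_unlock(volatile int *lock)
    {
            xchg(lock, 0);                    /* store 0, return old value */
    }
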
diff --git a/arch/m68k/include/asm/system_mm.h b/arch/m68k/include/asm/system_mm.h
deleted file mode 100644
index 47b01f4726bc..000000000000
--- a/arch/m68k/include/asm/system_mm.h
+++ /dev/null
@@ -1,193 +0,0 @@
1#ifndef _M68K_SYSTEM_H
2#define _M68K_SYSTEM_H
3
4#include <linux/linkage.h>
5#include <linux/kernel.h>
6#include <linux/irqflags.h>
7#include <asm/segment.h>
8#include <asm/entry.h>
9
10#ifdef __KERNEL__
11
12/*
13 * switch_to(n) should switch tasks to task ptr, first checking that
14 * ptr isn't the current task, in which case it does nothing. This
15 * also clears the TS-flag if the task we switched to has used the
16 * math co-processor most recently.
17 */
18/*
19 * switch_to() saves the extra registers that are not saved
20 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
21 * a0-a1. Some of these are used by schedule() and its predecessors
22 * and so we might see unexpected behavior when a task returns
23 * with unexpected register values.
24 *
25 * syscall stores these registers itself and none of them are used
26 * by syscall after the function in the syscall has been called.
27 *
28 * Beware that resume now expects *next to be in d1 and the offset of
29 * tss to be in a1. This saves a few instructions as we no longer have
30 * to push them onto the stack and read them back right after.
31 *
32 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
33 *
34 * Changed 96/09/19 by Andreas Schwab
35 * pass prev in a0, next in a1
36 */
37asmlinkage void resume(void);
38#define switch_to(prev,next,last) do { \
39 register void *_prev __asm__ ("a0") = (prev); \
40 register void *_next __asm__ ("a1") = (next); \
41 register void *_last __asm__ ("d1"); \
42 __asm__ __volatile__("jbsr resume" \
43 : "=a" (_prev), "=a" (_next), "=d" (_last) \
44 : "0" (_prev), "1" (_next) \
45 : "d0", "d2", "d3", "d4", "d5"); \
46 (last) = _last; \
47} while (0)
48
49
50/*
51 * Force strict CPU ordering.
52 * Not really required on m68k...
53 */
54#define nop() do { asm volatile ("nop"); barrier(); } while (0)
55#define mb() barrier()
56#define rmb() barrier()
57#define wmb() barrier()
58#define read_barrier_depends() ((void)0)
59#define set_mb(var, value) ({ (var) = (value); wmb(); })
60
61#define smp_mb() barrier()
62#define smp_rmb() barrier()
63#define smp_wmb() barrier()
64#define smp_read_barrier_depends() ((void)0)
65
66#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
67
68struct __xchg_dummy { unsigned long a[100]; };
69#define __xg(x) ((volatile struct __xchg_dummy *)(x))
70
71#ifndef CONFIG_RMW_INSNS
72static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
73{
74 unsigned long flags, tmp;
75
76 local_irq_save(flags);
77
78 switch (size) {
79 case 1:
80 tmp = *(u8 *)ptr;
81 *(u8 *)ptr = x;
82 x = tmp;
83 break;
84 case 2:
85 tmp = *(u16 *)ptr;
86 *(u16 *)ptr = x;
87 x = tmp;
88 break;
89 case 4:
90 tmp = *(u32 *)ptr;
91 *(u32 *)ptr = x;
92 x = tmp;
93 break;
94 default:
95 BUG();
96 }
97
98 local_irq_restore(flags);
99 return x;
100}
101#else
102static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
103{
104 switch (size) {
105 case 1:
106 __asm__ __volatile__
107 ("moveb %2,%0\n\t"
108 "1:\n\t"
109 "casb %0,%1,%2\n\t"
110 "jne 1b"
111 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
112 break;
113 case 2:
114 __asm__ __volatile__
115 ("movew %2,%0\n\t"
116 "1:\n\t"
117 "casw %0,%1,%2\n\t"
118 "jne 1b"
119 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
120 break;
121 case 4:
122 __asm__ __volatile__
123 ("movel %2,%0\n\t"
124 "1:\n\t"
125 "casl %0,%1,%2\n\t"
126 "jne 1b"
127 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
128 break;
129 }
130 return x;
131}
132#endif
133
134#include <asm-generic/cmpxchg-local.h>
135
136#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
137
138/*
139 * Atomic compare and exchange. Compare OLD with MEM, if identical,
140 * store NEW in MEM. Return the initial value in MEM. Success is
141 * indicated by comparing RETURN with OLD.
142 */
143#ifdef CONFIG_RMW_INSNS
144#define __HAVE_ARCH_CMPXCHG 1
145
146static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
147 unsigned long new, int size)
148{
149 switch (size) {
150 case 1:
151 __asm__ __volatile__ ("casb %0,%2,%1"
152 : "=d" (old), "=m" (*(char *)p)
153 : "d" (new), "0" (old), "m" (*(char *)p));
154 break;
155 case 2:
156 __asm__ __volatile__ ("casw %0,%2,%1"
157 : "=d" (old), "=m" (*(short *)p)
158 : "d" (new), "0" (old), "m" (*(short *)p));
159 break;
160 case 4:
161 __asm__ __volatile__ ("casl %0,%2,%1"
162 : "=d" (old), "=m" (*(int *)p)
163 : "d" (new), "0" (old), "m" (*(int *)p));
164 break;
165 }
166 return old;
167}
168
169#define cmpxchg(ptr, o, n) \
170 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
171 (unsigned long)(n), sizeof(*(ptr))))
172#define cmpxchg_local(ptr, o, n) \
173 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
174 (unsigned long)(n), sizeof(*(ptr))))
175#else
176
177/*
178 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
179 * them available.
180 */
181#define cmpxchg_local(ptr, o, n) \
182 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
183 (unsigned long)(n), sizeof(*(ptr))))
184
185#include <asm-generic/cmpxchg.h>
186
187#endif
188
189#define arch_align_stack(x) (x)
190
191#endif /* __KERNEL__ */
192
193#endif /* _M68K_SYSTEM_H */
diff --git a/arch/m68k/include/asm/system_no.h b/arch/m68k/include/asm/system_no.h
deleted file mode 100644
index 6fe9f93bc3ff..000000000000
--- a/arch/m68k/include/asm/system_no.h
+++ /dev/null
@@ -1,153 +0,0 @@
1#ifndef _M68KNOMMU_SYSTEM_H
2#define _M68KNOMMU_SYSTEM_H
3
4#include <linux/linkage.h>
5#include <linux/irqflags.h>
6#include <asm/segment.h>
7#include <asm/entry.h>
8
9/*
10 * switch_to(n) should switch tasks to task ptr, first checking that
11 * ptr isn't the current task, in which case it does nothing. This
12 * also clears the TS-flag if the task we switched to has used the
13 * math co-processor most recently.
14 */
15/*
16 * switch_to() saves the extra registers that are not saved
17 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
18 * a0-a1. Some of these are used by schedule() and its predecessors
19 * and so we might see unexpected behavior when a task returns
20 * with unexpected register values.
21 *
22 * syscall stores these registers itself and none of them are used
23 * by syscall after the function in the syscall has been called.
24 *
25 * Beware that resume now expects *next to be in d1 and the offset of
26 * tss to be in a1. This saves a few instructions as we no longer have
27 * to push them onto the stack and read them back right after.
28 *
29 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
30 *
31 * Changed 96/09/19 by Andreas Schwab
32 * pass prev in a0, next in a1, offset of tss in d1, and whether
33 * the mm structures are shared in d2 (to avoid atc flushing).
34 */
35asmlinkage void resume(void);
36#define switch_to(prev,next,last) \
37{ \
38 void *_last; \
39 __asm__ __volatile__( \
40 "movel %1, %%a0\n\t" \
41 "movel %2, %%a1\n\t" \
42 "jbsr resume\n\t" \
43 "movel %%d1, %0\n\t" \
44 : "=d" (_last) \
45 : "d" (prev), "d" (next) \
46 : "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
47 (last) = _last; \
48}
49
50#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
51
52/*
53 * Force strict CPU ordering.
54 * Not really required on m68k...
55 */
56#define nop() asm volatile ("nop"::)
57#define mb() asm volatile ("" : : :"memory")
58#define rmb() asm volatile ("" : : :"memory")
59#define wmb() asm volatile ("" : : :"memory")
60#define set_mb(var, value) ({ (var) = (value); wmb(); })
61
62#define smp_mb() barrier()
63#define smp_rmb() barrier()
64#define smp_wmb() barrier()
65#define smp_read_barrier_depends() do { } while(0)
66
67#define read_barrier_depends() ((void)0)
68
69#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
70
71struct __xchg_dummy { unsigned long a[100]; };
72#define __xg(x) ((volatile struct __xchg_dummy *)(x))
73
74#ifndef CONFIG_RMW_INSNS
75static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
76{
77 unsigned long tmp, flags;
78
79 local_irq_save(flags);
80
81 switch (size) {
82 case 1:
83 __asm__ __volatile__
84 ("moveb %2,%0\n\t"
85 "moveb %1,%2"
86 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
87 break;
88 case 2:
89 __asm__ __volatile__
90 ("movew %2,%0\n\t"
91 "movew %1,%2"
92 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
93 break;
94 case 4:
95 __asm__ __volatile__
96 ("movel %2,%0\n\t"
97 "movel %1,%2"
98 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
99 break;
100 }
101 local_irq_restore(flags);
102 return tmp;
103}
104#else
105static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
106{
107 switch (size) {
108 case 1:
109 __asm__ __volatile__
110 ("moveb %2,%0\n\t"
111 "1:\n\t"
112 "casb %0,%1,%2\n\t"
113 "jne 1b"
114 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
115 break;
116 case 2:
117 __asm__ __volatile__
118 ("movew %2,%0\n\t"
119 "1:\n\t"
120 "casw %0,%1,%2\n\t"
121 "jne 1b"
122 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
123 break;
124 case 4:
125 __asm__ __volatile__
126 ("movel %2,%0\n\t"
127 "1:\n\t"
128 "casl %0,%1,%2\n\t"
129 "jne 1b"
130 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
131 break;
132 }
133 return x;
134}
135#endif
136
137#include <asm-generic/cmpxchg-local.h>
138
139/*
140 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
141 * them available.
142 */
143#define cmpxchg_local(ptr, o, n) \
144 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
145 (unsigned long)(n), sizeof(*(ptr))))
146#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
147
148#include <asm-generic/cmpxchg.h>
149
150#define arch_align_stack(x) (x)
151
152
153#endif /* _M68KNOMMU_SYSTEM_H */
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index 0bffb17d5db7..151068f64f44 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -22,7 +22,6 @@ extern e_vector vectors[];
22asmlinkage void auto_inthandler(void); 22asmlinkage void auto_inthandler(void);
23asmlinkage void user_inthandler(void); 23asmlinkage void user_inthandler(void);
24asmlinkage void bad_inthandler(void); 24asmlinkage void bad_inthandler(void);
25extern void init_vectors(void);
26 25
27#endif 26#endif
28 27
diff --git a/arch/m68k/kernel/irq.c b/arch/m68k/kernel/irq.c
index 544b8717d499..c73988cfa90f 100644
--- a/arch/m68k/kernel/irq.c
+++ b/arch/m68k/kernel/irq.c
@@ -28,3 +28,13 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
28 28
29 set_irq_regs(oldregs); 29 set_irq_regs(oldregs);
30} 30}
31
32
33/* The number of spurious interrupts */
34atomic_t irq_err_count;
35
36int arch_show_interrupts(struct seq_file *p, int prec)
37{
38 seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
39 return 0;
40}
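
irq_err_count is an atomic_t so arch_show_interrupts() can read it portably, while the bad_interrupt stubs later in this patch bump it straight from assembly (addql #1,irq_err_count), which is safe on these uniprocessor parts. A C-level handler could account a spurious interrupt the same way (sketch only):

    #include <linux/interrupt.h>
    #include <linux/atomic.h>

    extern atomic_t irq_err_count;

    /* Mirrors what the assembly stubs do on a spurious hit. */
    static irqreturn_t demo_spurious(int irq, void *dev_id)
    {
            atomic_inc(&irq_err_count);
            return IRQ_NONE;   /* not ours */
    }
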
diff --git a/arch/m68k/kernel/module.c b/arch/m68k/kernel/module.c
index 7ea203ce6b1a..34849c4c6e3d 100644
--- a/arch/m68k/kernel/module.c
+++ b/arch/m68k/kernel/module.c
@@ -1,5 +1,129 @@
1#ifdef CONFIG_MMU 1/*
2#include "module_mm.c" 2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file COPYING in the main directory of this archive
4 * for more details.
5 */
6
7#include <linux/moduleloader.h>
8#include <linux/elf.h>
9#include <linux/vmalloc.h>
10#include <linux/fs.h>
11#include <linux/string.h>
12#include <linux/kernel.h>
13
14#if 0
15#define DEBUGP printk
3#else 16#else
4#include "module_no.c" 17#define DEBUGP(fmt...)
18#endif
19
20#ifdef CONFIG_MODULES
21
22int apply_relocate(Elf32_Shdr *sechdrs,
23 const char *strtab,
24 unsigned int symindex,
25 unsigned int relsec,
26 struct module *me)
27{
28 unsigned int i;
29 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
30 Elf32_Sym *sym;
31 uint32_t *location;
32
33 DEBUGP("Applying relocate section %u to %u\n", relsec,
34 sechdrs[relsec].sh_info);
35 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
36 /* This is where to make the change */
37 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
38 + rel[i].r_offset;
39 /* This is the symbol it is referring to. Note that all
40 undefined symbols have been resolved. */
41 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
42 + ELF32_R_SYM(rel[i].r_info);
43
44 switch (ELF32_R_TYPE(rel[i].r_info)) {
45 case R_68K_32:
46 /* We add the value into the location given */
47 *location += sym->st_value;
48 break;
49 case R_68K_PC32:
50 /* Add the value, subtract its position */
51 *location += sym->st_value - (uint32_t)location;
52 break;
53 default:
54 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
55 me->name, ELF32_R_TYPE(rel[i].r_info));
56 return -ENOEXEC;
57 }
58 }
59 return 0;
60}
61
62int apply_relocate_add(Elf32_Shdr *sechdrs,
63 const char *strtab,
64 unsigned int symindex,
65 unsigned int relsec,
66 struct module *me)
67{
68 unsigned int i;
69 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
70 Elf32_Sym *sym;
71 uint32_t *location;
72
73 DEBUGP("Applying relocate_add section %u to %u\n", relsec,
74 sechdrs[relsec].sh_info);
75 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
76 /* This is where to make the change */
77 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
78 + rel[i].r_offset;
79 /* This is the symbol it is referring to. Note that all
80 undefined symbols have been resolved. */
81 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
82 + ELF32_R_SYM(rel[i].r_info);
83
84 switch (ELF32_R_TYPE(rel[i].r_info)) {
85 case R_68K_32:
86 /* We add the value into the location given */
87 *location = rel[i].r_addend + sym->st_value;
88 break;
89 case R_68K_PC32:
90 /* Add the value, subtract its position */
91 *location = rel[i].r_addend + sym->st_value - (uint32_t)location;
92 break;
93 default:
94 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
95 me->name, ELF32_R_TYPE(rel[i].r_info));
96 return -ENOEXEC;
97 }
98 }
99 return 0;
100}
101
102int module_finalize(const Elf_Ehdr *hdr,
103 const Elf_Shdr *sechdrs,
104 struct module *mod)
105{
106 module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
107 return 0;
108}
109
110#endif /* CONFIG_MODULES */
111
112void module_fixup(struct module *mod, struct m68k_fixup_info *start,
113 struct m68k_fixup_info *end)
114{
115#ifdef CONFIG_MMU
116 struct m68k_fixup_info *fixup;
117
118 for (fixup = start; fixup < end; fixup++) {
119 switch (fixup->type) {
120 case m68k_fixup_memoffset:
121 *(u32 *)fixup->addr = m68k_memoffset;
122 break;
123 case m68k_fixup_vnode_shift:
124 *(u16 *)fixup->addr += m68k_virt_to_node_shift;
125 break;
126 }
127 }
5#endif 128#endif
129}
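
A worked example of the two relocation kinds handled above, with made-up addresses (S = sym->st_value, A = r_addend, P = the address of the relocation site):

    #include <stdint.h>

    /* R_68K_32 stores the absolute value S+A; R_68K_PC32 stores the
     * pc-relative displacement S+A-P. */
    static uint32_t reloc_value(int pcrel, uint32_t S, uint32_t A,
                                uint32_t P)
    {
            return pcrel ? S + A - P : S + A;
    }

    /* reloc_value(0, 0x40001000, 8, 0x40200010) == 0x40001008
     * reloc_value(1, 0x40001000, 8, 0x40200010) == 0xffe00ff8,
     * i.e. -0x1ff008, a backward pc-relative displacement. */
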
diff --git a/arch/m68k/kernel/module_mm.c b/arch/m68k/kernel/module_mm.c
deleted file mode 100644
index ceafc47c96d5..000000000000
--- a/arch/m68k/kernel/module_mm.c
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file COPYING in the main directory of this archive
4 * for more details.
5 */
6
7#include <linux/moduleloader.h>
8#include <linux/elf.h>
9#include <linux/vmalloc.h>
10#include <linux/fs.h>
11#include <linux/string.h>
12#include <linux/kernel.h>
13
14#if 0
15#define DEBUGP printk
16#else
17#define DEBUGP(fmt...)
18#endif
19
20#ifdef CONFIG_MODULES
21
22int apply_relocate(Elf32_Shdr *sechdrs,
23 const char *strtab,
24 unsigned int symindex,
25 unsigned int relsec,
26 struct module *me)
27{
28 unsigned int i;
29 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
30 Elf32_Sym *sym;
31 uint32_t *location;
32
33 DEBUGP("Applying relocate section %u to %u\n", relsec,
34 sechdrs[relsec].sh_info);
35 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
36 /* This is where to make the change */
37 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
38 + rel[i].r_offset;
39 /* This is the symbol it is referring to. Note that all
40 undefined symbols have been resolved. */
41 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
42 + ELF32_R_SYM(rel[i].r_info);
43
44 switch (ELF32_R_TYPE(rel[i].r_info)) {
45 case R_68K_32:
46 /* We add the value into the location given */
47 *location += sym->st_value;
48 break;
49 case R_68K_PC32:
50 /* Add the value, subtract its position */
51 *location += sym->st_value - (uint32_t)location;
52 break;
53 default:
54 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
55 me->name, ELF32_R_TYPE(rel[i].r_info));
56 return -ENOEXEC;
57 }
58 }
59 return 0;
60}
61
62int apply_relocate_add(Elf32_Shdr *sechdrs,
63 const char *strtab,
64 unsigned int symindex,
65 unsigned int relsec,
66 struct module *me)
67{
68 unsigned int i;
69 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
70 Elf32_Sym *sym;
71 uint32_t *location;
72
73 DEBUGP("Applying relocate_add section %u to %u\n", relsec,
74 sechdrs[relsec].sh_info);
75 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
76 /* This is where to make the change */
77 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
78 + rel[i].r_offset;
79 /* This is the symbol it is referring to. Note that all
80 undefined symbols have been resolved. */
81 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
82 + ELF32_R_SYM(rel[i].r_info);
83
84 switch (ELF32_R_TYPE(rel[i].r_info)) {
85 case R_68K_32:
86 /* We add the value into the location given */
87 *location = rel[i].r_addend + sym->st_value;
88 break;
89 case R_68K_PC32:
90 /* Add the value, subtract its position */
91 *location = rel[i].r_addend + sym->st_value - (uint32_t)location;
92 break;
93 default:
94 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
95 me->name, ELF32_R_TYPE(rel[i].r_info));
96 return -ENOEXEC;
97 }
98 }
99 return 0;
100}
101
102int module_finalize(const Elf_Ehdr *hdr,
103 const Elf_Shdr *sechdrs,
104 struct module *mod)
105{
106 module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
107
108 return 0;
109}
110
111#endif /* CONFIG_MODULES */
112
113void module_fixup(struct module *mod, struct m68k_fixup_info *start,
114 struct m68k_fixup_info *end)
115{
116 struct m68k_fixup_info *fixup;
117
118 for (fixup = start; fixup < end; fixup++) {
119 switch (fixup->type) {
120 case m68k_fixup_memoffset:
121 *(u32 *)fixup->addr = m68k_memoffset;
122 break;
123 case m68k_fixup_vnode_shift:
124 *(u16 *)fixup->addr += m68k_virt_to_node_shift;
125 break;
126 }
127 }
128}
diff --git a/arch/m68k/kernel/module_no.c b/arch/m68k/kernel/module_no.c
deleted file mode 100644
index 5a097c6063fa..000000000000
--- a/arch/m68k/kernel/module_no.c
+++ /dev/null
@@ -1,92 +0,0 @@
1#include <linux/moduleloader.h>
2#include <linux/elf.h>
3#include <linux/vmalloc.h>
4#include <linux/fs.h>
5#include <linux/string.h>
6#include <linux/kernel.h>
7
8#if 0
9#define DEBUGP printk
10#else
11#define DEBUGP(fmt...)
12#endif
13
14int apply_relocate(Elf32_Shdr *sechdrs,
15 const char *strtab,
16 unsigned int symindex,
17 unsigned int relsec,
18 struct module *me)
19{
20 unsigned int i;
21 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
22 Elf32_Sym *sym;
23 uint32_t *location;
24
25 DEBUGP("Applying relocate section %u to %u\n", relsec,
26 sechdrs[relsec].sh_info);
27 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
28 /* This is where to make the change */
29 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
30 + rel[i].r_offset;
31 /* This is the symbol it is referring to. Note that all
32 undefined symbols have been resolved. */
33 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
34 + ELF32_R_SYM(rel[i].r_info);
35
36 switch (ELF32_R_TYPE(rel[i].r_info)) {
37 case R_68K_32:
38 /* We add the value into the location given */
39 *location += sym->st_value;
40 break;
41 case R_68K_PC32:
42 /* Add the value, subtract its position */
43 *location += sym->st_value - (uint32_t)location;
44 break;
45 default:
46 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
47 me->name, ELF32_R_TYPE(rel[i].r_info));
48 return -ENOEXEC;
49 }
50 }
51 return 0;
52}
53
54int apply_relocate_add(Elf32_Shdr *sechdrs,
55 const char *strtab,
56 unsigned int symindex,
57 unsigned int relsec,
58 struct module *me)
59{
60 unsigned int i;
61 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
62 Elf32_Sym *sym;
63 uint32_t *location;
64
65 DEBUGP("Applying relocate_add section %u to %u\n", relsec,
66 sechdrs[relsec].sh_info);
67 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
68 /* This is where to make the change */
69 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
70 + rel[i].r_offset;
71 /* This is the symbol it is referring to. Note that all
72 undefined symbols have been resolved. */
73 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
74 + ELF32_R_SYM(rel[i].r_info);
75
76 switch (ELF32_R_TYPE(rel[i].r_info)) {
77 case R_68K_32:
78 /* We add the value into the location given */
79 *location = rel[i].r_addend + sym->st_value;
80 break;
81 case R_68K_PC32:
82 /* Add the value, subtract its position */
83 *location = rel[i].r_addend + sym->st_value - (uint32_t)location;
84 break;
85 default:
86 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
87 me->name, ELF32_R_TYPE(rel[i].r_info));
88 return -ENOEXEC;
89 }
90 }
91 return 0;
92}
diff --git a/arch/m68k/kernel/traps_no.c b/arch/m68k/kernel/traps_no.c
index a768008dfd06..e67b8c806959 100644
--- a/arch/m68k/kernel/traps_no.c
+++ b/arch/m68k/kernel/traps_no.c
@@ -60,10 +60,6 @@ static char const * const vec_names[] = {
60 "MMU CONFIGURATION ERROR" 60 "MMU CONFIGURATION ERROR"
61}; 61};
62 62
63void __init trap_init(void)
64{
65}
66
67void die_if_kernel(char *str, struct pt_regs *fp, int nr) 63void die_if_kernel(char *str, struct pt_regs *fp, int nr)
68{ 64{
69 if (!(fp->sr & PS_S)) 65 if (!(fp->sr & PS_S))
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index df421e501436..1a1bd9067e90 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -9,6 +9,6 @@ lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
9ifdef CONFIG_MMU 9ifdef CONFIG_MMU
10lib-y += string.o uaccess.o checksum_mm.o 10lib-y += string.o uaccess.o checksum_mm.o
11else 11else
12lib-y += mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o delay.o checksum_no.o 12lib-y += mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o checksum_no.o
13endif 13endif
14 14
diff --git a/arch/m68k/lib/delay.c b/arch/m68k/lib/delay.c
deleted file mode 100644
index 5bd5472d38a0..000000000000
--- a/arch/m68k/lib/delay.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * arch/m68knommu/lib/delay.c
3 *
4 * (C) Copyright 2004, Greg Ungerer <gerg@snapgear.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <asm/param.h>
13#include <asm/delay.h>
14
15EXPORT_SYMBOL(udelay);
16
17void udelay(unsigned long usecs)
18{
19 _udelay(usecs);
20}
21
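
Deleting lib/delay.c is possible because the merged <asm/delay.h> now provides udelay() inline for both kernel flavours. Conceptually the scheme is the usual loops_per_jiffy calibration (hedged sketch with illustrative names; the real header does the 32x32->64 scaling in assembly):

    #include <linux/delay.h>

    /* Burn a given number of loop iterations. */
    static inline void demo_delay(unsigned long loops)
    {
            __asm__ __volatile__(
                    "1: subql #1,%0\n\t"
                    "jcc 1b"
                    : "=d" (loops) : "0" (loops));
    }

    /* 4295 ~= 2^32 / 10^6, so the >>32 divides by a million:
     * loops = usecs * HZ * loops_per_jiffy / 10^6. */
    static inline void demo_udelay(unsigned long usecs)
    {
            demo_delay((unsigned long)
                       (((unsigned long long)usecs * 4295 * HZ *
                         loops_per_jiffy) >> 32));
    }
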
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
index 7cbd7bd1f8bc..50cd12cf28d9 100644
--- a/arch/m68k/mm/init_no.c
+++ b/arch/m68k/mm/init_no.c
@@ -42,7 +42,7 @@
42 * ZERO_PAGE is a special page that is used for zero-initialized 42 * ZERO_PAGE is a special page that is used for zero-initialized
43 * data and COW. 43 * data and COW.
44 */ 44 */
45unsigned long empty_zero_page; 45void *empty_zero_page;
46 46
47extern unsigned long memory_start; 47extern unsigned long memory_start;
48extern unsigned long memory_end; 48extern unsigned long memory_end;
@@ -62,8 +62,8 @@ void __init paging_init(void)
62 unsigned long end_mem = memory_end & PAGE_MASK; 62 unsigned long end_mem = memory_end & PAGE_MASK;
63 unsigned long zones_size[MAX_NR_ZONES] = {0, }; 63 unsigned long zones_size[MAX_NR_ZONES] = {0, };
64 64
65 empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 65 empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
66 memset((void *)empty_zero_page, 0, PAGE_SIZE); 66 memset(empty_zero_page, 0, PAGE_SIZE);
67 67
68 /* 68 /*
69 * Set up SFC/DFC registers (user data space). 69 * Set up SFC/DFC registers (user data space).
@@ -120,7 +120,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
120 totalram_pages++; 120 totalram_pages++;
121 pages++; 121 pages++;
122 } 122 }
123 printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024)); 123 pr_notice("Freeing initrd memory: %luk freed\n",
124 pages * (PAGE_SIZE / 1024));
124} 125}
125#endif 126#endif
126 127
@@ -141,7 +142,7 @@ void free_initmem(void)
141 free_page(addr); 142 free_page(addr);
142 totalram_pages++; 143 totalram_pages++;
143 } 144 }
144 printk(KERN_NOTICE "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n", 145 pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n",
145 (addr - PAGE_ALIGN((long) &__init_begin)) >> 10, 146 (addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
146 (int)(PAGE_ALIGN((unsigned long)(&__init_begin))), 147 (int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
147 (int)(addr - PAGE_SIZE)); 148 (int)(addr - PAGE_SIZE));
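
The format-string changes above are type fixes, not cosmetics: PAGE_SIZE is unsigned long, so an int page count multiplied by (PAGE_SIZE / 1024) promotes to unsigned long and must be printed with %lu. In miniature (illustrative function name):

    #include <linux/kernel.h>
    #include <asm/page.h>

    /* int * unsigned long promotes to unsigned long, hence %lu. */
    static void demo_report(int pages)
    {
            pr_notice("Freeing initrd memory: %luk freed\n",
                      pages * (PAGE_SIZE / 1024));
    }
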
diff --git a/arch/m68k/platform/5206/config.c b/arch/m68k/platform/5206/config.c
index 9c335465e66d..6fa3f800277a 100644
--- a/arch/m68k/platform/5206/config.c
+++ b/arch/m68k/platform/5206/config.c
@@ -98,6 +98,12 @@ void m5206_cpu_reset(void)
98 98
99void __init config_BSP(char *commandp, int size) 99void __init config_BSP(char *commandp, int size)
100{ 100{
101#if defined(CONFIG_NETtel)
102 /* Copy command line from FLASH to local buffer... */
103 memcpy(commandp, (char *) 0xf0004000, size);
104 commandp[size-1] = 0;
105#endif /* CONFIG_NETtel */
106
101 mach_reset = m5206_cpu_reset; 107 mach_reset = m5206_cpu_reset;
102 m5206_timers_init(); 108 m5206_timers_init();
103 m5206_uarts_init(); 109 m5206_uarts_init();
diff --git a/arch/m68k/platform/5206e/Makefile b/arch/m68k/platform/5206e/Makefile
deleted file mode 100644
index b5db05625cfa..000000000000
--- a/arch/m68k/platform/5206e/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
1#
2# Makefile for the m68knommu linux kernel.
3#
4
5#
6# If you want to play with the HW breakpoints then you will
7# need to define this, which will give you a stack backtrace
8# on the console port whenever a DBG interrupt occurs. You have to
9# set up your HW breakpoints to trigger a DBG interrupt:
10#
11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# asflags-y := -DTRAP_DBG_INTERRUPT
13#
14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
16
17obj-y := config.o gpio.o
18
diff --git a/arch/m68k/platform/5206e/config.c b/arch/m68k/platform/5206e/config.c
deleted file mode 100644
index 942397984c66..000000000000
--- a/arch/m68k/platform/5206e/config.c
+++ /dev/null
@@ -1,127 +0,0 @@
1/***************************************************************************/
2
3/*
4 * linux/arch/m68knommu/platform/5206e/config.c
5 *
6 * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
7 */
8
9/***************************************************************************/
10
11#include <linux/kernel.h>
12#include <linux/param.h>
13#include <linux/init.h>
14#include <linux/io.h>
15#include <asm/machdep.h>
16#include <asm/coldfire.h>
17#include <asm/mcfsim.h>
18#include <asm/mcfuart.h>
19#include <asm/mcfdma.h>
20
21/***************************************************************************/
22
23static struct mcf_platform_uart m5206e_uart_platform[] = {
24 {
25 .mapbase = MCF_MBAR + MCFUART_BASE1,
26 .irq = 73,
27 },
28 {
29 .mapbase = MCF_MBAR + MCFUART_BASE2,
30 .irq = 74,
31 },
32 { },
33};
34
35static struct platform_device m5206e_uart = {
36 .name = "mcfuart",
37 .id = 0,
38 .dev.platform_data = m5206e_uart_platform,
39};
40
41static struct platform_device *m5206e_devices[] __initdata = {
42 &m5206e_uart,
43};
44
45/***************************************************************************/
46
47static void __init m5206e_uart_init_line(int line, int irq)
48{
49 if (line == 0) {
50 writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
51 writeb(irq, MCFUART_BASE1 + MCFUART_UIVR);
52 mcf_mapirq2imr(irq, MCFINTC_UART0);
53 } else if (line == 1) {
54 writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
55 writeb(irq, MCFUART_BASE2 + MCFUART_UIVR);
56 mcf_mapirq2imr(irq, MCFINTC_UART1);
57 }
58}
59
60static void __init m5206e_uarts_init(void)
61{
62 const int nrlines = ARRAY_SIZE(m5206e_uart_platform);
63 int line;
64
65 for (line = 0; (line < nrlines); line++)
66 m5206e_uart_init_line(line, m5206e_uart_platform[line].irq);
67}
68
69/***************************************************************************/
70
71static void __init m5206e_timers_init(void)
72{
73 /* Timer1 is always used as system timer */
74 writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
75 MCF_MBAR + MCFSIM_TIMER1ICR);
76 mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);
77
78#ifdef CONFIG_HIGHPROFILE
79 /* Timer2 is to be used as a high speed profile timer */
80 writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
81 MCF_MBAR + MCFSIM_TIMER2ICR);
82 mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
83#endif
84}
85
86/***************************************************************************/
87
88void m5206e_cpu_reset(void)
89{
90 local_irq_disable();
91 /* Set watchdog to soft reset, and enable it */
92 __raw_writeb(0xc0, MCF_MBAR + MCFSIM_SYPCR);
93 for (;;)
94 /* wait for watchdog to timeout */;
95}
96
97/***************************************************************************/
98
99void __init config_BSP(char *commandp, int size)
100{
101#if defined(CONFIG_NETtel)
102 /* Copy command line from FLASH to local buffer... */
103 memcpy(commandp, (char *) 0xf0004000, size);
104 commandp[size-1] = 0;
105#endif /* CONFIG_NETtel */
106
107 mach_reset = m5206e_cpu_reset;
108 m5206e_timers_init();
109 m5206e_uarts_init();
110
111 /* Only support the external interrupts on their primary level */
112 mcf_mapirq2imr(25, MCFINTC_EINT1);
113 mcf_mapirq2imr(28, MCFINTC_EINT4);
114 mcf_mapirq2imr(31, MCFINTC_EINT7);
115}
116
117/***************************************************************************/
118
119static int __init init_BSP(void)
120{
121 platform_add_devices(m5206e_devices, ARRAY_SIZE(m5206e_devices));
122 return 0;
123}
124
125arch_initcall(init_BSP);
126
127/***************************************************************************/
diff --git a/arch/m68k/platform/5206e/gpio.c b/arch/m68k/platform/5206e/gpio.c
deleted file mode 100644
index b9ab4a120f28..000000000000
--- a/arch/m68k/platform/5206e/gpio.c
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * Coldfire generic GPIO support
3 *
4 * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14*/
15
16#include <linux/kernel.h>
17#include <linux/init.h>
18
19#include <asm/coldfire.h>
20#include <asm/mcfsim.h>
21#include <asm/mcfgpio.h>
22
23static struct mcf_gpio_chip mcf_gpio_chips[] = {
24 {
25 .gpio_chip = {
26 .label = "PP",
27 .request = mcf_gpio_request,
28 .free = mcf_gpio_free,
29 .direction_input = mcf_gpio_direction_input,
30 .direction_output = mcf_gpio_direction_output,
31 .get = mcf_gpio_get_value,
32 .set = mcf_gpio_set_value,
33 .ngpio = 8,
34 },
35 .pddr = (void __iomem *) MCFSIM_PADDR,
36 .podr = (void __iomem *) MCFSIM_PADAT,
37 .ppdr = (void __iomem *) MCFSIM_PADAT,
38 },
39};
40
41static int __init mcf_gpio_init(void)
42{
43 unsigned i = 0;
44 while (i < ARRAY_SIZE(mcf_gpio_chips))
45 (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
46 return 0;
47}
48
49core_initcall(mcf_gpio_init);
diff --git a/arch/m68k/platform/5272/intc.c b/arch/m68k/platform/5272/intc.c
index 7e715dfe2819..7160e618b0a9 100644
--- a/arch/m68k/platform/5272/intc.c
+++ b/arch/m68k/platform/5272/intc.c
@@ -162,8 +162,6 @@ void __init init_IRQ(void)
162{ 162{
163 int irq, edge; 163 int irq, edge;
164 164
165 init_vectors();
166
167 /* Mask all interrupt sources */ 165 /* Mask all interrupt sources */
168 writel(0x88888888, MCF_MBAR + MCFSIM_ICR1); 166 writel(0x88888888, MCF_MBAR + MCFSIM_ICR1);
169 writel(0x88888888, MCF_MBAR + MCFSIM_ICR2); 167 writel(0x88888888, MCF_MBAR + MCFSIM_ICR2);
diff --git a/arch/m68k/platform/68328/entry.S b/arch/m68k/platform/68328/entry.S
index f68dce766c0a..293e1eba9acc 100644
--- a/arch/m68k/platform/68328/entry.S
+++ b/arch/m68k/platform/68328/entry.S
@@ -236,27 +236,26 @@ ret_from_interrupt:
236 * Handler for uninitialized and spurious interrupts. 236 * Handler for uninitialized and spurious interrupts.
237 */ 237 */
238ENTRY(bad_interrupt) 238ENTRY(bad_interrupt)
239 addql #1,num_spurious 239 addql #1,irq_err_count
240 rte 240 rte
241 241
242/* 242/*
243 * Beware - when entering resume, prev (the current task) is 243 * Beware - when entering resume, prev (the current task) is
244 * in a0, next (the new task) is in a1,so don't change these 244 * in a0, next (the new task) is in a1, so don't change these
245 * registers until their contents are no longer needed. 245 * registers until their contents are no longer needed.
246 */ 246 */
247ENTRY(resume) 247ENTRY(resume)
248 movel %a0,%d1 /* save prev thread in d1 */ 248 movel %a0,%d1 /* save prev thread in d1 */
249 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */ 249 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
250 movel %usp,%a2 /* save usp */
251 movel %a2,%a0@(TASK_THREAD+THREAD_USP)
252
253 SAVE_SWITCH_STACK 250 SAVE_SWITCH_STACK
254 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */ 251 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
252 movel %usp,%a3 /* save usp */
253 movel %a3,%a0@(TASK_THREAD+THREAD_USP)
254
255 movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
256 movel %a3,%usp
255 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */ 257 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
256 RESTORE_SWITCH_STACK 258 RESTORE_SWITCH_STACK
257
258 movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
259 movel %a0,%usp
260 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */ 259 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */
261 rts 260 rts
262 261
diff --git a/arch/m68k/platform/68328/ints.c b/arch/m68k/platform/68328/ints.c
index a90288cf7446..4bd456531f91 100644
--- a/arch/m68k/platform/68328/ints.c
+++ b/arch/m68k/platform/68328/ints.c
@@ -70,9 +70,6 @@ asmlinkage irqreturn_t inthandler7(void);
70 70
71extern e_vector *_ramvec; 71extern e_vector *_ramvec;
72 72
73/* The number of spurious interrupts */
74volatile unsigned int num_spurious;
75
76/* The 68k family did not have a good way to determine the source 73/* The 68k family did not have a good way to determine the source
77 * of interrupts until later in the family. The EC000 core does 74 * of interrupts until later in the family. The EC000 core does
78 * not provide the vector number on the stack, so we vector everything 75 * not provide the vector number on the stack, so we vector everything
@@ -155,7 +152,7 @@ static struct irq_chip intc_irq_chip = {
155 * This function should be called during kernel startup to initialize 152 * This function should be called during kernel startup to initialize
156 * the machine vector table. 153 * the machine vector table.
157 */ 154 */
158void __init init_IRQ(void) 155void __init trap_init(void)
159{ 156{
160 int i; 157 int i;
161 158
@@ -172,6 +169,11 @@ void __init init_IRQ(void)
172 _ramvec[69] = (e_vector) inthandler5; 169 _ramvec[69] = (e_vector) inthandler5;
173 _ramvec[70] = (e_vector) inthandler6; 170 _ramvec[70] = (e_vector) inthandler6;
174 _ramvec[71] = (e_vector) inthandler7; 171 _ramvec[71] = (e_vector) inthandler7;
172}
173
174void __init init_IRQ(void)
175{
176 int i;
175 177
176 IVR = 0x40; /* Set DragonBall IVR (interrupt base) to 64 */ 178 IVR = 0x40; /* Set DragonBall IVR (interrupt base) to 64 */
177 179
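
The trap_init()/init_IRQ() split above, like the init_vectors() removals in the ColdFire interrupt controllers elsewhere in this patch, leans on the generic boot ordering: start_kernel() runs trap_init(), which now installs the vector table, well before init_IRQ(), which is left to mask sources and register irq chips. Roughly (simplified sketch, not the actual init/main.c code):

    extern void trap_init(void);
    extern void init_IRQ(void);

    /* Relevant slice of the start_kernel() ordering. */
    static void demo_boot_order(void)
    {
            trap_init();    /* install _ramvec vectors (was init_vectors) */
            /* ... memory and scheduler setup runs in between ... */
            init_IRQ();     /* mask sources, set up irq_chip handlers */
    }
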
diff --git a/arch/m68k/platform/68360/entry.S b/arch/m68k/platform/68360/entry.S
index a07b14feed92..abbb89672ea0 100644
--- a/arch/m68k/platform/68360/entry.S
+++ b/arch/m68k/platform/68360/entry.S
@@ -157,27 +157,26 @@ ret_from_interrupt:
157 * Handler for uninitialized and spurious interrupts. 157 * Handler for uninitialized and spurious interrupts.
158 */ 158 */
159bad_interrupt: 159bad_interrupt:
160 addql #1,num_spurious 160 addql #1,irq_err_count
161 rte 161 rte
162 162
163/* 163/*
164 * Beware - when entering resume, prev (the current task) is 164 * Beware - when entering resume, prev (the current task) is
165 * in a0, next (the new task) is in a1,so don't change these 165 * in a0, next (the new task) is in a1, so don't change these
166 * registers until their contents are no longer needed. 166 * registers until their contents are no longer needed.
167 */ 167 */
168ENTRY(resume) 168ENTRY(resume)
169 movel %a0,%d1 /* save prev thread in d1 */ 169 movel %a0,%d1 /* save prev thread in d1 */
170 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */ 170 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
171 movel %usp,%a2 /* save usp */
172 movel %a2,%a0@(TASK_THREAD+THREAD_USP)
173
174 SAVE_SWITCH_STACK 171 SAVE_SWITCH_STACK
175 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */ 172 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
173 movel %usp,%a3 /* save usp */
174 movel %a3,%a0@(TASK_THREAD+THREAD_USP)
175
176 movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
177 movel %a3,%usp
176 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */ 178 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
177 RESTORE_SWITCH_STACK 179 RESTORE_SWITCH_STACK
178
179 movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
180 movel %a0,%usp
181 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */ 180 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */
182 rts 181 rts
183 182
diff --git a/arch/m68k/platform/68360/ints.c b/arch/m68k/platform/68360/ints.c
index 4af0f4e30f74..7b40202d9638 100644
--- a/arch/m68k/platform/68360/ints.c
+++ b/arch/m68k/platform/68360/ints.c
@@ -34,9 +34,6 @@ asmlinkage void inthandler(void);
34 34
35extern void *_ramvec[]; 35extern void *_ramvec[];
36 36
37/* The number of spurious interrupts */
38volatile unsigned int num_spurious;
39
40static void intc_irq_unmask(struct irq_data *d) 37static void intc_irq_unmask(struct irq_data *d)
41{ 38{
42 pquicc->intr_cimr |= (1 << d->irq); 39 pquicc->intr_cimr |= (1 << d->irq);
@@ -63,9 +60,8 @@ static struct irq_chip intc_irq_chip = {
63 * This function should be called during kernel startup to initialize 60 * This function should be called during kernel startup to initialize
64 * the vector table. 61 * the vector table.
65 */ 62 */
66void init_IRQ(void) 63void __init trap_init(void)
67{ 64{
68 int i;
69 int vba = (CPM_VECTOR_BASE<<4); 65 int vba = (CPM_VECTOR_BASE<<4);
70 66
71 /* set up the vectors */ 67 /* set up the vectors */
@@ -130,6 +126,11 @@ void init_IRQ(void)
130 126
131 /* turn off all CPM interrupts */ 127 /* turn off all CPM interrupts */
132 pquicc->intr_cimr = 0x00000000; 128 pquicc->intr_cimr = 0x00000000;
129}
130
131void init_IRQ(void)
132{
133 int i;
133 134
134 for (i = 0; (i < NR_IRQS); i++) { 135 for (i = 0; (i < NR_IRQS); i++) {
135 irq_set_chip(i, &intc_irq_chip); 136 irq_set_chip(i, &intc_irq_chip);
diff --git a/arch/m68k/platform/coldfire/entry.S b/arch/m68k/platform/coldfire/entry.S
index 27c2b001161e..bd27242c2f43 100644
--- a/arch/m68k/platform/coldfire/entry.S
+++ b/arch/m68k/platform/coldfire/entry.S
@@ -182,21 +182,23 @@ ENTRY(inthandler)
182 182
183/* 183/*
184 * Beware - when entering resume, prev (the current task) is 184 * Beware - when entering resume, prev (the current task) is
185 * in a0, next (the new task) is in a1,so don't change these 185 * in a0, next (the new task) is in a1, so don't change these
186 * registers until their contents are no longer needed. 186 * registers until their contents are no longer needed.
187 * This is always called in supervisor mode, so don't bother to save
188 * and restore sr; user's process sr is actually in the stack.
189 */ 187 */
190ENTRY(resume) 188ENTRY(resume)
191 movel %a0, %d1 /* get prev thread in d1 */ 189 movew %sr,%d1 /* save current status */
192 RDUSP 190 movew %d1,%a0@(TASK_THREAD+THREAD_SR)
193 movel %a2,%a0@(TASK_THREAD+THREAD_USP) 191 movel %a0,%d1 /* get prev thread in d1 */
194
195 SAVE_SWITCH_STACK 192 SAVE_SWITCH_STACK
196 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */ 193 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
197 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */ 194 RDUSP /* movel %usp,%a3 */
195 movel %a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */
196
197 movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */
198 WRUSP /* movel %a3,%usp */
199 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */
200 movew %a1@(TASK_THREAD+THREAD_SR),%d7 /* restore new status */
201 movew %d7,%sr
198 RESTORE_SWITCH_STACK 202 RESTORE_SWITCH_STACK
199
200 movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
201 WRUSP
202 rts 203 rts
204
diff --git a/arch/m68k/platform/coldfire/intc-2.c b/arch/m68k/platform/coldfire/intc-2.c
index 74b55cfbc3cb..995093357c59 100644
--- a/arch/m68k/platform/coldfire/intc-2.c
+++ b/arch/m68k/platform/coldfire/intc-2.c
@@ -194,8 +194,6 @@ void __init init_IRQ(void)
194{ 194{
195 int irq; 195 int irq;
196 196
197 init_vectors();
198
199 /* Mask all interrupt sources */ 197 /* Mask all interrupt sources */
200 __raw_writel(0x1, MCFICM_INTC0 + MCFINTC_IMRL); 198 __raw_writel(0x1, MCFICM_INTC0 + MCFINTC_IMRL);
201#ifdef MCFICM_INTC1 199#ifdef MCFICM_INTC1
diff --git a/arch/m68k/platform/coldfire/intc-simr.c b/arch/m68k/platform/coldfire/intc-simr.c
index d6a4d9d53e42..650d52e2927e 100644
--- a/arch/m68k/platform/coldfire/intc-simr.c
+++ b/arch/m68k/platform/coldfire/intc-simr.c
@@ -171,8 +171,6 @@ void __init init_IRQ(void)
171{ 171{
172 int irq, eirq; 172 int irq, eirq;
173 173
174 init_vectors();
175
176 /* Mask all interrupt sources */ 174 /* Mask all interrupt sources */
177 __raw_writeb(0xff, MCFINTC0_SIMR); 175 __raw_writeb(0xff, MCFINTC0_SIMR);
178 if (MCFINTC1_SIMR) 176 if (MCFINTC1_SIMR)
diff --git a/arch/m68k/platform/coldfire/intc.c b/arch/m68k/platform/coldfire/intc.c
index 0bbb414856eb..5c0c150b4067 100644
--- a/arch/m68k/platform/coldfire/intc.c
+++ b/arch/m68k/platform/coldfire/intc.c
@@ -139,7 +139,6 @@ void __init init_IRQ(void)
139{ 139{
140 int irq; 140 int irq;
141 141
142 init_vectors();
143 mcf_maskimr(0xffffffff); 142 mcf_maskimr(0xffffffff);
144 143
145 for (irq = 0; (irq < NR_IRQS); irq++) { 144 for (irq = 0; (irq < NR_IRQS); irq++) {
diff --git a/arch/m68k/platform/coldfire/vectors.c b/arch/m68k/platform/coldfire/vectors.c
index a21d3f870b7a..3a7cc524ecd3 100644
--- a/arch/m68k/platform/coldfire/vectors.c
+++ b/arch/m68k/platform/coldfire/vectors.c
@@ -35,21 +35,13 @@ asmlinkage void dbginterrupt_c(struct frame *fp)
35 35
36extern e_vector *_ramvec; 36extern e_vector *_ramvec;
37 37
38void set_evector(int vecnum, void (*handler)(void))
39{
40 if (vecnum >= 0 && vecnum <= 255)
41 _ramvec[vecnum] = handler;
42}
43
44/***************************************************************************/
45
46/* Assembler routines */ 38/* Assembler routines */
47asmlinkage void buserr(void); 39asmlinkage void buserr(void);
48asmlinkage void trap(void); 40asmlinkage void trap(void);
49asmlinkage void system_call(void); 41asmlinkage void system_call(void);
50asmlinkage void inthandler(void); 42asmlinkage void inthandler(void);
51 43
52void __init init_vectors(void) 44void __init trap_init(void)
53{ 45{
54 int i; 46 int i;
55 47