Diffstat (limited to 'arch/m68k')
 arch/m68k/Kconfig                        |    5
 arch/m68k/Kconfig.mmu                    |    6
 arch/m68k/Kconfig.nommu                  |    3
 arch/m68k/Makefile_no                    |    2
 arch/m68k/amiga/chipram.c                |  136
 arch/m68k/atari/stram.c                  |  354
 arch/m68k/emu/nfeth.c                    |    2
 arch/m68k/include/asm/atari_stram.h      |    3
 arch/m68k/include/asm/atarihw.h          |    4
 arch/m68k/include/asm/atomic.h           |   13
 arch/m68k/include/asm/bitops.h           |  531
 arch/m68k/include/asm/bitops_mm.h        |  501
 arch/m68k/include/asm/bitops_no.h        |  333
 arch/m68k/include/asm/delay.h            |   97
 arch/m68k/include/asm/delay_mm.h         |   57
 arch/m68k/include/asm/delay_no.h         |   76
 arch/m68k/include/asm/entry_no.h         |   12
 arch/m68k/include/asm/hardirq.h          |   35
 arch/m68k/include/asm/hardirq_mm.h       |   16
 arch/m68k/include/asm/hardirq_no.h       |   19
 arch/m68k/include/asm/irq.h              |   11
 arch/m68k/include/asm/machdep.h          |    1
 arch/m68k/include/asm/module.h           |   31
 arch/m68k/include/asm/page_mm.h          |    2
 arch/m68k/include/asm/posix_types.h      |    2
 arch/m68k/include/asm/processor.h        |    4
 arch/m68k/include/asm/ptrace.h           |    1
 arch/m68k/include/asm/signal.h           |   15
 arch/m68k/include/asm/system.h           |  194
 arch/m68k/include/asm/system_mm.h        |  193
 arch/m68k/include/asm/system_no.h        |  153
 arch/m68k/include/asm/traps.h            |    1
 arch/m68k/kernel/irq.c                   |   10
 arch/m68k/kernel/module.c                |  130
 arch/m68k/kernel/module_mm.c             |  155
 arch/m68k/kernel/module_no.c             |  126
 arch/m68k/kernel/process_mm.c            |    2
 arch/m68k/kernel/process_no.c            |    2
 arch/m68k/kernel/setup_mm.c              |    2
 arch/m68k/kernel/syscalltable.S          |    2
 arch/m68k/kernel/traps_no.c              |    4
 arch/m68k/kernel/vmlinux-nommu.lds       |   91
 arch/m68k/lib/Makefile                   |    2
 arch/m68k/lib/delay.c                    |   21
 arch/m68k/math-emu/fp_log.c              |    3
 arch/m68k/math-emu/multi_arith.h         |  530
 arch/m68k/mm/init_mm.c                   |    5
 arch/m68k/mm/init_no.c                   |   11
 arch/m68k/platform/5206/config.c         |    6
 arch/m68k/platform/5206e/Makefile        |   18
 arch/m68k/platform/5206e/config.c        |  127
 arch/m68k/platform/5206e/gpio.c          |   49
 arch/m68k/platform/5272/intc.c           |    2
 arch/m68k/platform/68328/entry.S         |   15
 arch/m68k/platform/68328/ints.c          |   10
 arch/m68k/platform/68360/entry.S         |   15
 arch/m68k/platform/68360/ints.c          |   11
 arch/m68k/platform/coldfire/entry.S      |   24
 arch/m68k/platform/coldfire/intc-2.c     |    2
 arch/m68k/platform/coldfire/intc-simr.c  |    2
 arch/m68k/platform/coldfire/intc.c       |    1
 arch/m68k/platform/coldfire/vectors.c    |   10
 62 files changed, 1311 insertions(+), 2890 deletions(-)
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index d66e34c718d..9e8ee9d2b8c 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -6,6 +6,7 @@ config M68K
 	select GENERIC_ATOMIC64 if MMU
 	select HAVE_GENERIC_HARDIRQS if !MMU
 	select GENERIC_IRQ_SHOW if !MMU
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
 
 config RWSEM_GENERIC_SPINLOCK
 	bool
@@ -41,6 +42,10 @@ config NO_DMA
 config ZONE_DMA
 	bool
 	default y
+
+config CPU_HAS_NO_BITFIELDS
+	bool
+
 config HZ
 	int
 	default 1000 if CLEOPATRA
diff --git a/arch/m68k/Kconfig.mmu b/arch/m68k/Kconfig.mmu
index 16539b1d5d3..13e20bbc407 100644
--- a/arch/m68k/Kconfig.mmu
+++ b/arch/m68k/Kconfig.mmu
@@ -372,12 +372,6 @@ config AMIGA_PCMCIA
 	  Include support in the kernel for pcmcia on Amiga 1200 and Amiga
 	  600. If you intend to use pcmcia cards say Y; otherwise say N.
 
-config STRAM_PROC
-	bool "ST-RAM statistics in /proc"
-	depends on ATARI
-	help
-	  Say Y here to report ST-RAM usage statistics in /proc/stram.
-
 config HEARTBEAT
 	bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC || Q40
 	default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300
diff --git a/arch/m68k/Kconfig.nommu b/arch/m68k/Kconfig.nommu
index b004dc1b171..ff46383112a 100644
--- a/arch/m68k/Kconfig.nommu
+++ b/arch/m68k/Kconfig.nommu
@@ -16,6 +16,7 @@ config GENERIC_CLOCKEVENTS
 
 config M68000
 	bool
+	select CPU_HAS_NO_BITFIELDS
 	help
 	  The Freescale (was Motorola) 68000 CPU is the first generation of
 	  the well known M68K family of processors. The CPU core as well as
@@ -25,6 +26,7 @@ config M68000
 
 config MCPU32
 	bool
+	select CPU_HAS_NO_BITFIELDS
 	help
 	  The Freescale (was then Motorola) CPU32 is a CPU core that is
 	  based on the 68020 processor. For the most part it is used in
@@ -34,6 +36,7 @@ config COLDFIRE
 	bool
 	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
+	select CPU_HAS_NO_BITFIELDS
 	help
 	  The Freescale ColdFire family of processors is a modern derivitive
 	  of the 68000 processor family. They are mainly targeted at embedded
diff --git a/arch/m68k/Makefile_no b/arch/m68k/Makefile_no
index 81652ab893e..844d3f17226 100644
--- a/arch/m68k/Makefile_no
+++ b/arch/m68k/Makefile_no
@@ -13,7 +13,7 @@ platform-$(CONFIG_M68EZ328)	:= 68EZ328
 platform-$(CONFIG_M68VZ328)	:= 68VZ328
 platform-$(CONFIG_M68360)	:= 68360
 platform-$(CONFIG_M5206)	:= 5206
-platform-$(CONFIG_M5206e)	:= 5206e
+platform-$(CONFIG_M5206e)	:= 5206
 platform-$(CONFIG_M520x)	:= 520x
 platform-$(CONFIG_M523x)	:= 523x
 platform-$(CONFIG_M5249)	:= 5249
diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c
index dd0447db1c9..99449fbf9a7 100644
--- a/arch/m68k/amiga/chipram.c
+++ b/arch/m68k/amiga/chipram.c
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 #include <linux/module.h>
 
+#include <asm/atomic.h>
 #include <asm/page.h>
 #include <asm/amigahw.h>
 
@@ -23,111 +24,100 @@ unsigned long amiga_chip_size;
 EXPORT_SYMBOL(amiga_chip_size);
 
 static struct resource chipram_res = {
 	.name = "Chip RAM", .start = CHIP_PHYSADDR
 };
-static unsigned long chipavail;
+static atomic_t chipavail;
 
 
 void __init amiga_chip_init(void)
 {
 	if (!AMIGAHW_PRESENT(CHIP_RAM))
 		return;
 
-	chipram_res.end = amiga_chip_size-1;
+	chipram_res.end = CHIP_PHYSADDR + amiga_chip_size - 1;
 	request_resource(&iomem_resource, &chipram_res);
 
-	chipavail = amiga_chip_size;
+	atomic_set(&chipavail, amiga_chip_size);
 }
 
 
 void *amiga_chip_alloc(unsigned long size, const char *name)
 {
 	struct resource *res;
+	void *p;
 
-	/* round up */
-	size = PAGE_ALIGN(size);
-
-#ifdef DEBUG
-	printk("amiga_chip_alloc: allocate %ld bytes\n", size);
-#endif
 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
 	if (!res)
 		return NULL;
-	res->name = name;
 
-	if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
+	res->name = name;
+	p = amiga_chip_alloc_res(size, res);
+	if (!p) {
 		kfree(res);
 		return NULL;
 	}
-	chipavail -= size;
-#ifdef DEBUG
-	printk("amiga_chip_alloc: returning %lx\n", res->start);
-#endif
-	return (void *)ZTWO_VADDR(res->start);
+
+	return p;
 }
 EXPORT_SYMBOL(amiga_chip_alloc);
 
 
  /*
  * Warning:
- * amiga_chip_alloc_res is meant only for drivers that need to allocate
- * Chip RAM before kmalloc() is functional. As a consequence, those
- * drivers must not free that Chip RAM afterwards.
+ * amiga_chip_alloc_res is meant only for drivers that need to
+ * allocate Chip RAM before kmalloc() is functional. As a consequence,
+ * those drivers must not free that Chip RAM afterwards.
  */
 
-void * __init amiga_chip_alloc_res(unsigned long size, struct resource *res)
+void *amiga_chip_alloc_res(unsigned long size, struct resource *res)
 {
-	unsigned long start;
+	int error;
 
 	/* round up */
 	size = PAGE_ALIGN(size);
-	/* dmesg into chipmem prefers memory at the safe end */
-	start = CHIP_PHYSADDR + chipavail - size;
-
-#ifdef DEBUG
-	printk("amiga_chip_alloc_res: allocate %ld bytes\n", size);
-#endif
-	if (allocate_resource(&chipram_res, res, size, start, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
-		printk("amiga_chip_alloc_res: first alloc failed!\n");
-		if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0)
-			return NULL;
-	}
-	chipavail -= size;
-#ifdef DEBUG
-	printk("amiga_chip_alloc_res: returning %lx\n", res->start);
-#endif
+
+	pr_debug("amiga_chip_alloc_res: allocate %lu bytes\n", size);
+	error = allocate_resource(&chipram_res, res, size, 0, UINT_MAX,
+				  PAGE_SIZE, NULL, NULL);
+	if (error < 0) {
+		pr_err("amiga_chip_alloc_res: allocate_resource() failed %d!\n",
+		       error);
+		return NULL;
+	}
+
+	atomic_sub(size, &chipavail);
+	pr_debug("amiga_chip_alloc_res: returning %pR\n", res);
 	return (void *)ZTWO_VADDR(res->start);
 }
 
 void amiga_chip_free(void *ptr)
 {
 	unsigned long start = ZTWO_PADDR(ptr);
-	struct resource **p, *res;
+	struct resource *res;
 	unsigned long size;
 
-	for (p = &chipram_res.child; (res = *p); p = &res->sibling) {
-		if (res->start != start)
-			continue;
-		*p = res->sibling;
-		size = res->end-start;
-#ifdef DEBUG
-		printk("amiga_chip_free: free %ld bytes at %p\n", size, ptr);
-#endif
-		chipavail += size;
+	res = lookup_resource(&chipram_res, start);
+	if (!res) {
+		pr_err("amiga_chip_free: trying to free nonexistent region at "
+		       "%p\n", ptr);
+		return;
+	}
+
+	size = resource_size(res);
+	pr_debug("amiga_chip_free: free %lu bytes at %p\n", size, ptr);
+	atomic_add(size, &chipavail);
+	release_resource(res);
 	kfree(res);
-		return;
-	}
-	printk("amiga_chip_free: trying to free nonexistent region at %p\n", ptr);
 }
 EXPORT_SYMBOL(amiga_chip_free);
 
 
 unsigned long amiga_chip_avail(void)
 {
-#ifdef DEBUG
-	printk("amiga_chip_avail : %ld bytes\n", chipavail);
-#endif
-	return chipavail;
+	unsigned long n = atomic_read(&chipavail);
+
+	pr_debug("amiga_chip_avail : %lu bytes\n", n);
+	return n;
}
 EXPORT_SYMBOL(amiga_chip_avail);
 
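Usage sketch (not part of the patch above; hypothetical driver code): after this conversion a caller's view of the API is unchanged, since amiga_chip_alloc() now simply delegates to the resource-backed amiga_chip_alloc_res(). The entry points shown are the existing exports declared in <asm/amigahw.h>.

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <asm/amigahw.h>

    static void *fb;	/* hypothetical frame buffer in Chip RAM */

    static int example_init(void)
    {
    	/* size is rounded up to PAGE_SIZE internally; NULL means the
    	 * pool is exhausted or the struct resource allocation failed */
    	fb = amiga_chip_alloc(64 * 1024, "example frame buffer");
    	if (!fb)
    		return -ENOMEM;

    	pr_info("chip RAM still available: %lu bytes\n", amiga_chip_avail());
    	return 0;
    }

    static void example_exit(void)
    {
    	/* legal only for amiga_chip_alloc() allocations;
    	 * amiga_chip_alloc_res() callers must never free (see warning) */
    	amiga_chip_free(fb);
    }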
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index 6ec3b7f3377..0810c8d56e5 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -1,5 +1,5 @@
 /*
- * arch/m68k/atari/stram.c: Functions for ST-RAM allocations
+ * Functions for ST-RAM allocations
  *
  * Copyright 1994-97 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
  *
@@ -30,91 +30,35 @@
 #include <asm/atari_stram.h>
 #include <asm/io.h>
 
-#undef DEBUG
-
-#ifdef DEBUG
-#define DPRINTK(fmt,args...) printk( fmt, ##args )
-#else
-#define DPRINTK(fmt,args...)
-#endif
-
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_STRAM_PROC)
-/* abbrev for the && above... */
-#define DO_PROC
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
 
 /*
- * ++roman:
- *
- * New version of ST-Ram buffer allocation. Instead of using the
- * 1 MB - 4 KB that remain when the ST-Ram chunk starts at $1000
- * (1 MB granularity!), such buffers are reserved like this:
- *
- *  - If the kernel resides in ST-Ram anyway, we can take the buffer
- *    from behind the current kernel data space the normal way
- *    (incrementing start_mem).
- *
- *  - If the kernel is in TT-Ram, stram_init() initializes start and
- *    end of the available region. Buffers are allocated from there
- *    and mem_init() later marks the such used pages as reserved.
- *    Since each TT-Ram chunk is at least 4 MB in size, I hope there
- *    won't be an overrun of the ST-Ram region by normal kernel data
- *    space.
- *
- * For that, ST-Ram may only be allocated while kernel initialization
- * is going on, or exactly: before mem_init() is called. There is also
- * no provision now for freeing ST-Ram buffers. It seems that isn't
- * really needed.
- *
+ * The ST-RAM allocator allocates memory from a pool of reserved ST-RAM of
+ * configurable size, set aside on ST-RAM init.
+ * As long as this pool is not exhausted, allocation of real ST-RAM can be
+ * guaranteed.
  */
 
-/* Start and end (virtual) of ST-RAM */
-static void *stram_start, *stram_end;
-
-/* set after memory_init() executed and allocations via start_mem aren't
- * possible anymore */
-static int mem_init_done;
-
 /* set if kernel is in ST-RAM */
 static int kernel_in_stram;
 
-typedef struct stram_block {
-	struct stram_block *next;
-	void *start;
-	unsigned long size;
-	unsigned flags;
-	const char *owner;
-} BLOCK;
-
-/* values for flags field */
-#define BLOCK_FREE	0x01	/* free structure in the BLOCKs pool */
-#define BLOCK_KMALLOCED	0x02	/* structure allocated by kmalloc() */
-#define BLOCK_GFP	0x08	/* block allocated with __get_dma_pages() */
+static struct resource stram_pool = {
+	.name = "ST-RAM Pool"
+};
 
-/* list of allocated blocks */
-static BLOCK *alloc_list;
+static unsigned long pool_size = 1024*1024;
 
-/* We can't always use kmalloc() to allocate BLOCK structures, since
- * stram_alloc() can be called rather early. So we need some pool of
- * statically allocated structures. 20 of them is more than enough, so in most
- * cases we never should need to call kmalloc(). */
-#define N_STATIC_BLOCKS 20
-static BLOCK static_blocks[N_STATIC_BLOCKS];
 
-/***************************** Prototypes *****************************/
+static int __init atari_stram_setup(char *arg)
+{
+	if (!MACH_IS_ATARI)
+		return 0;
 
-static BLOCK *add_region( void *addr, unsigned long size );
-static BLOCK *find_region( void *addr );
-static int remove_region( BLOCK *block );
+	pool_size = memparse(arg, NULL);
+	return 0;
+}
 
-/************************* End of Prototypes **************************/
+early_param("stram_pool", atari_stram_setup);
 
-
-/* ------------------------------------------------------------------------ */
-/*                             Public Interface                              */
-/* ------------------------------------------------------------------------ */
 
 /*
  * This init function is called very early by atari/config.c
@@ -123,25 +67,23 @@ static int remove_region( BLOCK *block );
 void __init atari_stram_init(void)
 {
 	int i;
+	void *stram_start;
 
-	/* initialize static blocks */
-	for( i = 0; i < N_STATIC_BLOCKS; ++i )
-		static_blocks[i].flags = BLOCK_FREE;
-
-	/* determine whether kernel code resides in ST-RAM (then ST-RAM is the
-	 * first memory block at virtual 0x0) */
+	/*
+	 * determine whether kernel code resides in ST-RAM
	 * (then ST-RAM is the first memory block at virtual 0x0)
+	 */
 	stram_start = phys_to_virt(0);
 	kernel_in_stram = (stram_start == 0);
 
-	for( i = 0; i < m68k_num_memory; ++i ) {
+	for (i = 0; i < m68k_num_memory; ++i) {
 		if (m68k_memory[i].addr == 0) {
-			/* skip first 2kB or page (supervisor-only!) */
-			stram_end = stram_start + m68k_memory[i].size;
 			return;
 		}
 	}
+
 	/* Should never come here! (There is always ST-Ram!) */
-	panic( "atari_stram_init: no ST-RAM found!" );
+	panic("atari_stram_init: no ST-RAM found!");
 }
 
 
@@ -151,226 +93,68 @@ void __init atari_stram_init(void)
  */
 void __init atari_stram_reserve_pages(void *start_mem)
 {
-	/* always reserve first page of ST-RAM, the first 2 kB are
-	 * supervisor-only! */
+	/*
+	 * always reserve first page of ST-RAM, the first 2 KiB are
+	 * supervisor-only!
+	 */
 	if (!kernel_in_stram)
 		reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT);
 
-}
+	stram_pool.start = (resource_size_t)alloc_bootmem_low_pages(pool_size);
+	stram_pool.end = stram_pool.start + pool_size - 1;
+	request_resource(&iomem_resource, &stram_pool);
 
-void atari_stram_mem_init_hook (void)
-{
-	mem_init_done = 1;
+	pr_debug("atari_stram pool: size = %lu bytes, resource = %pR\n",
+		 pool_size, &stram_pool);
 }
 
 
-/*
- * This is main public interface: somehow allocate a ST-RAM block
- *
- *  - If we're before mem_init(), we have to make a static allocation. The
- *    region is taken in the kernel data area (if the kernel is in ST-RAM) or
- *    from the start of ST-RAM (if the kernel is in TT-RAM) and added to the
- *    rsvd_stram_* region. The ST-RAM is somewhere in the middle of kernel
- *    address space in the latter case.
- *
- *  - If mem_init() already has been called, try with __get_dma_pages().
- *    This has the disadvantage that it's very hard to get more than 1 page,
- *    and it is likely to fail :-(
- *
- */
-void *atari_stram_alloc(long size, const char *owner)
+void *atari_stram_alloc(unsigned long size, const char *owner)
 {
-	void *addr = NULL;
-	BLOCK *block;
-	int flags;
-
-	DPRINTK("atari_stram_alloc(size=%08lx,owner=%s)\n", size, owner);
-
-	if (!mem_init_done)
-		return alloc_bootmem_low(size);
-	else {
-		/* After mem_init(): can only resort to __get_dma_pages() */
-		addr = (void *)__get_dma_pages(GFP_KERNEL, get_order(size));
-		flags = BLOCK_GFP;
-		DPRINTK( "atari_stram_alloc: after mem_init, "
-			 "get_pages=%p\n", addr );
+	struct resource *res;
+	int error;
+
+	pr_debug("atari_stram_alloc: allocate %lu bytes\n", size);
+
+	/* round up */
+	size = PAGE_ALIGN(size);
+
+	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+	if (!res)
+		return NULL;
+
+	res->name = owner;
+	error = allocate_resource(&stram_pool, res, size, 0, UINT_MAX,
+				  PAGE_SIZE, NULL, NULL);
+	if (error < 0) {
+		pr_err("atari_stram_alloc: allocate_resource() failed %d!\n",
+		       error);
+		kfree(res);
+		return NULL;
 	}
 
-	if (addr) {
-		if (!(block = add_region( addr, size ))) {
-			/* out of memory for BLOCK structure :-( */
-			DPRINTK( "atari_stram_alloc: out of mem for BLOCK -- "
-				 "freeing again\n" );
-			free_pages((unsigned long)addr, get_order(size));
-			return( NULL );
-		}
-		block->owner = owner;
-		block->flags |= flags;
-	}
-	return( addr );
+	pr_debug("atari_stram_alloc: returning %pR\n", res);
+	return (void *)res->start;
 }
 EXPORT_SYMBOL(atari_stram_alloc);
 
-void atari_stram_free( void *addr )
 
+void atari_stram_free(void *addr)
 {
-	BLOCK *block;
-
-	DPRINTK( "atari_stram_free(addr=%p)\n", addr );
-
-	if (!(block = find_region( addr ))) {
-		printk( KERN_ERR "Attempt to free non-allocated ST-RAM block at %p "
-			"from %p\n", addr, __builtin_return_address(0) );
+	unsigned long start = (unsigned long)addr;
+	struct resource *res;
+	unsigned long size;
+
+	res = lookup_resource(&stram_pool, start);
+	if (!res) {
+		pr_err("atari_stram_free: trying to free nonexistent region "
+		       "at %p\n", addr);
 		return;
 	}
-	DPRINTK( "atari_stram_free: found block (%p): size=%08lx, owner=%s, "
-		 "flags=%02x\n", block, block->size, block->owner, block->flags );
-
-	if (!(block->flags & BLOCK_GFP))
-		goto fail;
 
-	DPRINTK("atari_stram_free: is kmalloced, order_size=%d\n",
-		get_order(block->size));
-	free_pages((unsigned long)addr, get_order(block->size));
-	remove_region( block );
-	return;
-
- fail:
-	printk( KERN_ERR "atari_stram_free: cannot free block at %p "
-		"(called from %p)\n", addr, __builtin_return_address(0) );
+	size = resource_size(res);
+	pr_debug("atari_stram_free: free %lu bytes at %p\n", size, addr);
+	release_resource(res);
+	kfree(res);
 }
 EXPORT_SYMBOL(atari_stram_free);
-
-
-/* ------------------------------------------------------------------------ */
-/*                             Region Management                             */
-/* ------------------------------------------------------------------------ */
-
-
-/* insert a region into the alloced list (sorted) */
-static BLOCK *add_region( void *addr, unsigned long size )
-{
-	BLOCK **p, *n = NULL;
-	int i;
-
-	for( i = 0; i < N_STATIC_BLOCKS; ++i ) {
-		if (static_blocks[i].flags & BLOCK_FREE) {
-			n = &static_blocks[i];
-			n->flags = 0;
-			break;
-		}
-	}
-	if (!n && mem_init_done) {
-		/* if statics block pool exhausted and we can call kmalloc() already
-		 * (after mem_init()), try that */
-		n = kmalloc( sizeof(BLOCK), GFP_KERNEL );
-		if (n)
-			n->flags = BLOCK_KMALLOCED;
-	}
-	if (!n) {
-		printk( KERN_ERR "Out of memory for ST-RAM descriptor blocks\n" );
-		return( NULL );
-	}
-	n->start = addr;
-	n->size  = size;
-
-	for( p = &alloc_list; *p; p = &((*p)->next) )
-		if ((*p)->start > addr) break;
-	n->next = *p;
-	*p = n;
-
-	return( n );
-}
-
-
-/* find a region (by start addr) in the alloced list */
-static BLOCK *find_region( void *addr )
-{
-	BLOCK *p;
-
-	for( p = alloc_list; p; p = p->next ) {
-		if (p->start == addr)
-			return( p );
-		if (p->start > addr)
-			break;
-	}
-	return( NULL );
-}
-
-
-/* remove a block from the alloced list */
-static int remove_region( BLOCK *block )
-{
-	BLOCK **p;
-
-	for( p = &alloc_list; *p; p = &((*p)->next) )
-		if (*p == block) break;
-	if (!*p)
-		return( 0 );
-
-	*p = block->next;
-	if (block->flags & BLOCK_KMALLOCED)
-		kfree( block );
-	else
-		block->flags |= BLOCK_FREE;
-	return( 1 );
-}
-
-
-
-/* ------------------------------------------------------------------------ */
-/*                         /proc statistics file stuff                       */
-/* ------------------------------------------------------------------------ */
-
-#ifdef DO_PROC
-
-#define PRINT_PROC(fmt,args...) seq_printf( m, fmt, ##args )
-
-static int stram_proc_show(struct seq_file *m, void *v)
-{
-	BLOCK *p;
-
-	PRINT_PROC("Total ST-RAM: %8u kB\n",
-		   (stram_end - stram_start) >> 10);
-	PRINT_PROC( "Allocated regions:\n" );
-	for( p = alloc_list; p; p = p->next ) {
-		PRINT_PROC("0x%08lx-0x%08lx: %s (",
-			   virt_to_phys(p->start),
-			   virt_to_phys(p->start+p->size-1),
-			   p->owner);
-		if (p->flags & BLOCK_GFP)
-			PRINT_PROC( "page-alloced)\n" );
-		else
-			PRINT_PROC( "??)\n" );
-	}
-
-	return 0;
-}
-
-static int stram_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, stram_proc_show, NULL);
-}
-
-static const struct file_operations stram_proc_fops = {
-	.open		= stram_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int __init proc_stram_init(void)
-{
-	proc_create("stram", 0, NULL, &stram_proc_fops);
-	return 0;
-}
-module_init(proc_stram_init);
-#endif
-
-
-/*
- * Local variables:
- *  c-indent-level: 4
- *  tab-width: 4
- * End:
- */
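Usage sketch (not part of the patch above; hypothetical driver code): the new allocator carves out a boot-time pool, 1 MiB by default, sized via the stram_pool= early parameter. The value is parsed with memparse(), so suffixes work, e.g. booting with "stram_pool=2M". Page-aligned regions are then served from that pool through the resource API, and freeing now actually works.

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <asm/page.h>
    #include <asm/atari_stram.h>

    static void *dma_buf;	/* hypothetical DMA buffer in ST-RAM */

    static int example_init(void)
    {
    	/* rounded up to PAGE_SIZE; fails once the boot-time pool is
    	 * exhausted, since the pool cannot grow after boot */
    	dma_buf = atari_stram_alloc(2 * PAGE_SIZE, "example DMA buffer");
    	return dma_buf ? 0 : -ENOMEM;
    }

    static void example_exit(void)
    {
    	/* a real free now, backed by lookup_resource()/release_resource() */
    	atari_stram_free(dma_buf);
    }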
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index 8b6e201b2c2..c5748bb4ea7 100644
--- a/arch/m68k/emu/nfeth.c
+++ b/arch/m68k/emu/nfeth.c
@@ -16,6 +16,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <asm/natfeat.h>
 #include <asm/virtconvert.h>
@@ -204,7 +205,6 @@ static struct net_device * __init nfeth_probe(int unit)
 	dev->irq = nfEtherIRQ;
 	dev->netdev_ops = &nfeth_netdev_ops;
 
-	dev->flags |= NETIF_F_NO_CSUM;
 	memcpy(dev->dev_addr, mac, ETH_ALEN);
 
 	priv = netdev_priv(dev);
diff --git a/arch/m68k/include/asm/atari_stram.h b/arch/m68k/include/asm/atari_stram.h
index 7546d13963b..62e27598af9 100644
--- a/arch/m68k/include/asm/atari_stram.h
+++ b/arch/m68k/include/asm/atari_stram.h
@@ -6,12 +6,11 @@
  */
 
 /* public interface */
-void *atari_stram_alloc(long size, const char *owner);
+void *atari_stram_alloc(unsigned long size, const char *owner);
 void atari_stram_free(void *);
 
 /* functions called internally by other parts of the kernel */
 void atari_stram_init(void);
 void atari_stram_reserve_pages(void *start_mem);
-void atari_stram_mem_init_hook (void);
 
 #endif /*_M68K_ATARI_STRAM_H */
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index f51f709bbf3..0392b28656a 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -399,8 +399,8 @@ struct CODEC
 #define CODEC_OVERFLOW_LEFT	2
 	u_char	unused2, unused3, unused4, unused5;
 	u_char	gpio_directions;
-#define GPIO_IN		0
-#define GPIO_OUT	1
+#define CODEC_GPIO_IN	0
+#define CODEC_GPIO_OUT	1
 	u_char	unused6;
 	u_char	gpio_data;
 };
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 03ae3d14cd4..65c6be6c818 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -169,21 +169,21 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 	char c;
 	__asm__ __volatile__("addl %2,%1; smi %0"
 			     : "=d" (c), "+m" (*v)
-			     : "id" (i));
+			     : ASM_DI (i));
 	return c != 0;
 }
 
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
 {
-	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
+	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
 }
 
 static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 {
-	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
+	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
 }
 
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -195,10 +195,9 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
@@ -206,6 +205,4 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
-#include <asm-generic/atomic-long.h>
-#include <asm-generic/atomic64.h>
 #endif /* __ARCH_M68K_ATOMIC __ */
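Note the semantic change here: __atomic_add_unless() returns the old counter value instead of a truth value. The boolean wrapper and atomic_inc_not_zero() disappear from this header because the common <linux/atomic.h> layer of this era rebuilds them generically on top of __atomic_add_unless(), roughly as in this sketch (not the arch code itself):

    /* sketch of the generic wrappers, assuming only __atomic_add_unless() */
    static inline int atomic_add_unless(atomic_t *v, int a, int u)
    {
    	/* an old value equal to u means nothing was added */
    	return __atomic_add_unless(v, a, u) != u;
    }

    #define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)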
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index ce163abddab..c6baa913592 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -1,5 +1,530 @@
-#ifdef __uClinux__
-#include "bitops_no.h"
+#ifndef _M68K_BITOPS_H
+#define _M68K_BITOPS_H
+/*
+ * Copyright 1992, Linus Torvalds.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+
+/*
+ * Bit access functions vary across the ColdFire and 68k families.
+ * So we will break them out here, and then macro in the ones we want.
+ *
+ * ColdFire - supports standard bset/bclr/bchg with register operand only
+ * 68000    - supports standard bset/bclr/bchg with memory operand
+ * >= 68020 - also supports the bfset/bfclr/bfchg instructions
+ *
+ * Although it is possible to use only the bset/bclr/bchg with register
+ * operands on all platforms you end up with larger generated code.
+ * So we use the best form possible on a given platform.
+ */
+
+static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bset %1,(%0)"
+		:
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+}
+
+static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bset %1,%0"
+		: "+m" (*p)
+		: "di" (nr & 7));
+}
+
+static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
+{
+	__asm__ __volatile__ ("bfset %1{%0:#1}"
+		:
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
+#else
+#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+				bset_mem_set_bit(nr, vaddr) : \
+				bfset_mem_set_bit(nr, vaddr))
+#endif
+
+#define __set_bit(nr, vaddr)	set_bit(nr, vaddr)
+
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit()	barrier()
+#define smp_mb__after_clear_bit()	barrier()
+
+static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bclr %1,(%0)"
+		:
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+}
+
+static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bclr %1,%0"
+		: "+m" (*p)
+		: "di" (nr & 7));
+}
+
+static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+	__asm__ __volatile__ ("bfclr %1{%0:#1}"
+		:
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
+#else
+#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+				bclr_mem_clear_bit(nr, vaddr) : \
+				bfclr_mem_clear_bit(nr, vaddr))
+#endif
+
+#define __clear_bit(nr, vaddr)	clear_bit(nr, vaddr)
+
+
+static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bchg %1,(%0)"
+		:
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+}
+
+static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+	__asm__ __volatile__ ("bchg %1,%0"
+		: "+m" (*p)
+		: "di" (nr & 7));
+}
+
+static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
+{
+	__asm__ __volatile__ ("bfchg %1{%0:#1}"
+		:
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
+#else
+#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+				bchg_mem_change_bit(nr, vaddr) : \
+				bfchg_mem_change_bit(nr, vaddr))
+#endif
+
+#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)
+
+
+static inline int test_bit(int nr, const unsigned long *vaddr)
+{
+	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+}
+
+
+static inline int bset_reg_test_and_set_bit(int nr,
+					    volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bset %2,(%1); sne %0"
+		: "=d" (retval)
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+	return retval;
+}
+
+static inline int bset_mem_test_and_set_bit(int nr,
+					    volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bset %2,%1; sne %0"
+		: "=d" (retval), "+m" (*p)
+		: "di" (nr & 7));
+	return retval;
+}
+
+static inline int bfset_mem_test_and_set_bit(int nr,
+					     volatile unsigned long *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
+		: "=d" (retval)
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+	return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
+#else
+#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+					bset_mem_test_and_set_bit(nr, vaddr) : \
+					bfset_mem_test_and_set_bit(nr, vaddr))
+#endif
+
+#define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)
+
+
+static inline int bclr_reg_test_and_clear_bit(int nr,
+					      volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
+		: "=d" (retval)
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+	return retval;
+}
+
+static inline int bclr_mem_test_and_clear_bit(int nr,
+					      volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bclr %2,%1; sne %0"
+		: "=d" (retval), "+m" (*p)
+		: "di" (nr & 7));
+	return retval;
+}
+
+static inline int bfclr_mem_test_and_clear_bit(int nr,
+					       volatile unsigned long *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
+		: "=d" (retval)
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+	return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
+#else
+#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+					bclr_mem_test_and_clear_bit(nr, vaddr) : \
+					bfclr_mem_test_and_clear_bit(nr, vaddr))
+#endif
+
+#define __test_and_clear_bit(nr, vaddr)	test_and_clear_bit(nr, vaddr)
+
+
+static inline int bchg_reg_test_and_change_bit(int nr,
+					       volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
+		: "=d" (retval)
+		: "a" (p), "di" (nr & 7)
+		: "memory");
+	return retval;
+}
+
+static inline int bchg_mem_test_and_change_bit(int nr,
+					       volatile unsigned long *vaddr)
+{
+	char *p = (char *)vaddr + (nr ^ 31) / 8;
+	char retval;
+
+	__asm__ __volatile__ ("bchg %2,%1; sne %0"
+		: "=d" (retval), "+m" (*p)
+		: "di" (nr & 7));
+	return retval;
+}
+
+static inline int bfchg_mem_test_and_change_bit(int nr,
+					       volatile unsigned long *vaddr)
+{
+	char retval;
+
+	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
+		: "=d" (retval)
+		: "d" (nr ^ 31), "o" (*vaddr)
+		: "memory");
+	return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
+#else
+#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
+					bchg_mem_test_and_change_bit(nr, vaddr) : \
+					bfchg_mem_test_and_change_bit(nr, vaddr))
+#endif
+
+#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
+
+
+/*
+ * The true 68020 and more advanced processors support the "bfffo"
+ * instruction for finding bits. ColdFire and simple 68000 parts
+ * (including CPU32) do not support this. They simply use the generic
+ * functions.
+ */
+#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ffz.h>
+#else
+
+static inline int find_first_zero_bit(const unsigned long *vaddr,
+				      unsigned size)
+{
+	const unsigned long *p = vaddr;
+	int res = 32;
+	unsigned int words;
+	unsigned long num;
+
+	if (!size)
+		return 0;
+
+	words = (size + 31) >> 5;
+	while (!(num = ~*p++)) {
+		if (!--words)
+			goto out;
+	}
+
+	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+			      : "=d" (res) : "d" (num & -num));
+	res ^= 31;
+out:
+	res += ((long)p - (long)vaddr - 4) * 8;
+	return res < size ? res : size;
+}
+#define find_first_zero_bit find_first_zero_bit
+
+static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
+				     int offset)
+{
+	const unsigned long *p = vaddr + (offset >> 5);
+	int bit = offset & 31UL, res;
+
+	if (offset >= size)
+		return size;
+
+	if (bit) {
+		unsigned long num = ~*p++ & (~0UL << bit);
+		offset -= bit;
+
+		/* Look for zero in first longword */
+		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+				      : "=d" (res) : "d" (num & -num));
+		if (res < 32) {
+			offset += res ^ 31;
+			return offset < size ? offset : size;
+		}
+		offset += 32;
+
+		if (offset >= size)
+			return size;
+	}
+	/* No zero yet, search remaining full bytes for a zero */
+	return offset + find_first_zero_bit(p, size - offset);
+}
+#define find_next_zero_bit find_next_zero_bit
+
+static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
+{
+	const unsigned long *p = vaddr;
+	int res = 32;
+	unsigned int words;
+	unsigned long num;
+
+	if (!size)
+		return 0;
+
+	words = (size + 31) >> 5;
+	while (!(num = *p++)) {
+		if (!--words)
+			goto out;
+	}
+
+	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+			      : "=d" (res) : "d" (num & -num));
+	res ^= 31;
+out:
+	res += ((long)p - (long)vaddr - 4) * 8;
+	return res < size ? res : size;
+}
+#define find_first_bit find_first_bit
+
+static inline int find_next_bit(const unsigned long *vaddr, int size,
+				int offset)
+{
+	const unsigned long *p = vaddr + (offset >> 5);
+	int bit = offset & 31UL, res;
+
+	if (offset >= size)
+		return size;
+
+	if (bit) {
+		unsigned long num = *p++ & (~0UL << bit);
+		offset -= bit;
+
+		/* Look for one in first longword */
+		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+				      : "=d" (res) : "d" (num & -num));
+		if (res < 32) {
+			offset += res ^ 31;
+			return offset < size ? offset : size;
+		}
+		offset += 32;
+
+		if (offset >= size)
+			return size;
+	}
+	/* No one yet, search remaining full bytes for a one */
+	return offset + find_first_bit(p, size - offset);
+}
+#define find_next_bit find_next_bit
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+	int res;
+
+	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+			      : "=d" (res) : "d" (~word & -~word));
+	return res ^ 31;
+}
+
+#endif
+
+#ifdef __KERNEL__
+
+#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+
+/*
+ * The newer ColdFire family members support a "bitrev" instruction
+ * and we can use that to implement a fast ffs. Older Coldfire parts,
+ * and normal 68000 parts don't have anything special, so we use the
+ * generic functions for those.
+ */
+#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
+	!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
+static inline int __ffs(int x)
+{
+	__asm__ __volatile__ ("bitrev %0; ff1 %0"
+		: "=d" (x)
+		: "0" (x));
+	return x;
+}
+
+static inline int ffs(int x)
+{
+	if (!x)
+		return 0;
+	return __ffs(x) + 1;
+}
+
+#else
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#endif
+
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+
 #else
-#include "bitops_mm.h"
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static inline int ffs(int x)
+{
+	int cnt;
+
+	__asm__ ("bfffo %1{#0:#0},%0"
+		: "=d" (cnt)
+		: "dm" (x & -x));
+	return 32 - cnt;
+}
+#define __ffs(x) (ffs(x) - 1)
+
+/*
+ * fls: find last bit set.
+ */
+static inline int fls(int x)
+{
+	int cnt;
+
+	__asm__ ("bfffo %1{#0,#0},%0"
+		: "=d" (cnt)
+		: "dm" (x));
+	return 32 - cnt;
+}
+
+static inline int __fls(int x)
+{
+	return fls(x) - 1;
+}
+
 #endif
+
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#endif /* __KERNEL__ */
+
+#endif /* _M68K_BITOPS_H */
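Note on the dispatch idiom used by the merged header above (not part of the patch): __builtin_constant_p() is folded at compile time by GCC, so a constant bit number statically selects the single-instruction bset/bclr/bchg form while a runtime value selects the bitfield form. A standalone, portable-C illustration of the same selection pattern (plain shifts stand in for the m68k instructions so it compiles anywhere):

    #include <stdio.h>

    static void set_bit_const(int nr, unsigned long *w) { *w |= 1UL << nr; }
    static void set_bit_var(int nr, unsigned long *w)   { *w |= 1UL << nr; }

    /* same shape as the kernel macros: constant nr picks one helper,
     * a runtime nr picks the other, all decided at compile time */
    #define set_bit_demo(nr, w) (__builtin_constant_p(nr) ? \
    		set_bit_const(nr, w) : \
    		set_bit_var(nr, w))

    int main(void)
    {
    	unsigned long w = 0;
    	int n = 5;

    	set_bit_demo(3, &w);	/* constant nr: compiler picks set_bit_const */
    	set_bit_demo(n, &w);	/* variable nr: compiler picks set_bit_var */
    	printf("%#lx\n", w);	/* bits 3 and 5 set: prints 0x28 */
    	return 0;
    }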
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h
deleted file mode 100644
index 89cf5b814a4..00000000000
--- a/arch/m68k/include/asm/bitops_mm.h
+++ /dev/null
@@ -1,501 +0,0 @@
1#ifndef _M68K_BITOPS_H
2#define _M68K_BITOPS_H
3/*
4 * Copyright 1992, Linus Torvalds.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _LINUX_BITOPS_H
12#error only <linux/bitops.h> can be included directly
13#endif
14
15#include <linux/compiler.h>
16
17/*
18 * Require 68020 or better.
19 *
20 * They use the standard big-endian m680x0 bit ordering.
21 */
22
23#define test_and_set_bit(nr,vaddr) \
24 (__builtin_constant_p(nr) ? \
25 __constant_test_and_set_bit(nr, vaddr) : \
26 __generic_test_and_set_bit(nr, vaddr))
27
28#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)
29
30static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
31{
32 char *p = (char *)vaddr + (nr ^ 31) / 8;
33 char retval;
34
35 __asm__ __volatile__ ("bset %2,%1; sne %0"
36 : "=d" (retval), "+m" (*p)
37 : "di" (nr & 7));
38
39 return retval;
40}
41
42static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
43{
44 char retval;
45
46 __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
47 : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
48
49 return retval;
50}
51
52#define set_bit(nr,vaddr) \
53 (__builtin_constant_p(nr) ? \
54 __constant_set_bit(nr, vaddr) : \
55 __generic_set_bit(nr, vaddr))
56
57#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
58
59static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
60{
61 char *p = (char *)vaddr + (nr ^ 31) / 8;
62 __asm__ __volatile__ ("bset %1,%0"
63 : "+m" (*p) : "di" (nr & 7));
64}
65
66static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
67{
68 __asm__ __volatile__ ("bfset %1{%0:#1}"
69 : : "d" (nr^31), "o" (*vaddr) : "memory");
70}
71
72#define test_and_clear_bit(nr,vaddr) \
73 (__builtin_constant_p(nr) ? \
74 __constant_test_and_clear_bit(nr, vaddr) : \
75 __generic_test_and_clear_bit(nr, vaddr))
76
77#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)
78
79static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
80{
81 char *p = (char *)vaddr + (nr ^ 31) / 8;
82 char retval;
83
84 __asm__ __volatile__ ("bclr %2,%1; sne %0"
85 : "=d" (retval), "+m" (*p)
86 : "di" (nr & 7));
87
88 return retval;
89}
90
91static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
92{
93 char retval;
94
95 __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
96 : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
97
98 return retval;
99}
100
101/*
102 * clear_bit() doesn't provide any barrier for the compiler.
103 */
104#define smp_mb__before_clear_bit() barrier()
105#define smp_mb__after_clear_bit() barrier()
106
107#define clear_bit(nr,vaddr) \
108 (__builtin_constant_p(nr) ? \
109 __constant_clear_bit(nr, vaddr) : \
110 __generic_clear_bit(nr, vaddr))
111#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
112
113static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
114{
115 char *p = (char *)vaddr + (nr ^ 31) / 8;
116 __asm__ __volatile__ ("bclr %1,%0"
117 : "+m" (*p) : "di" (nr & 7));
118}
119
120static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
121{
122 __asm__ __volatile__ ("bfclr %1{%0:#1}"
123 : : "d" (nr^31), "o" (*vaddr) : "memory");
124}
125
126#define test_and_change_bit(nr,vaddr) \
127 (__builtin_constant_p(nr) ? \
128 __constant_test_and_change_bit(nr, vaddr) : \
129 __generic_test_and_change_bit(nr, vaddr))
130
131#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
132#define __change_bit(nr,vaddr) change_bit(nr,vaddr)
133
134static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
135{
136 char *p = (char *)vaddr + (nr ^ 31) / 8;
137 char retval;
138
139 __asm__ __volatile__ ("bchg %2,%1; sne %0"
140 : "=d" (retval), "+m" (*p)
141 : "di" (nr & 7));
142
143 return retval;
144}
145
146static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
147{
148 char retval;
149
150 __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
151 : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
152
153 return retval;
154}
155
156#define change_bit(nr,vaddr) \
157 (__builtin_constant_p(nr) ? \
158 __constant_change_bit(nr, vaddr) : \
159 __generic_change_bit(nr, vaddr))
160
161static inline void __constant_change_bit(int nr, unsigned long *vaddr)
162{
163 char *p = (char *)vaddr + (nr ^ 31) / 8;
164 __asm__ __volatile__ ("bchg %1,%0"
165 : "+m" (*p) : "di" (nr & 7));
166}
167
168static inline void __generic_change_bit(int nr, unsigned long *vaddr)
169{
170 __asm__ __volatile__ ("bfchg %1{%0:#1}"
171 : : "d" (nr^31), "o" (*vaddr) : "memory");
172}
173
174static inline int test_bit(int nr, const unsigned long *vaddr)
175{
176 return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
177}
178
179static inline int find_first_zero_bit(const unsigned long *vaddr,
180 unsigned size)
181{
182 const unsigned long *p = vaddr;
183 int res = 32;
184 unsigned int words;
185 unsigned long num;
186
187 if (!size)
188 return 0;
189
190 words = (size + 31) >> 5;
191 while (!(num = ~*p++)) {
192 if (!--words)
193 goto out;
194 }
195
196 __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
197 : "=d" (res) : "d" (num & -num));
198 res ^= 31;
199out:
200 res += ((long)p - (long)vaddr - 4) * 8;
201 return res < size ? res : size;
202}
203#define find_first_zero_bit find_first_zero_bit
204
205static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
206 int offset)
207{
208 const unsigned long *p = vaddr + (offset >> 5);
209 int bit = offset & 31UL, res;
210
211 if (offset >= size)
212 return size;
213
214 if (bit) {
215 unsigned long num = ~*p++ & (~0UL << bit);
216 offset -= bit;
217
218 /* Look for zero in first longword */
219 __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
220 : "=d" (res) : "d" (num & -num));
221 if (res < 32) {
222 offset += res ^ 31;
223 return offset < size ? offset : size;
224 }
225 offset += 32;
226
227 if (offset >= size)
228 return size;
229 }
230 /* No zero yet, search remaining full bytes for a zero */
231 return offset + find_first_zero_bit(p, size - offset);
232}
233#define find_next_zero_bit find_next_zero_bit
234
235static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
236{
237 const unsigned long *p = vaddr;
238 int res = 32;
239 unsigned int words;
240 unsigned long num;
241
242 if (!size)
243 return 0;
244
245 words = (size + 31) >> 5;
246 while (!(num = *p++)) {
247 if (!--words)
248 goto out;
249 }
250
251 __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
252 : "=d" (res) : "d" (num & -num));
253 res ^= 31;
254out:
255 res += ((long)p - (long)vaddr - 4) * 8;
256 return res < size ? res : size;
257}
258#define find_first_bit find_first_bit
259
260static inline int find_next_bit(const unsigned long *vaddr, int size,
261 int offset)
262{
263 const unsigned long *p = vaddr + (offset >> 5);
264 int bit = offset & 31UL, res;
265
266 if (offset >= size)
267 return size;
268
269 if (bit) {
270 unsigned long num = *p++ & (~0UL << bit);
271 offset -= bit;
272
273 /* Look for one in first longword */
274 __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
275 : "=d" (res) : "d" (num & -num));
276 if (res < 32) {
277 offset += res ^ 31;
278 return offset < size ? offset : size;
279 }
280 offset += 32;
281
282 if (offset >= size)
283 return size;
284 }
285 /* No one yet, search remaining full bytes for a one */
286 return offset + find_first_bit(p, size - offset);
287}
288#define find_next_bit find_next_bit
289
290/*
291 * ffz = Find First Zero in word. Undefined if no zero exists,
292 * so code should check against ~0UL first..
293 */
294static inline unsigned long ffz(unsigned long word)
295{
296 int res;
297
298 __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
299 : "=d" (res) : "d" (~word & -~word));
300 return res ^ 31;
301}
302
303#ifdef __KERNEL__
304
305/*
306 * ffs: find first bit set. This is defined the same way as
307 * the libc and compiler builtin ffs routines, therefore
308 * differs in spirit from the above ffz (man ffs).
309 */
310
311static inline int ffs(int x)
312{
313 int cnt;
314
315 asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));
316
317 return 32 - cnt;
318}
319#define __ffs(x) (ffs(x) - 1)
320
321/*
322 * fls: find last bit set.
323 */
324
325static inline int fls(int x)
326{
327 int cnt;
328
329 asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));
330
331 return 32 - cnt;
332}
333
334static inline int __fls(int x)
335{
336 return fls(x) - 1;
337}
338
339#include <asm-generic/bitops/fls64.h>
340#include <asm-generic/bitops/sched.h>
341#include <asm-generic/bitops/hweight.h>
342#include <asm-generic/bitops/lock.h>
343
344/* Bitmap functions for the little endian bitmap. */
345
346static inline void __set_bit_le(int nr, void *addr)
347{
348 __set_bit(nr ^ 24, addr);
349}
350
351static inline void __clear_bit_le(int nr, void *addr)
352{
353 __clear_bit(nr ^ 24, addr);
354}
355
356static inline int __test_and_set_bit_le(int nr, void *addr)
357{
358 return __test_and_set_bit(nr ^ 24, addr);
359}
360
361static inline int test_and_set_bit_le(int nr, void *addr)
362{
363 return test_and_set_bit(nr ^ 24, addr);
364}
365
366static inline int __test_and_clear_bit_le(int nr, void *addr)
367{
368 return __test_and_clear_bit(nr ^ 24, addr);
369}
370
371static inline int test_and_clear_bit_le(int nr, void *addr)
372{
373 return test_and_clear_bit(nr ^ 24, addr);
374}
375
376static inline int test_bit_le(int nr, const void *vaddr)
377{
378 const unsigned char *p = vaddr;
379 return (p[nr >> 3] & (1U << (nr & 7))) != 0;
380}
381
382static inline int find_first_zero_bit_le(const void *vaddr, unsigned size)
383{
384 const unsigned long *p = vaddr, *addr = vaddr;
385 int res = 0;
386 unsigned int words;
387
388 if (!size)
389 return 0;
390
391 words = (size >> 5) + ((size & 31) > 0);
392 while (*p++ == ~0UL) {
393 if (--words == 0)
394 goto out;
395 }
396
397 --p;
398 for (res = 0; res < 32; res++)
399 if (!test_bit_le(res, p))
400 break;
401out:
402 res += (p - addr) * 32;
403 return res < size ? res : size;
404}
405#define find_first_zero_bit_le find_first_zero_bit_le
406
407static inline unsigned long find_next_zero_bit_le(const void *addr,
408 unsigned long size, unsigned long offset)
409{
410 const unsigned long *p = addr;
411 int bit = offset & 31UL, res;
412
413 if (offset >= size)
414 return size;
415
416 p += offset >> 5;
417
418 if (bit) {
419 offset -= bit;
420 /* Look for zero in first longword */
421 for (res = bit; res < 32; res++)
422 if (!test_bit_le(res, p)) {
423 offset += res;
424 return offset < size ? offset : size;
425 }
426 p++;
427 offset += 32;
428
429 if (offset >= size)
430 return size;
431 }
432 /* No zero yet, search remaining full bytes for a zero */
433 return offset + find_first_zero_bit_le(p, size - offset);
434}
435#define find_next_zero_bit_le find_next_zero_bit_le
436
437static inline int find_first_bit_le(const void *vaddr, unsigned size)
438{
439 const unsigned long *p = vaddr, *addr = vaddr;
440 int res = 0;
441 unsigned int words;
442
443 if (!size)
444 return 0;
445
446 words = (size >> 5) + ((size & 31) > 0);
447 while (*p++ == 0UL) {
448 if (--words == 0)
449 goto out;
450 }
451
452 --p;
453 for (res = 0; res < 32; res++)
454 if (test_bit_le(res, p))
455 break;
456out:
457 res += (p - addr) * 32;
458 return res < size ? res : size;
459}
460#define find_first_bit_le find_first_bit_le
461
462static inline unsigned long find_next_bit_le(const void *addr,
463 unsigned long size, unsigned long offset)
464{
465 const unsigned long *p = addr;
466 int bit = offset & 31UL, res;
467
468 if (offset >= size)
469 return size;
470
471 p += offset >> 5;
472
473 if (bit) {
474 offset -= bit;
475 /* Look for one in first longword */
476 for (res = bit; res < 32; res++)
477 if (test_bit_le(res, p)) {
478 offset += res;
479 return offset < size ? offset : size;
480 }
481 p++;
482 offset += 32;
483
484 if (offset >= size)
485 return size;
486 }
487 /* No set bit yet, search remaining full bytes for a set bit */
488 return offset + find_first_bit_le(p, size - offset);
489}
490#define find_next_bit_le find_next_bit_le
491
492/* Bitmap functions for the ext2 filesystem. */
493
494#define ext2_set_bit_atomic(lock, nr, addr) \
495 test_and_set_bit_le(nr, addr)
496#define ext2_clear_bit_atomic(lock, nr, addr) \
497 test_and_clear_bit_le(nr, addr)
498
499#endif /* __KERNEL__ */
500
501#endif /* _M68K_BITOPS_H */
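
The merged header's "nr ^ 24" swizzle deserves a closer look: bits 3-4 of a bit number select the byte inside a 32-bit longword, so XOR-ing with 24 (binary 11000) mirrors the byte index (0 <-> 3, 1 <-> 2) while leaving the bit-within-byte and the longword index alone, which is exactly the translation from little-endian bit numbering to big-endian m68k storage. A minimal user-space sketch (not part of the patch) that checks the mapping:

	#include <assert.h>

	/* mirror the byte index inside a 32-bit longword */
	static unsigned le_to_native(unsigned nr)
	{
		return nr ^ 24;
	}

	int main(void)
	{
		/* LE bit 0 = LSB of byte 0; mask 1 << 24 sits in byte 0 on BE */
		assert(le_to_native(0) == 24);
		/* LE bit 31 = MSB of byte 3; mask 1 << 7 sits in byte 3 on BE */
		assert(le_to_native(31) == 7);
		/* the longword index (bits 5 and up) is untouched: 32 -> 56 */
		assert(le_to_native(32) == 56);
		return 0;
	}
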
diff --git a/arch/m68k/include/asm/bitops_no.h b/arch/m68k/include/asm/bitops_no.h
deleted file mode 100644
index 72e85acdd7b..00000000000
--- a/arch/m68k/include/asm/bitops_no.h
+++ /dev/null
@@ -1,333 +0,0 @@
1#ifndef _M68KNOMMU_BITOPS_H
2#define _M68KNOMMU_BITOPS_H
3
4/*
5 * Copyright 1992, Linus Torvalds.
6 */
7
8#include <linux/compiler.h>
9#include <asm/byteorder.h> /* swab32 */
10
11#ifdef __KERNEL__
12
13#ifndef _LINUX_BITOPS_H
14#error only <linux/bitops.h> can be included directly
15#endif
16
17#if defined (__mcfisaaplus__) || defined (__mcfisac__)
18static inline int ffs(unsigned int val)
19{
20 if (!val)
21 return 0;
22
23 asm volatile(
24 "bitrev %0\n\t"
25 "ff1 %0\n\t"
26 : "=d" (val)
27 : "0" (val)
28 );
29 val++;
30 return val;
31}
32
33static inline int __ffs(unsigned int val)
34{
35 asm volatile(
36 "bitrev %0\n\t"
37 "ff1 %0\n\t"
38 : "=d" (val)
39 : "0" (val)
40 );
41 return val;
42}
43
44#else
45#include <asm-generic/bitops/ffs.h>
46#include <asm-generic/bitops/__ffs.h>
47#endif
48
49#include <asm-generic/bitops/sched.h>
50#include <asm-generic/bitops/ffz.h>
51
52static __inline__ void set_bit(int nr, volatile unsigned long * addr)
53{
54#ifdef CONFIG_COLDFIRE
55 __asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
56 : "+m" (((volatile char *)addr)[(nr^31) >> 3])
57 : "d" (nr)
58 : "%a0", "cc");
59#else
60 __asm__ __volatile__ ("bset %1,%0"
61 : "+m" (((volatile char *)addr)[(nr^31) >> 3])
62 : "di" (nr)
63 : "cc");
64#endif
65}
66
67#define __set_bit(nr, addr) set_bit(nr, addr)
68
69/*
70 * clear_bit() doesn't provide any barrier for the compiler.
71 */
72#define smp_mb__before_clear_bit() barrier()
73#define smp_mb__after_clear_bit() barrier()
74
75static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
76{
77#ifdef CONFIG_COLDFIRE
78 __asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
79 : "+m" (((volatile char *)addr)[(nr^31) >> 3])
80 : "d" (nr)
81 : "%a0", "cc");
82#else
83 __asm__ __volatile__ ("bclr %1,%0"
84 : "+m" (((volatile char *)addr)[(nr^31) >> 3])
85 : "di" (nr)
86 : "cc");
87#endif
88}
89
90#define __clear_bit(nr, addr) clear_bit(nr, addr)
91
92static __inline__ void change_bit(int nr, volatile unsigned long * addr)
93{
94#ifdef CONFIG_COLDFIRE
95 __asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
96 : "+m" (((volatile char *)addr)[(nr^31) >> 3])
97 : "d" (nr)
98 : "%a0", "cc");
99#else
100 __asm__ __volatile__ ("bchg %1,%0"
101 : "+m" (((volatile char *)addr)[(nr^31) >> 3])
102 : "di" (nr)
103 : "cc");
104#endif
105}
106
107#define __change_bit(nr, addr) change_bit(nr, addr)
108
109static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
110{
111 char retval;
112
113#ifdef CONFIG_COLDFIRE
114 __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
115 : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
116 : "d" (nr)
117 : "%a0");
118#else
119 __asm__ __volatile__ ("bset %2,%1; sne %0"
120 : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
121 : "di" (nr)
122 /* No clobber */);
123#endif
124
125 return retval;
126}
127
128#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
129
130static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
131{
132 char retval;
133
134#ifdef CONFIG_COLDFIRE
135 __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
136 : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
137 : "d" (nr)
138 : "%a0");
139#else
140 __asm__ __volatile__ ("bclr %2,%1; sne %0"
141 : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
142 : "di" (nr)
143 /* No clobber */);
144#endif
145
146 return retval;
147}
148
149#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
150
151static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
152{
153 char retval;
154
155#ifdef CONFIG_COLDFIRE
156 __asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
157 : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
158 : "d" (nr)
159 : "%a0");
160#else
161 __asm__ __volatile__ ("bchg %2,%1; sne %0"
162 : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
163 : "di" (nr)
164 /* No clobber */);
165#endif
166
167 return retval;
168}
169
170#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
171
172/*
173 * This routine doesn't need to be atomic.
174 */
175static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
176{
177 return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
178}
179
180static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
181{
182 int * a = (int *) addr;
183 int mask;
184
185 a += nr >> 5;
186 mask = 1 << (nr & 0x1f);
187 return ((mask & *a) != 0);
188}
189
190#define test_bit(nr,addr) \
191(__builtin_constant_p(nr) ? \
192 __constant_test_bit((nr),(addr)) : \
193 __test_bit((nr),(addr)))
194
195#include <asm-generic/bitops/find.h>
196#include <asm-generic/bitops/hweight.h>
197#include <asm-generic/bitops/lock.h>
198
199#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
200
201static inline void __set_bit_le(int nr, void *addr)
202{
203 __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
204}
205
206static inline void __clear_bit_le(int nr, void *addr)
207{
208 __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
209}
210
211static inline int __test_and_set_bit_le(int nr, volatile void *addr)
212{
213 char retval;
214
215#ifdef CONFIG_COLDFIRE
216 __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
217 : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
218 : "d" (nr)
219 : "%a0");
220#else
221 __asm__ __volatile__ ("bset %2,%1; sne %0"
222 : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
223 : "di" (nr)
224 /* No clobber */);
225#endif
226
227 return retval;
228}
229
230static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
231{
232 char retval;
233
234#ifdef CONFIG_COLDFIRE
235 __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
236 : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
237 : "d" (nr)
238 : "%a0");
239#else
240 __asm__ __volatile__ ("bclr %2,%1; sne %0"
241 : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
242 : "di" (nr)
243 /* No clobber */);
244#endif
245
246 return retval;
247}
248
249#include <asm-generic/bitops/ext2-atomic.h>
250
251static inline int test_bit_le(int nr, const volatile void *addr)
252{
253 char retval;
254
255#ifdef CONFIG_COLDFIRE
256 __asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
257 : "=d" (retval)
258 : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
259 : "%a0");
260#else
261 __asm__ __volatile__ ("btst %2,%1; sne %0"
262 : "=d" (retval)
263 : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
264 /* No clobber */);
265#endif
266
267 return retval;
268}
269
270#define find_first_zero_bit_le(addr, size) \
271 find_next_zero_bit_le((addr), (size), 0)
272
273static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset)
274{
275 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
276 unsigned long result = offset & ~31UL;
277 unsigned long tmp;
278
279 if (offset >= size)
280 return size;
281 size -= result;
282 offset &= 31UL;
283 if(offset) {
284 /* We hold the little endian value in tmp, but then the
285 * shift is illegal. So we could keep a big endian value
286 * in tmp, like this:
287 *
288 * tmp = __swab32(*(p++));
289 * tmp |= ~0UL >> (32-offset);
290 *
291 * but this would decrease performance, so we change the
292 * shift:
293 */
294 tmp = *(p++);
295 tmp |= __swab32(~0UL >> (32-offset));
296 if(size < 32)
297 goto found_first;
298 if(~tmp)
299 goto found_middle;
300 size -= 32;
301 result += 32;
302 }
303 while(size & ~31UL) {
304 if(~(tmp = *(p++)))
305 goto found_middle;
306 result += 32;
307 size -= 32;
308 }
309 if(!size)
310 return result;
311 tmp = *p;
312
313found_first:
314 /* tmp is little endian, so we would have to swab the shift,
315 * see above. But then we have to swab tmp below for ffz, so
316 * we might as well do this here.
317 */
318 return result + ffz(__swab32(tmp) | (~0UL << size));
319found_middle:
320 return result + ffz(__swab32(tmp));
321}
322#define find_next_zero_bit_le find_next_zero_bit_le
323
324extern unsigned long find_next_bit_le(const void *addr,
325 unsigned long size, unsigned long offset);
326
327#endif /* __KERNEL__ */
328
329#include <asm-generic/bitops/fls.h>
330#include <asm-generic/bitops/__fls.h>
331#include <asm-generic/bitops/fls64.h>
332
333#endif /* _M68KNOMMU_BITOPS_H */
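
For comparison, the removed nommu header computed the same constant generically: BITOP_LE_SWIZZLE is ((BITS_PER_LONG-1) & ~0x7), which on 32-bit m68k evaluates to (31 & ~7) = 24, the value the merged bitops.h now hard-codes. A one-line check, assuming 32-bit longs:

	#define BITS_PER_LONG 32
	#define BITOP_LE_SWIZZLE ((BITS_PER_LONG - 1) & ~0x7)
	_Static_assert(BITOP_LE_SWIZZLE == 24, "LE swizzle is the hard-coded 24");
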
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index d2598e3dd7b..9c09becfd4c 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -1,5 +1,96 @@
1#ifdef __uClinux__ 1#ifndef _M68K_DELAY_H
2#include "delay_no.h" 2#define _M68K_DELAY_H
3
4#include <asm/param.h>
5
6/*
7 * Copyright (C) 1994 Hamish Macdonald
8 * Copyright (C) 2004 Greg Ungerer <gerg@uclinux.com>
9 *
10 * Delay routines, using a pre-computed "loops_per_jiffy" value.
11 */
12
13#if defined(CONFIG_COLDFIRE)
14/*
15 * The ColdFire runs the delay loop at significantly different speeds
16 * depending upon long word alignment or not. We'll pad it to
17 * long word alignment which is the faster version.
18 * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
19 * than using a NOP (0x4e71) instruction because it executes in one
20 * cycle, not three, and doesn't allow for an arbitrary delay waiting
21 * for bus cycles to finish. Also fp/a6 isn't likely to cause a
22 * stall waiting for the register to become valid if such is added
23 * to the coldfire at some stage.
24 */
25#define DELAY_ALIGN ".balignw 4, 0x4a8e\n\t"
3#else 26#else
4#include "delay_mm.h" 27/*
28 * No instruction alignment required for other m68k types.
29 */
30#define DELAY_ALIGN
5#endif 31#endif
32
33static inline void __delay(unsigned long loops)
34{
35 __asm__ __volatile__ (
36 DELAY_ALIGN
37 "1: subql #1,%0\n\t"
38 "jcc 1b"
39 : "=d" (loops)
40 : "0" (loops));
41}
42
43extern void __bad_udelay(void);
44
45
46#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
47/*
48 * The simpler m68k and ColdFire processors do not have a 32*32->64
49 * multiply instruction. So we need to handle them a little differently.
50 * We use a bit of shifting and a single 32*32->32 multiply to get close.
51 * This is a macro so that the const version can factor out the first
52 * multiply and shift.
53 */
54#define HZSCALE (268435456 / (1000000 / HZ))
55
56#define __const_udelay(u) \
57 __delay(((((u) * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6)
58
59#else
60
61static inline void __xdelay(unsigned long xloops)
62{
63 unsigned long tmp;
64
65 __asm__ ("mulul %2,%0:%1"
66 : "=d" (xloops), "=d" (tmp)
67 : "d" (xloops), "1" (loops_per_jiffy));
68 __delay(xloops * HZ);
69}
70
71/*
72 * The definition of __const_udelay is specifically made a macro so that
73 * the const factor (4295 = 2**32 / 1000000) can be optimized out when
74 * the delay is a const.
75 */
76#define __const_udelay(n) (__xdelay((n) * 4295))
77
78#endif
79
80static inline void __udelay(unsigned long usecs)
81{
82 __const_udelay(usecs);
83}
84
85/*
86 * Use only for very small delays ( < 1 msec). Should probably use a
87 * lookup table, really, as the multiplications take much too long with
88 * short delays. This is a "reasonable" implementation, though (and the
89 * first constant multiplication gets optimized away if the delay is
90 * a constant)
91 */
92#define udelay(n) (__builtin_constant_p(n) ? \
93 ((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n))
94
95
96#endif /* defined(_M68K_DELAY_H) */
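
The shift-heavy __const_udelay() approximates loops = usecs * HZ * loops_per_jiffy / 10^6 without a 32*32->64 multiply: HZSCALE is 2^28 * HZ / 10^6, and the two >>11 shifts plus the final >>6 together divide by 2^28. A standalone sketch with made-up values (HZ = 100, loops_per_jiffy = 500000, i.e. 50 million delay loops per second) shows the rounding cost:

	#include <stdio.h>

	#define HZ 100
	#define HZSCALE (268435456 / (1000000 / HZ))	/* 2^28 * HZ / 10^6 */

	int main(void)
	{
		unsigned long loops_per_jiffy = 500000;	/* hypothetical */
		unsigned long u = 100;			/* microseconds */
		/* same expression as __const_udelay(u) above */
		unsigned long loops = ((((u) * HZSCALE) >> 11) *
				       (loops_per_jiffy >> 11)) >> 6;

		/* exact answer is 5000; this prints 4994, about 0.1% low */
		printf("%lu\n", loops);
		return 0;
	}
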
diff --git a/arch/m68k/include/asm/delay_mm.h b/arch/m68k/include/asm/delay_mm.h
deleted file mode 100644
index 5ed92851bc6..00000000000
--- a/arch/m68k/include/asm/delay_mm.h
+++ /dev/null
@@ -1,57 +0,0 @@
1#ifndef _M68K_DELAY_H
2#define _M68K_DELAY_H
3
4#include <asm/param.h>
5
6/*
7 * Copyright (C) 1994 Hamish Macdonald
8 *
9 * Delay routines, using a pre-computed "loops_per_jiffy" value.
10 */
11
12static inline void __delay(unsigned long loops)
13{
14 __asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
15 : "=d" (loops) : "0" (loops));
16}
17
18extern void __bad_udelay(void);
19
20/*
21 * Use only for very small delays ( < 1 msec). Should probably use a
22 * lookup table, really, as the multiplications take much too long with
23 * short delays. This is a "reasonable" implementation, though (and the
24 * first constant multiplication gets optimized away if the delay is
25 * a constant)
26 */
27static inline void __const_udelay(unsigned long xloops)
28{
29 unsigned long tmp;
30
31 __asm__ ("mulul %2,%0:%1"
32 : "=d" (xloops), "=d" (tmp)
33 : "d" (xloops), "1" (loops_per_jiffy));
34 __delay(xloops * HZ);
35}
36
37static inline void __udelay(unsigned long usecs)
38{
39 __const_udelay(usecs * 4295); /* 2**32 / 1000000 */
40}
41
42#define udelay(n) (__builtin_constant_p(n) ? \
43 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 4295)) : \
44 __udelay(n))
45
46static inline unsigned long muldiv(unsigned long a, unsigned long b,
47 unsigned long c)
48{
49 unsigned long tmp;
50
51 __asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
52 : "=d" (tmp), "=d" (a)
53 : "d" (b), "d" (c), "1" (a));
54 return a;
55}
56
57#endif /* defined(_M68K_DELAY_H) */
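
The muldiv() helper removed here computed a * b / c through a 64-bit intermediate held in a register pair (mulul followed by divul). A portable C equivalent, offered as a sketch rather than kernel code:

	#include <stdint.h>

	/* a * b / c with a 64-bit intermediate, like mulul/divul did */
	static unsigned long muldiv(unsigned long a, unsigned long b,
				    unsigned long c)
	{
		return (unsigned long)(((uint64_t)a * b) / c);
	}
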
diff --git a/arch/m68k/include/asm/delay_no.h b/arch/m68k/include/asm/delay_no.h
deleted file mode 100644
index c3a0edc90f2..00000000000
--- a/arch/m68k/include/asm/delay_no.h
+++ /dev/null
@@ -1,76 +0,0 @@
1#ifndef _M68KNOMMU_DELAY_H
2#define _M68KNOMMU_DELAY_H
3
4/*
5 * Copyright (C) 1994 Hamish Macdonald
6 * Copyright (C) 2004 Greg Ungerer <gerg@snapgear.com>
7 */
8
9#include <asm/param.h>
10
11static inline void __delay(unsigned long loops)
12{
13#if defined(CONFIG_COLDFIRE)
14 /* The coldfire runs this loop at significantly different speeds
15 * depending upon long word alignment or not. We'll pad it to
16 * long word alignment which is the faster version.
17 * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
18 * than using a NOP (0x4e71) instruction because it executes in one
19 * cycle not three and doesn't allow for an arbitrary delay waiting
20 * for bus cycles to finish. Also fp/a6 isn't likely to cause a
21 * stall waiting for the register to become valid if such is added
22 * to the coldfire at some stage.
23 */
24 __asm__ __volatile__ ( ".balignw 4, 0x4a8e\n\t"
25 "1: subql #1, %0\n\t"
26 "jcc 1b"
27 : "=d" (loops) : "0" (loops));
28#else
29 __asm__ __volatile__ ( "1: subql #1, %0\n\t"
30 "jcc 1b"
31 : "=d" (loops) : "0" (loops));
32#endif
33}
34
35/*
36 * Ideally we use a 32*32->64 multiply to calculate the number of
37 * loop iterations, but the older standard 68k and ColdFire do not
38 * have this instruction. So for them we have a close approximation
39 * loop using 32*32->32 multiplies only. This calculation is based on
40 * the ARM version of delay.
41 *
42 * We want to implement:
43 *
44 * loops = (usecs * 0x10c6 * HZ * loops_per_jiffy) / 2^32
45 */
46
47#define HZSCALE (268435456 / (1000000/HZ))
48
49extern unsigned long loops_per_jiffy;
50
51static inline void _udelay(unsigned long usecs)
52{
53#if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
54 defined(CONFIG_M68VZ328) || defined(CONFIG_M68360) || \
55 defined(CONFIG_COLDFIRE)
56 __delay((((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6);
57#else
58 unsigned long tmp;
59
60 usecs *= 4295; /* 2**32 / 1000000 */
61 __asm__ ("mulul %2,%0:%1"
62 : "=d" (usecs), "=d" (tmp)
63 : "d" (usecs), "1" (loops_per_jiffy*HZ));
64 __delay(usecs);
65#endif
66}
67
68/*
69 * Moved the udelay() function into library code, no longer inlined.
70 * I had to change the algorithm because we are overflowing now on
71 * the faster ColdFire parts. The code is a little bigger, so it makes
72 * sense to library it.
73 */
74extern void udelay(unsigned long usecs);
75
76#endif /* defined(_M68KNOMMU_DELAY_H) */
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index 627d69bacc5..68611e3dbb1 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -96,11 +96,11 @@
96.endm 96.endm
97 97
98.macro RDUSP 98.macro RDUSP
99 movel sw_usp,%a2 99 movel sw_usp,%a3
100.endm 100.endm
101 101
102.macro WRUSP 102.macro WRUSP
103 movel %a0,sw_usp 103 movel %a3,sw_usp
104.endm 104.endm
105 105
106#else /* !CONFIG_COLDFIRE_SW_A7 */ 106#else /* !CONFIG_COLDFIRE_SW_A7 */
@@ -127,13 +127,13 @@
127.endm 127.endm
128 128
129.macro RDUSP 129.macro RDUSP
130 /*move %usp,%a2*/ 130 /*move %usp,%a3*/
131 .word 0x4e6a 131 .word 0x4e6b
132.endm 132.endm
133 133
134.macro WRUSP 134.macro WRUSP
135 /*move %a0,%usp*/ 135 /*move %a3,%usp*/
136 .word 0x4e60 136 .word 0x4e63
137.endm 137.endm
138 138
139#endif /* !CONFIG_COLDFIRE_SW_A7 */ 139#endif /* !CONFIG_COLDFIRE_SW_A7 */
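
The opcode changes follow from the standard 68k encoding of the privileged USP moves: "move %an,%usp" assembles to 0x4e60 + n and "move %usp,%an" to 0x4e68 + n, so switching the scratch register from %a2/%a0 to %a3 turns 0x4e6a into 0x4e6b and 0x4e60 into 0x4e63. A small sketch checking the arithmetic:

	#include <assert.h>

	#define MOVE_TO_USP(n)   (0x4e60 + (n))	/* move %a<n>,%usp */
	#define MOVE_FROM_USP(n) (0x4e68 + (n))	/* move %usp,%a<n> */

	int main(void)
	{
		assert(MOVE_FROM_USP(3) == 0x4e6b);	/* new RDUSP word */
		assert(MOVE_TO_USP(3) == 0x4e63);	/* new WRUSP word */
		return 0;
	}
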
diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h
index 56d0d5db231..870e5347155 100644
--- a/arch/m68k/include/asm/hardirq.h
+++ b/arch/m68k/include/asm/hardirq.h
@@ -1,5 +1,34 @@
1#ifdef __uClinux__ 1#ifndef __M68K_HARDIRQ_H
2#include "hardirq_no.h" 2#define __M68K_HARDIRQ_H
3
4#include <linux/threads.h>
5#include <linux/cache.h>
6#include <asm/irq.h>
7
8#define HARDIRQ_BITS 8
9
10/*
11 * The hardirq mask has to be large enough to have
12 * space for potentially all IRQ sources in the system
13 * nesting on a single CPU:
14 */
15#if (1 << HARDIRQ_BITS) < NR_IRQS
16# error HARDIRQ_BITS is too low!
17#endif
18
19#ifdef CONFIG_MMU
20
21/* entry.S is sensitive to the offsets of these fields */
22typedef struct {
23 unsigned int __softirq_pending;
24} ____cacheline_aligned irq_cpustat_t;
25
26#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
27
3#else 28#else
4#include "hardirq_mm.h" 29
30#include <asm-generic/hardirq.h>
31
32#endif /* !CONFIG_MMU */
33
5#endif 34#endif
diff --git a/arch/m68k/include/asm/hardirq_mm.h b/arch/m68k/include/asm/hardirq_mm.h
deleted file mode 100644
index 394ee946015..00000000000
--- a/arch/m68k/include/asm/hardirq_mm.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef __M68K_HARDIRQ_H
2#define __M68K_HARDIRQ_H
3
4#include <linux/threads.h>
5#include <linux/cache.h>
6
7/* entry.S is sensitive to the offsets of these fields */
8typedef struct {
9 unsigned int __softirq_pending;
10} ____cacheline_aligned irq_cpustat_t;
11
12#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
13
14#define HARDIRQ_BITS 8
15
16#endif
diff --git a/arch/m68k/include/asm/hardirq_no.h b/arch/m68k/include/asm/hardirq_no.h
deleted file mode 100644
index b44b14be87d..00000000000
--- a/arch/m68k/include/asm/hardirq_no.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef __M68K_HARDIRQ_H
2#define __M68K_HARDIRQ_H
3
4#include <asm/irq.h>
5
6#define HARDIRQ_BITS 8
7
8/*
9 * The hardirq mask has to be large enough to have
10 * space for potentially all IRQ sources in the system
11 * nesting on a single CPU:
12 */
13#if (1 << HARDIRQ_BITS) < NR_IRQS
14# error HARDIRQ_BITS is too low!
15#endif
16
17#include <asm-generic/hardirq.h>
18
19#endif /* __M68K_HARDIRQ_H */
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 907eff1edd2..69ed0d74d53 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -33,15 +33,6 @@
33#include <linux/spinlock_types.h> 33#include <linux/spinlock_types.h>
34 34
35/* 35/*
36 * The hardirq mask has to be large enough to have
37 * space for potentially all IRQ sources in the system
38 * nesting on a single CPU:
39 */
40#if (1 << HARDIRQ_BITS) < NR_IRQS
41# error HARDIRQ_BITS is too low!
42#endif
43
44/*
45 * Interrupt source definitions 36 * Interrupt source definitions
46 * General interrupt sources are the level 1-7. 37 * General interrupt sources are the level 1-7.
47 * Adding an interrupt service routine for one of these sources 38 * Adding an interrupt service routine for one of these sources
@@ -131,4 +122,6 @@ asmlinkage void __m68k_handle_int(unsigned int, struct pt_regs *);
131#define irq_canonicalize(irq) (irq) 122#define irq_canonicalize(irq) (irq)
132#endif /* CONFIG_MMU */ 123#endif /* CONFIG_MMU */
133 124
125asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
126
134#endif /* _M68K_IRQ_H_ */ 127#endif /* _M68K_IRQ_H_ */
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 415d5484916..789f3b2de0e 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -40,6 +40,5 @@ extern unsigned long hw_timer_offset(void);
40extern irqreturn_t arch_timer_interrupt(int irq, void *dummy); 40extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
41 41
42extern void config_BSP(char *command, int len); 42extern void config_BSP(char *command, int len);
43extern void do_IRQ(int irq, struct pt_regs *fp);
44 43
45#endif /* _M68K_MACHDEP_H */ 44#endif /* _M68K_MACHDEP_H */
diff --git a/arch/m68k/include/asm/module.h b/arch/m68k/include/asm/module.h
index 5f21e11071b..edffe66b7f4 100644
--- a/arch/m68k/include/asm/module.h
+++ b/arch/m68k/include/asm/module.h
@@ -1,46 +1,41 @@
1#ifndef _ASM_M68K_MODULE_H 1#ifndef _ASM_M68K_MODULE_H
2#define _ASM_M68K_MODULE_H 2#define _ASM_M68K_MODULE_H
3 3
4#ifdef CONFIG_MMU 4enum m68k_fixup_type {
5 m68k_fixup_memoffset,
6 m68k_fixup_vnode_shift,
7};
8
9struct m68k_fixup_info {
10 enum m68k_fixup_type type;
11 void *addr;
12};
5 13
6struct mod_arch_specific { 14struct mod_arch_specific {
7 struct m68k_fixup_info *fixup_start, *fixup_end; 15 struct m68k_fixup_info *fixup_start, *fixup_end;
8}; 16};
9 17
18#ifdef CONFIG_MMU
19
10#define MODULE_ARCH_INIT { \ 20#define MODULE_ARCH_INIT { \
11 .fixup_start = __start_fixup, \ 21 .fixup_start = __start_fixup, \
12 .fixup_end = __stop_fixup, \ 22 .fixup_end = __stop_fixup, \
13} 23}
14 24
15 25
16enum m68k_fixup_type {
17 m68k_fixup_memoffset,
18 m68k_fixup_vnode_shift,
19};
20
21struct m68k_fixup_info {
22 enum m68k_fixup_type type;
23 void *addr;
24};
25
26#define m68k_fixup(type, addr) \ 26#define m68k_fixup(type, addr) \
27 " .section \".m68k_fixup\",\"aw\"\n" \ 27 " .section \".m68k_fixup\",\"aw\"\n" \
28 " .long " #type "," #addr "\n" \ 28 " .long " #type "," #addr "\n" \
29 " .previous\n" 29 " .previous\n"
30 30
31#endif /* CONFIG_MMU */
32
31extern struct m68k_fixup_info __start_fixup[], __stop_fixup[]; 33extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];
32 34
33struct module; 35struct module;
34extern void module_fixup(struct module *mod, struct m68k_fixup_info *start, 36extern void module_fixup(struct module *mod, struct m68k_fixup_info *start,
35 struct m68k_fixup_info *end); 37 struct m68k_fixup_info *end);
36 38
37#else
38
39struct mod_arch_specific {
40};
41
42#endif /* CONFIG_MMU */
43
44#define Elf_Shdr Elf32_Shdr 39#define Elf_Shdr Elf32_Shdr
45#define Elf_Sym Elf32_Sym 40#define Elf_Sym Elf32_Sym
46#define Elf_Ehdr Elf32_Ehdr 41#define Elf_Ehdr Elf32_Ehdr
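
The m68k_fixup() macro is the emit side of this machinery: an inline-asm site records a (type, address) pair in the .m68k_fixup section, and module_fixup() later walks __start_fixup..__stop_fixup patching each recorded address. Modelled on the ___pa() pattern in page_mm.h, a sketch of such a site (illustrative, not taken from this patch):

	static inline unsigned long pa_demo(void *vaddr)
	{
		unsigned long paddr;

		asm("1:	addl #0,%0\n"		/* immediate patched at load */
		    m68k_fixup(%c2, 1b+2)	/* record address of the #0 */
		    : "=r" (paddr)
		    : "0" (vaddr), "i" (m68k_fixup_memoffset));
		return paddr;
	}
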
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index 31d5570d656..89f201434b5 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -162,7 +162,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
162 pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \ 162 pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \
163}) 163})
164#define page_to_pfn(_page) ({ \ 164#define page_to_pfn(_page) ({ \
165 struct page *__p = (_page); \ 165 const struct page *__p = (_page); \
166 struct pglist_data *pgdat; \ 166 struct pglist_data *pgdat; \
167 pgdat = &pg_data_map[page_to_nid(__p)]; \ 167 pgdat = &pg_data_map[page_to_nid(__p)]; \
168 ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \ 168 ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
diff --git a/arch/m68k/include/asm/posix_types.h b/arch/m68k/include/asm/posix_types.h
index 63cdcc142d9..98d0970d9ba 100644
--- a/arch/m68k/include/asm/posix_types.h
+++ b/arch/m68k/include/asm/posix_types.h
@@ -51,7 +51,7 @@ typedef struct {
51#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d)) 51#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
52 52
53#undef __FD_ISSET 53#undef __FD_ISSET
54#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) 54#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
55 55
56#undef __FD_ZERO 56#undef __FD_ZERO
57#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp))) 57#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
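
The added !! is a correctness fix, not a style change: the old macro returned the raw mask bit (for example 0x20 for descriptor 5), which broke any caller comparing the result against 1. A trivial sketch of the difference:

	#include <assert.h>

	int main(void)
	{
		unsigned long fds_bits = 1UL << 5;	/* fd 5 set */

		assert((fds_bits & (1UL << 5)) == 0x20);	/* raw mask */
		assert(!!(fds_bits & (1UL << 5)) == 1);		/* normalized */
		return 0;
	}
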
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index f111b02b704..d8ef53ac03f 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -105,9 +105,6 @@ struct thread_struct {
105static inline void start_thread(struct pt_regs * regs, unsigned long pc, 105static inline void start_thread(struct pt_regs * regs, unsigned long pc,
106 unsigned long usp) 106 unsigned long usp)
107{ 107{
108 /* reads from user space */
109 set_fs(USER_DS);
110
111 regs->pc = pc; 108 regs->pc = pc;
112 regs->sr &= ~0x2000; 109 regs->sr &= ~0x2000;
113 wrusp(usp); 110 wrusp(usp);
@@ -129,7 +126,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
129 126
130#define start_thread(_regs, _pc, _usp) \ 127#define start_thread(_regs, _pc, _usp) \
131do { \ 128do { \
132 set_fs(USER_DS); /* reads from user space */ \
133 (_regs)->pc = (_pc); \ 129 (_regs)->pc = (_pc); \
134 ((struct switch_stack *)(_regs))[-1].a6 = 0; \ 130 ((struct switch_stack *)(_regs))[-1].a6 = 0; \
135 reformat(_regs); \ 131 reformat(_regs); \
diff --git a/arch/m68k/include/asm/ptrace.h b/arch/m68k/include/asm/ptrace.h
index 6e6e3ac1d91..65322b17b6c 100644
--- a/arch/m68k/include/asm/ptrace.h
+++ b/arch/m68k/include/asm/ptrace.h
@@ -85,7 +85,6 @@ struct switch_stack {
85#define user_mode(regs) (!((regs)->sr & PS_S)) 85#define user_mode(regs) (!((regs)->sr & PS_S))
86#define instruction_pointer(regs) ((regs)->pc) 86#define instruction_pointer(regs) ((regs)->pc)
87#define profile_pc(regs) instruction_pointer(regs) 87#define profile_pc(regs) instruction_pointer(regs)
88extern void show_regs(struct pt_regs *);
89 88
90#define arch_has_single_step() (1) 89#define arch_has_single_step() (1)
91 90
diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
index 5bc09c787a1..60e88660169 100644
--- a/arch/m68k/include/asm/signal.h
+++ b/arch/m68k/include/asm/signal.h
@@ -150,7 +150,7 @@ typedef struct sigaltstack {
150#ifdef __KERNEL__ 150#ifdef __KERNEL__
151#include <asm/sigcontext.h> 151#include <asm/sigcontext.h>
152 152
153#ifndef __uClinux__ 153#ifndef CONFIG_CPU_HAS_NO_BITFIELDS
154#define __HAVE_ARCH_SIG_BITOPS 154#define __HAVE_ARCH_SIG_BITOPS
155 155
156static inline void sigaddset(sigset_t *set, int _sig) 156static inline void sigaddset(sigset_t *set, int _sig)
@@ -199,15 +199,14 @@ static inline int sigfindinword(unsigned long word)
199 return word ^ 31; 199 return word ^ 31;
200} 200}
201 201
202struct pt_regs; 202#endif /* !CONFIG_CPU_HAS_NO_BITFIELDS */
203extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
204 203
205#else 204#ifdef __uClinux__
206
207#undef __HAVE_ARCH_SIG_BITOPS
208#define ptrace_signal_deliver(regs, cookie) do { } while (0) 205#define ptrace_signal_deliver(regs, cookie) do { } while (0)
209 206#else
207struct pt_regs;
208extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
210#endif /* __uClinux__ */ 209#endif /* __uClinux__ */
211#endif /* __KERNEL__ */
212 210
211#endif /* __KERNEL__ */
213#endif /* _M68K_SIGNAL_H */ 212#endif /* _M68K_SIGNAL_H */
diff --git a/arch/m68k/include/asm/system.h b/arch/m68k/include/asm/system.h
index ccea925ff4f..47b01f4726b 100644
--- a/arch/m68k/include/asm/system.h
+++ b/arch/m68k/include/asm/system.h
@@ -1,5 +1,193 @@
1#ifdef __uClinux__ 1#ifndef _M68K_SYSTEM_H
2#include "system_no.h" 2#define _M68K_SYSTEM_H
3
4#include <linux/linkage.h>
5#include <linux/kernel.h>
6#include <linux/irqflags.h>
7#include <asm/segment.h>
8#include <asm/entry.h>
9
10#ifdef __KERNEL__
11
12/*
13 * switch_to(n) should switch tasks to task ptr, first checking that
14 * ptr isn't the current task, in which case it does nothing. This
15 * also clears the TS-flag if the task we switched to has used the
16 * math co-processor latest.
17 */
18/*
19 * switch_to() saves the extra registers that are not saved
20 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
21 * a0-a1. Some of these are used by schedule() and its predecessors
22 * and so we might see unexpected behaviors when a task returns
23 * with unexpected register values.
24 *
25 * syscall stores these registers itself and none of them are used
26 * by syscall after the function in the syscall has been called.
27 *
28 * Beware that resume now expects *next to be in d1 and the offset of
29 * tss to be in a1. This saves a few instructions as we no longer have
30 * to push them onto the stack and read them back right after.
31 *
32 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
33 *
34 * Changed 96/09/19 by Andreas Schwab
35 * pass prev in a0, next in a1
36 */
37asmlinkage void resume(void);
38#define switch_to(prev,next,last) do { \
39 register void *_prev __asm__ ("a0") = (prev); \
40 register void *_next __asm__ ("a1") = (next); \
41 register void *_last __asm__ ("d1"); \
42 __asm__ __volatile__("jbsr resume" \
43 : "=a" (_prev), "=a" (_next), "=d" (_last) \
44 : "0" (_prev), "1" (_next) \
45 : "d0", "d2", "d3", "d4", "d5"); \
46 (last) = _last; \
47} while (0)
48
49
50/*
51 * Force strict CPU ordering.
52 * Not really required on m68k...
53 */
54#define nop() do { asm volatile ("nop"); barrier(); } while (0)
55#define mb() barrier()
56#define rmb() barrier()
57#define wmb() barrier()
58#define read_barrier_depends() ((void)0)
59#define set_mb(var, value) ({ (var) = (value); wmb(); })
60
61#define smp_mb() barrier()
62#define smp_rmb() barrier()
63#define smp_wmb() barrier()
64#define smp_read_barrier_depends() ((void)0)
65
66#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
67
68struct __xchg_dummy { unsigned long a[100]; };
69#define __xg(x) ((volatile struct __xchg_dummy *)(x))
70
71#ifndef CONFIG_RMW_INSNS
72static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
73{
74 unsigned long flags, tmp;
75
76 local_irq_save(flags);
77
78 switch (size) {
79 case 1:
80 tmp = *(u8 *)ptr;
81 *(u8 *)ptr = x;
82 x = tmp;
83 break;
84 case 2:
85 tmp = *(u16 *)ptr;
86 *(u16 *)ptr = x;
87 x = tmp;
88 break;
89 case 4:
90 tmp = *(u32 *)ptr;
91 *(u32 *)ptr = x;
92 x = tmp;
93 break;
94 default:
95 BUG();
96 }
97
98 local_irq_restore(flags);
99 return x;
100}
3#else 101#else
4#include "system_mm.h" 102static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
103{
104 switch (size) {
105 case 1:
106 __asm__ __volatile__
107 ("moveb %2,%0\n\t"
108 "1:\n\t"
109 "casb %0,%1,%2\n\t"
110 "jne 1b"
111 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
112 break;
113 case 2:
114 __asm__ __volatile__
115 ("movew %2,%0\n\t"
116 "1:\n\t"
117 "casw %0,%1,%2\n\t"
118 "jne 1b"
119 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
120 break;
121 case 4:
122 __asm__ __volatile__
123 ("movel %2,%0\n\t"
124 "1:\n\t"
125 "casl %0,%1,%2\n\t"
126 "jne 1b"
127 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
128 break;
129 }
130 return x;
131}
5#endif 132#endif
133
134#include <asm-generic/cmpxchg-local.h>
135
136#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
137
138/*
139 * Atomic compare and exchange. Compare OLD with MEM, if identical,
140 * store NEW in MEM. Return the initial value in MEM. Success is
141 * indicated by comparing RETURN with OLD.
142 */
143#ifdef CONFIG_RMW_INSNS
144#define __HAVE_ARCH_CMPXCHG 1
145
146static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
147 unsigned long new, int size)
148{
149 switch (size) {
150 case 1:
151 __asm__ __volatile__ ("casb %0,%2,%1"
152 : "=d" (old), "=m" (*(char *)p)
153 : "d" (new), "0" (old), "m" (*(char *)p));
154 break;
155 case 2:
156 __asm__ __volatile__ ("casw %0,%2,%1"
157 : "=d" (old), "=m" (*(short *)p)
158 : "d" (new), "0" (old), "m" (*(short *)p));
159 break;
160 case 4:
161 __asm__ __volatile__ ("casl %0,%2,%1"
162 : "=d" (old), "=m" (*(int *)p)
163 : "d" (new), "0" (old), "m" (*(int *)p));
164 break;
165 }
166 return old;
167}
168
169#define cmpxchg(ptr, o, n) \
170 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
171 (unsigned long)(n), sizeof(*(ptr))))
172#define cmpxchg_local(ptr, o, n) \
173 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
174 (unsigned long)(n), sizeof(*(ptr))))
175#else
176
177/*
178 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
179 * them available.
180 */
181#define cmpxchg_local(ptr, o, n) \
182 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
183 (unsigned long)(n), sizeof(*(ptr))))
184
185#include <asm-generic/cmpxchg.h>
186
187#endif
188
189#define arch_align_stack(x) (x)
190
191#endif /* __KERNEL__ */
192
193#endif /* _M68K_SYSTEM_H */
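
With CONFIG_RMW_INSNS the cas-based cmpxchg() supports the usual optimistic retry idiom: callers snapshot, compute, and swap, looping until nothing raced in between. A sketch of the caller-side pattern (not code from this patch, and assuming the cmpxchg() defined above):

	static inline void atomic_add_demo(volatile int *v, int n)
	{
		int old;

		do {
			old = *v;	/* snapshot the current value */
		} while (cmpxchg(v, old, old + n) != old);	/* retry on race */
	}
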
diff --git a/arch/m68k/include/asm/system_mm.h b/arch/m68k/include/asm/system_mm.h
deleted file mode 100644
index 47b01f4726b..00000000000
--- a/arch/m68k/include/asm/system_mm.h
+++ /dev/null
@@ -1,193 +0,0 @@
1#ifndef _M68K_SYSTEM_H
2#define _M68K_SYSTEM_H
3
4#include <linux/linkage.h>
5#include <linux/kernel.h>
6#include <linux/irqflags.h>
7#include <asm/segment.h>
8#include <asm/entry.h>
9
10#ifdef __KERNEL__
11
12/*
13 * switch_to(n) should switch tasks to task ptr, first checking that
14 * ptr isn't the current task, in which case it does nothing. This
15 * also clears the TS-flag if the task we switched to has used the
16 * math co-processor latest.
17 */
18/*
19 * switch_to() saves the extra registers, that are not saved
20 * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
21 * a0-a1. Some of these are used by schedule() and its predecessors
22 * and so we might see unexpected behaviors when a task returns
23 * with unexpected register values.
24 *
25 * syscall stores these registers itself and none of them are used
26 * by syscall after the function in the syscall has been called.
27 *
28 * Beware that resume now expects *next to be in d1 and the offset of
29 * tss to be in a1. This saves a few instructions as we no longer have
30 * to push them onto the stack and read them back right after.
31 *
32 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
33 *
34 * Changed 96/09/19 by Andreas Schwab
35 * pass prev in a0, next in a1
36 */
37asmlinkage void resume(void);
38#define switch_to(prev,next,last) do { \
39 register void *_prev __asm__ ("a0") = (prev); \
40 register void *_next __asm__ ("a1") = (next); \
41 register void *_last __asm__ ("d1"); \
42 __asm__ __volatile__("jbsr resume" \
43 : "=a" (_prev), "=a" (_next), "=d" (_last) \
44 : "0" (_prev), "1" (_next) \
45 : "d0", "d2", "d3", "d4", "d5"); \
46 (last) = _last; \
47} while (0)
48
49
50/*
51 * Force strict CPU ordering.
52 * Not really required on m68k...
53 */
54#define nop() do { asm volatile ("nop"); barrier(); } while (0)
55#define mb() barrier()
56#define rmb() barrier()
57#define wmb() barrier()
58#define read_barrier_depends() ((void)0)
59#define set_mb(var, value) ({ (var) = (value); wmb(); })
60
61#define smp_mb() barrier()
62#define smp_rmb() barrier()
63#define smp_wmb() barrier()
64#define smp_read_barrier_depends() ((void)0)
65
66#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
67
68struct __xchg_dummy { unsigned long a[100]; };
69#define __xg(x) ((volatile struct __xchg_dummy *)(x))
70
71#ifndef CONFIG_RMW_INSNS
72static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
73{
74 unsigned long flags, tmp;
75
76 local_irq_save(flags);
77
78 switch (size) {
79 case 1:
80 tmp = *(u8 *)ptr;
81 *(u8 *)ptr = x;
82 x = tmp;
83 break;
84 case 2:
85 tmp = *(u16 *)ptr;
86 *(u16 *)ptr = x;
87 x = tmp;
88 break;
89 case 4:
90 tmp = *(u32 *)ptr;
91 *(u32 *)ptr = x;
92 x = tmp;
93 break;
94 default:
95 BUG();
96 }
97
98 local_irq_restore(flags);
99 return x;
100}
101#else
102static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
103{
104 switch (size) {
105 case 1:
106 __asm__ __volatile__
107 ("moveb %2,%0\n\t"
108 "1:\n\t"
109 "casb %0,%1,%2\n\t"
110 "jne 1b"
111 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
112 break;
113 case 2:
114 __asm__ __volatile__
115 ("movew %2,%0\n\t"
116 "1:\n\t"
117 "casw %0,%1,%2\n\t"
118 "jne 1b"
119 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
120 break;
121 case 4:
122 __asm__ __volatile__
123 ("movel %2,%0\n\t"
124 "1:\n\t"
125 "casl %0,%1,%2\n\t"
126 "jne 1b"
127 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
128 break;
129 }
130 return x;
131}
132#endif
133
134#include <asm-generic/cmpxchg-local.h>
135
136#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
137
138/*
139 * Atomic compare and exchange. Compare OLD with MEM, if identical,
140 * store NEW in MEM. Return the initial value in MEM. Success is
141 * indicated by comparing RETURN with OLD.
142 */
143#ifdef CONFIG_RMW_INSNS
144#define __HAVE_ARCH_CMPXCHG 1
145
146static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
147 unsigned long new, int size)
148{
149 switch (size) {
150 case 1:
151 __asm__ __volatile__ ("casb %0,%2,%1"
152 : "=d" (old), "=m" (*(char *)p)
153 : "d" (new), "0" (old), "m" (*(char *)p));
154 break;
155 case 2:
156 __asm__ __volatile__ ("casw %0,%2,%1"
157 : "=d" (old), "=m" (*(short *)p)
158 : "d" (new), "0" (old), "m" (*(short *)p));
159 break;
160 case 4:
161 __asm__ __volatile__ ("casl %0,%2,%1"
162 : "=d" (old), "=m" (*(int *)p)
163 : "d" (new), "0" (old), "m" (*(int *)p));
164 break;
165 }
166 return old;
167}
168
169#define cmpxchg(ptr, o, n) \
170 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
171 (unsigned long)(n), sizeof(*(ptr))))
172#define cmpxchg_local(ptr, o, n) \
173 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
174 (unsigned long)(n), sizeof(*(ptr))))
175#else
176
177/*
178 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
179 * them available.
180 */
181#define cmpxchg_local(ptr, o, n) \
182 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
183 (unsigned long)(n), sizeof(*(ptr))))
184
185#include <asm-generic/cmpxchg.h>
186
187#endif
188
189#define arch_align_stack(x) (x)
190
191#endif /* __KERNEL__ */
192
193#endif /* _M68K_SYSTEM_H */
diff --git a/arch/m68k/include/asm/system_no.h b/arch/m68k/include/asm/system_no.h
deleted file mode 100644
index 6fe9f93bc3f..00000000000
--- a/arch/m68k/include/asm/system_no.h
+++ /dev/null
@@ -1,153 +0,0 @@
1#ifndef _M68KNOMMU_SYSTEM_H
2#define _M68KNOMMU_SYSTEM_H
3
4#include <linux/linkage.h>
5#include <linux/irqflags.h>
6#include <asm/segment.h>
7#include <asm/entry.h>
8
9/*
10 * switch_to(n) should switch tasks to task ptr, first checking that
11 * ptr isn't the current task, in which case it does nothing. This
12 * also clears the TS-flag if the task we switched to has used the
13 * math co-processor latest.
14 */
15/*
16 * switch_to() saves the extra registers, that are not saved
17 * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
18 * a0-a1. Some of these are used by schedule() and its predecessors
19 * and so we might see unexpected behaviors when a task returns
20 * with unexpected register values.
21 *
22 * syscall stores these registers itself and none of them are used
23 * by syscall after the function in the syscall has been called.
24 *
25 * Beware that resume now expects *next to be in d1 and the offset of
26 * tss to be in a1. This saves a few instructions as we no longer have
27 * to push them onto the stack and read them back right after.
28 *
29 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
30 *
31 * Changed 96/09/19 by Andreas Schwab
32 * pass prev in a0, next in a1, offset of tss in d1, and whether
33 * the mm structures are shared in d2 (to avoid atc flushing).
34 */
35asmlinkage void resume(void);
36#define switch_to(prev,next,last) \
37{ \
38 void *_last; \
39 __asm__ __volatile__( \
40 "movel %1, %%a0\n\t" \
41 "movel %2, %%a1\n\t" \
42 "jbsr resume\n\t" \
43 "movel %%d1, %0\n\t" \
44 : "=d" (_last) \
45 : "d" (prev), "d" (next) \
46 : "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
47 (last) = _last; \
48}
49
50#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
51
52/*
53 * Force strict CPU ordering.
54 * Not really required on m68k...
55 */
56#define nop() asm volatile ("nop"::)
57#define mb() asm volatile ("" : : :"memory")
58#define rmb() asm volatile ("" : : :"memory")
59#define wmb() asm volatile ("" : : :"memory")
60#define set_mb(var, value) ({ (var) = (value); wmb(); })
61
62#define smp_mb() barrier()
63#define smp_rmb() barrier()
64#define smp_wmb() barrier()
65#define smp_read_barrier_depends() do { } while(0)
66
67#define read_barrier_depends() ((void)0)
68
69#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
70
71struct __xchg_dummy { unsigned long a[100]; };
72#define __xg(x) ((volatile struct __xchg_dummy *)(x))
73
74#ifndef CONFIG_RMW_INSNS
75static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
76{
77 unsigned long tmp, flags;
78
79 local_irq_save(flags);
80
81 switch (size) {
82 case 1:
83 __asm__ __volatile__
84 ("moveb %2,%0\n\t"
85 "moveb %1,%2"
86 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
87 break;
88 case 2:
89 __asm__ __volatile__
90 ("movew %2,%0\n\t"
91 "movew %1,%2"
92 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
93 break;
94 case 4:
95 __asm__ __volatile__
96 ("movel %2,%0\n\t"
97 "movel %1,%2"
98 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
99 break;
100 }
101 local_irq_restore(flags);
102 return tmp;
103}
104#else
105static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
106{
107 switch (size) {
108 case 1:
109 __asm__ __volatile__
110 ("moveb %2,%0\n\t"
111 "1:\n\t"
112 "casb %0,%1,%2\n\t"
113 "jne 1b"
114 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
115 break;
116 case 2:
117 __asm__ __volatile__
118 ("movew %2,%0\n\t"
119 "1:\n\t"
120 "casw %0,%1,%2\n\t"
121 "jne 1b"
122 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
123 break;
124 case 4:
125 __asm__ __volatile__
126 ("movel %2,%0\n\t"
127 "1:\n\t"
128 "casl %0,%1,%2\n\t"
129 "jne 1b"
130 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
131 break;
132 }
133 return x;
134}
135#endif
136
137#include <asm-generic/cmpxchg-local.h>
138
139/*
140 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
141 * them available.
142 */
143#define cmpxchg_local(ptr, o, n) \
144 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
145 (unsigned long)(n), sizeof(*(ptr))))
146#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
147
148#include <asm-generic/cmpxchg.h>
149
150#define arch_align_stack(x) (x)
151
152
153#endif /* _M68KNOMMU_SYSTEM_H */
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index 0bffb17d5db..151068f64f4 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -22,7 +22,6 @@ extern e_vector vectors[];
22asmlinkage void auto_inthandler(void); 22asmlinkage void auto_inthandler(void);
23asmlinkage void user_inthandler(void); 23asmlinkage void user_inthandler(void);
24asmlinkage void bad_inthandler(void); 24asmlinkage void bad_inthandler(void);
25extern void init_vectors(void);
26 25
27#endif 26#endif
28 27
diff --git a/arch/m68k/kernel/irq.c b/arch/m68k/kernel/irq.c
index 544b8717d49..c73988cfa90 100644
--- a/arch/m68k/kernel/irq.c
+++ b/arch/m68k/kernel/irq.c
@@ -28,3 +28,13 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
28 28
29 set_irq_regs(oldregs); 29 set_irq_regs(oldregs);
30} 30}
31
32
33/* The number of spurious interrupts */
34atomic_t irq_err_count;
35
36int arch_show_interrupts(struct seq_file *p, int prec)
37{
38 seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
39 return 0;
40}
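
The new arch_show_interrupts() adds one spurious-interrupt counter line to /proc/interrupts. Its shape is fully determined by the format string; for a field width of 3 it renders as shown by this user-space sketch:

	#include <stdio.h>

	int main(void)
	{
		/* same format as the seq_printf() above, with prec = 3 */
		printf("%*s: %10u\n", 3, "ERR", 0u);	/* "ERR:          0" */
		return 0;
	}
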
diff --git a/arch/m68k/kernel/module.c b/arch/m68k/kernel/module.c
index 7ea203ce6b1..34849c4c6e3 100644
--- a/arch/m68k/kernel/module.c
+++ b/arch/m68k/kernel/module.c
@@ -1,5 +1,129 @@
1#ifdef CONFIG_MMU 1/*
2#include "module_mm.c" 2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file COPYING in the main directory of this archive
4 * for more details.
5 */
6
7#include <linux/moduleloader.h>
8#include <linux/elf.h>
9#include <linux/vmalloc.h>
10#include <linux/fs.h>
11#include <linux/string.h>
12#include <linux/kernel.h>
13
14#if 0
15#define DEBUGP printk
3#else 16#else
4#include "module_no.c" 17#define DEBUGP(fmt...)
18#endif
19
20#ifdef CONFIG_MODULES
21
22int apply_relocate(Elf32_Shdr *sechdrs,
23 const char *strtab,
24 unsigned int symindex,
25 unsigned int relsec,
26 struct module *me)
27{
28 unsigned int i;
29 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
30 Elf32_Sym *sym;
31 uint32_t *location;
32
33 DEBUGP("Applying relocate section %u to %u\n", relsec,
34 sechdrs[relsec].sh_info);
35 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
36 /* This is where to make the change */
37 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
38 + rel[i].r_offset;
39 /* This is the symbol it is referring to. Note that all
40 undefined symbols have been resolved. */
41 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
42 + ELF32_R_SYM(rel[i].r_info);
43
44 switch (ELF32_R_TYPE(rel[i].r_info)) {
45 case R_68K_32:
46 /* We add the value into the location given */
47 *location += sym->st_value;
48 break;
49 case R_68K_PC32:
50 /* Add the value, subtract its position */
51 *location += sym->st_value - (uint32_t)location;
52 break;
53 default:
54 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
55 me->name, ELF32_R_TYPE(rel[i].r_info));
56 return -ENOEXEC;
57 }
58 }
59 return 0;
60}
61
62int apply_relocate_add(Elf32_Shdr *sechdrs,
63 const char *strtab,
64 unsigned int symindex,
65 unsigned int relsec,
66 struct module *me)
67{
68 unsigned int i;
69 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
70 Elf32_Sym *sym;
71 uint32_t *location;
72
73 DEBUGP("Applying relocate_add section %u to %u\n", relsec,
74 sechdrs[relsec].sh_info);
75 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
76 /* This is where to make the change */
77 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
78 + rel[i].r_offset;
79 /* This is the symbol it is referring to. Note that all
80 undefined symbols have been resolved. */
81 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
82 + ELF32_R_SYM(rel[i].r_info);
83
84 switch (ELF32_R_TYPE(rel[i].r_info)) {
85 case R_68K_32:
86 /* We add the value into the location given */
87 *location = rel[i].r_addend + sym->st_value;
88 break;
89 case R_68K_PC32:
90 /* Add the value, subtract its position */
91 *location = rel[i].r_addend + sym->st_value - (uint32_t)location;
92 break;
93 default:
94 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
95 me->name, ELF32_R_TYPE(rel[i].r_info));
96 return -ENOEXEC;
97 }
98 }
99 return 0;
100}
101
102int module_finalize(const Elf_Ehdr *hdr,
103 const Elf_Shdr *sechdrs,
104 struct module *mod)
105{
106 module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
107 return 0;
108}
109
110#endif /* CONFIG_MODULES */
111
112void module_fixup(struct module *mod, struct m68k_fixup_info *start,
113 struct m68k_fixup_info *end)
114{
115#ifdef CONFIG_MMU
116 struct m68k_fixup_info *fixup;
117
118 for (fixup = start; fixup < end; fixup++) {
119 switch (fixup->type) {
120 case m68k_fixup_memoffset:
121 *(u32 *)fixup->addr = m68k_memoffset;
122 break;
123 case m68k_fixup_vnode_shift:
124 *(u16 *)fixup->addr += m68k_virt_to_node_shift;
125 break;
126 }
127 }
5#endif 128#endif
129}
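
For R_68K_PC32 the stored value ends up as a signed displacement from the fixup location itself. A worked example with made-up addresses:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t st_value = 0xC0100000u;	/* hypothetical sym->st_value */
		uint32_t location = 0xC0200004u;	/* hypothetical fixup address */
		uint32_t r_addend = 0;

		/* mirrors: *location = rel[i].r_addend + sym->st_value
		 *                      - (uint32_t)location; */
		printf("0x%08X\n", r_addend + st_value - location);
		/* prints 0xFFEFFFFC, i.e. the signed delta -0x100004 */
		return 0;
	}
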
diff --git a/arch/m68k/kernel/module_mm.c b/arch/m68k/kernel/module_mm.c
deleted file mode 100644
index cd6bcb1c957..00000000000
--- a/arch/m68k/kernel/module_mm.c
+++ /dev/null
@@ -1,155 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file COPYING in the main directory of this archive
4 * for more details.
5 */
6
7#include <linux/moduleloader.h>
8#include <linux/elf.h>
9#include <linux/vmalloc.h>
10#include <linux/fs.h>
11#include <linux/string.h>
12#include <linux/kernel.h>
13
14#if 0
15#define DEBUGP printk
16#else
17#define DEBUGP(fmt...)
18#endif
19
20#ifdef CONFIG_MODULES
21
22void *module_alloc(unsigned long size)
23{
24 if (size == 0)
25 return NULL;
26 return vmalloc(size);
27}
28
29
30/* Free memory returned from module_alloc */
31void module_free(struct module *mod, void *module_region)
32{
33 vfree(module_region);
34}
35
36/* We don't need anything special. */
37int module_frob_arch_sections(Elf_Ehdr *hdr,
38 Elf_Shdr *sechdrs,
39 char *secstrings,
40 struct module *mod)
41{
42 return 0;
43}
44
45int apply_relocate(Elf32_Shdr *sechdrs,
46 const char *strtab,
47 unsigned int symindex,
48 unsigned int relsec,
49 struct module *me)
50{
51 unsigned int i;
52 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
53 Elf32_Sym *sym;
54 uint32_t *location;
55
56 DEBUGP("Applying relocate section %u to %u\n", relsec,
57 sechdrs[relsec].sh_info);
58 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
59 /* This is where to make the change */
60 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
61 + rel[i].r_offset;
62 /* This is the symbol it is referring to. Note that all
63 undefined symbols have been resolved. */
64 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
65 + ELF32_R_SYM(rel[i].r_info);
66
67 switch (ELF32_R_TYPE(rel[i].r_info)) {
68 case R_68K_32:
69 /* We add the value into the location given */
70 *location += sym->st_value;
71 break;
72 case R_68K_PC32:
73 /* Add the value, subtract its position */
74 *location += sym->st_value - (uint32_t)location;
75 break;
76 default:
77 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
78 me->name, ELF32_R_TYPE(rel[i].r_info));
79 return -ENOEXEC;
80 }
81 }
82 return 0;
83}
84
85int apply_relocate_add(Elf32_Shdr *sechdrs,
86 const char *strtab,
87 unsigned int symindex,
88 unsigned int relsec,
89 struct module *me)
90{
91 unsigned int i;
92 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
93 Elf32_Sym *sym;
94 uint32_t *location;
95
96 DEBUGP("Applying relocate_add section %u to %u\n", relsec,
97 sechdrs[relsec].sh_info);
98 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
99 /* This is where to make the change */
100 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
101 + rel[i].r_offset;
102 /* This is the symbol it is referring to. Note that all
103 undefined symbols have been resolved. */
104 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
105 + ELF32_R_SYM(rel[i].r_info);
106
107 switch (ELF32_R_TYPE(rel[i].r_info)) {
108 case R_68K_32:
109 /* We add the value into the location given */
110 *location = rel[i].r_addend + sym->st_value;
111 break;
112 case R_68K_PC32:
113 /* Add the value, subtract its position */
114 *location = rel[i].r_addend + sym->st_value - (uint32_t)location;
115 break;
116 default:
117 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
118 me->name, ELF32_R_TYPE(rel[i].r_info));
119 return -ENOEXEC;
120 }
121 }
122 return 0;
123}
124
125int module_finalize(const Elf_Ehdr *hdr,
126 const Elf_Shdr *sechdrs,
127 struct module *mod)
128{
129 module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
130
131 return 0;
132}
133
134void module_arch_cleanup(struct module *mod)
135{
136}
137
138#endif /* CONFIG_MODULES */
139
140void module_fixup(struct module *mod, struct m68k_fixup_info *start,
141 struct m68k_fixup_info *end)
142{
143 struct m68k_fixup_info *fixup;
144
145 for (fixup = start; fixup < end; fixup++) {
146 switch (fixup->type) {
147 case m68k_fixup_memoffset:
148 *(u32 *)fixup->addr = m68k_memoffset;
149 break;
150 case m68k_fixup_vnode_shift:
151 *(u16 *)fixup->addr += m68k_virt_to_node_shift;
152 break;
153 }
154 }
155}
diff --git a/arch/m68k/kernel/module_no.c b/arch/m68k/kernel/module_no.c
deleted file mode 100644
index d11ffae7956..00000000000
--- a/arch/m68k/kernel/module_no.c
+++ /dev/null
@@ -1,126 +0,0 @@
1#include <linux/moduleloader.h>
2#include <linux/elf.h>
3#include <linux/vmalloc.h>
4#include <linux/fs.h>
5#include <linux/string.h>
6#include <linux/kernel.h>
7
8#if 0
9#define DEBUGP printk
10#else
11#define DEBUGP(fmt...)
12#endif
13
14void *module_alloc(unsigned long size)
15{
16 if (size == 0)
17 return NULL;
18 return vmalloc(size);
19}
20
21
22/* Free memory returned from module_alloc */
23void module_free(struct module *mod, void *module_region)
24{
25 vfree(module_region);
26}
27
28/* We don't need anything special. */
29int module_frob_arch_sections(Elf_Ehdr *hdr,
30 Elf_Shdr *sechdrs,
31 char *secstrings,
32 struct module *mod)
33{
34 return 0;
35}
36
37int apply_relocate(Elf32_Shdr *sechdrs,
38 const char *strtab,
39 unsigned int symindex,
40 unsigned int relsec,
41 struct module *me)
42{
43 unsigned int i;
44 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
45 Elf32_Sym *sym;
46 uint32_t *location;
47
48 DEBUGP("Applying relocate section %u to %u\n", relsec,
49 sechdrs[relsec].sh_info);
50 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
51 /* This is where to make the change */
52 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
53 + rel[i].r_offset;
54 /* This is the symbol it is referring to. Note that all
55 undefined symbols have been resolved. */
56 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
57 + ELF32_R_SYM(rel[i].r_info);
58
59 switch (ELF32_R_TYPE(rel[i].r_info)) {
60 case R_68K_32:
61 /* We add the value into the location given */
62 *location += sym->st_value;
63 break;
64 case R_68K_PC32:
65 /* Add the value, subtract its position */
66 *location += sym->st_value - (uint32_t)location;
67 break;
68 default:
69 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
70 me->name, ELF32_R_TYPE(rel[i].r_info));
71 return -ENOEXEC;
72 }
73 }
74 return 0;
75}
76
77int apply_relocate_add(Elf32_Shdr *sechdrs,
78 const char *strtab,
79 unsigned int symindex,
80 unsigned int relsec,
81 struct module *me)
82{
83 unsigned int i;
84 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
85 Elf32_Sym *sym;
86 uint32_t *location;
87
88 DEBUGP("Applying relocate_add section %u to %u\n", relsec,
89 sechdrs[relsec].sh_info);
90 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
91 /* This is where to make the change */
92 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
93 + rel[i].r_offset;
94 /* This is the symbol it is referring to. Note that all
95 undefined symbols have been resolved. */
96 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
97 + ELF32_R_SYM(rel[i].r_info);
98
99 switch (ELF32_R_TYPE(rel[i].r_info)) {
100 case R_68K_32:
101 /* We add the value into the location given */
102 *location = rel[i].r_addend + sym->st_value;
103 break;
104 case R_68K_PC32:
105 /* Add the value, subtract its position */
106 *location = rel[i].r_addend + sym->st_value - (uint32_t)location;
107 break;
108 default:
109 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
110 me->name, ELF32_R_TYPE(rel[i].r_info));
111 return -ENOEXEC;
112 }
113 }
114 return 0;
115}
116
117int module_finalize(const Elf_Ehdr *hdr,
118 const Elf_Shdr *sechdrs,
119 struct module *me)
120{
121 return 0;
122}
123
124void module_arch_cleanup(struct module *mod)
125{
126}
diff --git a/arch/m68k/kernel/process_mm.c b/arch/m68k/kernel/process_mm.c
index c2a1fc23dd7..1bc223aa07e 100644
--- a/arch/m68k/kernel/process_mm.c
+++ b/arch/m68k/kernel/process_mm.c
@@ -185,7 +185,7 @@ EXPORT_SYMBOL(kernel_thread);
185void flush_thread(void) 185void flush_thread(void)
186{ 186{
187 unsigned long zero = 0; 187 unsigned long zero = 0;
188 set_fs(USER_DS); 188
189 current->thread.fs = __USER_DS; 189 current->thread.fs = __USER_DS;
190 if (!FPU_IS_EMU) 190 if (!FPU_IS_EMU)
191 asm volatile (".chip 68k/68881\n\t" 191 asm volatile (".chip 68k/68881\n\t"
diff --git a/arch/m68k/kernel/process_no.c b/arch/m68k/kernel/process_no.c
index 9b86ad11c68..69c1803fcf1 100644
--- a/arch/m68k/kernel/process_no.c
+++ b/arch/m68k/kernel/process_no.c
@@ -158,7 +158,7 @@ void flush_thread(void)
158#ifdef CONFIG_FPU 158#ifdef CONFIG_FPU
159 unsigned long zero = 0; 159 unsigned long zero = 0;
160#endif 160#endif
161 set_fs(USER_DS); 161
162 current->thread.fs = __USER_DS; 162 current->thread.fs = __USER_DS;
163#ifdef CONFIG_FPU 163#ifdef CONFIG_FPU
164 if (!FPU_IS_EMU) 164 if (!FPU_IS_EMU)
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 334d8364037..c3b45061dd0 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -216,7 +216,9 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
216 216
217void __init setup_arch(char **cmdline_p) 217void __init setup_arch(char **cmdline_p)
218{ 218{
219#ifndef CONFIG_SUN3
219 int i; 220 int i;
221#endif
220 222
221 /* The bootinfo is located right after the kernel bss */ 223 /* The bootinfo is located right after the kernel bss */
222 m68k_parse_bootinfo((const struct bi_record *)_end); 224 m68k_parse_bootinfo((const struct bi_record *)_end);
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 00d1452f957..c468f2edaa8 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -189,7 +189,7 @@ ENTRY(sys_call_table)
189 .long sys_getpagesize 189 .long sys_getpagesize
190 .long sys_ni_syscall /* old "query_module" */ 190 .long sys_ni_syscall /* old "query_module" */
191 .long sys_poll 191 .long sys_poll
192 .long sys_nfsservctl 192 .long sys_ni_syscall /* old nfsservctl */
193 .long sys_setresgid16 /* 170 */ 193 .long sys_setresgid16 /* 170 */
194 .long sys_getresgid16 194 .long sys_getresgid16
195 .long sys_prctl 195 .long sys_prctl
diff --git a/arch/m68k/kernel/traps_no.c b/arch/m68k/kernel/traps_no.c
index a768008dfd0..e67b8c80695 100644
--- a/arch/m68k/kernel/traps_no.c
+++ b/arch/m68k/kernel/traps_no.c
@@ -60,10 +60,6 @@ static char const * const vec_names[] = {
60 "MMU CONFIGURATION ERROR" 60 "MMU CONFIGURATION ERROR"
61}; 61};
62 62
63void __init trap_init(void)
64{
65}
66
67void die_if_kernel(char *str, struct pt_regs *fp, int nr) 63void die_if_kernel(char *str, struct pt_regs *fp, int nr)
68{ 64{
69 if (!(fp->sr & PS_S)) 65 if (!(fp->sr & PS_S))
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
new file mode 100644
index 00000000000..06a763f49fd
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -0,0 +1,91 @@
1/*
2 * vmlinux.lds.S -- master linker script for m68knommu arch
3 *
4 * (C) Copyright 2002-2012, Greg Ungerer <gerg@snapgear.com>
5 *
 6 * This linker script is equipped to build either ROM-loaded or
 7 * RAM-run kernels.
8 */
9
10#if defined(CONFIG_RAMKERNEL)
11#define KTEXT_ADDR CONFIG_KERNELBASE
12#endif
13#if defined(CONFIG_ROMKERNEL)
14#define KTEXT_ADDR CONFIG_ROMSTART
15#define KDATA_ADDR CONFIG_KERNELBASE
16#define LOAD_OFFSET KDATA_ADDR + (ADDR(.text) + SIZEOF(.text))
17#endif
18
19#include <asm/page.h>
20#include <asm/thread_info.h>
21#include <asm-generic/vmlinux.lds.h>
22
23OUTPUT_ARCH(m68k)
24ENTRY(_start)
25
26jiffies = jiffies_64 + 4;
27
28SECTIONS {
29
30#ifdef CONFIG_ROMVEC
31 . = CONFIG_ROMVEC;
32 .romvec : {
33 __rom_start = .;
34 _romvec = .;
35 *(.romvec)
36 *(.data..initvect)
37 }
38#endif
39
40 . = KTEXT_ADDR;
41
42 _text = .;
43 _stext = .;
44 .text : {
45 HEAD_TEXT
46 TEXT_TEXT
47 SCHED_TEXT
48 LOCK_TEXT
49 *(.fixup)
50 . = ALIGN(16);
51 }
52 _etext = .;
53
54#ifdef KDATA_ADDR
55 . = KDATA_ADDR;
56#endif
57
58 _sdata = .;
59 RO_DATA_SECTION(PAGE_SIZE)
60 RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
61 _edata = .;
62
63 EXCEPTION_TABLE(16)
64 NOTES
65
66 . = ALIGN(PAGE_SIZE);
67 __init_begin = .;
68 INIT_TEXT_SECTION(PAGE_SIZE)
69 INIT_DATA_SECTION(16)
70 PERCPU_SECTION(16)
71 .m68k_fixup : {
72 __start_fixup = .;
73 *(.m68k_fixup)
74 __stop_fixup = .;
75 }
76 .init.data : {
77 . = ALIGN(PAGE_SIZE);
78 __init_end = .;
79 }
80
81 BSS_SECTION(0, 0, 0)
82
83 _end = .;
84
85 STABS_DEBUG
86 .comment 0 : { *(.comment) }
87
88 /* Sections to be discarded */
89 DISCARDS
90}
91
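
The .m68k_fixup output section above brackets a table between __start_fixup
and __stop_fixup. A minimal sketch of the usual consumer pattern; the record
layout 'struct m68k_fixup_info' is a placeholder assumption here (the real
definition lives in the m68k headers):

struct m68k_fixup_info {              /* hypothetical record layout */
        int type;
        void *addr;
};

extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];

static void walk_fixups(void)
{
        struct m68k_fixup_info *fix;

        for (fix = __start_fixup; fix < __stop_fixup; fix++) {
                /* patch the instruction at fix->addr according to fix->type */
        }
}
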
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index df421e50143..1a1bd9067e9 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -9,6 +9,6 @@ lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
9ifdef CONFIG_MMU 9ifdef CONFIG_MMU
10lib-y += string.o uaccess.o checksum_mm.o 10lib-y += string.o uaccess.o checksum_mm.o
11else 11else
12lib-y += mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o delay.o checksum_no.o 12lib-y += mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o checksum_no.o
13endif 13endif
14 14
diff --git a/arch/m68k/lib/delay.c b/arch/m68k/lib/delay.c
deleted file mode 100644
index 5bd5472d38a..00000000000
--- a/arch/m68k/lib/delay.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * arch/m68knommu/lib/delay.c
3 *
4 * (C) Copyright 2004, Greg Ungerer <gerg@snapgear.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <asm/param.h>
13#include <asm/delay.h>
14
15EXPORT_SYMBOL(udelay);
16
17void udelay(unsigned long usecs)
18{
19 _udelay(usecs);
20}
21
diff --git a/arch/m68k/math-emu/fp_log.c b/arch/m68k/math-emu/fp_log.c
index 367ecee2f98..3384a5244fb 100644
--- a/arch/m68k/math-emu/fp_log.c
+++ b/arch/m68k/math-emu/fp_log.c
@@ -105,9 +105,6 @@ fp_fetoxm1(struct fp_ext *dest, struct fp_ext *src)
105 105
106 fp_monadic_check(dest, src); 106 fp_monadic_check(dest, src);
107 107
108 if (IS_ZERO(dest))
109 return dest;
110
111 return dest; 108 return dest;
112} 109}
113 110
diff --git a/arch/m68k/math-emu/multi_arith.h b/arch/m68k/math-emu/multi_arith.h
index 4ad0ca918e2..4b5eb3d8563 100644
--- a/arch/m68k/math-emu/multi_arith.h
+++ b/arch/m68k/math-emu/multi_arith.h
@@ -19,246 +19,6 @@
19#ifndef MULTI_ARITH_H 19#ifndef MULTI_ARITH_H
20#define MULTI_ARITH_H 20#define MULTI_ARITH_H
21 21
22#if 0 /* old code... */
23
24/* Unsigned only, because we don't need signs to multiply and divide. */
25typedef unsigned int int128[4];
26
27/* Word order */
28enum {
29 MSW128,
30 NMSW128,
31 NLSW128,
32 LSW128
33};
34
35/* big-endian */
36#define LO_WORD(ll) (((unsigned int *) &ll)[1])
37#define HI_WORD(ll) (((unsigned int *) &ll)[0])
38
39/* Convenience functions to stuff various integer values into int128s */
40
41static inline void zero128(int128 a)
42{
43 a[LSW128] = a[NLSW128] = a[NMSW128] = a[MSW128] = 0;
44}
45
46/* Human-readable word order in the arguments */
47static inline void set128(unsigned int i3, unsigned int i2, unsigned int i1,
48 unsigned int i0, int128 a)
49{
50 a[LSW128] = i0;
51 a[NLSW128] = i1;
52 a[NMSW128] = i2;
53 a[MSW128] = i3;
54}
55
56/* Convenience functions (for testing as well) */
57static inline void int64_to_128(unsigned long long src, int128 dest)
58{
59 dest[LSW128] = (unsigned int) src;
60 dest[NLSW128] = src >> 32;
61 dest[NMSW128] = dest[MSW128] = 0;
62}
63
64static inline void int128_to_64(const int128 src, unsigned long long *dest)
65{
66 *dest = src[LSW128] | (long long) src[NLSW128] << 32;
67}
68
69static inline void put_i128(const int128 a)
70{
71 printk("%08x %08x %08x %08x\n", a[MSW128], a[NMSW128],
72 a[NLSW128], a[LSW128]);
73}
74
75/* Internal shifters:
76
77 Note that these are only good for 0 < count < 32.
78 */
79
80static inline void _lsl128(unsigned int count, int128 a)
81{
82 a[MSW128] = (a[MSW128] << count) | (a[NMSW128] >> (32 - count));
83 a[NMSW128] = (a[NMSW128] << count) | (a[NLSW128] >> (32 - count));
84 a[NLSW128] = (a[NLSW128] << count) | (a[LSW128] >> (32 - count));
85 a[LSW128] <<= count;
86}
87
88static inline void _lsr128(unsigned int count, int128 a)
89{
90 a[LSW128] = (a[LSW128] >> count) | (a[NLSW128] << (32 - count));
91 a[NLSW128] = (a[NLSW128] >> count) | (a[NMSW128] << (32 - count));
92 a[NMSW128] = (a[NMSW128] >> count) | (a[MSW128] << (32 - count));
93 a[MSW128] >>= count;
94}
95
96/* Should be faster, one would hope */
97
98static inline void lslone128(int128 a)
99{
100 asm volatile ("lsl.l #1,%0\n"
101 "roxl.l #1,%1\n"
102 "roxl.l #1,%2\n"
103 "roxl.l #1,%3\n"
104 :
105 "=d" (a[LSW128]),
106 "=d"(a[NLSW128]),
107 "=d"(a[NMSW128]),
108 "=d"(a[MSW128])
109 :
110 "0"(a[LSW128]),
111 "1"(a[NLSW128]),
112 "2"(a[NMSW128]),
113 "3"(a[MSW128]));
114}
115
116static inline void lsrone128(int128 a)
117{
118 asm volatile ("lsr.l #1,%0\n"
119 "roxr.l #1,%1\n"
120 "roxr.l #1,%2\n"
121 "roxr.l #1,%3\n"
122 :
123 "=d" (a[MSW128]),
124 "=d"(a[NMSW128]),
125 "=d"(a[NLSW128]),
126 "=d"(a[LSW128])
127 :
128 "0"(a[MSW128]),
129 "1"(a[NMSW128]),
130 "2"(a[NLSW128]),
131 "3"(a[LSW128]));
132}
133
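
The roxl/roxr chains above are the 68k way of propagating the shifted-out bit
through the X flag from one word to the next. A hedged C equivalent of
lslone128 (word 0 is the MSW and word 3 the LSW, per the enum above):

static inline void lslone128_c(unsigned int a[4])
{
        unsigned int carry = 0, next;
        int i;

        for (i = 3; i >= 0; i--) {        /* from LSW up to MSW */
                next = a[i] >> 31;        /* bit shifted out of this word */
                a[i] = (a[i] << 1) | carry;
                carry = next;
        }
}
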
134/* Generalized 128-bit shifters:
135
136 These bit-shift to a multiple of 32, then move whole longwords. */
137
138static inline void lsl128(unsigned int count, int128 a)
139{
140 int wordcount, i;
141
142 if (count % 32)
143 _lsl128(count % 32, a);
144
145 if (0 == (wordcount = count / 32))
146 return;
147
148 /* argh, gak, endian-sensitive */
149 for (i = 0; i < 4 - wordcount; i++) {
150 a[i] = a[i + wordcount];
151 }
152 for (i = 3; i >= 4 - wordcount; --i) {
153 a[i] = 0;
154 }
155}
156
157static inline void lsr128(unsigned int count, int128 a)
158{
159 int wordcount, i;
160
161 if (count % 32)
162 _lsr128(count % 32, a);
163
164 if (0 == (wordcount = count / 32))
165 return;
166
167 for (i = 3; i >= wordcount; --i) {
168 a[i] = a[i - wordcount];
169 }
170 for (i = 0; i < wordcount; i++) {
171 a[i] = 0;
172 }
173}
174
 175static inline void orl128(int a, int128 b)
176{
177 b[LSW128] |= a;
178}
179
180static inline int btsthi128(const int128 a)
181{
182 return a[MSW128] & 0x80000000;
183}
184
185/* test bits (numbered from 0 = LSB) up to and including "top" */
186static inline int bftestlo128(int top, const int128 a)
187{
188 int r = 0;
189
190 if (top > 31)
191 r |= a[LSW128];
192 if (top > 63)
193 r |= a[NLSW128];
194 if (top > 95)
195 r |= a[NMSW128];
196
 197 r |= a[3 - (top / 32)] & (0xffffffffu >> (31 - top % 32));
198
199 return (r != 0);
200}
201
202/* Aargh. We need these because GCC is broken */
203/* FIXME: do them in assembly, for goodness' sake! */
204static inline void mask64(int pos, unsigned long long *mask)
205{
206 *mask = 0;
207
208 if (pos < 32) {
209 LO_WORD(*mask) = (1 << pos) - 1;
210 return;
211 }
212 LO_WORD(*mask) = -1;
213 HI_WORD(*mask) = (1 << (pos - 32)) - 1;
214}
215
216static inline void bset64(int pos, unsigned long long *dest)
217{
218 /* This conditional will be optimized away. Thanks, GCC! */
219 if (pos < 32)
220 asm volatile ("bset %1,%0":"=m"
221 (LO_WORD(*dest)):"id"(pos));
222 else
223 asm volatile ("bset %1,%0":"=m"
224 (HI_WORD(*dest)):"id"(pos - 32));
225}
226
227static inline int btst64(int pos, unsigned long long dest)
228{
229 if (pos < 32)
230 return (0 != (LO_WORD(dest) & (1 << pos)));
231 else
232 return (0 != (HI_WORD(dest) & (1 << (pos - 32))));
233}
234
235static inline void lsl64(int count, unsigned long long *dest)
236{
237 if (count < 32) {
 238 HI_WORD(*dest) = (HI_WORD(*dest) << count)
 239 | (LO_WORD(*dest) >> (32 - count));
240 LO_WORD(*dest) <<= count;
241 return;
242 }
243 count -= 32;
244 HI_WORD(*dest) = LO_WORD(*dest) << count;
245 LO_WORD(*dest) = 0;
246}
247
248static inline void lsr64(int count, unsigned long long *dest)
249{
250 if (count < 32) {
251 LO_WORD(*dest) = (LO_WORD(*dest) >> count)
252 | (HI_WORD(*dest) << (32 - count));
253 HI_WORD(*dest) >>= count;
254 return;
255 }
256 count -= 32;
257 LO_WORD(*dest) = HI_WORD(*dest) >> count;
258 HI_WORD(*dest) = 0;
259}
260#endif
261
262static inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt) 22static inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt)
263{ 23{
264 reg->exp += cnt; 24 reg->exp += cnt;
@@ -481,117 +241,6 @@ static inline void fp_dividemant(union fp_mant128 *dest, struct fp_ext *src,
481 } 241 }
482} 242}
483 243
484#if 0
485static inline unsigned int fp_fls128(union fp_mant128 *src)
486{
487 unsigned long data;
488 unsigned int res, off;
489
490 if ((data = src->m32[0]))
491 off = 0;
492 else if ((data = src->m32[1]))
493 off = 32;
494 else if ((data = src->m32[2]))
495 off = 64;
496 else if ((data = src->m32[3]))
497 off = 96;
498 else
499 return 128;
500
501 asm ("bfffo %1{#0,#32},%0" : "=d" (res) : "dm" (data));
502 return res + off;
503}
504
505static inline void fp_shiftmant128(union fp_mant128 *src, int shift)
506{
507 unsigned long sticky;
508
509 switch (shift) {
510 case 0:
511 return;
512 case 1:
513 asm volatile ("lsl.l #1,%0"
514 : "=d" (src->m32[3]) : "0" (src->m32[3]));
515 asm volatile ("roxl.l #1,%0"
516 : "=d" (src->m32[2]) : "0" (src->m32[2]));
517 asm volatile ("roxl.l #1,%0"
518 : "=d" (src->m32[1]) : "0" (src->m32[1]));
519 asm volatile ("roxl.l #1,%0"
520 : "=d" (src->m32[0]) : "0" (src->m32[0]));
521 return;
522 case 2 ... 31:
523 src->m32[0] = (src->m32[0] << shift) | (src->m32[1] >> (32 - shift));
524 src->m32[1] = (src->m32[1] << shift) | (src->m32[2] >> (32 - shift));
525 src->m32[2] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
526 src->m32[3] = (src->m32[3] << shift);
527 return;
528 case 32 ... 63:
529 shift -= 32;
530 src->m32[0] = (src->m32[1] << shift) | (src->m32[2] >> (32 - shift));
531 src->m32[1] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
532 src->m32[2] = (src->m32[3] << shift);
533 src->m32[3] = 0;
534 return;
535 case 64 ... 95:
536 shift -= 64;
537 src->m32[0] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
538 src->m32[1] = (src->m32[3] << shift);
539 src->m32[2] = src->m32[3] = 0;
540 return;
541 case 96 ... 127:
542 shift -= 96;
543 src->m32[0] = (src->m32[3] << shift);
544 src->m32[1] = src->m32[2] = src->m32[3] = 0;
545 return;
546 case -31 ... -1:
547 shift = -shift;
548 sticky = 0;
549 if (src->m32[3] << (32 - shift))
550 sticky = 1;
551 src->m32[3] = (src->m32[3] >> shift) | (src->m32[2] << (32 - shift)) | sticky;
552 src->m32[2] = (src->m32[2] >> shift) | (src->m32[1] << (32 - shift));
553 src->m32[1] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift));
554 src->m32[0] = (src->m32[0] >> shift);
555 return;
556 case -63 ... -32:
557 shift = -shift - 32;
558 sticky = 0;
559 if ((src->m32[2] << (32 - shift)) || src->m32[3])
560 sticky = 1;
561 src->m32[3] = (src->m32[2] >> shift) | (src->m32[1] << (32 - shift)) | sticky;
562 src->m32[2] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift));
563 src->m32[1] = (src->m32[0] >> shift);
564 src->m32[0] = 0;
565 return;
566 case -95 ... -64:
567 shift = -shift - 64;
568 sticky = 0;
569 if ((src->m32[1] << (32 - shift)) || src->m32[2] || src->m32[3])
570 sticky = 1;
571 src->m32[3] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift)) | sticky;
572 src->m32[2] = (src->m32[0] >> shift);
573 src->m32[1] = src->m32[0] = 0;
574 return;
575 case -127 ... -96:
576 shift = -shift - 96;
577 sticky = 0;
578 if ((src->m32[0] << (32 - shift)) || src->m32[1] || src->m32[2] || src->m32[3])
579 sticky = 1;
580 src->m32[3] = (src->m32[0] >> shift) | sticky;
581 src->m32[2] = src->m32[1] = src->m32[0] = 0;
582 return;
583 }
584
585 if (shift < 0 && (src->m32[0] || src->m32[1] || src->m32[2] || src->m32[3]))
586 src->m32[3] = 1;
587 else
588 src->m32[3] = 0;
589 src->m32[2] = 0;
590 src->m32[1] = 0;
591 src->m32[0] = 0;
592}
593#endif
594
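
The negative-shift cases deleted above all accumulate a "sticky" bit so that
rounding can still see anything shifted away. The same idea in isolation, as
a one-word sketch (assuming 0 < shift < 32):

static inline unsigned int shr_sticky32(unsigned int x, int shift)
{
        unsigned int lost = x & ((1u << shift) - 1);   /* bits shifted out */

        return (x >> shift) | (lost != 0);             /* OR them into bit 0 */
}
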
595static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src, 244static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
596 int shift) 245 int shift)
597{ 246{
@@ -637,183 +286,4 @@ static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
637 } 286 }
638} 287}
639 288
640#if 0 /* old code... */
641static inline int fls(unsigned int a)
642{
643 int r;
644
645 asm volatile ("bfffo %1{#0,#32},%0"
646 : "=d" (r) : "md" (a));
647 return r;
648}
649
650/* fls = "find last set" (cf. ffs(3)) */
651static inline int fls128(const int128 a)
652{
653 if (a[MSW128])
654 return fls(a[MSW128]);
655 if (a[NMSW128])
656 return fls(a[NMSW128]) + 32;
657 /* XXX: it probably never gets beyond this point in actual
658 use, but that's indicative of a more general problem in the
659 algorithm (i.e. as per the actual 68881 implementation, we
660 really only need at most 67 bits of precision [plus
661 overflow]) so I'm not going to fix it. */
662 if (a[NLSW128])
663 return fls(a[NLSW128]) + 64;
664 if (a[LSW128])
665 return fls(a[LSW128]) + 96;
666 else
667 return -1;
668}
669
670static inline int zerop128(const int128 a)
671{
672 return !(a[LSW128] | a[NLSW128] | a[NMSW128] | a[MSW128]);
673}
674
675static inline int nonzerop128(const int128 a)
676{
677 return (a[LSW128] | a[NLSW128] | a[NMSW128] | a[MSW128]);
678}
679
680/* Addition and subtraction */
681/* Do these in "pure" assembly, because "extended" asm is unmanageable
682 here */
683static inline void add128(const int128 a, int128 b)
684{
685 /* rotating carry flags */
686 unsigned int carry[2];
687
688 carry[0] = a[LSW128] > (0xffffffff - b[LSW128]);
689 b[LSW128] += a[LSW128];
690
691 carry[1] = a[NLSW128] > (0xffffffff - b[NLSW128] - carry[0]);
692 b[NLSW128] = a[NLSW128] + b[NLSW128] + carry[0];
693
694 carry[0] = a[NMSW128] > (0xffffffff - b[NMSW128] - carry[1]);
695 b[NMSW128] = a[NMSW128] + b[NMSW128] + carry[1];
696
697 b[MSW128] = a[MSW128] + b[MSW128] + carry[0];
698}
699
700/* Note: assembler semantics: "b -= a" */
701static inline void sub128(const int128 a, int128 b)
702{
703 /* rotating borrow flags */
704 unsigned int borrow[2];
705
706 borrow[0] = b[LSW128] < a[LSW128];
707 b[LSW128] -= a[LSW128];
708
709 borrow[1] = b[NLSW128] < a[NLSW128] + borrow[0];
710 b[NLSW128] = b[NLSW128] - a[NLSW128] - borrow[0];
711
712 borrow[0] = b[NMSW128] < a[NMSW128] + borrow[1];
713 b[NMSW128] = b[NMSW128] - a[NMSW128] - borrow[1];
714
715 b[MSW128] = b[MSW128] - a[MSW128] - borrow[0];
716}
717
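
add128 and sub128 above chain carries with the comparison trick that a + b
wraps 32 bits exactly when a > 0xffffffff - b. The same idiom on two words,
as a minimal sketch:

static void add64_2x32(const unsigned int a[2], unsigned int b[2])
{
        /* index 0 is the low word in this sketch */
        unsigned int carry = a[0] > 0xffffffffu - b[0];

        b[0] += a[0];
        b[1] = a[1] + b[1] + carry;
}
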
718/* Poor man's 64-bit expanding multiply */
719static inline void mul64(unsigned long long a, unsigned long long b, int128 c)
720{
721 unsigned long long acc;
722 int128 acc128;
723
724 zero128(acc128);
725 zero128(c);
726
727 /* first the low words */
728 if (LO_WORD(a) && LO_WORD(b)) {
729 acc = (long long) LO_WORD(a) * LO_WORD(b);
730 c[NLSW128] = HI_WORD(acc);
731 c[LSW128] = LO_WORD(acc);
732 }
733 /* Next the high words */
734 if (HI_WORD(a) && HI_WORD(b)) {
735 acc = (long long) HI_WORD(a) * HI_WORD(b);
736 c[MSW128] = HI_WORD(acc);
737 c[NMSW128] = LO_WORD(acc);
738 }
739 /* The middle words */
740 if (LO_WORD(a) && HI_WORD(b)) {
741 acc = (long long) LO_WORD(a) * HI_WORD(b);
742 acc128[NMSW128] = HI_WORD(acc);
743 acc128[NLSW128] = LO_WORD(acc);
744 add128(acc128, c);
745 }
746 /* The first and last words */
747 if (HI_WORD(a) && LO_WORD(b)) {
748 acc = (long long) HI_WORD(a) * LO_WORD(b);
749 acc128[NMSW128] = HI_WORD(acc);
750 acc128[NLSW128] = LO_WORD(acc);
751 add128(acc128, c);
752 }
753}
754
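
mul64 above is the schoolbook decomposition a*b = lo*lo + ((lo*hi + hi*lo)
<< 32) + (hi*hi << 64), with each 32x32 partial product fitting in 64 bits.
A quick host-side check of that identity (assumes a compiler that provides
__uint128_t):

#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint64_t a = 0x123456789abcdef0ull, b = 0xfedcba9876543210ull;
        uint64_t lo_lo = (a & 0xffffffff) * (b & 0xffffffff);
        uint64_t lo_hi = (a & 0xffffffff) * (b >> 32);
        uint64_t hi_lo = (a >> 32) * (b & 0xffffffff);
        uint64_t hi_hi = (a >> 32) * (b >> 32);
        __uint128_t sum = (__uint128_t)lo_lo
                        + ((__uint128_t)lo_hi << 32)
                        + ((__uint128_t)hi_lo << 32)
                        + ((__uint128_t)hi_hi << 64);

        assert(sum == (__uint128_t)a * b);
        return 0;
}
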
755/* Note: unsigned */
756static inline int cmp128(int128 a, int128 b)
757{
758 if (a[MSW128] < b[MSW128])
759 return -1;
760 if (a[MSW128] > b[MSW128])
761 return 1;
762 if (a[NMSW128] < b[NMSW128])
763 return -1;
764 if (a[NMSW128] > b[NMSW128])
765 return 1;
766 if (a[NLSW128] < b[NLSW128])
767 return -1;
768 if (a[NLSW128] > b[NLSW128])
769 return 1;
770
771 return (signed) a[LSW128] - b[LSW128];
772}
773
 774static inline void div128(int128 a, int128 b, int128 c)
775{
776 int128 mask;
777
778 /* Algorithm:
779
780 Shift the divisor until it's at least as big as the
781 dividend, keeping track of the position to which we've
782 shifted it, i.e. the power of 2 which we've multiplied it
783 by.
784
785 Then, for this power of 2 (the mask), and every one smaller
786 than it, subtract the mask from the dividend and add it to
787 the quotient until the dividend is smaller than the raised
788 divisor. At this point, divide the dividend and the mask
789 by 2 (i.e. shift one place to the right). Lather, rinse,
790 and repeat, until there are no more powers of 2 left. */
791
792 /* FIXME: needless to say, there's room for improvement here too. */
793
794 /* Shift up */
795 /* XXX: since it just has to be "at least as big", we can
796 probably eliminate this horribly wasteful loop. I will
797 have to prove this first, though */
798 set128(0, 0, 0, 1, mask);
799 while (cmp128(b, a) < 0 && !btsthi128(b)) {
800 lslone128(b);
801 lslone128(mask);
802 }
803
804 /* Shift down */
805 zero128(c);
806 do {
807 if (cmp128(a, b) >= 0) {
808 sub128(b, a);
809 add128(mask, c);
810 }
811 lsrone128(mask);
812 lsrone128(b);
813 } while (nonzerop128(mask));
814
815 /* The remainder is in a... */
816}
817#endif
818
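
div128's shift-and-subtract loop, restated on plain 64-bit operands as a
hedged illustration of the algorithm described in its comment:

#include <stdint.h>

static uint64_t div_shift_sub(uint64_t a, uint64_t b)
{
        uint64_t mask = 1, q = 0;

        /* shift the divisor up until it is at least as big as the dividend */
        while (b < a && !(b >> 63)) {
                b <<= 1;
                mask <<= 1;
        }
        /* shift back down, subtracting wherever it still fits */
        while (mask) {
                if (a >= b) {
                        a -= b;
                        q |= mask;
                }
                b >>= 1;
                mask >>= 1;
        }
        return q;       /* the remainder is left in a */
}
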
819#endif /* MULTI_ARITH_H */ 289#endif /* MULTI_ARITH_H */
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index 9113c2f1760..bbe525434cc 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -83,11 +83,6 @@ void __init mem_init(void)
83 int initpages = 0; 83 int initpages = 0;
84 int i; 84 int i;
85 85
86#ifdef CONFIG_ATARI
87 if (MACH_IS_ATARI)
88 atari_stram_mem_init_hook();
89#endif
90
91 /* this will put all memory onto the freelists */ 86 /* this will put all memory onto the freelists */
92 totalram_pages = num_physpages = 0; 87 totalram_pages = num_physpages = 0;
93 for_each_online_pgdat(pgdat) { 88 for_each_online_pgdat(pgdat) {
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
index 7cbd7bd1f8b..50cd12cf28d 100644
--- a/arch/m68k/mm/init_no.c
+++ b/arch/m68k/mm/init_no.c
@@ -42,7 +42,7 @@
42 * ZERO_PAGE is a special page that is used for zero-initialized 42 * ZERO_PAGE is a special page that is used for zero-initialized
43 * data and COW. 43 * data and COW.
44 */ 44 */
45unsigned long empty_zero_page; 45void *empty_zero_page;
46 46
47extern unsigned long memory_start; 47extern unsigned long memory_start;
48extern unsigned long memory_end; 48extern unsigned long memory_end;
@@ -62,8 +62,8 @@ void __init paging_init(void)
62 unsigned long end_mem = memory_end & PAGE_MASK; 62 unsigned long end_mem = memory_end & PAGE_MASK;
63 unsigned long zones_size[MAX_NR_ZONES] = {0, }; 63 unsigned long zones_size[MAX_NR_ZONES] = {0, };
64 64
65 empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 65 empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
66 memset((void *)empty_zero_page, 0, PAGE_SIZE); 66 memset(empty_zero_page, 0, PAGE_SIZE);
67 67
68 /* 68 /*
69 * Set up SFC/DFC registers (user data space). 69 * Set up SFC/DFC registers (user data space).
@@ -120,7 +120,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
120 totalram_pages++; 120 totalram_pages++;
121 pages++; 121 pages++;
122 } 122 }
123 printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024)); 123 pr_notice("Freeing initrd memory: %luk freed\n",
124 pages * (PAGE_SIZE / 1024));
124} 125}
125#endif 126#endif
126 127
@@ -141,7 +142,7 @@ void free_initmem(void)
141 free_page(addr); 142 free_page(addr);
142 totalram_pages++; 143 totalram_pages++;
143 } 144 }
144 printk(KERN_NOTICE "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n", 145 pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n",
145 (addr - PAGE_ALIGN((long) &__init_begin)) >> 10, 146 (addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
146 (int)(PAGE_ALIGN((unsigned long)(&__init_begin))), 147 (int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
147 (int)(addr - PAGE_SIZE)); 148 (int)(addr - PAGE_SIZE));
diff --git a/arch/m68k/platform/5206/config.c b/arch/m68k/platform/5206/config.c
index 9c335465e66..6fa3f800277 100644
--- a/arch/m68k/platform/5206/config.c
+++ b/arch/m68k/platform/5206/config.c
@@ -98,6 +98,12 @@ void m5206_cpu_reset(void)
98 98
99void __init config_BSP(char *commandp, int size) 99void __init config_BSP(char *commandp, int size)
100{ 100{
101#if defined(CONFIG_NETtel)
102 /* Copy command line from FLASH to local buffer... */
103 memcpy(commandp, (char *) 0xf0004000, size);
104 commandp[size-1] = 0;
105#endif /* CONFIG_NETtel */
106
101 mach_reset = m5206_cpu_reset; 107 mach_reset = m5206_cpu_reset;
102 m5206_timers_init(); 108 m5206_timers_init();
103 m5206_uarts_init(); 109 m5206_uarts_init();
diff --git a/arch/m68k/platform/5206e/Makefile b/arch/m68k/platform/5206e/Makefile
deleted file mode 100644
index b5db05625cf..00000000000
--- a/arch/m68k/platform/5206e/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
1#
2# Makefile for the m68knommu linux kernel.
3#
4
5#
6# If you want to play with the HW breakpoints then you will
7# need to define this, which will give you a stack backtrace
8# on the console port whenever a DBG interrupt occurs. You have to
9# set up your HW breakpoints to trigger a DBG interrupt:
10#
11# ccflags-y := -DTRAP_DBG_INTERRUPT
12# asflags-y := -DTRAP_DBG_INTERRUPT
13#
14
15asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
16
17obj-y := config.o gpio.o
18
diff --git a/arch/m68k/platform/5206e/config.c b/arch/m68k/platform/5206e/config.c
deleted file mode 100644
index 942397984c6..00000000000
--- a/arch/m68k/platform/5206e/config.c
+++ /dev/null
@@ -1,127 +0,0 @@
1/***************************************************************************/
2
3/*
4 * linux/arch/m68knommu/platform/5206e/config.c
5 *
6 * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
7 */
8
9/***************************************************************************/
10
11#include <linux/kernel.h>
12#include <linux/param.h>
13#include <linux/init.h>
14#include <linux/io.h>
15#include <asm/machdep.h>
16#include <asm/coldfire.h>
17#include <asm/mcfsim.h>
18#include <asm/mcfuart.h>
19#include <asm/mcfdma.h>
20
21/***************************************************************************/
22
23static struct mcf_platform_uart m5206e_uart_platform[] = {
24 {
25 .mapbase = MCF_MBAR + MCFUART_BASE1,
26 .irq = 73,
27 },
28 {
29 .mapbase = MCF_MBAR + MCFUART_BASE2,
30 .irq = 74,
31 },
32 { },
33};
34
35static struct platform_device m5206e_uart = {
36 .name = "mcfuart",
37 .id = 0,
38 .dev.platform_data = m5206e_uart_platform,
39};
40
41static struct platform_device *m5206e_devices[] __initdata = {
42 &m5206e_uart,
43};
44
45/***************************************************************************/
46
47static void __init m5206e_uart_init_line(int line, int irq)
48{
49 if (line == 0) {
50 writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
51 writeb(irq, MCFUART_BASE1 + MCFUART_UIVR);
52 mcf_mapirq2imr(irq, MCFINTC_UART0);
53 } else if (line == 1) {
54 writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
55 writeb(irq, MCFUART_BASE2 + MCFUART_UIVR);
56 mcf_mapirq2imr(irq, MCFINTC_UART1);
57 }
58}
59
60static void __init m5206e_uarts_init(void)
61{
62 const int nrlines = ARRAY_SIZE(m5206e_uart_platform);
63 int line;
64
65 for (line = 0; (line < nrlines); line++)
66 m5206e_uart_init_line(line, m5206e_uart_platform[line].irq);
67}
68
69/***************************************************************************/
70
71static void __init m5206e_timers_init(void)
72{
73 /* Timer1 is always used as system timer */
74 writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
75 MCF_MBAR + MCFSIM_TIMER1ICR);
76 mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);
77
78#ifdef CONFIG_HIGHPROFILE
79 /* Timer2 is to be used as a high speed profile timer */
80 writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
81 MCF_MBAR + MCFSIM_TIMER2ICR);
82 mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
83#endif
84}
85
86/***************************************************************************/
87
88void m5206e_cpu_reset(void)
89{
90 local_irq_disable();
 91 /* Set watchdog to soft reset, and enable it */
92 __raw_writeb(0xc0, MCF_MBAR + MCFSIM_SYPCR);
93 for (;;)
94 /* wait for watchdog to timeout */;
95}
96
97/***************************************************************************/
98
99void __init config_BSP(char *commandp, int size)
100{
101#if defined(CONFIG_NETtel)
102 /* Copy command line from FLASH to local buffer... */
103 memcpy(commandp, (char *) 0xf0004000, size);
104 commandp[size-1] = 0;
105#endif /* CONFIG_NETtel */
106
107 mach_reset = m5206e_cpu_reset;
108 m5206e_timers_init();
109 m5206e_uarts_init();
110
111 /* Only support the external interrupts on their primary level */
112 mcf_mapirq2imr(25, MCFINTC_EINT1);
113 mcf_mapirq2imr(28, MCFINTC_EINT4);
114 mcf_mapirq2imr(31, MCFINTC_EINT7);
115}
116
117/***************************************************************************/
118
119static int __init init_BSP(void)
120{
121 platform_add_devices(m5206e_devices, ARRAY_SIZE(m5206e_devices));
122 return 0;
123}
124
125arch_initcall(init_BSP);
126
127/***************************************************************************/
diff --git a/arch/m68k/platform/5206e/gpio.c b/arch/m68k/platform/5206e/gpio.c
deleted file mode 100644
index b9ab4a120f2..00000000000
--- a/arch/m68k/platform/5206e/gpio.c
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * Coldfire generic GPIO support
3 *
4 * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14*/
15
16#include <linux/kernel.h>
17#include <linux/init.h>
18
19#include <asm/coldfire.h>
20#include <asm/mcfsim.h>
21#include <asm/mcfgpio.h>
22
23static struct mcf_gpio_chip mcf_gpio_chips[] = {
24 {
25 .gpio_chip = {
26 .label = "PP",
27 .request = mcf_gpio_request,
28 .free = mcf_gpio_free,
29 .direction_input = mcf_gpio_direction_input,
30 .direction_output = mcf_gpio_direction_output,
31 .get = mcf_gpio_get_value,
32 .set = mcf_gpio_set_value,
33 .ngpio = 8,
34 },
35 .pddr = (void __iomem *) MCFSIM_PADDR,
36 .podr = (void __iomem *) MCFSIM_PADAT,
37 .ppdr = (void __iomem *) MCFSIM_PADAT,
38 },
39};
40
41static int __init mcf_gpio_init(void)
42{
43 unsigned i = 0;
44 while (i < ARRAY_SIZE(mcf_gpio_chips))
45 (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
46 return 0;
47}
48
49core_initcall(mcf_gpio_init);
diff --git a/arch/m68k/platform/5272/intc.c b/arch/m68k/platform/5272/intc.c
index 7e715dfe281..7160e618b0a 100644
--- a/arch/m68k/platform/5272/intc.c
+++ b/arch/m68k/platform/5272/intc.c
@@ -162,8 +162,6 @@ void __init init_IRQ(void)
162{ 162{
163 int irq, edge; 163 int irq, edge;
164 164
165 init_vectors();
166
167 /* Mask all interrupt sources */ 165 /* Mask all interrupt sources */
168 writel(0x88888888, MCF_MBAR + MCFSIM_ICR1); 166 writel(0x88888888, MCF_MBAR + MCFSIM_ICR1);
169 writel(0x88888888, MCF_MBAR + MCFSIM_ICR2); 167 writel(0x88888888, MCF_MBAR + MCFSIM_ICR2);
diff --git a/arch/m68k/platform/68328/entry.S b/arch/m68k/platform/68328/entry.S
index f68dce766c0..293e1eba9ac 100644
--- a/arch/m68k/platform/68328/entry.S
+++ b/arch/m68k/platform/68328/entry.S
@@ -236,27 +236,26 @@ ret_from_interrupt:
236 * Handler for uninitialized and spurious interrupts. 236 * Handler for uninitialized and spurious interrupts.
237 */ 237 */
238ENTRY(bad_interrupt) 238ENTRY(bad_interrupt)
239 addql #1,num_spurious 239 addql #1,irq_err_count
240 rte 240 rte
241 241
242/* 242/*
243 * Beware - when entering resume, prev (the current task) is 243 * Beware - when entering resume, prev (the current task) is
244 * in a0, next (the new task) is in a1,so don't change these 244 * in a0, next (the new task) is in a1, so don't change these
245 * registers until their contents are no longer needed. 245 * registers until their contents are no longer needed.
246 */ 246 */
247ENTRY(resume) 247ENTRY(resume)
248 movel %a0,%d1 /* save prev thread in d1 */ 248 movel %a0,%d1 /* save prev thread in d1 */
249 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */ 249 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
250 movel %usp,%a2 /* save usp */
251 movel %a2,%a0@(TASK_THREAD+THREAD_USP)
252
253 SAVE_SWITCH_STACK 250 SAVE_SWITCH_STACK
254 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */ 251 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
252 movel %usp,%a3 /* save usp */
253 movel %a3,%a0@(TASK_THREAD+THREAD_USP)
254
255 movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
256 movel %a3,%usp
255 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */ 257 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
256 RESTORE_SWITCH_STACK 258 RESTORE_SWITCH_STACK
257
258 movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
259 movel %a0,%usp
260 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */ 259 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */
261 rts 260 rts
262 261
diff --git a/arch/m68k/platform/68328/ints.c b/arch/m68k/platform/68328/ints.c
index a90288cf744..4bd456531f9 100644
--- a/arch/m68k/platform/68328/ints.c
+++ b/arch/m68k/platform/68328/ints.c
@@ -70,9 +70,6 @@ asmlinkage irqreturn_t inthandler7(void);
70 70
71extern e_vector *_ramvec; 71extern e_vector *_ramvec;
72 72
73/* The number of spurious interrupts */
74volatile unsigned int num_spurious;
75
76/* The 68k family did not have a good way to determine the source 73/* The 68k family did not have a good way to determine the source
77 * of interrupts until later in the family. The EC000 core does 74 * of interrupts until later in the family. The EC000 core does
78 * not provide the vector number on the stack, we vector everything 75 * not provide the vector number on the stack, we vector everything
@@ -155,7 +152,7 @@ static struct irq_chip intc_irq_chip = {
155 * This function should be called during kernel startup to initialize 152 * This function should be called during kernel startup to initialize
156 * the machine vector table. 153 * the machine vector table.
157 */ 154 */
158void __init init_IRQ(void) 155void __init trap_init(void)
159{ 156{
160 int i; 157 int i;
161 158
@@ -172,6 +169,11 @@ void __init init_IRQ(void)
172 _ramvec[69] = (e_vector) inthandler5; 169 _ramvec[69] = (e_vector) inthandler5;
173 _ramvec[70] = (e_vector) inthandler6; 170 _ramvec[70] = (e_vector) inthandler6;
174 _ramvec[71] = (e_vector) inthandler7; 171 _ramvec[71] = (e_vector) inthandler7;
172}
173
174void __init init_IRQ(void)
175{
176 int i;
175 177
176 IVR = 0x40; /* Set DragonBall IVR (interrupt base) to 64 */ 178 IVR = 0x40; /* Set DragonBall IVR (interrupt base) to 64 */
177 179
diff --git a/arch/m68k/platform/68360/entry.S b/arch/m68k/platform/68360/entry.S
index a07b14feed9..abbb89672ea 100644
--- a/arch/m68k/platform/68360/entry.S
+++ b/arch/m68k/platform/68360/entry.S
@@ -157,27 +157,26 @@ ret_from_interrupt:
157 * Handler for uninitialized and spurious interrupts. 157 * Handler for uninitialized and spurious interrupts.
158 */ 158 */
159bad_interrupt: 159bad_interrupt:
160 addql #1,num_spurious 160 addql #1,irq_err_count
161 rte 161 rte
162 162
163/* 163/*
164 * Beware - when entering resume, prev (the current task) is 164 * Beware - when entering resume, prev (the current task) is
165 * in a0, next (the new task) is in a1,so don't change these 165 * in a0, next (the new task) is in a1, so don't change these
166 * registers until their contents are no longer needed. 166 * registers until their contents are no longer needed.
167 */ 167 */
168ENTRY(resume) 168ENTRY(resume)
169 movel %a0,%d1 /* save prev thread in d1 */ 169 movel %a0,%d1 /* save prev thread in d1 */
170 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */ 170 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
171 movel %usp,%a2 /* save usp */
172 movel %a2,%a0@(TASK_THREAD+THREAD_USP)
173
174 SAVE_SWITCH_STACK 171 SAVE_SWITCH_STACK
175 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */ 172 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
173 movel %usp,%a3 /* save usp */
174 movel %a3,%a0@(TASK_THREAD+THREAD_USP)
175
176 movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
177 movel %a3,%usp
176 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */ 178 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
177 RESTORE_SWITCH_STACK 179 RESTORE_SWITCH_STACK
178
179 movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
180 movel %a0,%usp
181 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */ 180 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */
182 rts 181 rts
183 182
diff --git a/arch/m68k/platform/68360/ints.c b/arch/m68k/platform/68360/ints.c
index 4af0f4e30f7..7b40202d963 100644
--- a/arch/m68k/platform/68360/ints.c
+++ b/arch/m68k/platform/68360/ints.c
@@ -34,9 +34,6 @@ asmlinkage void inthandler(void);
34 34
35extern void *_ramvec[]; 35extern void *_ramvec[];
36 36
37/* The number of spurious interrupts */
38volatile unsigned int num_spurious;
39
40static void intc_irq_unmask(struct irq_data *d) 37static void intc_irq_unmask(struct irq_data *d)
41{ 38{
42 pquicc->intr_cimr |= (1 << d->irq); 39 pquicc->intr_cimr |= (1 << d->irq);
@@ -63,9 +60,8 @@ static struct irq_chip intc_irq_chip = {
63 * This function should be called during kernel startup to initialize 60 * This function should be called during kernel startup to initialize
64 * the vector table. 61 * the vector table.
65 */ 62 */
66void init_IRQ(void) 63void __init trap_init(void)
67{ 64{
68 int i;
69 int vba = (CPM_VECTOR_BASE<<4); 65 int vba = (CPM_VECTOR_BASE<<4);
70 66
71 /* set up the vectors */ 67 /* set up the vectors */
@@ -130,6 +126,11 @@ void init_IRQ(void)
130 126
131 /* turn off all CPM interrupts */ 127 /* turn off all CPM interrupts */
132 pquicc->intr_cimr = 0x00000000; 128 pquicc->intr_cimr = 0x00000000;
129}
130
131void init_IRQ(void)
132{
133 int i;
133 134
134 for (i = 0; (i < NR_IRQS); i++) { 135 for (i = 0; (i < NR_IRQS); i++) {
135 irq_set_chip(i, &intc_irq_chip); 136 irq_set_chip(i, &intc_irq_chip);
diff --git a/arch/m68k/platform/coldfire/entry.S b/arch/m68k/platform/coldfire/entry.S
index 27c2b001161..bd27242c2f4 100644
--- a/arch/m68k/platform/coldfire/entry.S
+++ b/arch/m68k/platform/coldfire/entry.S
@@ -182,21 +182,23 @@ ENTRY(inthandler)
182 182
183/* 183/*
184 * Beware - when entering resume, prev (the current task) is 184 * Beware - when entering resume, prev (the current task) is
185 * in a0, next (the new task) is in a1,so don't change these 185 * in a0, next (the new task) is in a1, so don't change these
186 * registers until their contents are no longer needed. 186 * registers until their contents are no longer needed.
187 * This is always called in supervisor mode, so don't bother to save
188 * and restore sr; user's process sr is actually in the stack.
189 */ 187 */
190ENTRY(resume) 188ENTRY(resume)
191 movel %a0, %d1 /* get prev thread in d1 */ 189 movew %sr,%d1 /* save current status */
192 RDUSP 190 movew %d1,%a0@(TASK_THREAD+THREAD_SR)
193 movel %a2,%a0@(TASK_THREAD+THREAD_USP) 191 movel %a0,%d1 /* get prev thread in d1 */
194
195 SAVE_SWITCH_STACK 192 SAVE_SWITCH_STACK
196 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */ 193 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
197 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */ 194 RDUSP /* movel %usp,%a3 */
195 movel %a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */
196
197 movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */
198 WRUSP /* movel %a3,%usp */
199 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */
200 movew %a1@(TASK_THREAD+THREAD_SR),%d7 /* restore new status */
201 movew %d7,%sr
198 RESTORE_SWITCH_STACK 202 RESTORE_SWITCH_STACK
199
200 movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
201 WRUSP
202 rts 203 rts
204
diff --git a/arch/m68k/platform/coldfire/intc-2.c b/arch/m68k/platform/coldfire/intc-2.c
index 74b55cfbc3c..995093357c5 100644
--- a/arch/m68k/platform/coldfire/intc-2.c
+++ b/arch/m68k/platform/coldfire/intc-2.c
@@ -194,8 +194,6 @@ void __init init_IRQ(void)
194{ 194{
195 int irq; 195 int irq;
196 196
197 init_vectors();
198
199 /* Mask all interrupt sources */ 197 /* Mask all interrupt sources */
200 __raw_writel(0x1, MCFICM_INTC0 + MCFINTC_IMRL); 198 __raw_writel(0x1, MCFICM_INTC0 + MCFINTC_IMRL);
201#ifdef MCFICM_INTC1 199#ifdef MCFICM_INTC1
diff --git a/arch/m68k/platform/coldfire/intc-simr.c b/arch/m68k/platform/coldfire/intc-simr.c
index d6a4d9d53e4..650d52e2927 100644
--- a/arch/m68k/platform/coldfire/intc-simr.c
+++ b/arch/m68k/platform/coldfire/intc-simr.c
@@ -171,8 +171,6 @@ void __init init_IRQ(void)
171{ 171{
172 int irq, eirq; 172 int irq, eirq;
173 173
174 init_vectors();
175
176 /* Mask all interrupt sources */ 174 /* Mask all interrupt sources */
177 __raw_writeb(0xff, MCFINTC0_SIMR); 175 __raw_writeb(0xff, MCFINTC0_SIMR);
178 if (MCFINTC1_SIMR) 176 if (MCFINTC1_SIMR)
diff --git a/arch/m68k/platform/coldfire/intc.c b/arch/m68k/platform/coldfire/intc.c
index 0bbb414856e..5c0c150b406 100644
--- a/arch/m68k/platform/coldfire/intc.c
+++ b/arch/m68k/platform/coldfire/intc.c
@@ -139,7 +139,6 @@ void __init init_IRQ(void)
139{ 139{
140 int irq; 140 int irq;
141 141
142 init_vectors();
143 mcf_maskimr(0xffffffff); 142 mcf_maskimr(0xffffffff);
144 143
145 for (irq = 0; (irq < NR_IRQS); irq++) { 144 for (irq = 0; (irq < NR_IRQS); irq++) {
diff --git a/arch/m68k/platform/coldfire/vectors.c b/arch/m68k/platform/coldfire/vectors.c
index a21d3f870b7..3a7cc524ecd 100644
--- a/arch/m68k/platform/coldfire/vectors.c
+++ b/arch/m68k/platform/coldfire/vectors.c
@@ -35,21 +35,13 @@ asmlinkage void dbginterrupt_c(struct frame *fp)
35 35
36extern e_vector *_ramvec; 36extern e_vector *_ramvec;
37 37
38void set_evector(int vecnum, void (*handler)(void))
39{
40 if (vecnum >= 0 && vecnum <= 255)
41 _ramvec[vecnum] = handler;
42}
43
44/***************************************************************************/
45
46/* Assembler routines */ 38/* Assembler routines */
47asmlinkage void buserr(void); 39asmlinkage void buserr(void);
48asmlinkage void trap(void); 40asmlinkage void trap(void);
49asmlinkage void system_call(void); 41asmlinkage void system_call(void);
50asmlinkage void inthandler(void); 42asmlinkage void inthandler(void);
51 43
52void __init init_vectors(void) 44void __init trap_init(void)
53{ 45{
54 int i; 46 int i;
55 47