author     Linus Torvalds <torvalds@linux-foundation.org>    2012-01-06 20:59:33 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-01-06 20:59:33 -0500
commit     c77417132c12af338a7d37956809b2b98d20413c (patch)
tree       02cb0ef1f8dfa1af8ce0965883dd449adf33eb2c /arch/m68k
parent     e4e88f31bcb5f05f24b9ae518d4ecb44e1a7774d (diff)
parent     1f7034b9616e6f14dc7b6aa280210421428f31af (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu: (56 commits)
m68k: allow ColdFire 547x and 548x CPUs to be built with MMU enabled
m68k/Kconfig: Separate classic m68k and coldfire early
m68k: add ColdFire with MMU enabled support to the m68k mem init code
m68k: do not use m68k startup or interrupt code for ColdFire CPUs
m68k: add ColdFire FPU support for the V4e ColdFire CPUs
m68k: adjustments to stack frame for ColdFire with MMU enabled
m68k: use non-MMU linker script for ColdFire MMU builds
m68k: ColdFire with MMU enabled uses same clocking code as non-MMU
m68k: add code to setup a ColdFire 54xx platform when MMU enabled
m68k: use non-MMU entry.S code when compiling for ColdFire CPU
m68k: create ColdFire MMU pgalloc code
m68k: compile appropriate mm arch files for ColdFire MMU support
m68k: ColdFire V4e MMU paging init code and miss handler
m68k: use ColdFire MMU read/write bit flags when ioremapping
m68k: modify cache push and clear code for ColdFire with MMU enable
m68k: use tracehook_report_syscall_entry/exit for ColdFire MMU ptrace path
m68k: ColdFire V4e MMU context support code
m68k: MMU enabled ColdFire needs 8k ELF alignment
m68k: set ColdFire MMU page size
m68k: define PAGE_OFFSET_RAW for ColdFire CPU with MMU enabled
...
Diffstat (limited to 'arch/m68k')
74 files changed, 2004 insertions, 701 deletions
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 361d54019bb0..81fdaa72c540 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -3,7 +3,6 @@ config M68K | |||
3 | default y | 3 | default y |
4 | select HAVE_IDE | 4 | select HAVE_IDE |
5 | select HAVE_AOUT if MMU | 5 | select HAVE_AOUT if MMU |
6 | select GENERIC_ATOMIC64 if MMU | ||
7 | select HAVE_GENERIC_HARDIRQS | 6 | select HAVE_GENERIC_HARDIRQS |
8 | select GENERIC_IRQ_SHOW | 7 | select GENERIC_IRQ_SHOW |
9 | select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS | 8 | select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS |
@@ -41,12 +40,15 @@ config GENERIC_CALIBRATE_DELAY | |||
41 | config GENERIC_IOMAP | 40 | config GENERIC_IOMAP |
42 | def_bool MMU | 41 | def_bool MMU |
43 | 42 | ||
43 | config GENERIC_CSUM | ||
44 | bool | ||
45 | |||
44 | config TIME_LOW_RES | 46 | config TIME_LOW_RES |
45 | bool | 47 | bool |
46 | default y | 48 | default y |
47 | 49 | ||
48 | config ARCH_USES_GETTIMEOFFSET | 50 | config ARCH_USES_GETTIMEOFFSET |
49 | def_bool MMU | 51 | def_bool MMU && !COLDFIRE |
50 | 52 | ||
51 | config NO_IOPORT | 53 | config NO_IOPORT |
52 | def_bool y | 54 | def_bool y |
@@ -61,6 +63,12 @@ config ZONE_DMA | |||
61 | config CPU_HAS_NO_BITFIELDS | 63 | config CPU_HAS_NO_BITFIELDS |
62 | bool | 64 | bool |
63 | 65 | ||
66 | config CPU_HAS_NO_MULDIV64 | ||
67 | bool | ||
68 | |||
69 | config CPU_HAS_ADDRESS_SPACES | ||
70 | bool | ||
71 | |||
64 | config HZ | 72 | config HZ |
65 | int | 73 | int |
66 | default 1000 if CLEOPATRA | 74 | default 1000 if CLEOPATRA |
@@ -80,9 +88,12 @@ config MMU | |||
80 | config MMU_MOTOROLA | 88 | config MMU_MOTOROLA |
81 | bool | 89 | bool |
82 | 90 | ||
91 | config MMU_COLDFIRE | ||
92 | bool | ||
93 | |||
83 | config MMU_SUN3 | 94 | config MMU_SUN3 |
84 | bool | 95 | bool |
85 | depends on MMU && !MMU_MOTOROLA | 96 | depends on MMU && !MMU_MOTOROLA && !MMU_COLDFIRE |
86 | 97 | ||
87 | menu "Platform setup" | 98 | menu "Platform setup" |
88 | 99 | ||
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index e632b2d12106..8a9c767125a4 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -1,8 +1,42 @@ | |||
1 | comment "Processor Type" | 1 | comment "Processor Type" |
2 | 2 | ||
3 | choice | ||
4 | prompt "CPU family support" | ||
5 | default M68KCLASSIC if MMU | ||
6 | default COLDFIRE if !MMU | ||
7 | help | ||
8 | The Freescale (was Motorola) M68K family of processors implements | ||
9 | the full 68000 processor instruction set. | ||
10 | The Freescale ColdFire family of processors is a modern derivitive | ||
11 | of the 68000 processor family. They are mainly targeted at embedded | ||
12 | applications, and are all System-On-Chip (SOC) devices, as opposed | ||
13 | to stand alone CPUs. They implement a subset of the original 68000 | ||
14 | processor instruction set. | ||
15 | If you anticipate running this kernel on a computer with a classic | ||
16 | MC68xxx processor, select M68KCLASSIC. | ||
17 | If you anticipate running this kernel on a computer with a ColdFire | ||
18 | processor, select COLDFIRE. | ||
19 | |||
20 | config M68KCLASSIC | ||
21 | bool "Classic M68K CPU family support" | ||
22 | |||
23 | config COLDFIRE | ||
24 | bool "Coldfire CPU family support" | ||
25 | select GENERIC_GPIO | ||
26 | select ARCH_REQUIRE_GPIOLIB | ||
27 | select CPU_HAS_NO_BITFIELDS | ||
28 | select CPU_HAS_NO_MULDIV64 | ||
29 | select GENERIC_CSUM | ||
30 | |||
31 | endchoice | ||
32 | |||
33 | if M68KCLASSIC | ||
34 | |||
3 | config M68000 | 35 | config M68000 |
4 | bool | 36 | bool |
5 | select CPU_HAS_NO_BITFIELDS | 37 | select CPU_HAS_NO_BITFIELDS |
38 | select CPU_HAS_NO_MULDIV64 | ||
39 | select GENERIC_CSUM | ||
6 | help | 40 | help |
7 | The Freescale (was Motorola) 68000 CPU is the first generation of | 41 | The Freescale (was Motorola) 68000 CPU is the first generation of |
8 | the well known M68K family of processors. The CPU core as well as | 42 | the well known M68K family of processors. The CPU core as well as |
@@ -18,21 +52,11 @@ config MCPU32 | |||
18 | based on the 68020 processor. For the most part it is used in | 52 | based on the 68020 processor. For the most part it is used in |
19 | System-On-Chip parts, and does not contain a paging MMU. | 53 | System-On-Chip parts, and does not contain a paging MMU. |
20 | 54 | ||
21 | config COLDFIRE | ||
22 | bool | ||
23 | select GENERIC_GPIO | ||
24 | select ARCH_REQUIRE_GPIOLIB | ||
25 | select CPU_HAS_NO_BITFIELDS | ||
26 | help | ||
27 | The Freescale ColdFire family of processors is a modern derivitive | ||
28 | of the 68000 processor family. They are mainly targeted at embedded | ||
29 | applications, and are all System-On-Chip (SOC) devices, as opposed | ||
30 | to stand alone CPUs. They implement a subset of the original 68000 | ||
31 | processor instruction set. | ||
32 | |||
33 | config M68020 | 55 | config M68020 |
34 | bool "68020 support" | 56 | bool "68020 support" |
35 | depends on MMU | 57 | depends on MMU |
58 | select GENERIC_ATOMIC64 | ||
59 | select CPU_HAS_ADDRESS_SPACES | ||
36 | help | 60 | help |
37 | If you anticipate running this kernel on a computer with a MC68020 | 61 | If you anticipate running this kernel on a computer with a MC68020 |
38 | processor, say Y. Otherwise, say N. Note that the 68020 requires a | 62 | processor, say Y. Otherwise, say N. Note that the 68020 requires a |
@@ -42,6 +66,8 @@ config M68020 | |||
42 | config M68030 | 66 | config M68030 |
43 | bool "68030 support" | 67 | bool "68030 support" |
44 | depends on MMU && !MMU_SUN3 | 68 | depends on MMU && !MMU_SUN3 |
69 | select GENERIC_ATOMIC64 | ||
70 | select CPU_HAS_ADDRESS_SPACES | ||
45 | help | 71 | help |
46 | If you anticipate running this kernel on a computer with a MC68030 | 72 | If you anticipate running this kernel on a computer with a MC68030 |
47 | processor, say Y. Otherwise, say N. Note that a MC68EC030 will not | 73 | processor, say Y. Otherwise, say N. Note that a MC68EC030 will not |
@@ -50,6 +76,8 @@ config M68030 | |||
50 | config M68040 | 76 | config M68040 |
51 | bool "68040 support" | 77 | bool "68040 support" |
52 | depends on MMU && !MMU_SUN3 | 78 | depends on MMU && !MMU_SUN3 |
79 | select GENERIC_ATOMIC64 | ||
80 | select CPU_HAS_ADDRESS_SPACES | ||
53 | help | 81 | help |
54 | If you anticipate running this kernel on a computer with a MC68LC040 | 82 | If you anticipate running this kernel on a computer with a MC68LC040 |
55 | or MC68040 processor, say Y. Otherwise, say N. Note that an | 83 | or MC68040 processor, say Y. Otherwise, say N. Note that an |
@@ -59,6 +87,8 @@ config M68040 | |||
59 | config M68060 | 87 | config M68060 |
60 | bool "68060 support" | 88 | bool "68060 support" |
61 | depends on MMU && !MMU_SUN3 | 89 | depends on MMU && !MMU_SUN3 |
90 | select GENERIC_ATOMIC64 | ||
91 | select CPU_HAS_ADDRESS_SPACES | ||
62 | help | 92 | help |
63 | If you anticipate running this kernel on a computer with a MC68060 | 93 | If you anticipate running this kernel on a computer with a MC68060 |
64 | processor, say Y. Otherwise, say N. | 94 | processor, say Y. Otherwise, say N. |
@@ -91,10 +121,13 @@ config M68360 | |||
91 | help | 121 | help |
92 | Motorola 68360 processor support. | 122 | Motorola 68360 processor support. |
93 | 123 | ||
124 | endif # M68KCLASSIC | ||
125 | |||
126 | if COLDFIRE | ||
127 | |||
94 | config M5206 | 128 | config M5206 |
95 | bool "MCF5206" | 129 | bool "MCF5206" |
96 | depends on !MMU | 130 | depends on !MMU |
97 | select COLDFIRE | ||
98 | select COLDFIRE_SW_A7 | 131 | select COLDFIRE_SW_A7 |
99 | select HAVE_MBAR | 132 | select HAVE_MBAR |
100 | help | 133 | help |
@@ -103,7 +136,6 @@ config M5206 | |||
103 | config M5206e | 136 | config M5206e |
104 | bool "MCF5206e" | 137 | bool "MCF5206e" |
105 | depends on !MMU | 138 | depends on !MMU |
106 | select COLDFIRE | ||
107 | select COLDFIRE_SW_A7 | 139 | select COLDFIRE_SW_A7 |
108 | select HAVE_MBAR | 140 | select HAVE_MBAR |
109 | help | 141 | help |
@@ -112,7 +144,6 @@ config M5206e | |||
112 | config M520x | 144 | config M520x |
113 | bool "MCF520x" | 145 | bool "MCF520x" |
114 | depends on !MMU | 146 | depends on !MMU |
115 | select COLDFIRE | ||
116 | select GENERIC_CLOCKEVENTS | 147 | select GENERIC_CLOCKEVENTS |
117 | select HAVE_CACHE_SPLIT | 148 | select HAVE_CACHE_SPLIT |
118 | help | 149 | help |
@@ -121,7 +152,6 @@ config M520x | |||
121 | config M523x | 152 | config M523x |
122 | bool "MCF523x" | 153 | bool "MCF523x" |
123 | depends on !MMU | 154 | depends on !MMU |
124 | select COLDFIRE | ||
125 | select GENERIC_CLOCKEVENTS | 155 | select GENERIC_CLOCKEVENTS |
126 | select HAVE_CACHE_SPLIT | 156 | select HAVE_CACHE_SPLIT |
127 | select HAVE_IPSBAR | 157 | select HAVE_IPSBAR |
@@ -131,7 +161,6 @@ config M523x | |||
131 | config M5249 | 161 | config M5249 |
132 | bool "MCF5249" | 162 | bool "MCF5249" |
133 | depends on !MMU | 163 | depends on !MMU |
134 | select COLDFIRE | ||
135 | select COLDFIRE_SW_A7 | 164 | select COLDFIRE_SW_A7 |
136 | select HAVE_MBAR | 165 | select HAVE_MBAR |
137 | help | 166 | help |
@@ -143,7 +172,6 @@ config M527x | |||
143 | config M5271 | 172 | config M5271 |
144 | bool "MCF5271" | 173 | bool "MCF5271" |
145 | depends on !MMU | 174 | depends on !MMU |
146 | select COLDFIRE | ||
147 | select M527x | 175 | select M527x |
148 | select HAVE_CACHE_SPLIT | 176 | select HAVE_CACHE_SPLIT |
149 | select HAVE_IPSBAR | 177 | select HAVE_IPSBAR |
@@ -154,7 +182,6 @@ config M5271 | |||
154 | config M5272 | 182 | config M5272 |
155 | bool "MCF5272" | 183 | bool "MCF5272" |
156 | depends on !MMU | 184 | depends on !MMU |
157 | select COLDFIRE | ||
158 | select COLDFIRE_SW_A7 | 185 | select COLDFIRE_SW_A7 |
159 | select HAVE_MBAR | 186 | select HAVE_MBAR |
160 | help | 187 | help |
@@ -163,7 +190,6 @@ config M5272 | |||
163 | config M5275 | 190 | config M5275 |
164 | bool "MCF5275" | 191 | bool "MCF5275" |
165 | depends on !MMU | 192 | depends on !MMU |
166 | select COLDFIRE | ||
167 | select M527x | 193 | select M527x |
168 | select HAVE_CACHE_SPLIT | 194 | select HAVE_CACHE_SPLIT |
169 | select HAVE_IPSBAR | 195 | select HAVE_IPSBAR |
@@ -174,7 +200,6 @@ config M5275 | |||
174 | config M528x | 200 | config M528x |
175 | bool "MCF528x" | 201 | bool "MCF528x" |
176 | depends on !MMU | 202 | depends on !MMU |
177 | select COLDFIRE | ||
178 | select GENERIC_CLOCKEVENTS | 203 | select GENERIC_CLOCKEVENTS |
179 | select HAVE_CACHE_SPLIT | 204 | select HAVE_CACHE_SPLIT |
180 | select HAVE_IPSBAR | 205 | select HAVE_IPSBAR |
@@ -184,7 +209,6 @@ config M528x | |||
184 | config M5307 | 209 | config M5307 |
185 | bool "MCF5307" | 210 | bool "MCF5307" |
186 | depends on !MMU | 211 | depends on !MMU |
187 | select COLDFIRE | ||
188 | select COLDFIRE_SW_A7 | 212 | select COLDFIRE_SW_A7 |
189 | select HAVE_CACHE_CB | 213 | select HAVE_CACHE_CB |
190 | select HAVE_MBAR | 214 | select HAVE_MBAR |
@@ -194,7 +218,6 @@ config M5307 | |||
194 | config M532x | 218 | config M532x |
195 | bool "MCF532x" | 219 | bool "MCF532x" |
196 | depends on !MMU | 220 | depends on !MMU |
197 | select COLDFIRE | ||
198 | select HAVE_CACHE_CB | 221 | select HAVE_CACHE_CB |
199 | help | 222 | help |
200 | Freescale (Motorola) ColdFire 532x processor support. | 223 | Freescale (Motorola) ColdFire 532x processor support. |
@@ -202,7 +225,6 @@ config M532x | |||
202 | config M5407 | 225 | config M5407 |
203 | bool "MCF5407" | 226 | bool "MCF5407" |
204 | depends on !MMU | 227 | depends on !MMU |
205 | select COLDFIRE | ||
206 | select COLDFIRE_SW_A7 | 228 | select COLDFIRE_SW_A7 |
207 | select HAVE_CACHE_CB | 229 | select HAVE_CACHE_CB |
208 | select HAVE_MBAR | 230 | select HAVE_MBAR |
@@ -214,9 +236,8 @@ config M54xx | |||
214 | 236 | ||
215 | config M547x | 237 | config M547x |
216 | bool "MCF547x" | 238 | bool "MCF547x" |
217 | depends on !MMU | ||
218 | select COLDFIRE | ||
219 | select M54xx | 239 | select M54xx |
240 | select MMU_COLDFIRE if MMU | ||
220 | select HAVE_CACHE_CB | 241 | select HAVE_CACHE_CB |
221 | select HAVE_MBAR | 242 | select HAVE_MBAR |
222 | help | 243 | help |
@@ -224,14 +245,15 @@ config M547x | |||
224 | 245 | ||
225 | config M548x | 246 | config M548x |
226 | bool "MCF548x" | 247 | bool "MCF548x" |
227 | depends on !MMU | 248 | select MMU_COLDFIRE if MMU |
228 | select COLDFIRE | ||
229 | select M54xx | 249 | select M54xx |
230 | select HAVE_CACHE_CB | 250 | select HAVE_CACHE_CB |
231 | select HAVE_MBAR | 251 | select HAVE_MBAR |
232 | help | 252 | help |
233 | Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support. | 253 | Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support. |
234 | 254 | ||
255 | endif # COLDFIRE | ||
256 | |||
235 | 257 | ||
236 | comment "Processor Specific Options" | 258 | comment "Processor Specific Options" |
237 | 259 | ||
diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug
index 2bdb1b01115c..af4fd5f8f8d5 100644
--- a/arch/m68k/Kconfig.debug
+++ b/arch/m68k/Kconfig.debug
@@ -2,6 +2,14 @@ menu "Kernel hacking" | |||
2 | 2 | ||
3 | source "lib/Kconfig.debug" | 3 | source "lib/Kconfig.debug" |
4 | 4 | ||
5 | config BOOTPARAM | ||
6 | bool 'Compiled-in Kernel Boot Parameter' | ||
7 | |||
8 | config BOOTPARAM_STRING | ||
9 | string 'Kernel Boot Parameter' | ||
10 | default 'console=ttyS0,19200' | ||
11 | depends on BOOTPARAM | ||
12 | |||
5 | if !MMU | 13 | if !MMU |
6 | 14 | ||
7 | config FULLDEBUG | 15 | config FULLDEBUG |
@@ -15,14 +23,6 @@ config HIGHPROFILE | |||
15 | help | 23 | help |
16 | Use a fast secondary clock to produce profiling information. | 24 | Use a fast secondary clock to produce profiling information. |
17 | 25 | ||
18 | config BOOTPARAM | ||
19 | bool 'Compiled-in Kernel Boot Parameter' | ||
20 | |||
21 | config BOOTPARAM_STRING | ||
22 | string 'Kernel Boot Parameter' | ||
23 | default 'console=ttyS0,19200' | ||
24 | depends on BOOTPARAM | ||
25 | |||
26 | config NO_KERNEL_MSG | 26 | config NO_KERNEL_MSG |
27 | bool "Suppress Kernel BUG Messages" | 27 | bool "Suppress Kernel BUG Messages" |
28 | help | 28 | help |
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index ef4a26aff780..7cdf6b010381 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -1,5 +1,7 @@ | |||
1 | comment "Machine Types" | 1 | comment "Machine Types" |
2 | 2 | ||
3 | if M68KCLASSIC | ||
4 | |||
3 | config AMIGA | 5 | config AMIGA |
4 | bool "Amiga support" | 6 | bool "Amiga support" |
5 | depends on MMU | 7 | depends on MMU |
@@ -130,6 +132,8 @@ config SUN3 | |||
130 | 132 | ||
131 | If you don't want to compile a kernel exclusively for a Sun 3, say N. | 133 | If you don't want to compile a kernel exclusively for a Sun 3, say N. |
132 | 134 | ||
135 | endif # M68KCLASSIC | ||
136 | |||
133 | config PILOT | 137 | config PILOT |
134 | bool | 138 | bool |
135 | 139 | ||
diff --git a/arch/m68k/include/asm/anchor.h b/arch/m68k/include/asm/anchor.h
deleted file mode 100644
index 871c0d5cfc3d..000000000000
--- a/arch/m68k/include/asm/anchor.h
+++ /dev/null
@@ -1,112 +0,0 @@ | |||
1 | /****************************************************************************/ | ||
2 | |||
3 | /* | ||
4 | * anchor.h -- Anchor CO-MEM Lite PCI host bridge part. | ||
5 | * | ||
6 | * (C) Copyright 2000, Moreton Bay (www.moreton.com.au) | ||
7 | */ | ||
8 | |||
9 | /****************************************************************************/ | ||
10 | #ifndef anchor_h | ||
11 | #define anchor_h | ||
12 | /****************************************************************************/ | ||
13 | |||
14 | /* | ||
15 | * Define basic addressing info. | ||
16 | */ | ||
17 | #if defined(CONFIG_M5407C3) | ||
18 | #define COMEM_BASE 0xFFFF0000 /* Base of CO-MEM address space */ | ||
19 | #define COMEM_IRQ 25 /* IRQ of anchor part */ | ||
20 | #else | ||
21 | #define COMEM_BASE 0x80000000 /* Base of CO-MEM address space */ | ||
22 | #define COMEM_IRQ 25 /* IRQ of anchor part */ | ||
23 | #endif | ||
24 | |||
25 | /****************************************************************************/ | ||
26 | |||
27 | /* | ||
28 | * 4-byte registers of CO-MEM, so adjust register addresses for | ||
29 | * easy access. Handy macro for word access too. | ||
30 | */ | ||
31 | #define LREG(a) ((a) >> 2) | ||
32 | #define WREG(a) ((a) >> 1) | ||
33 | |||
34 | |||
35 | /* | ||
36 | * Define base addresses within CO-MEM Lite register address space. | ||
37 | */ | ||
38 | #define COMEM_I2O 0x0000 /* I2O registers */ | ||
39 | #define COMEM_OPREGS 0x0400 /* Operation registers */ | ||
40 | #define COMEM_PCIBUS 0x2000 /* Direct access to PCI bus */ | ||
41 | #define COMEM_SHMEM 0x4000 /* Shared memory region */ | ||
42 | |||
43 | #define COMEM_SHMEMSIZE 0x4000 /* Size of shared memory */ | ||
44 | |||
45 | |||
46 | /* | ||
47 | * Define CO-MEM Registers. | ||
48 | */ | ||
49 | #define COMEM_I2OHISR 0x0030 /* I2O host interrupt status */ | ||
50 | #define COMEM_I2OHIMR 0x0034 /* I2O host interrupt mask */ | ||
51 | #define COMEM_I2OLISR 0x0038 /* I2O local interrupt status */ | ||
52 | #define COMEM_I2OLIMR 0x003c /* I2O local interrupt mask */ | ||
53 | #define COMEM_IBFPFIFO 0x0040 /* I2O inbound free/post FIFO */ | ||
54 | #define COMEM_OBPFFIFO 0x0044 /* I2O outbound post/free FIFO */ | ||
55 | #define COMEM_IBPFFIFO 0x0048 /* I2O inbound post/free FIFO */ | ||
56 | #define COMEM_OBFPFIFO 0x004c /* I2O outbound free/post FIFO */ | ||
57 | |||
58 | #define COMEM_DAHBASE 0x0460 /* Direct access base address */ | ||
59 | |||
60 | #define COMEM_NVCMD 0x04a0 /* I2C serial command */ | ||
61 | #define COMEM_NVREAD 0x04a4 /* I2C serial read */ | ||
62 | #define COMEM_NVSTAT 0x04a8 /* I2C status */ | ||
63 | |||
64 | #define COMEM_DMALBASE 0x04b0 /* DMA local base address */ | ||
65 | #define COMEM_DMAHBASE 0x04b4 /* DMA host base address */ | ||
66 | #define COMEM_DMASIZE 0x04b8 /* DMA size */ | ||
67 | #define COMEM_DMACTL 0x04bc /* DMA control */ | ||
68 | |||
69 | #define COMEM_HCTL 0x04e0 /* Host control */ | ||
70 | #define COMEM_HINT 0x04e4 /* Host interrupt control/status */ | ||
71 | #define COMEM_HLDATA 0x04e8 /* Host to local data mailbox */ | ||
72 | #define COMEM_LINT 0x04f4 /* Local interrupt contole status */ | ||
73 | #define COMEM_LHDATA 0x04f8 /* Local to host data mailbox */ | ||
74 | |||
75 | #define COMEM_LBUSCFG 0x04fc /* Local bus configuration */ | ||
76 | |||
77 | |||
78 | /* | ||
79 | * Commands and flags for use with Direct Access Register. | ||
80 | */ | ||
81 | #define COMEM_DA_IACK 0x00000000 /* Interrupt acknowledge (read) */ | ||
82 | #define COMEM_DA_SPCL 0x00000010 /* Special cycle (write) */ | ||
83 | #define COMEM_DA_MEMRD 0x00000004 /* Memory read cycle */ | ||
84 | #define COMEM_DA_MEMWR 0x00000004 /* Memory write cycle */ | ||
85 | #define COMEM_DA_IORD 0x00000002 /* I/O read cycle */ | ||
86 | #define COMEM_DA_IOWR 0x00000002 /* I/O write cycle */ | ||
87 | #define COMEM_DA_CFGRD 0x00000006 /* Configuration read cycle */ | ||
88 | #define COMEM_DA_CFGWR 0x00000006 /* Configuration write cycle */ | ||
89 | |||
90 | #define COMEM_DA_ADDR(a) ((a) & 0xffffe000) | ||
91 | |||
92 | #define COMEM_DA_OFFSET(a) ((a) & 0x00001fff) | ||
93 | |||
94 | |||
95 | /* | ||
96 | * The PCI bus will be limited in what slots will actually be used. | ||
97 | * Define valid device numbers for different boards. | ||
98 | */ | ||
99 | #if defined(CONFIG_M5407C3) | ||
100 | #define COMEM_MINDEV 14 /* Minimum valid DEVICE */ | ||
101 | #define COMEM_MAXDEV 14 /* Maximum valid DEVICE */ | ||
102 | #define COMEM_BRIDGEDEV 15 /* Slot bridge is in */ | ||
103 | #else | ||
104 | #define COMEM_MINDEV 0 /* Minimum valid DEVICE */ | ||
105 | #define COMEM_MAXDEV 3 /* Maximum valid DEVICE */ | ||
106 | #endif | ||
107 | |||
108 | #define COMEM_MAXPCI (COMEM_MAXDEV+1) /* Maximum PCI devices */ | ||
109 | |||
110 | |||
111 | /****************************************************************************/ | ||
112 | #endif /* anchor_h */ | ||
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 65c6be6c8180..4eba796c00d4 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -55,6 +55,16 @@ static inline int atomic_dec_and_test(atomic_t *v) | |||
55 | return c != 0; | 55 | return c != 0; |
56 | } | 56 | } |
57 | 57 | ||
58 | static inline int atomic_dec_and_test_lt(atomic_t *v) | ||
59 | { | ||
60 | char c; | ||
61 | __asm__ __volatile__( | ||
62 | "subql #1,%1; slt %0" | ||
63 | : "=d" (c), "=m" (*v) | ||
64 | : "m" (*v)); | ||
65 | return c != 0; | ||
66 | } | ||
67 | |||
58 | static inline int atomic_inc_and_test(atomic_t *v) | 68 | static inline int atomic_inc_and_test(atomic_t *v) |
59 | { | 69 | { |
60 | char c; | 70 | char c; |
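[Editor's note] The new atomic_dec_and_test_lt() above returns true when the decrement has taken the counter below zero. A minimal usage sketch (illustrative assumption only, not code from this series; the names are hypothetical) of a free-resource counter that backs out when it goes negative:

	#include <asm/atomic.h>

	static atomic_t nr_free_slots = ATOMIC_INIT(8);

	static int take_slot(void)
	{
		if (atomic_dec_and_test_lt(&nr_free_slots)) {
			/* counter went negative: nothing was free, undo the decrement */
			atomic_inc(&nr_free_slots);
			return -1;
		}
		return 0;	/* one slot successfully reserved */
	}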
diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
index 73de7c89d8e0..8104bd874649 100644
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -2,23 +2,89 @@ | |||
2 | #define _M68K_CACHEFLUSH_H | 2 | #define _M68K_CACHEFLUSH_H |
3 | 3 | ||
4 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
5 | #ifdef CONFIG_COLDFIRE | ||
6 | #include <asm/mcfsim.h> | ||
7 | #endif | ||
5 | 8 | ||
6 | /* cache code */ | 9 | /* cache code */ |
7 | #define FLUSH_I_AND_D (0x00000808) | 10 | #define FLUSH_I_AND_D (0x00000808) |
8 | #define FLUSH_I (0x00000008) | 11 | #define FLUSH_I (0x00000008) |
9 | 12 | ||
13 | #ifndef ICACHE_MAX_ADDR | ||
14 | #define ICACHE_MAX_ADDR 0 | ||
15 | #define ICACHE_SET_MASK 0 | ||
16 | #define DCACHE_MAX_ADDR 0 | ||
17 | #define DCACHE_SETMASK 0 | ||
18 | #endif | ||
19 | |||
20 | static inline void flush_cf_icache(unsigned long start, unsigned long end) | ||
21 | { | ||
22 | unsigned long set; | ||
23 | |||
24 | for (set = start; set <= end; set += (0x10 - 3)) { | ||
25 | __asm__ __volatile__ ( | ||
26 | "cpushl %%ic,(%0)\n\t" | ||
27 | "addq%.l #1,%0\n\t" | ||
28 | "cpushl %%ic,(%0)\n\t" | ||
29 | "addq%.l #1,%0\n\t" | ||
30 | "cpushl %%ic,(%0)\n\t" | ||
31 | "addq%.l #1,%0\n\t" | ||
32 | "cpushl %%ic,(%0)" | ||
33 | : "=a" (set) | ||
34 | : "a" (set)); | ||
35 | } | ||
36 | } | ||
37 | |||
38 | static inline void flush_cf_dcache(unsigned long start, unsigned long end) | ||
39 | { | ||
40 | unsigned long set; | ||
41 | |||
42 | for (set = start; set <= end; set += (0x10 - 3)) { | ||
43 | __asm__ __volatile__ ( | ||
44 | "cpushl %%dc,(%0)\n\t" | ||
45 | "addq%.l #1,%0\n\t" | ||
46 | "cpushl %%dc,(%0)\n\t" | ||
47 | "addq%.l #1,%0\n\t" | ||
48 | "cpushl %%dc,(%0)\n\t" | ||
49 | "addq%.l #1,%0\n\t" | ||
50 | "cpushl %%dc,(%0)" | ||
51 | : "=a" (set) | ||
52 | : "a" (set)); | ||
53 | } | ||
54 | } | ||
55 | |||
56 | static inline void flush_cf_bcache(unsigned long start, unsigned long end) | ||
57 | { | ||
58 | unsigned long set; | ||
59 | |||
60 | for (set = start; set <= end; set += (0x10 - 3)) { | ||
61 | __asm__ __volatile__ ( | ||
62 | "cpushl %%bc,(%0)\n\t" | ||
63 | "addq%.l #1,%0\n\t" | ||
64 | "cpushl %%bc,(%0)\n\t" | ||
65 | "addq%.l #1,%0\n\t" | ||
66 | "cpushl %%bc,(%0)\n\t" | ||
67 | "addq%.l #1,%0\n\t" | ||
68 | "cpushl %%bc,(%0)" | ||
69 | : "=a" (set) | ||
70 | : "a" (set)); | ||
71 | } | ||
72 | } | ||
73 | |||
10 | /* | 74 | /* |
11 | * Cache handling functions | 75 | * Cache handling functions |
12 | */ | 76 | */ |
13 | 77 | ||
14 | static inline void flush_icache(void) | 78 | static inline void flush_icache(void) |
15 | { | 79 | { |
16 | if (CPU_IS_040_OR_060) | 80 | if (CPU_IS_COLDFIRE) { |
81 | flush_cf_icache(0, ICACHE_MAX_ADDR); | ||
82 | } else if (CPU_IS_040_OR_060) { | ||
17 | asm volatile ( "nop\n" | 83 | asm volatile ( "nop\n" |
18 | " .chip 68040\n" | 84 | " .chip 68040\n" |
19 | " cpusha %bc\n" | 85 | " cpusha %bc\n" |
20 | " .chip 68k"); | 86 | " .chip 68k"); |
21 | else { | 87 | } else { |
22 | unsigned long tmp; | 88 | unsigned long tmp; |
23 | asm volatile ( "movec %%cacr,%0\n" | 89 | asm volatile ( "movec %%cacr,%0\n" |
24 | " or.w %1,%0\n" | 90 | " or.w %1,%0\n" |
@@ -51,12 +117,14 @@ extern void cache_push_v(unsigned long vaddr, int len); | |||
51 | process changes. */ | 117 | process changes. */ |
52 | #define __flush_cache_all() \ | 118 | #define __flush_cache_all() \ |
53 | ({ \ | 119 | ({ \ |
54 | if (CPU_IS_040_OR_060) \ | 120 | if (CPU_IS_COLDFIRE) { \ |
121 | flush_cf_dcache(0, DCACHE_MAX_ADDR); \ | ||
122 | } else if (CPU_IS_040_OR_060) { \ | ||
55 | __asm__ __volatile__("nop\n\t" \ | 123 | __asm__ __volatile__("nop\n\t" \ |
56 | ".chip 68040\n\t" \ | 124 | ".chip 68040\n\t" \ |
57 | "cpusha %dc\n\t" \ | 125 | "cpusha %dc\n\t" \ |
58 | ".chip 68k"); \ | 126 | ".chip 68k"); \ |
59 | else { \ | 127 | } else { \ |
60 | unsigned long _tmp; \ | 128 | unsigned long _tmp; \ |
61 | __asm__ __volatile__("movec %%cacr,%0\n\t" \ | 129 | __asm__ __volatile__("movec %%cacr,%0\n\t" \ |
62 | "orw %1,%0\n\t" \ | 130 | "orw %1,%0\n\t" \ |
@@ -112,7 +180,17 @@ static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vm | |||
112 | /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */ | 180 | /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */ |
113 | static inline void __flush_page_to_ram(void *vaddr) | 181 | static inline void __flush_page_to_ram(void *vaddr) |
114 | { | 182 | { |
115 | if (CPU_IS_040_OR_060) { | 183 | if (CPU_IS_COLDFIRE) { |
184 | unsigned long addr, start, end; | ||
185 | addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1); | ||
186 | start = addr & ICACHE_SET_MASK; | ||
187 | end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK; | ||
188 | if (start > end) { | ||
189 | flush_cf_bcache(0, end); | ||
190 | end = ICACHE_MAX_ADDR; | ||
191 | } | ||
192 | flush_cf_bcache(start, end); | ||
193 | } else if (CPU_IS_040_OR_060) { | ||
116 | __asm__ __volatile__("nop\n\t" | 194 | __asm__ __volatile__("nop\n\t" |
117 | ".chip 68040\n\t" | 195 | ".chip 68040\n\t" |
118 | "cpushp %%bc,(%0)\n\t" | 196 | "cpushp %%bc,(%0)\n\t" |
diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h
index ec514485c8b6..2f88d867c711 100644
--- a/arch/m68k/include/asm/checksum.h
+++ b/arch/m68k/include/asm/checksum.h
@@ -3,6 +3,10 @@ | |||
3 | 3 | ||
4 | #include <linux/in6.h> | 4 | #include <linux/in6.h> |
5 | 5 | ||
6 | #ifdef CONFIG_GENERIC_CSUM | ||
7 | #include <asm-generic/checksum.h> | ||
8 | #else | ||
9 | |||
6 | /* | 10 | /* |
7 | * computes the checksum of a memory block at buff, length len, | 11 | * computes the checksum of a memory block at buff, length len, |
8 | * and adds in "sum" (32-bit) | 12 | * and adds in "sum" (32-bit) |
@@ -34,30 +38,6 @@ extern __wsum csum_partial_copy_nocheck(const void *src, | |||
34 | void *dst, int len, | 38 | void *dst, int len, |
35 | __wsum sum); | 39 | __wsum sum); |
36 | 40 | ||
37 | |||
38 | #ifdef CONFIG_COLDFIRE | ||
39 | |||
40 | /* | ||
41 | * The ColdFire cores don't support all the 68k instructions used | ||
42 | * in the optimized checksum code below. So it reverts back to using | ||
43 | * more standard C coded checksums. The fast checksum code is | ||
44 | * significantly larger than the optimized version, so it is not | ||
45 | * inlined here. | ||
46 | */ | ||
47 | __sum16 ip_fast_csum(const void *iph, unsigned int ihl); | ||
48 | |||
49 | static inline __sum16 csum_fold(__wsum sum) | ||
50 | { | ||
51 | unsigned int tmp = (__force u32)sum; | ||
52 | |||
53 | tmp = (tmp & 0xffff) + (tmp >> 16); | ||
54 | tmp = (tmp & 0xffff) + (tmp >> 16); | ||
55 | |||
56 | return (__force __sum16)~tmp; | ||
57 | } | ||
58 | |||
59 | #else | ||
60 | |||
61 | /* | 41 | /* |
62 | * This is a version of ip_fast_csum() optimized for IP headers, | 42 | * This is a version of ip_fast_csum() optimized for IP headers, |
63 | * which always checksum on 4 octet boundaries. | 43 | * which always checksum on 4 octet boundaries. |
@@ -97,8 +77,6 @@ static inline __sum16 csum_fold(__wsum sum) | |||
97 | return (__force __sum16)~sum; | 77 | return (__force __sum16)~sum; |
98 | } | 78 | } |
99 | 79 | ||
100 | #endif /* CONFIG_COLDFIRE */ | ||
101 | |||
102 | static inline __wsum | 80 | static inline __wsum |
103 | csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, | 81 | csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, |
104 | unsigned short proto, __wsum sum) | 82 | unsigned short proto, __wsum sum) |
@@ -167,4 +145,5 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, | |||
167 | return csum_fold(sum); | 145 | return csum_fold(sum); |
168 | } | 146 | } |
169 | 147 | ||
148 | #endif /* CONFIG_GENERIC_CSUM */ | ||
170 | #endif /* _M68K_CHECKSUM_H */ | 149 | #endif /* _M68K_CHECKSUM_H */ |
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index edb66148a71d..444ea8a09e9f 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -1,7 +1,9 @@ | |||
1 | #ifndef _M68K_DIV64_H | 1 | #ifndef _M68K_DIV64_H |
2 | #define _M68K_DIV64_H | 2 | #define _M68K_DIV64_H |
3 | 3 | ||
4 | #ifdef CONFIG_MMU | 4 | #ifdef CONFIG_CPU_HAS_NO_MULDIV64 |
5 | #include <asm-generic/div64.h> | ||
6 | #else | ||
5 | 7 | ||
6 | #include <linux/types.h> | 8 | #include <linux/types.h> |
7 | 9 | ||
@@ -27,8 +29,6 @@ | |||
27 | __rem; \ | 29 | __rem; \ |
28 | }) | 30 | }) |
29 | 31 | ||
30 | #else | 32 | #endif /* CONFIG_CPU_HAS_NO_MULDIV64 */ |
31 | #include <asm-generic/div64.h> | ||
32 | #endif /* CONFIG_MMU */ | ||
33 | 33 | ||
34 | #endif /* _M68K_DIV64_H */ | 34 | #endif /* _M68K_DIV64_H */ |
diff --git a/arch/m68k/include/asm/elf.h b/arch/m68k/include/asm/elf.h
index 01c193d91412..e9b7cda59744 100644
--- a/arch/m68k/include/asm/elf.h
+++ b/arch/m68k/include/asm/elf.h
@@ -59,10 +59,10 @@ typedef struct user_m68kfp_struct elf_fpregset_t; | |||
59 | is actually used on ASV. */ | 59 | is actually used on ASV. */ |
60 | #define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0 | 60 | #define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0 |
61 | 61 | ||
62 | #ifndef CONFIG_SUN3 | 62 | #if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE) |
63 | #define ELF_EXEC_PAGESIZE 4096 | ||
64 | #else | ||
65 | #define ELF_EXEC_PAGESIZE 8192 | 63 | #define ELF_EXEC_PAGESIZE 8192 |
64 | #else | ||
65 | #define ELF_EXEC_PAGESIZE 4096 | ||
66 | #endif | 66 | #endif |
67 | 67 | ||
68 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | 68 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical |
diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
index c3c5a8643e15..622138dc7288 100644
--- a/arch/m68k/include/asm/entry.h
+++ b/arch/m68k/include/asm/entry.h
@@ -222,16 +222,24 @@ | |||
222 | * Non-MMU systems do not reserve %a2 in this way, and this definition is | 222 | * Non-MMU systems do not reserve %a2 in this way, and this definition is |
223 | * not used for them. | 223 | * not used for them. |
224 | */ | 224 | */ |
225 | #ifdef CONFIG_MMU | ||
226 | |||
225 | #define curptr a2 | 227 | #define curptr a2 |
226 | 228 | ||
227 | #define GET_CURRENT(tmp) get_current tmp | 229 | #define GET_CURRENT(tmp) get_current tmp |
228 | .macro get_current reg=%d0 | 230 | .macro get_current reg=%d0 |
229 | movel %sp,\reg | 231 | movel %sp,\reg |
230 | andw #-THREAD_SIZE,\reg | 232 | andl #-THREAD_SIZE,\reg |
231 | movel \reg,%curptr | 233 | movel \reg,%curptr |
232 | movel %curptr@,%curptr | 234 | movel %curptr@,%curptr |
233 | .endm | 235 | .endm |
234 | 236 | ||
237 | #else | ||
238 | |||
239 | #define GET_CURRENT(tmp) | ||
240 | |||
241 | #endif /* CONFIG_MMU */ | ||
242 | |||
235 | #else /* C source */ | 243 | #else /* C source */ |
236 | 244 | ||
237 | #define STR(X) STR1(X) | 245 | #define STR(X) STR1(X) |
diff --git a/arch/m68k/include/asm/fpu.h b/arch/m68k/include/asm/fpu.h
index ffb6b8cfc6d5..526db9da9e43 100644
--- a/arch/m68k/include/asm/fpu.h
+++ b/arch/m68k/include/asm/fpu.h
@@ -12,6 +12,8 @@ | |||
12 | #define FPSTATESIZE (96) | 12 | #define FPSTATESIZE (96) |
13 | #elif defined(CONFIG_M68KFPU_EMU) | 13 | #elif defined(CONFIG_M68KFPU_EMU) |
14 | #define FPSTATESIZE (28) | 14 | #define FPSTATESIZE (28) |
15 | #elif defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) | ||
16 | #define FPSTATESIZE (16) | ||
15 | #elif defined(CONFIG_M68060) | 17 | #elif defined(CONFIG_M68060) |
16 | #define FPSTATESIZE (12) | 18 | #define FPSTATESIZE (12) |
17 | #else | 19 | #else |
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index b2046839f4b2..00d0071de4c3 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -225,7 +225,8 @@ static inline void gpio_set_value(unsigned gpio, int value) | |||
225 | 225 | ||
226 | static inline int gpio_to_irq(unsigned gpio) | 226 | static inline int gpio_to_irq(unsigned gpio) |
227 | { | 227 | { |
228 | return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE : -EINVAL; | 228 | return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE |
229 | : __gpio_to_irq(gpio); | ||
229 | } | 230 | } |
230 | 231 | ||
231 | static inline int irq_to_gpio(unsigned irq) | 232 | static inline int irq_to_gpio(unsigned irq) |
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 6198df5ff245..0e89fa05de0e 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -25,7 +25,8 @@ | |||
25 | #define NR_IRQS 0 | 25 | #define NR_IRQS 0 |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #ifdef CONFIG_MMU | 28 | #if defined(CONFIG_M68020) || defined(CONFIG_M68030) || \ |
29 | defined(CONFIG_M68040) || defined(CONFIG_M68060) | ||
29 | 30 | ||
30 | /* | 31 | /* |
31 | * Interrupt source definitions | 32 | * Interrupt source definitions |
@@ -80,7 +81,7 @@ extern unsigned int irq_canonicalize(unsigned int irq); | |||
80 | 81 | ||
81 | #else | 82 | #else |
82 | #define irq_canonicalize(irq) (irq) | 83 | #define irq_canonicalize(irq) (irq) |
83 | #endif /* CONFIG_MMU */ | 84 | #endif /* !(CONFIG_M68020 || CONFIG_M68030 || CONFIG_M68040 || CONFIG_M68060) */ |
84 | 85 | ||
85 | asmlinkage void do_IRQ(int irq, struct pt_regs *regs); | 86 | asmlinkage void do_IRQ(int irq, struct pt_regs *regs); |
86 | extern atomic_t irq_err_count; | 87 | extern atomic_t irq_err_count; |
diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h
index 16a1835f9b2a..47906aafbf67 100644
--- a/arch/m68k/include/asm/m54xxacr.h
+++ b/arch/m68k/include/asm/m54xxacr.h
@@ -39,8 +39,12 @@ | |||
39 | #define ACR_CM_OFF_PRE 0x00000040 /* No cache, precise */ | 39 | #define ACR_CM_OFF_PRE 0x00000040 /* No cache, precise */ |
40 | #define ACR_CM_OFF_IMP 0x00000060 /* No cache, imprecise */ | 40 | #define ACR_CM_OFF_IMP 0x00000060 /* No cache, imprecise */ |
41 | #define ACR_CM 0x00000060 /* Cache mode mask */ | 41 | #define ACR_CM 0x00000060 /* Cache mode mask */ |
42 | #define ACR_SP 0x00000008 /* Supervisor protect */ | ||
42 | #define ACR_WPROTECT 0x00000004 /* Write protect */ | 43 | #define ACR_WPROTECT 0x00000004 /* Write protect */ |
43 | 44 | ||
45 | #define ACR_BA(x) ((x) & 0xff000000) | ||
46 | #define ACR_ADMSK(x) ((((x) - 1) & 0xff000000) >> 8) | ||
47 | |||
44 | #if defined(CONFIG_M5407) | 48 | #if defined(CONFIG_M5407) |
45 | 49 | ||
46 | #define ICACHE_SIZE 0x4000 /* instruction - 16k */ | 50 | #define ICACHE_SIZE 0x4000 /* instruction - 16k */ |
@@ -56,6 +60,11 @@ | |||
56 | #define CACHE_LINE_SIZE 0x0010 /* 16 bytes */ | 60 | #define CACHE_LINE_SIZE 0x0010 /* 16 bytes */ |
57 | #define CACHE_WAYS 4 /* 4 ways */ | 61 | #define CACHE_WAYS 4 /* 4 ways */ |
58 | 62 | ||
63 | #define ICACHE_SET_MASK ((ICACHE_SIZE / 64 - 1) << CACHE_WAYS) | ||
64 | #define DCACHE_SET_MASK ((DCACHE_SIZE / 64 - 1) << CACHE_WAYS) | ||
65 | #define ICACHE_MAX_ADDR ICACHE_SET_MASK | ||
66 | #define DCACHE_MAX_ADDR DCACHE_SET_MASK | ||
67 | |||
59 | /* | 68 | /* |
60 | * Version 4 cores have a true harvard style separate instruction | 69 | * Version 4 cores have a true harvard style separate instruction |
61 | * and data cache. Enable data and instruction caches, also enable write | 70 | * and data cache. Enable data and instruction caches, also enable write |
@@ -73,6 +82,27 @@ | |||
73 | #else | 82 | #else |
74 | #define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC+CACR_EUSP) | 83 | #define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC+CACR_EUSP) |
75 | #endif | 84 | #endif |
85 | #define CACHE_INIT (CACR_DCINVA+CACR_BCINVA+CACR_ICINVA) | ||
86 | |||
87 | #if defined(CONFIG_MMU) | ||
88 | /* | ||
89 | * If running with the MMU enabled then we need to map the internal | ||
90 | * register region as non-cacheable. And then we map all our RAM as | ||
91 | * cacheable and supervisor access only. | ||
92 | */ | ||
93 | #define ACR0_MODE (ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \ | ||
94 | ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP) | ||
95 | #define ACR1_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \ | ||
96 | ACR_ENABLE+ACR_SUPER+ACR_SP) | ||
97 | #define ACR2_MODE 0 | ||
98 | #define ACR3_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \ | ||
99 | ACR_ENABLE+ACR_SUPER+ACR_SP) | ||
100 | |||
101 | #else | ||
102 | |||
103 | /* | ||
104 | * For the non-MMU enabled case we map all of RAM as cacheable. | ||
105 | */ | ||
76 | #if defined(CONFIG_CACHE_COPYBACK) | 106 | #if defined(CONFIG_CACHE_COPYBACK) |
77 | #define DATA_CACHE_MODE (ACR_ENABLE+ACR_ANY+ACR_CM_CP) | 107 | #define DATA_CACHE_MODE (ACR_ENABLE+ACR_ANY+ACR_CM_CP) |
78 | #else | 108 | #else |
@@ -80,7 +110,6 @@ | |||
80 | #endif | 110 | #endif |
81 | #define INSN_CACHE_MODE (ACR_ENABLE+ACR_ANY) | 111 | #define INSN_CACHE_MODE (ACR_ENABLE+ACR_ANY) |
82 | 112 | ||
83 | #define CACHE_INIT (CACR_DCINVA+CACR_BCINVA+CACR_ICINVA) | ||
84 | #define CACHE_INVALIDATE (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA) | 113 | #define CACHE_INVALIDATE (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA) |
85 | #define CACHE_INVALIDATEI (CACHE_MODE+CACR_BCINVA+CACR_ICINVA) | 114 | #define CACHE_INVALIDATEI (CACHE_MODE+CACR_BCINVA+CACR_ICINVA) |
86 | #define CACHE_INVALIDATED (CACHE_MODE+CACR_DCINVA) | 115 | #define CACHE_INVALIDATED (CACHE_MODE+CACR_DCINVA) |
@@ -94,4 +123,5 @@ | |||
94 | #define CACHE_PUSH | 123 | #define CACHE_PUSH |
95 | #endif | 124 | #endif |
96 | 125 | ||
126 | #endif /* CONFIG_MMU */ | ||
97 | #endif /* m54xxacr_h */ | 127 | #endif /* m54xxacr_h */ |
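[Editor's note] A quick worked example of the new ACR_BA()/ACR_ADMSK() helpers above (the RAM base and size are hypothetical, chosen only for illustration): for a 64 MiB region the derived mask sets two bits, so two address bits drop out of the ACR compare and the single base address covers the whole region.

	#include <stdio.h>

	#define ACR_BA(x)	((x) & 0xff000000)
	#define ACR_ADMSK(x)	((((x) - 1) & 0xff000000) >> 8)

	int main(void)
	{
		/* Hypothetical board: 64 MiB of RAM based at 0x40000000. */
		unsigned long base = 0x40000000UL, size = 0x04000000UL;

		/* Each set bit in the mask field (ACR bits 23-16) excludes the
		 * corresponding base address bit (31-24) from the compare,
		 * doubling the region the ACR matches. */
		printf("ACR base field: 0x%08lx\n", ACR_BA(base));	/* 0x40000000 */
		printf("ACR mask field: 0x%08lx\n", ACR_ADMSK(size));	/* 0x00030000 */
		return 0;
	}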
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
new file mode 100644
index 000000000000..313f3dd23cdc
--- /dev/null
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -0,0 +1,102 @@ | |||
1 | #ifndef M68K_MCF_PGALLOC_H | ||
2 | #define M68K_MCF_PGALLOC_H | ||
3 | |||
4 | #include <asm/tlb.h> | ||
5 | #include <asm/tlbflush.h> | ||
6 | |||
7 | extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | ||
8 | { | ||
9 | free_page((unsigned long) pte); | ||
10 | } | ||
11 | |||
12 | extern const char bad_pmd_string[]; | ||
13 | |||
14 | extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | ||
15 | unsigned long address) | ||
16 | { | ||
17 | unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT); | ||
18 | |||
19 | if (!page) | ||
20 | return NULL; | ||
21 | |||
22 | memset((void *)page, 0, PAGE_SIZE); | ||
23 | return (pte_t *) (page); | ||
24 | } | ||
25 | |||
26 | extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address) | ||
27 | { | ||
28 | return (pmd_t *) pgd; | ||
29 | } | ||
30 | |||
31 | #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) | ||
32 | #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) | ||
33 | |||
34 | #define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr) | ||
35 | |||
36 | #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \ | ||
37 | (unsigned long)(page_address(page))) | ||
38 | |||
39 | #define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte)) | ||
40 | |||
41 | #define pmd_pgtable(pmd) pmd_page(pmd) | ||
42 | |||
43 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, | ||
44 | unsigned long address) | ||
45 | { | ||
46 | __free_page(page); | ||
47 | } | ||
48 | |||
49 | #define __pmd_free_tlb(tlb, pmd, address) do { } while (0) | ||
50 | |||
51 | static inline struct page *pte_alloc_one(struct mm_struct *mm, | ||
52 | unsigned long address) | ||
53 | { | ||
54 | struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0); | ||
55 | pte_t *pte; | ||
56 | |||
57 | if (!page) | ||
58 | return NULL; | ||
59 | |||
60 | pte = kmap(page); | ||
61 | if (pte) { | ||
62 | clear_page(pte); | ||
63 | __flush_page_to_ram(pte); | ||
64 | flush_tlb_kernel_page(pte); | ||
65 | nocache_page(pte); | ||
66 | } | ||
67 | kunmap(page); | ||
68 | |||
69 | return page; | ||
70 | } | ||
71 | |||
72 | extern inline void pte_free(struct mm_struct *mm, struct page *page) | ||
73 | { | ||
74 | __free_page(page); | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * In our implementation, each pgd entry contains 1 pmd that is never allocated | ||
79 | * or freed. pgd_present is always 1, so this should never be called. -NL | ||
80 | */ | ||
81 | #define pmd_free(mm, pmd) BUG() | ||
82 | |||
83 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
84 | { | ||
85 | free_page((unsigned long) pgd); | ||
86 | } | ||
87 | |||
88 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | ||
89 | { | ||
90 | pgd_t *new_pgd; | ||
91 | |||
92 | new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN); | ||
93 | if (!new_pgd) | ||
94 | return NULL; | ||
95 | memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE); | ||
96 | memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT); | ||
97 | return new_pgd; | ||
98 | } | ||
99 | |||
100 | #define pgd_populate(mm, pmd, pte) BUG() | ||
101 | |||
102 | #endif /* M68K_MCF_PGALLOC_H */ | ||
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
new file mode 100644
index 000000000000..756bde4fb4f8
--- /dev/null
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -0,0 +1,425 @@ | |||
1 | #ifndef _MCF_PGTABLE_H | ||
2 | #define _MCF_PGTABLE_H | ||
3 | |||
4 | #include <asm/mcfmmu.h> | ||
5 | #include <asm/page.h> | ||
6 | |||
7 | /* | ||
8 | * MMUDR bits, in proper place. We write these directly into the MMUDR | ||
9 | * after masking from the pte. | ||
10 | */ | ||
11 | #define CF_PAGE_LOCKED MMUDR_LK /* 0x00000002 */ | ||
12 | #define CF_PAGE_EXEC MMUDR_X /* 0x00000004 */ | ||
13 | #define CF_PAGE_WRITABLE MMUDR_W /* 0x00000008 */ | ||
14 | #define CF_PAGE_READABLE MMUDR_R /* 0x00000010 */ | ||
15 | #define CF_PAGE_SYSTEM MMUDR_SP /* 0x00000020 */ | ||
16 | #define CF_PAGE_COPYBACK MMUDR_CM_CCB /* 0x00000040 */ | ||
17 | #define CF_PAGE_NOCACHE MMUDR_CM_NCP /* 0x00000080 */ | ||
18 | |||
19 | #define CF_CACHEMASK (~MMUDR_CM_CCB) | ||
20 | #define CF_PAGE_MMUDR_MASK 0x000000fe | ||
21 | |||
22 | #define _PAGE_NOCACHE030 CF_PAGE_NOCACHE | ||
23 | |||
24 | /* | ||
25 | * MMUTR bits, need shifting down. | ||
26 | */ | ||
27 | #define CF_PAGE_MMUTR_MASK 0x00000c00 | ||
28 | #define CF_PAGE_MMUTR_SHIFT 10 | ||
29 | |||
30 | #define CF_PAGE_VALID (MMUTR_V << CF_PAGE_MMUTR_SHIFT) | ||
31 | #define CF_PAGE_SHARED (MMUTR_SG << CF_PAGE_MMUTR_SHIFT) | ||
32 | |||
33 | /* | ||
34 | * Fake bits, not implemented in CF, will get masked out before | ||
35 | * hitting hardware. | ||
36 | */ | ||
37 | #define CF_PAGE_DIRTY 0x00000001 | ||
38 | #define CF_PAGE_FILE 0x00000200 | ||
39 | #define CF_PAGE_ACCESSED 0x00001000 | ||
40 | |||
41 | #define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */ | ||
42 | #define _PAGE_NOCACHE_S 0x040 /* 68040 no-cache mode, serialized */ | ||
43 | #define _PAGE_NOCACHE 0x060 /* 68040 cache mode, non-serialized */ | ||
44 | #define _PAGE_CACHE040W 0x000 /* 68040 cache mode, cachable, write-through */ | ||
45 | #define _DESCTYPE_MASK 0x003 | ||
46 | #define _CACHEMASK040 (~0x060) | ||
47 | #define _PAGE_GLOBAL040 0x400 /* 68040 global bit, used for kva descs */ | ||
48 | |||
49 | /* | ||
50 | * Externally used page protection values. | ||
51 | */ | ||
52 | #define _PAGE_PRESENT (CF_PAGE_VALID) | ||
53 | #define _PAGE_ACCESSED (CF_PAGE_ACCESSED) | ||
54 | #define _PAGE_DIRTY (CF_PAGE_DIRTY) | ||
55 | #define _PAGE_READWRITE (CF_PAGE_READABLE \ | ||
56 | | CF_PAGE_WRITABLE \ | ||
57 | | CF_PAGE_SYSTEM \ | ||
58 | | CF_PAGE_SHARED) | ||
59 | |||
60 | /* | ||
61 | * Compound page protection values. | ||
62 | */ | ||
63 | #define PAGE_NONE __pgprot(CF_PAGE_VALID \ | ||
64 | | CF_PAGE_ACCESSED) | ||
65 | |||
66 | #define PAGE_SHARED __pgprot(CF_PAGE_VALID \ | ||
67 | | CF_PAGE_ACCESSED \ | ||
68 | | CF_PAGE_SHARED) | ||
69 | |||
70 | #define PAGE_INIT __pgprot(CF_PAGE_VALID \ | ||
71 | | CF_PAGE_READABLE \ | ||
72 | | CF_PAGE_WRITABLE \ | ||
73 | | CF_PAGE_EXEC \ | ||
74 | | CF_PAGE_SYSTEM) | ||
75 | |||
76 | #define PAGE_KERNEL __pgprot(CF_PAGE_VALID \ | ||
77 | | CF_PAGE_ACCESSED \ | ||
78 | | CF_PAGE_READABLE \ | ||
79 | | CF_PAGE_WRITABLE \ | ||
80 | | CF_PAGE_EXEC \ | ||
81 | | CF_PAGE_SYSTEM) | ||
82 | |||
83 | #define PAGE_COPY __pgprot(CF_PAGE_VALID \ | ||
84 | | CF_PAGE_ACCESSED \ | ||
85 | | CF_PAGE_READABLE \ | ||
86 | | CF_PAGE_DIRTY) | ||
87 | |||
88 | /* | ||
89 | * Page protections for initialising protection_map. See mm/mmap.c | ||
90 | * for use. In general, the bit positions are xwr, and P-items are | ||
91 | * private, the S-items are shared. | ||
92 | */ | ||
93 | #define __P000 PAGE_NONE | ||
94 | #define __P001 __pgprot(CF_PAGE_VALID \ | ||
95 | | CF_PAGE_ACCESSED \ | ||
96 | | CF_PAGE_READABLE) | ||
97 | #define __P010 __pgprot(CF_PAGE_VALID \ | ||
98 | | CF_PAGE_ACCESSED \ | ||
99 | | CF_PAGE_WRITABLE) | ||
100 | #define __P011 __pgprot(CF_PAGE_VALID \ | ||
101 | | CF_PAGE_ACCESSED \ | ||
102 | | CF_PAGE_READABLE \ | ||
103 | | CF_PAGE_WRITABLE) | ||
104 | #define __P100 __pgprot(CF_PAGE_VALID \ | ||
105 | | CF_PAGE_ACCESSED \ | ||
106 | | CF_PAGE_EXEC) | ||
107 | #define __P101 __pgprot(CF_PAGE_VALID \ | ||
108 | | CF_PAGE_ACCESSED \ | ||
109 | | CF_PAGE_READABLE \ | ||
110 | | CF_PAGE_EXEC) | ||
111 | #define __P110 __pgprot(CF_PAGE_VALID \ | ||
112 | | CF_PAGE_ACCESSED \ | ||
113 | | CF_PAGE_WRITABLE \ | ||
114 | | CF_PAGE_EXEC) | ||
115 | #define __P111 __pgprot(CF_PAGE_VALID \ | ||
116 | | CF_PAGE_ACCESSED \ | ||
117 | | CF_PAGE_READABLE \ | ||
118 | | CF_PAGE_WRITABLE \ | ||
119 | | CF_PAGE_EXEC) | ||
120 | |||
121 | #define __S000 PAGE_NONE | ||
122 | #define __S001 __pgprot(CF_PAGE_VALID \ | ||
123 | | CF_PAGE_ACCESSED \ | ||
124 | | CF_PAGE_READABLE) | ||
125 | #define __S010 PAGE_SHARED | ||
126 | #define __S011 __pgprot(CF_PAGE_VALID \ | ||
127 | | CF_PAGE_ACCESSED \ | ||
128 | | CF_PAGE_SHARED \ | ||
129 | | CF_PAGE_READABLE) | ||
130 | #define __S100 __pgprot(CF_PAGE_VALID \ | ||
131 | | CF_PAGE_ACCESSED \ | ||
132 | | CF_PAGE_EXEC) | ||
133 | #define __S101 __pgprot(CF_PAGE_VALID \ | ||
134 | | CF_PAGE_ACCESSED \ | ||
135 | | CF_PAGE_READABLE \ | ||
136 | | CF_PAGE_EXEC) | ||
137 | #define __S110 __pgprot(CF_PAGE_VALID \ | ||
138 | | CF_PAGE_ACCESSED \ | ||
139 | | CF_PAGE_SHARED \ | ||
140 | | CF_PAGE_EXEC) | ||
141 | #define __S111 __pgprot(CF_PAGE_VALID \ | ||
142 | | CF_PAGE_ACCESSED \ | ||
143 | | CF_PAGE_SHARED \ | ||
144 | | CF_PAGE_READABLE \ | ||
145 | | CF_PAGE_EXEC) | ||
146 | |||
147 | #define PTE_MASK PAGE_MASK | ||
148 | #define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY) | ||
149 | |||
150 | #ifndef __ASSEMBLY__ | ||
151 | |||
152 | /* | ||
153 | * Conversion functions: convert a page and protection to a page entry, | ||
154 | * and a page entry and page directory to the page they refer to. | ||
155 | */ | ||
156 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | ||
157 | |||
158 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
159 | { | ||
160 | pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot); | ||
161 | return pte; | ||
162 | } | ||
163 | |||
164 | #define pmd_set(pmdp, ptep) do {} while (0) | ||
165 | |||
166 | static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp) | ||
167 | { | ||
168 | pgd_val(*pgdp) = virt_to_phys(pmdp); | ||
169 | } | ||
170 | |||
171 | #define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK)) | ||
172 | #define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd))) | ||
173 | |||
174 | static inline int pte_none(pte_t pte) | ||
175 | { | ||
176 | return !pte_val(pte); | ||
177 | } | ||
178 | |||
179 | static inline int pte_present(pte_t pte) | ||
180 | { | ||
181 | return pte_val(pte) & CF_PAGE_VALID; | ||
182 | } | ||
183 | |||
184 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | ||
185 | pte_t *ptep) | ||
186 | { | ||
187 | pte_val(*ptep) = 0; | ||
188 | } | ||
189 | |||
190 | #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) | ||
191 | #define pte_page(pte) virt_to_page(__pte_page(pte)) | ||
192 | |||
193 | static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); } | ||
194 | #define pmd_none(pmd) pmd_none2(&(pmd)) | ||
195 | static inline int pmd_bad2(pmd_t *pmd) { return 0; } | ||
196 | #define pmd_bad(pmd) pmd_bad2(&(pmd)) | ||
197 | #define pmd_present(pmd) (!pmd_none2(&(pmd))) | ||
198 | static inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; } | ||
199 | |||
200 | static inline int pgd_none(pgd_t pgd) { return 0; } | ||
201 | static inline int pgd_bad(pgd_t pgd) { return 0; } | ||
202 | static inline int pgd_present(pgd_t pgd) { return 1; } | ||
203 | static inline void pgd_clear(pgd_t *pgdp) {} | ||
204 | |||
205 | #define pte_ERROR(e) \ | ||
206 | printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \ | ||
207 | __FILE__, __LINE__, pte_val(e)) | ||
208 | #define pmd_ERROR(e) \ | ||
209 | printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \ | ||
210 | __FILE__, __LINE__, pmd_val(e)) | ||
211 | #define pgd_ERROR(e) \ | ||
212 | printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \ | ||
213 | __FILE__, __LINE__, pgd_val(e)) | ||
214 | |||
215 | /* | ||
216 | * The following only work if pte_present() is true. | ||
217 | * Undefined behaviour if not... | ||
218 | * [we have the full set here even if they don't change from m68k] | ||
219 | */ | ||
220 | static inline int pte_read(pte_t pte) | ||
221 | { | ||
222 | return pte_val(pte) & CF_PAGE_READABLE; | ||
223 | } | ||
224 | |||
225 | static inline int pte_write(pte_t pte) | ||
226 | { | ||
227 | return pte_val(pte) & CF_PAGE_WRITABLE; | ||
228 | } | ||
229 | |||
230 | static inline int pte_exec(pte_t pte) | ||
231 | { | ||
232 | return pte_val(pte) & CF_PAGE_EXEC; | ||
233 | } | ||
234 | |||
235 | static inline int pte_dirty(pte_t pte) | ||
236 | { | ||
237 | return pte_val(pte) & CF_PAGE_DIRTY; | ||
238 | } | ||
239 | |||
240 | static inline int pte_young(pte_t pte) | ||
241 | { | ||
242 | return pte_val(pte) & CF_PAGE_ACCESSED; | ||
243 | } | ||
244 | |||
245 | static inline int pte_file(pte_t pte) | ||
246 | { | ||
247 | return pte_val(pte) & CF_PAGE_FILE; | ||
248 | } | ||
249 | |||
250 | static inline int pte_special(pte_t pte) | ||
251 | { | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | static inline pte_t pte_wrprotect(pte_t pte) | ||
256 | { | ||
257 | pte_val(pte) &= ~CF_PAGE_WRITABLE; | ||
258 | return pte; | ||
259 | } | ||
260 | |||
261 | static inline pte_t pte_rdprotect(pte_t pte) | ||
262 | { | ||
263 | pte_val(pte) &= ~CF_PAGE_READABLE; | ||
264 | return pte; | ||
265 | } | ||
266 | |||
267 | static inline pte_t pte_exprotect(pte_t pte) | ||
268 | { | ||
269 | pte_val(pte) &= ~CF_PAGE_EXEC; | ||
270 | return pte; | ||
271 | } | ||
272 | |||
273 | static inline pte_t pte_mkclean(pte_t pte) | ||
274 | { | ||
275 | pte_val(pte) &= ~CF_PAGE_DIRTY; | ||
276 | return pte; | ||
277 | } | ||
278 | |||
279 | static inline pte_t pte_mkold(pte_t pte) | ||
280 | { | ||
281 | pte_val(pte) &= ~CF_PAGE_ACCESSED; | ||
282 | return pte; | ||
283 | } | ||
284 | |||
285 | static inline pte_t pte_mkwrite(pte_t pte) | ||
286 | { | ||
287 | pte_val(pte) |= CF_PAGE_WRITABLE; | ||
288 | return pte; | ||
289 | } | ||
290 | |||
291 | static inline pte_t pte_mkread(pte_t pte) | ||
292 | { | ||
293 | pte_val(pte) |= CF_PAGE_READABLE; | ||
294 | return pte; | ||
295 | } | ||
296 | |||
297 | static inline pte_t pte_mkexec(pte_t pte) | ||
298 | { | ||
299 | pte_val(pte) |= CF_PAGE_EXEC; | ||
300 | return pte; | ||
301 | } | ||
302 | |||
303 | static inline pte_t pte_mkdirty(pte_t pte) | ||
304 | { | ||
305 | pte_val(pte) |= CF_PAGE_DIRTY; | ||
306 | return pte; | ||
307 | } | ||
308 | |||
309 | static inline pte_t pte_mkyoung(pte_t pte) | ||
310 | { | ||
311 | pte_val(pte) |= CF_PAGE_ACCESSED; | ||
312 | return pte; | ||
313 | } | ||
314 | |||
315 | static inline pte_t pte_mknocache(pte_t pte) | ||
316 | { | ||
317 | pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40); | ||
318 | return pte; | ||
319 | } | ||
320 | |||
321 | static inline pte_t pte_mkcache(pte_t pte) | ||
322 | { | ||
323 | pte_val(pte) &= ~CF_PAGE_NOCACHE; | ||
324 | return pte; | ||
325 | } | ||
326 | |||
327 | static inline pte_t pte_mkspecial(pte_t pte) | ||
328 | { | ||
329 | return pte; | ||
330 | } | ||
331 | |||
332 | #define swapper_pg_dir kernel_pg_dir | ||
333 | extern pgd_t kernel_pg_dir[PTRS_PER_PGD]; | ||
334 | |||
335 | /* | ||
336 | * Find an entry in a pagetable directory. | ||
337 | */ | ||
338 | #define pgd_index(address) ((address) >> PGDIR_SHIFT) | ||
339 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | ||
340 | |||
341 | /* | ||
342 | * Find an entry in a kernel pagetable directory. | ||
343 | */ | ||
344 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | ||
345 | |||
346 | /* | ||
347 | * Find an entry in the second-level pagetable. | ||
348 | */ | ||
349 | static inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address) | ||
350 | { | ||
351 | return (pmd_t *) pgd; | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * Find an entry in the third-level pagetable. | ||
356 | */ | ||
357 | #define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
358 | #define pte_offset_kernel(dir, address) \ | ||
359 | ((pte_t *) __pmd_page(*(dir)) + __pte_offset(address)) | ||
360 | |||
361 | /* | ||
362 | * Disable caching for page at given kernel virtual address. | ||
363 | */ | ||
364 | static inline void nocache_page(void *vaddr) | ||
365 | { | ||
366 | pgd_t *dir; | ||
367 | pmd_t *pmdp; | ||
368 | pte_t *ptep; | ||
369 | unsigned long addr = (unsigned long) vaddr; | ||
370 | |||
371 | dir = pgd_offset_k(addr); | ||
372 | pmdp = pmd_offset(dir, addr); | ||
373 | ptep = pte_offset_kernel(pmdp, addr); | ||
374 | *ptep = pte_mknocache(*ptep); | ||
375 | } | ||
376 | |||
377 | /* | ||
378 | * Enable caching for page at given kernel virtual address. | ||
379 | */ | ||
380 | static inline void cache_page(void *vaddr) | ||
381 | { | ||
382 | pgd_t *dir; | ||
383 | pmd_t *pmdp; | ||
384 | pte_t *ptep; | ||
385 | unsigned long addr = (unsigned long) vaddr; | ||
386 | |||
387 | dir = pgd_offset_k(addr); | ||
388 | pmdp = pmd_offset(dir, addr); | ||
389 | ptep = pte_offset_kernel(pmdp, addr); | ||
390 | *ptep = pte_mkcache(*ptep); | ||
391 | } | ||
392 | |||
393 | #define PTE_FILE_MAX_BITS 21 | ||
394 | #define PTE_FILE_SHIFT 11 | ||
395 | |||
396 | static inline unsigned long pte_to_pgoff(pte_t pte) | ||
397 | { | ||
398 | return pte_val(pte) >> PTE_FILE_SHIFT; | ||
399 | } | ||
400 | |||
401 | static inline pte_t pgoff_to_pte(unsigned pgoff) | ||
402 | { | ||
403 | return __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE); | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) | ||
408 | */ | ||
409 | #define __swp_type(x) ((x).val & 0xFF) | ||
410 | #define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT) | ||
411 | #define __swp_entry(typ, off) ((swp_entry_t) { (typ) | \ | ||
412 | (off << PTE_FILE_SHIFT) }) | ||
413 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | ||
414 | #define __swp_entry_to_pte(x) (__pte((x).val)) | ||
415 | |||
416 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | ||
417 | |||
418 | #define pte_offset_map(pmdp, addr) ((pte_t *)__pmd_page(*pmdp) + \ | ||
419 | __pte_offset(addr)) | ||
420 | #define pte_unmap(pte) ((void) 0) | ||
421 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
422 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) | ||
423 | |||
424 | #endif /* !__ASSEMBLY__ */ | ||
425 | #endif /* _MCF_PGTABLE_H */ | ||
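A quick sketch of how the encodings above fit together — illustrative only, not part of this patch, and it assumes a protection value such as PAGE_KERNEL is available from elsewhere in the header:

	/* Hypothetical example based on the macros shown above. */
	static inline void mcf_pte_encoding_example(unsigned long pfn)
	{
		pte_t pte = pfn_pte(pfn, PAGE_KERNEL);	/* (pfn << PAGE_SHIFT) | prot bits */
		unsigned long frame = pte_pfn(pte);	/* recovers pfn */

		swp_entry_t entry = __swp_entry(3, 100);	/* type in bits 0-7, offset above bit 11 */
		unsigned int type = __swp_type(entry);		/* == 3 */
		unsigned long off = __swp_offset(entry);	/* == 100 */
	}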
diff --git a/arch/m68k/include/asm/mcfmmu.h b/arch/m68k/include/asm/mcfmmu.h new file mode 100644 index 000000000000..26cc3d5a63f8 --- /dev/null +++ b/arch/m68k/include/asm/mcfmmu.h | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * mcfmmu.h -- definitions for the ColdFire v4e MMU | ||
3 | * | ||
4 | * (C) Copyright 2011, Greg Ungerer <gerg@uclinux.org> | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file COPYING in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #ifndef MCFMMU_H | ||
12 | #define MCFMMU_H | ||
13 | |||
14 | /* | ||
15 | * The MMU support registers are mapped into the address space using | ||
16 | * the processor MMUBASE register. We use a fixed address for the mapping; | ||
17 | * there doesn't seem to be any need to make this configurable yet. | ||
18 | */ | ||
19 | #define MMUBASE 0xfe000000 | ||
20 | |||
21 | /* | ||
22 | * The support registers of the MMU. Names are the same as those | ||
23 | * used in the Freescale v4e documentation. | ||
24 | */ | ||
25 | #define MMUCR (MMUBASE + 0x00) /* Control register */ | ||
26 | #define MMUOR (MMUBASE + 0x04) /* Operation register */ | ||
27 | #define MMUSR (MMUBASE + 0x08) /* Status register */ | ||
28 | #define MMUAR (MMUBASE + 0x10) /* TLB Address register */ | ||
29 | #define MMUTR (MMUBASE + 0x14) /* TLB Tag register */ | ||
30 | #define MMUDR (MMUBASE + 0x18) /* TLB Data register */ | ||
31 | |||
32 | /* | ||
33 | * MMU Control register bit flags | ||
34 | */ | ||
35 | #define MMUCR_EN 0x00000001 /* Virtual mode enable */ | ||
36 | #define MMUCR_ASM 0x00000002 /* Address space mode */ | ||
37 | |||
38 | /* | ||
39 | * MMU Operation register. | ||
40 | */ | ||
41 | #define MMUOR_UAA 0x00000001 /* Update allocation address */ | ||
42 | #define MMUOR_ACC 0x00000002 /* TLB access */ | ||
43 | #define MMUOR_RD 0x00000004 /* TLB access read */ | ||
44 | #define MMUOR_WR 0x00000000 /* TLB access write */ | ||
45 | #define MMUOR_ADR 0x00000008 /* TLB address select */ | ||
46 | #define MMUOR_ITLB 0x00000010 /* ITLB operation */ | ||
47 | #define MMUOR_CAS 0x00000020 /* Clear non-locked ASID TLBs */ | ||
48 | #define MMUOR_CNL 0x00000040 /* Clear non-locked TLBs */ | ||
49 | #define MMUOR_CA 0x00000080 /* Clear all TLBs */ | ||
50 | #define MMUOR_STLB 0x00000100 /* Search TLBs */ | ||
51 | #define MMUOR_AAN 16 /* TLB allocation address */ | ||
52 | #define MMUOR_AAMASK 0xffff0000 /* AA mask */ | ||
53 | |||
54 | /* | ||
55 | * MMU Status register. | ||
56 | */ | ||
57 | #define MMUSR_HIT 0x00000002 /* Search TLB hit */ | ||
58 | #define MMUSR_WF 0x00000008 /* Write access fault */ | ||
59 | #define MMUSR_RF 0x00000010 /* Read access fault */ | ||
60 | #define MMUSR_SPF 0x00000020 /* Supervisor protect fault */ | ||
61 | |||
62 | /* | ||
63 | * MMU Read/Write Tag register. | ||
64 | */ | ||
65 | #define MMUTR_V 0x00000001 /* Valid */ | ||
66 | #define MMUTR_SG 0x00000002 /* Shared global */ | ||
67 | #define MMUTR_IDN 2 /* Address Space ID */ | ||
68 | #define MMUTR_IDMASK 0x000003fc /* ASID mask */ | ||
69 | #define MMUTR_VAN 10 /* Virtual Address */ | ||
70 | #define MMUTR_VAMASK 0xfffffc00 /* VA mask */ | ||
71 | |||
72 | /* | ||
73 | * MMU Read/Write Data register. | ||
74 | */ | ||
75 | #define MMUDR_LK 0x00000002 /* Lock entry */ | ||
76 | #define MMUDR_X 0x00000004 /* Execute access enable */ | ||
77 | #define MMUDR_W 0x00000008 /* Write access enable */ | ||
78 | #define MMUDR_R 0x00000010 /* Read access enable */ | ||
79 | #define MMUDR_SP 0x00000020 /* Supervisor access enable */ | ||
80 | #define MMUDR_CM_CWT 0x00000000 /* Cacheable write-through */ | ||
81 | #define MMUDR_CM_CCB 0x00000040 /* Cacheable copy-back */ | ||
82 | #define MMUDR_CM_NCP 0x00000080 /* Non-cacheable precise */ | ||
83 | #define MMUDR_CM_NCI 0x000000c0 /* Non-cacheable imprecise */ | ||
84 | #define MMUDR_SZ_1MB 0x00000000 /* 1MB page size */ | ||
85 | #define MMUDR_SZ_4KB 0x00000100 /* 4kB page size */ | ||
86 | #define MMUDR_SZ_8KB 0x00000200 /* 8kB page size */ | ||
87 | #define MMUDR_SZ_1KB 0x00000300 /* 1kB page size */ | ||
88 | #define MMUDR_PAN 10 /* Physical address */ | ||
89 | #define MMUDR_PAMASK 0xfffffc00 /* PA mask */ | ||
90 | |||
91 | #ifndef __ASSEMBLY__ | ||
92 | |||
93 | /* | ||
94 | * Simple access functions for the MMU registers. Nothing fancy is | ||
95 | * currently required, just simple 32-bit accesses. | ||
96 | */ | ||
97 | static inline u32 mmu_read(u32 a) | ||
98 | { | ||
99 | return *((volatile u32 *) a); | ||
100 | } | ||
101 | |||
102 | static inline void mmu_write(u32 a, u32 v) | ||
103 | { | ||
104 | *((volatile u32 *) a) = v; | ||
105 | __asm__ __volatile__ ("nop"); | ||
106 | } | ||
107 | |||
108 | int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word); | ||
109 | |||
110 | #endif | ||
111 | |||
112 | #endif /* MCFMMU_H */ | ||
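The register accessors above are all the rest of the port needs to drive the MMU. As an illustrative sketch (not code from this patch), invalidating every TLB entry is a single write of the MMUOR_CA bit through mmu_write(); the tlbflush.h changes later in this series use MMUOR_CNL in the same way for the non-locked entries.

	/* Illustrative only: clear all TLB entries on a ColdFire V4e. */
	static inline void cf_flush_all_tlbs_example(void)
	{
		mmu_write(MMUOR, MMUOR_CA);	/* Clear all TLBs */
	}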
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h index 7d4341e55a99..dc3be991d634 100644 --- a/arch/m68k/include/asm/mmu_context.h +++ b/arch/m68k/include/asm/mmu_context.h | |||
@@ -8,7 +8,206 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |||
8 | } | 8 | } |
9 | 9 | ||
10 | #ifdef CONFIG_MMU | 10 | #ifdef CONFIG_MMU |
11 | #ifndef CONFIG_SUN3 | 11 | |
12 | #if defined(CONFIG_COLDFIRE) | ||
13 | |||
14 | #include <asm/atomic.h> | ||
15 | #include <asm/bitops.h> | ||
16 | #include <asm/mcfmmu.h> | ||
17 | #include <asm/mmu.h> | ||
18 | |||
19 | #define NO_CONTEXT 256 | ||
20 | #define LAST_CONTEXT 255 | ||
21 | #define FIRST_CONTEXT 1 | ||
22 | |||
23 | extern unsigned long context_map[]; | ||
24 | extern mm_context_t next_mmu_context; | ||
25 | |||
26 | extern atomic_t nr_free_contexts; | ||
27 | extern struct mm_struct *context_mm[LAST_CONTEXT+1]; | ||
28 | extern void steal_context(void); | ||
29 | |||
30 | static inline void get_mmu_context(struct mm_struct *mm) | ||
31 | { | ||
32 | mm_context_t ctx; | ||
33 | |||
34 | if (mm->context != NO_CONTEXT) | ||
35 | return; | ||
36 | while (atomic_dec_and_test_lt(&nr_free_contexts)) { | ||
37 | atomic_inc(&nr_free_contexts); | ||
38 | steal_context(); | ||
39 | } | ||
40 | ctx = next_mmu_context; | ||
41 | while (test_and_set_bit(ctx, context_map)) { | ||
42 | ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx); | ||
43 | if (ctx > LAST_CONTEXT) | ||
44 | ctx = 0; | ||
45 | } | ||
46 | next_mmu_context = (ctx + 1) & LAST_CONTEXT; | ||
47 | mm->context = ctx; | ||
48 | context_mm[ctx] = mm; | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Set up the context for a new address space. | ||
53 | */ | ||
54 | #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) | ||
55 | |||
56 | /* | ||
57 | * We're finished using the context for an address space. | ||
58 | */ | ||
59 | static inline void destroy_context(struct mm_struct *mm) | ||
60 | { | ||
61 | if (mm->context != NO_CONTEXT) { | ||
62 | clear_bit(mm->context, context_map); | ||
63 | mm->context = NO_CONTEXT; | ||
64 | atomic_inc(&nr_free_contexts); | ||
65 | } | ||
66 | } | ||
67 | |||
68 | static inline void set_context(mm_context_t context, pgd_t *pgd) | ||
69 | { | ||
70 | __asm__ __volatile__ ("movec %0,%%asid" : : "d" (context)); | ||
71 | } | ||
72 | |||
73 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
74 | struct task_struct *tsk) | ||
75 | { | ||
76 | get_mmu_context(tsk->mm); | ||
77 | set_context(tsk->mm->context, next->pgd); | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * After we have set current->mm to a new value, this activates | ||
82 | * the context for the new mm so we see the new mappings. | ||
83 | */ | ||
84 | static inline void activate_mm(struct mm_struct *active_mm, | ||
85 | struct mm_struct *mm) | ||
86 | { | ||
87 | get_mmu_context(mm); | ||
88 | set_context(mm->context, mm->pgd); | ||
89 | } | ||
90 | |||
91 | #define deactivate_mm(tsk, mm) do { } while (0) | ||
92 | |||
93 | extern void mmu_context_init(void); | ||
94 | #define prepare_arch_switch(next) load_ksp_mmu(next) | ||
95 | |||
96 | static inline void load_ksp_mmu(struct task_struct *task) | ||
97 | { | ||
98 | unsigned long flags; | ||
99 | struct mm_struct *mm; | ||
100 | int asid; | ||
101 | pgd_t *pgd; | ||
102 | pmd_t *pmd; | ||
103 | pte_t *pte; | ||
104 | unsigned long mmuar; | ||
105 | |||
106 | local_irq_save(flags); | ||
107 | mmuar = task->thread.ksp; | ||
108 | |||
109 | /* Search for a valid TLB entry; if one is found, don't remap */ | ||
110 | mmu_write(MMUAR, mmuar); | ||
111 | mmu_write(MMUOR, MMUOR_STLB | MMUOR_ADR); | ||
112 | if (mmu_read(MMUSR) & MMUSR_HIT) | ||
113 | goto end; | ||
114 | |||
115 | if (mmuar >= PAGE_OFFSET) { | ||
116 | mm = &init_mm; | ||
117 | } else { | ||
118 | pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm); | ||
119 | mm = task->mm; | ||
120 | } | ||
121 | |||
122 | if (!mm) | ||
123 | goto bug; | ||
124 | |||
125 | pgd = pgd_offset(mm, mmuar); | ||
126 | if (pgd_none(*pgd)) | ||
127 | goto bug; | ||
128 | |||
129 | pmd = pmd_offset(pgd, mmuar); | ||
130 | if (pmd_none(*pmd)) | ||
131 | goto bug; | ||
132 | |||
133 | pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar) | ||
134 | : pte_offset_map(pmd, mmuar); | ||
135 | if (pte_none(*pte) || !pte_present(*pte)) | ||
136 | goto bug; | ||
137 | |||
138 | set_pte(pte, pte_mkyoung(*pte)); | ||
139 | asid = mm->context & 0xff; | ||
140 | if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET) | ||
141 | set_pte(pte, pte_wrprotect(*pte)); | ||
142 | |||
143 | mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | | ||
144 | (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK) | ||
145 | >> CF_PAGE_MMUTR_SHIFT) | MMUTR_V); | ||
146 | |||
147 | mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) | | ||
148 | ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X); | ||
149 | |||
150 | mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA); | ||
151 | |||
152 | goto end; | ||
153 | |||
154 | bug: | ||
155 | pr_info("ksp load failed: mm=0x%p ksp=0x08%lx\n", mm, mmuar); | ||
156 | end: | ||
157 | local_irq_restore(flags); | ||
158 | } | ||
159 | |||
160 | #elif defined(CONFIG_SUN3) | ||
161 | #include <asm/sun3mmu.h> | ||
162 | #include <linux/sched.h> | ||
163 | |||
164 | extern unsigned long get_free_context(struct mm_struct *mm); | ||
165 | extern void clear_context(unsigned long context); | ||
166 | |||
167 | /* set the context for a new task to unmapped */ | ||
168 | static inline int init_new_context(struct task_struct *tsk, | ||
169 | struct mm_struct *mm) | ||
170 | { | ||
171 | mm->context = SUN3_INVALID_CONTEXT; | ||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | /* find the context given to this process, and if it hasn't already | ||
176 | got one, go get one for it. */ | ||
177 | static inline void get_mmu_context(struct mm_struct *mm) | ||
178 | { | ||
179 | if (mm->context == SUN3_INVALID_CONTEXT) | ||
180 | mm->context = get_free_context(mm); | ||
181 | } | ||
182 | |||
183 | /* flush context if allocated... */ | ||
184 | static inline void destroy_context(struct mm_struct *mm) | ||
185 | { | ||
186 | if (mm->context != SUN3_INVALID_CONTEXT) | ||
187 | clear_context(mm->context); | ||
188 | } | ||
189 | |||
190 | static inline void activate_context(struct mm_struct *mm) | ||
191 | { | ||
192 | get_mmu_context(mm); | ||
193 | sun3_put_context(mm->context); | ||
194 | } | ||
195 | |||
196 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
197 | struct task_struct *tsk) | ||
198 | { | ||
199 | activate_context(tsk->mm); | ||
200 | } | ||
201 | |||
202 | #define deactivate_mm(tsk, mm) do { } while (0) | ||
203 | |||
204 | static inline void activate_mm(struct mm_struct *prev_mm, | ||
205 | struct mm_struct *next_mm) | ||
206 | { | ||
207 | activate_context(next_mm); | ||
208 | } | ||
209 | |||
210 | #else | ||
12 | 211 | ||
13 | #include <asm/setup.h> | 212 | #include <asm/setup.h> |
14 | #include <asm/page.h> | 213 | #include <asm/page.h> |
@@ -103,55 +302,8 @@ static inline void activate_mm(struct mm_struct *prev_mm, | |||
103 | switch_mm_0460(next_mm); | 302 | switch_mm_0460(next_mm); |
104 | } | 303 | } |
105 | 304 | ||
106 | #else /* CONFIG_SUN3 */ | ||
107 | #include <asm/sun3mmu.h> | ||
108 | #include <linux/sched.h> | ||
109 | |||
110 | extern unsigned long get_free_context(struct mm_struct *mm); | ||
111 | extern void clear_context(unsigned long context); | ||
112 | |||
113 | /* set the context for a new task to unmapped */ | ||
114 | static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
115 | { | ||
116 | mm->context = SUN3_INVALID_CONTEXT; | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | /* find the context given to this process, and if it hasn't already | ||
121 | got one, go get one for it. */ | ||
122 | static inline void get_mmu_context(struct mm_struct *mm) | ||
123 | { | ||
124 | if(mm->context == SUN3_INVALID_CONTEXT) | ||
125 | mm->context = get_free_context(mm); | ||
126 | } | ||
127 | |||
128 | /* flush context if allocated... */ | ||
129 | static inline void destroy_context(struct mm_struct *mm) | ||
130 | { | ||
131 | if(mm->context != SUN3_INVALID_CONTEXT) | ||
132 | clear_context(mm->context); | ||
133 | } | ||
134 | |||
135 | static inline void activate_context(struct mm_struct *mm) | ||
136 | { | ||
137 | get_mmu_context(mm); | ||
138 | sun3_put_context(mm->context); | ||
139 | } | ||
140 | |||
141 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) | ||
142 | { | ||
143 | activate_context(tsk->mm); | ||
144 | } | ||
145 | |||
146 | #define deactivate_mm(tsk,mm) do { } while (0) | ||
147 | |||
148 | static inline void activate_mm(struct mm_struct *prev_mm, | ||
149 | struct mm_struct *next_mm) | ||
150 | { | ||
151 | activate_context(next_mm); | ||
152 | } | ||
153 | |||
154 | #endif | 305 | #endif |
306 | |||
155 | #else /* !CONFIG_MMU */ | 307 | #else /* !CONFIG_MMU */ |
156 | 308 | ||
157 | static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | 309 | static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) |
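Taken together, the ColdFire hunk above implements a small ASID allocator: every mm starts out with NO_CONTEXT, is handed one of the usable ASIDs from the context_map bitmap the first time it is switched to, and returns it in destroy_context(). A rough sketch of the lifecycle as the generic mm code drives it (illustrative pseudocode, not from the patch):

	init_new_context(tsk, mm);	/* mm->context = NO_CONTEXT, no ASID yet      */
	switch_mm(prev, mm, tsk);	/* get_mmu_context() picks a free bit in      */
					/* context_map, set_context() loads the ASID  */
					/* with movec %dn,%asid                       */
	destroy_context(mm);		/* bit cleared, nr_free_contexts goes back up */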
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h index 45bd3f589bf0..e0fdd4d08075 100644 --- a/arch/m68k/include/asm/motorola_pgtable.h +++ b/arch/m68k/include/asm/motorola_pgtable.h | |||
@@ -8,6 +8,7 @@ | |||
8 | #define _PAGE_PRESENT 0x001 | 8 | #define _PAGE_PRESENT 0x001 |
9 | #define _PAGE_SHORT 0x002 | 9 | #define _PAGE_SHORT 0x002 |
10 | #define _PAGE_RONLY 0x004 | 10 | #define _PAGE_RONLY 0x004 |
11 | #define _PAGE_READWRITE 0x000 | ||
11 | #define _PAGE_ACCESSED 0x008 | 12 | #define _PAGE_ACCESSED 0x008 |
12 | #define _PAGE_DIRTY 0x010 | 13 | #define _PAGE_DIRTY 0x010 |
13 | #define _PAGE_SUPER 0x080 /* 68040 supervisor only */ | 14 | #define _PAGE_SUPER 0x080 /* 68040 supervisor only */ |
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h index dfebb7c1e379..98baa82a8615 100644 --- a/arch/m68k/include/asm/page.h +++ b/arch/m68k/include/asm/page.h | |||
@@ -6,10 +6,10 @@ | |||
6 | #include <asm/page_offset.h> | 6 | #include <asm/page_offset.h> |
7 | 7 | ||
8 | /* PAGE_SHIFT determines the page size */ | 8 | /* PAGE_SHIFT determines the page size */ |
9 | #ifndef CONFIG_SUN3 | 9 | #if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE) |
10 | #define PAGE_SHIFT (12) | 10 | #define PAGE_SHIFT 13 |
11 | #else | 11 | #else |
12 | #define PAGE_SHIFT (13) | 12 | #define PAGE_SHIFT 12 |
13 | #endif | 13 | #endif |
14 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) | 14 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) |
15 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 15 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
@@ -36,6 +36,10 @@ typedef struct page *pgtable_t; | |||
36 | #define __pgd(x) ((pgd_t) { (x) } ) | 36 | #define __pgd(x) ((pgd_t) { (x) } ) |
37 | #define __pgprot(x) ((pgprot_t) { (x) } ) | 37 | #define __pgprot(x) ((pgprot_t) { (x) } ) |
38 | 38 | ||
39 | extern unsigned long _rambase; | ||
40 | extern unsigned long _ramstart; | ||
41 | extern unsigned long _ramend; | ||
42 | |||
39 | #endif /* !__ASSEMBLY__ */ | 43 | #endif /* !__ASSEMBLY__ */ |
40 | 44 | ||
41 | #ifdef CONFIG_MMU | 45 | #ifdef CONFIG_MMU |
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h index a8d1c60eb9ce..90595721185f 100644 --- a/arch/m68k/include/asm/page_no.h +++ b/arch/m68k/include/asm/page_no.h | |||
@@ -5,9 +5,6 @@ | |||
5 | 5 | ||
6 | extern unsigned long memory_start; | 6 | extern unsigned long memory_start; |
7 | extern unsigned long memory_end; | 7 | extern unsigned long memory_end; |
8 | extern unsigned long _rambase; | ||
9 | extern unsigned long _ramstart; | ||
10 | extern unsigned long _ramend; | ||
11 | 8 | ||
12 | #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) | 9 | #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) |
13 | #define free_user_page(page, addr) free_page(addr) | 10 | #define free_user_page(page, addr) free_page(addr) |
diff --git a/arch/m68k/include/asm/page_offset.h b/arch/m68k/include/asm/page_offset.h index 1780152d81da..82626a8f1d0a 100644 --- a/arch/m68k/include/asm/page_offset.h +++ b/arch/m68k/include/asm/page_offset.h | |||
@@ -1,11 +1,9 @@ | |||
1 | /* This handles the memory map.. */ | 1 | /* This handles the memory map.. */ |
2 | 2 | ||
3 | #ifdef CONFIG_MMU | 3 | #if defined(CONFIG_RAMBASE) |
4 | #ifndef CONFIG_SUN3 | 4 | #define PAGE_OFFSET_RAW CONFIG_RAMBASE |
5 | #define PAGE_OFFSET_RAW 0x00000000 | 5 | #elif defined(CONFIG_SUN3) |
6 | #else | ||
7 | #define PAGE_OFFSET_RAW 0x0E000000 | 6 | #define PAGE_OFFSET_RAW 0x0E000000 |
8 | #endif | ||
9 | #else | 7 | #else |
10 | #define PAGE_OFFSET_RAW CONFIG_RAMBASE | 8 | #define PAGE_OFFSET_RAW 0x00000000 |
11 | #endif | 9 | #endif |
diff --git a/arch/m68k/include/asm/pgalloc.h b/arch/m68k/include/asm/pgalloc.h index c294aad8a900..37bee7e3223d 100644 --- a/arch/m68k/include/asm/pgalloc.h +++ b/arch/m68k/include/asm/pgalloc.h | |||
@@ -7,7 +7,9 @@ | |||
7 | 7 | ||
8 | #ifdef CONFIG_MMU | 8 | #ifdef CONFIG_MMU |
9 | #include <asm/virtconvert.h> | 9 | #include <asm/virtconvert.h> |
10 | #ifdef CONFIG_SUN3 | 10 | #if defined(CONFIG_COLDFIRE) |
11 | #include <asm/mcf_pgalloc.h> | ||
12 | #elif defined(CONFIG_SUN3) | ||
11 | #include <asm/sun3_pgalloc.h> | 13 | #include <asm/sun3_pgalloc.h> |
12 | #else | 14 | #else |
13 | #include <asm/motorola_pgalloc.h> | 15 | #include <asm/motorola_pgalloc.h> |
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index 87174c904d2b..dc35e0e106e4 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h | |||
@@ -40,6 +40,8 @@ | |||
40 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ | 40 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ |
41 | #ifdef CONFIG_SUN3 | 41 | #ifdef CONFIG_SUN3 |
42 | #define PGDIR_SHIFT 17 | 42 | #define PGDIR_SHIFT 17 |
43 | #elif defined(CONFIG_COLDFIRE) | ||
44 | #define PGDIR_SHIFT 22 | ||
43 | #else | 45 | #else |
44 | #define PGDIR_SHIFT 25 | 46 | #define PGDIR_SHIFT 25 |
45 | #endif | 47 | #endif |
@@ -54,6 +56,10 @@ | |||
54 | #define PTRS_PER_PTE 16 | 56 | #define PTRS_PER_PTE 16 |
55 | #define PTRS_PER_PMD 1 | 57 | #define PTRS_PER_PMD 1 |
56 | #define PTRS_PER_PGD 2048 | 58 | #define PTRS_PER_PGD 2048 |
59 | #elif defined(CONFIG_COLDFIRE) | ||
60 | #define PTRS_PER_PTE 512 | ||
61 | #define PTRS_PER_PMD 1 | ||
62 | #define PTRS_PER_PGD 1024 | ||
57 | #else | 63 | #else |
58 | #define PTRS_PER_PTE 1024 | 64 | #define PTRS_PER_PTE 1024 |
59 | #define PTRS_PER_PMD 8 | 65 | #define PTRS_PER_PMD 8 |
@@ -66,12 +72,22 @@ | |||
66 | #ifdef CONFIG_SUN3 | 72 | #ifdef CONFIG_SUN3 |
67 | #define KMAP_START 0x0DC00000 | 73 | #define KMAP_START 0x0DC00000 |
68 | #define KMAP_END 0x0E000000 | 74 | #define KMAP_END 0x0E000000 |
75 | #elif defined(CONFIG_COLDFIRE) | ||
76 | #define KMAP_START 0xe0000000 | ||
77 | #define KMAP_END 0xf0000000 | ||
69 | #else | 78 | #else |
70 | #define KMAP_START 0xd0000000 | 79 | #define KMAP_START 0xd0000000 |
71 | #define KMAP_END 0xf0000000 | 80 | #define KMAP_END 0xf0000000 |
72 | #endif | 81 | #endif |
73 | 82 | ||
74 | #ifndef CONFIG_SUN3 | 83 | #ifdef CONFIG_SUN3 |
84 | extern unsigned long m68k_vmalloc_end; | ||
85 | #define VMALLOC_START 0x0f800000 | ||
86 | #define VMALLOC_END m68k_vmalloc_end | ||
87 | #elif defined(CONFIG_COLDFIRE) | ||
88 | #define VMALLOC_START 0xd0000000 | ||
89 | #define VMALLOC_END 0xe0000000 | ||
90 | #else | ||
75 | /* Just any arbitrary offset to the start of the vmalloc VM area: the | 91 | /* Just any arbitrary offset to the start of the vmalloc VM area: the |
76 | * current 8MB value just means that there will be a 8MB "hole" after the | 92 | * current 8MB value just means that there will be a 8MB "hole" after the |
77 | * physical memory until the kernel virtual memory starts. That means that | 93 | * physical memory until the kernel virtual memory starts. That means that |
@@ -82,11 +98,7 @@ | |||
82 | #define VMALLOC_OFFSET (8*1024*1024) | 98 | #define VMALLOC_OFFSET (8*1024*1024) |
83 | #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) | 99 | #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) |
84 | #define VMALLOC_END KMAP_START | 100 | #define VMALLOC_END KMAP_START |
85 | #else | 101 | #endif |
86 | extern unsigned long m68k_vmalloc_end; | ||
87 | #define VMALLOC_START 0x0f800000 | ||
88 | #define VMALLOC_END m68k_vmalloc_end | ||
89 | #endif /* CONFIG_SUN3 */ | ||
90 | 102 | ||
91 | /* zero page used for uninitialized stuff */ | 103 | /* zero page used for uninitialized stuff */ |
92 | extern void *empty_zero_page; | 104 | extern void *empty_zero_page; |
@@ -130,6 +142,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, | |||
130 | 142 | ||
131 | #ifdef CONFIG_SUN3 | 143 | #ifdef CONFIG_SUN3 |
132 | #include <asm/sun3_pgtable.h> | 144 | #include <asm/sun3_pgtable.h> |
145 | #elif defined(CONFIG_COLDFIRE) | ||
146 | #include <asm/mcf_pgtable.h> | ||
133 | #else | 147 | #else |
134 | #include <asm/motorola_pgtable.h> | 148 | #include <asm/motorola_pgtable.h> |
135 | #endif | 149 | #endif |
@@ -138,6 +152,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, | |||
138 | /* | 152 | /* |
139 | * Macro to mark a page protection value as "uncacheable". | 153 | * Macro to mark a page protection value as "uncacheable". |
140 | */ | 154 | */ |
155 | #ifdef CONFIG_COLDFIRE | ||
156 | # define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE)) | ||
157 | #else | ||
141 | #ifdef SUN3_PAGE_NOCACHE | 158 | #ifdef SUN3_PAGE_NOCACHE |
142 | # define __SUN3_PAGE_NOCACHE SUN3_PAGE_NOCACHE | 159 | # define __SUN3_PAGE_NOCACHE SUN3_PAGE_NOCACHE |
143 | #else | 160 | #else |
@@ -152,6 +169,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, | |||
152 | ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \ | 169 | ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \ |
153 | : (prot))) | 170 | : (prot))) |
154 | 171 | ||
172 | #endif /* CONFIG_COLDFIRE */ | ||
155 | #include <asm-generic/pgtable.h> | 173 | #include <asm-generic/pgtable.h> |
156 | #endif /* !__ASSEMBLY__ */ | 174 | #endif /* !__ASSEMBLY__ */ |
157 | 175 | ||
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h index 568facf30276..46460fa15d5c 100644 --- a/arch/m68k/include/asm/processor.h +++ b/arch/m68k/include/asm/processor.h | |||
@@ -48,10 +48,12 @@ static inline void wrusp(unsigned long usp) | |||
48 | * so don't change it unless you know what you are doing. | 48 | * so don't change it unless you know what you are doing. |
49 | */ | 49 | */ |
50 | #ifdef CONFIG_MMU | 50 | #ifdef CONFIG_MMU |
51 | #ifndef CONFIG_SUN3 | 51 | #if defined(CONFIG_COLDFIRE) |
52 | #define TASK_SIZE (0xF0000000UL) | 52 | #define TASK_SIZE (0xC0000000UL) |
53 | #else | 53 | #elif defined(CONFIG_SUN3) |
54 | #define TASK_SIZE (0x0E000000UL) | 54 | #define TASK_SIZE (0x0E000000UL) |
55 | #else | ||
56 | #define TASK_SIZE (0xF0000000UL) | ||
55 | #endif | 57 | #endif |
56 | #else | 58 | #else |
57 | #define TASK_SIZE (0xFFFFFFFFUL) | 59 | #define TASK_SIZE (0xFFFFFFFFUL) |
@@ -66,10 +68,12 @@ static inline void wrusp(unsigned long usp) | |||
66 | * space during mmap's. | 68 | * space during mmap's. |
67 | */ | 69 | */ |
68 | #ifdef CONFIG_MMU | 70 | #ifdef CONFIG_MMU |
69 | #ifndef CONFIG_SUN3 | 71 | #if defined(CONFIG_COLDFIRE) |
70 | #define TASK_UNMAPPED_BASE 0xC0000000UL | 72 | #define TASK_UNMAPPED_BASE 0x60000000UL |
71 | #else | 73 | #elif defined(CONFIG_SUN3) |
72 | #define TASK_UNMAPPED_BASE 0x0A000000UL | 74 | #define TASK_UNMAPPED_BASE 0x0A000000UL |
75 | #else | ||
76 | #define TASK_UNMAPPED_BASE 0xC0000000UL | ||
73 | #endif | 77 | #endif |
74 | #define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr) | 78 | #define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr) |
75 | #else | 79 | #else |
@@ -88,14 +92,12 @@ struct thread_struct { | |||
88 | unsigned long fp[8*3]; | 92 | unsigned long fp[8*3]; |
89 | unsigned long fpcntl[3]; /* fp control regs */ | 93 | unsigned long fpcntl[3]; /* fp control regs */ |
90 | unsigned char fpstate[FPSTATESIZE]; /* floating point state */ | 94 | unsigned char fpstate[FPSTATESIZE]; /* floating point state */ |
91 | struct thread_info info; | ||
92 | }; | 95 | }; |
93 | 96 | ||
94 | #define INIT_THREAD { \ | 97 | #define INIT_THREAD { \ |
95 | .ksp = sizeof(init_stack) + (unsigned long) init_stack, \ | 98 | .ksp = sizeof(init_stack) + (unsigned long) init_stack, \ |
96 | .sr = PS_S, \ | 99 | .sr = PS_S, \ |
97 | .fs = __KERNEL_DS, \ | 100 | .fs = __KERNEL_DS, \ |
98 | .info = INIT_THREAD_INFO(init_task), \ | ||
99 | } | 101 | } |
100 | 102 | ||
101 | #ifdef CONFIG_MMU | 103 | #ifdef CONFIG_MMU |
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h index ee959219fdfe..0fa80e97ed2d 100644 --- a/arch/m68k/include/asm/segment.h +++ b/arch/m68k/include/asm/segment.h | |||
@@ -22,23 +22,26 @@ typedef struct { | |||
22 | } mm_segment_t; | 22 | } mm_segment_t; |
23 | 23 | ||
24 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) | 24 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) |
25 | #define USER_DS MAKE_MM_SEG(__USER_DS) | ||
26 | #define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS) | ||
27 | 25 | ||
26 | #ifdef CONFIG_CPU_HAS_ADDRESS_SPACES | ||
28 | /* | 27 | /* |
29 | * Get/set the SFC/DFC registers for MOVES instructions | 28 | * Get/set the SFC/DFC registers for MOVES instructions |
30 | */ | 29 | */ |
30 | #define USER_DS MAKE_MM_SEG(__USER_DS) | ||
31 | #define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS) | ||
31 | 32 | ||
32 | static inline mm_segment_t get_fs(void) | 33 | static inline mm_segment_t get_fs(void) |
33 | { | 34 | { |
34 | #ifdef CONFIG_MMU | ||
35 | mm_segment_t _v; | 35 | mm_segment_t _v; |
36 | __asm__ ("movec %/dfc,%0":"=r" (_v.seg):); | 36 | __asm__ ("movec %/dfc,%0":"=r" (_v.seg):); |
37 | |||
38 | return _v; | 37 | return _v; |
39 | #else | 38 | } |
40 | return USER_DS; | 39 | |
41 | #endif | 40 | static inline void set_fs(mm_segment_t val) |
41 | { | ||
42 | __asm__ __volatile__ ("movec %0,%/sfc\n\t" | ||
43 | "movec %0,%/dfc\n\t" | ||
44 | : /* no outputs */ : "r" (val.seg) : "memory"); | ||
42 | } | 45 | } |
43 | 46 | ||
44 | static inline mm_segment_t get_ds(void) | 47 | static inline mm_segment_t get_ds(void) |
@@ -47,14 +50,13 @@ static inline mm_segment_t get_ds(void) | |||
47 | return KERNEL_DS; | 50 | return KERNEL_DS; |
48 | } | 51 | } |
49 | 52 | ||
50 | static inline void set_fs(mm_segment_t val) | 53 | #else |
51 | { | 54 | #define USER_DS MAKE_MM_SEG(TASK_SIZE) |
52 | #ifdef CONFIG_MMU | 55 | #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) |
53 | __asm__ __volatile__ ("movec %0,%/sfc\n\t" | 56 | #define get_ds() (KERNEL_DS) |
54 | "movec %0,%/dfc\n\t" | 57 | #define get_fs() (current_thread_info()->addr_limit) |
55 | : /* no outputs */ : "r" (val.seg) : "memory"); | 58 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) |
56 | #endif | 59 | #endif |
57 | } | ||
58 | 60 | ||
59 | #define segment_eq(a,b) ((a).seg == (b).seg) | 61 | #define segment_eq(a,b) ((a).seg == (b).seg) |
60 | 62 | ||
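Whichever implementation is in use — sfc/dfc on the classic parts, the new thread_info addr_limit on ColdFire — callers look the same. The usual pattern, which flush_tlb_kernel_page() in the tlbflush.h hunk further down already follows, is sketched here for illustration:

	mm_segment_t old_fs = get_fs();	/* dfc on 680x0, addr_limit otherwise */
	set_fs(KERNEL_DS);
	/* ... perform the access that needs the kernel segment ... */
	set_fs(old_fs);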
diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h index 4dfb3952b375..00c2c5397d37 100644 --- a/arch/m68k/include/asm/setup.h +++ b/arch/m68k/include/asm/setup.h | |||
@@ -40,6 +40,7 @@ | |||
40 | #define MACH_HP300 9 | 40 | #define MACH_HP300 9 |
41 | #define MACH_Q40 10 | 41 | #define MACH_Q40 10 |
42 | #define MACH_SUN3X 11 | 42 | #define MACH_SUN3X 11 |
43 | #define MACH_M54XX 12 | ||
43 | 44 | ||
44 | #define COMMAND_LINE_SIZE 256 | 45 | #define COMMAND_LINE_SIZE 256 |
45 | 46 | ||
@@ -211,23 +212,27 @@ extern unsigned long m68k_machtype; | |||
211 | #define CPUB_68030 1 | 212 | #define CPUB_68030 1 |
212 | #define CPUB_68040 2 | 213 | #define CPUB_68040 2 |
213 | #define CPUB_68060 3 | 214 | #define CPUB_68060 3 |
215 | #define CPUB_COLDFIRE 4 | ||
214 | 216 | ||
215 | #define CPU_68020 (1<<CPUB_68020) | 217 | #define CPU_68020 (1<<CPUB_68020) |
216 | #define CPU_68030 (1<<CPUB_68030) | 218 | #define CPU_68030 (1<<CPUB_68030) |
217 | #define CPU_68040 (1<<CPUB_68040) | 219 | #define CPU_68040 (1<<CPUB_68040) |
218 | #define CPU_68060 (1<<CPUB_68060) | 220 | #define CPU_68060 (1<<CPUB_68060) |
221 | #define CPU_COLDFIRE (1<<CPUB_COLDFIRE) | ||
219 | 222 | ||
220 | #define FPUB_68881 0 | 223 | #define FPUB_68881 0 |
221 | #define FPUB_68882 1 | 224 | #define FPUB_68882 1 |
222 | #define FPUB_68040 2 /* Internal FPU */ | 225 | #define FPUB_68040 2 /* Internal FPU */ |
223 | #define FPUB_68060 3 /* Internal FPU */ | 226 | #define FPUB_68060 3 /* Internal FPU */ |
224 | #define FPUB_SUNFPA 4 /* Sun-3 FPA */ | 227 | #define FPUB_SUNFPA 4 /* Sun-3 FPA */ |
228 | #define FPUB_COLDFIRE 5 /* ColdFire FPU */ | ||
225 | 229 | ||
226 | #define FPU_68881 (1<<FPUB_68881) | 230 | #define FPU_68881 (1<<FPUB_68881) |
227 | #define FPU_68882 (1<<FPUB_68882) | 231 | #define FPU_68882 (1<<FPUB_68882) |
228 | #define FPU_68040 (1<<FPUB_68040) | 232 | #define FPU_68040 (1<<FPUB_68040) |
229 | #define FPU_68060 (1<<FPUB_68060) | 233 | #define FPU_68060 (1<<FPUB_68060) |
230 | #define FPU_SUNFPA (1<<FPUB_SUNFPA) | 234 | #define FPU_SUNFPA (1<<FPUB_SUNFPA) |
235 | #define FPU_COLDFIRE (1<<FPUB_COLDFIRE) | ||
231 | 236 | ||
232 | #define MMUB_68851 0 | 237 | #define MMUB_68851 0 |
233 | #define MMUB_68030 1 /* Internal MMU */ | 238 | #define MMUB_68030 1 /* Internal MMU */ |
@@ -235,6 +240,7 @@ extern unsigned long m68k_machtype; | |||
235 | #define MMUB_68060 3 /* Internal MMU */ | 240 | #define MMUB_68060 3 /* Internal MMU */ |
236 | #define MMUB_APOLLO 4 /* Custom Apollo */ | 241 | #define MMUB_APOLLO 4 /* Custom Apollo */ |
237 | #define MMUB_SUN3 5 /* Custom Sun-3 */ | 242 | #define MMUB_SUN3 5 /* Custom Sun-3 */ |
243 | #define MMUB_COLDFIRE 6 /* Internal MMU */ | ||
238 | 244 | ||
239 | #define MMU_68851 (1<<MMUB_68851) | 245 | #define MMU_68851 (1<<MMUB_68851) |
240 | #define MMU_68030 (1<<MMUB_68030) | 246 | #define MMU_68030 (1<<MMUB_68030) |
@@ -242,6 +248,7 @@ extern unsigned long m68k_machtype; | |||
242 | #define MMU_68060 (1<<MMUB_68060) | 248 | #define MMU_68060 (1<<MMUB_68060) |
243 | #define MMU_SUN3 (1<<MMUB_SUN3) | 249 | #define MMU_SUN3 (1<<MMUB_SUN3) |
244 | #define MMU_APOLLO (1<<MMUB_APOLLO) | 250 | #define MMU_APOLLO (1<<MMUB_APOLLO) |
251 | #define MMU_COLDFIRE (1<<MMUB_COLDFIRE) | ||
245 | 252 | ||
246 | #ifdef __KERNEL__ | 253 | #ifdef __KERNEL__ |
247 | 254 | ||
@@ -341,6 +348,13 @@ extern int m68k_is040or060; | |||
341 | # endif | 348 | # endif |
342 | #endif | 349 | #endif |
343 | 350 | ||
351 | #if !defined(CONFIG_COLDFIRE) | ||
352 | # define CPU_IS_COLDFIRE (0) | ||
353 | #else | ||
354 | # define CPU_IS_COLDFIRE (1) | ||
355 | # define MMU_IS_COLDFIRE (1) | ||
356 | #endif | ||
357 | |||
344 | #define CPU_TYPE (m68k_cputype) | 358 | #define CPU_TYPE (m68k_cputype) |
345 | 359 | ||
346 | #ifdef CONFIG_M68KFPU_EMU | 360 | #ifdef CONFIG_M68KFPU_EMU |
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h index a29dd74a17cb..523db2a51cf3 100644 --- a/arch/m68k/include/asm/sigcontext.h +++ b/arch/m68k/include/asm/sigcontext.h | |||
@@ -15,11 +15,7 @@ struct sigcontext { | |||
15 | unsigned long sc_pc; | 15 | unsigned long sc_pc; |
16 | unsigned short sc_formatvec; | 16 | unsigned short sc_formatvec; |
17 | #ifndef __uClinux__ | 17 | #ifndef __uClinux__ |
18 | # ifdef __mcoldfire__ | ||
19 | unsigned long sc_fpregs[2][2]; /* room for two fp registers */ | ||
20 | # else | ||
21 | unsigned long sc_fpregs[2*3]; /* room for two fp registers */ | 18 | unsigned long sc_fpregs[2*3]; /* room for two fp registers */ |
22 | # endif | ||
23 | unsigned long sc_fpcntl[3]; | 19 | unsigned long sc_fpcntl[3]; |
24 | unsigned char sc_fpstate[216]; | 20 | unsigned char sc_fpstate[216]; |
25 | #endif | 21 | #endif |
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h index 790988967ba7..29fa6da4f17c 100644 --- a/arch/m68k/include/asm/thread_info.h +++ b/arch/m68k/include/asm/thread_info.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | #include <asm/page.h> | 5 | #include <asm/page.h> |
6 | #include <asm/segment.h> | ||
6 | 7 | ||
7 | /* | 8 | /* |
8 | * On machines with 4k pages we default to an 8k thread size, though we | 9 | * On machines with 4k pages we default to an 8k thread size, though we |
@@ -26,6 +27,7 @@ struct thread_info { | |||
26 | struct task_struct *task; /* main task structure */ | 27 | struct task_struct *task; /* main task structure */ |
27 | unsigned long flags; | 28 | unsigned long flags; |
28 | struct exec_domain *exec_domain; /* execution domain */ | 29 | struct exec_domain *exec_domain; /* execution domain */ |
30 | mm_segment_t addr_limit; /* thread address space */ | ||
29 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 31 | int preempt_count; /* 0 => preemptable, <0 => BUG */ |
30 | __u32 cpu; /* should always be 0 on m68k */ | 32 | __u32 cpu; /* should always be 0 on m68k */ |
31 | unsigned long tp_value; /* thread pointer */ | 33 | unsigned long tp_value; /* thread pointer */ |
@@ -39,6 +41,7 @@ struct thread_info { | |||
39 | { \ | 41 | { \ |
40 | .task = &tsk, \ | 42 | .task = &tsk, \ |
41 | .exec_domain = &default_exec_domain, \ | 43 | .exec_domain = &default_exec_domain, \ |
44 | .addr_limit = KERNEL_DS, \ | ||
42 | .preempt_count = INIT_PREEMPT_COUNT, \ | 45 | .preempt_count = INIT_PREEMPT_COUNT, \ |
43 | .restart_block = { \ | 46 | .restart_block = { \ |
44 | .fn = do_no_restart_syscall, \ | 47 | .fn = do_no_restart_syscall, \ |
@@ -47,34 +50,6 @@ struct thread_info { | |||
47 | 50 | ||
48 | #define init_stack (init_thread_union.stack) | 51 | #define init_stack (init_thread_union.stack) |
49 | 52 | ||
50 | #ifdef CONFIG_MMU | ||
51 | |||
52 | #ifndef __ASSEMBLY__ | ||
53 | #include <asm/current.h> | ||
54 | #endif | ||
55 | |||
56 | #ifdef ASM_OFFSETS_C | ||
57 | #define task_thread_info(tsk) ((struct thread_info *) NULL) | ||
58 | #else | ||
59 | #include <asm/asm-offsets.h> | ||
60 | #define task_thread_info(tsk) ((struct thread_info *)((char *)tsk+TASK_TINFO)) | ||
61 | #endif | ||
62 | |||
63 | #define init_thread_info (init_task.thread.info) | ||
64 | #define task_stack_page(tsk) ((tsk)->stack) | ||
65 | #define current_thread_info() task_thread_info(current) | ||
66 | |||
67 | #define __HAVE_THREAD_FUNCTIONS | ||
68 | |||
69 | #define setup_thread_stack(p, org) ({ \ | ||
70 | *(struct task_struct **)(p)->stack = (p); \ | ||
71 | task_thread_info(p)->task = (p); \ | ||
72 | }) | ||
73 | |||
74 | #define end_of_stack(p) ((unsigned long *)(p)->stack + 1) | ||
75 | |||
76 | #else /* !CONFIG_MMU */ | ||
77 | |||
78 | #ifndef __ASSEMBLY__ | 53 | #ifndef __ASSEMBLY__ |
79 | /* how to get the thread information struct from C */ | 54 | /* how to get the thread information struct from C */ |
80 | static inline struct thread_info *current_thread_info(void) | 55 | static inline struct thread_info *current_thread_info(void) |
@@ -92,8 +67,6 @@ static inline struct thread_info *current_thread_info(void) | |||
92 | 67 | ||
93 | #define init_thread_info (init_thread_union.thread_info) | 68 | #define init_thread_info (init_thread_union.thread_info) |
94 | 69 | ||
95 | #endif /* CONFIG_MMU */ | ||
96 | |||
97 | /* entry.S relies on these definitions! | 70 | /* entry.S relies on these definitions! |
98 | * bits 0-7 are tested at every exception exit | 71 | * bits 0-7 are tested at every exception exit |
99 | * bits 8-15 are also tested at syscall exit | 72 | * bits 8-15 are also tested at syscall exit |
diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h index a6b4ed4fc90f..965ea35c9a40 100644 --- a/arch/m68k/include/asm/tlbflush.h +++ b/arch/m68k/include/asm/tlbflush.h | |||
@@ -5,10 +5,13 @@ | |||
5 | #ifndef CONFIG_SUN3 | 5 | #ifndef CONFIG_SUN3 |
6 | 6 | ||
7 | #include <asm/current.h> | 7 | #include <asm/current.h> |
8 | #include <asm/mcfmmu.h> | ||
8 | 9 | ||
9 | static inline void flush_tlb_kernel_page(void *addr) | 10 | static inline void flush_tlb_kernel_page(void *addr) |
10 | { | 11 | { |
11 | if (CPU_IS_040_OR_060) { | 12 | if (CPU_IS_COLDFIRE) { |
13 | mmu_write(MMUOR, MMUOR_CNL); | ||
14 | } else if (CPU_IS_040_OR_060) { | ||
12 | mm_segment_t old_fs = get_fs(); | 15 | mm_segment_t old_fs = get_fs(); |
13 | set_fs(KERNEL_DS); | 16 | set_fs(KERNEL_DS); |
14 | __asm__ __volatile__(".chip 68040\n\t" | 17 | __asm__ __volatile__(".chip 68040\n\t" |
@@ -25,12 +28,15 @@ static inline void flush_tlb_kernel_page(void *addr) | |||
25 | */ | 28 | */ |
26 | static inline void __flush_tlb(void) | 29 | static inline void __flush_tlb(void) |
27 | { | 30 | { |
28 | if (CPU_IS_040_OR_060) | 31 | if (CPU_IS_COLDFIRE) { |
32 | mmu_write(MMUOR, MMUOR_CNL); | ||
33 | } else if (CPU_IS_040_OR_060) { | ||
29 | __asm__ __volatile__(".chip 68040\n\t" | 34 | __asm__ __volatile__(".chip 68040\n\t" |
30 | "pflushan\n\t" | 35 | "pflushan\n\t" |
31 | ".chip 68k"); | 36 | ".chip 68k"); |
32 | else if (CPU_IS_020_OR_030) | 37 | } else if (CPU_IS_020_OR_030) { |
33 | __asm__ __volatile__("pflush #0,#4"); | 38 | __asm__ __volatile__("pflush #0,#4"); |
39 | } | ||
34 | } | 40 | } |
35 | 41 | ||
36 | static inline void __flush_tlb040_one(unsigned long addr) | 42 | static inline void __flush_tlb040_one(unsigned long addr) |
@@ -43,7 +49,9 @@ static inline void __flush_tlb040_one(unsigned long addr) | |||
43 | 49 | ||
44 | static inline void __flush_tlb_one(unsigned long addr) | 50 | static inline void __flush_tlb_one(unsigned long addr) |
45 | { | 51 | { |
46 | if (CPU_IS_040_OR_060) | 52 | if (CPU_IS_COLDFIRE) |
53 | mmu_write(MMUOR, MMUOR_CNL); | ||
54 | else if (CPU_IS_040_OR_060) | ||
47 | __flush_tlb040_one(addr); | 55 | __flush_tlb040_one(addr); |
48 | else if (CPU_IS_020_OR_030) | 56 | else if (CPU_IS_020_OR_030) |
49 | __asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr)); | 57 | __asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr)); |
@@ -56,12 +64,15 @@ static inline void __flush_tlb_one(unsigned long addr) | |||
56 | */ | 64 | */ |
57 | static inline void flush_tlb_all(void) | 65 | static inline void flush_tlb_all(void) |
58 | { | 66 | { |
59 | if (CPU_IS_040_OR_060) | 67 | if (CPU_IS_COLDFIRE) { |
68 | mmu_write(MMUOR, MMUOR_CNL); | ||
69 | } else if (CPU_IS_040_OR_060) { | ||
60 | __asm__ __volatile__(".chip 68040\n\t" | 70 | __asm__ __volatile__(".chip 68040\n\t" |
61 | "pflusha\n\t" | 71 | "pflusha\n\t" |
62 | ".chip 68k"); | 72 | ".chip 68k"); |
63 | else if (CPU_IS_020_OR_030) | 73 | } else if (CPU_IS_020_OR_030) { |
64 | __asm__ __volatile__("pflusha"); | 74 | __asm__ __volatile__("pflusha"); |
75 | } | ||
65 | } | 76 | } |
66 | 77 | ||
67 | static inline void flush_tlb_mm(struct mm_struct *mm) | 78 | static inline void flush_tlb_mm(struct mm_struct *mm) |
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h index 151068f64f44..4aff3358fbaf 100644 --- a/arch/m68k/include/asm/traps.h +++ b/arch/m68k/include/asm/traps.h | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | typedef void (*e_vector)(void); | 19 | typedef void (*e_vector)(void); |
20 | extern e_vector vectors[]; | 20 | extern e_vector vectors[]; |
21 | extern e_vector *_ramvec; | ||
21 | 22 | ||
22 | asmlinkage void auto_inthandler(void); | 23 | asmlinkage void auto_inthandler(void); |
23 | asmlinkage void user_inthandler(void); | 24 | asmlinkage void user_inthandler(void); |
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 7107f3fbdbb6..9c80cd515b20 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h | |||
@@ -21,6 +21,22 @@ static inline int access_ok(int type, const void __user *addr, | |||
21 | } | 21 | } |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * Not all variants of the 68k family support the notion of address spaces. | ||
25 | * The traditional 680x0 parts do, and they use the sfc/dfc registers and | ||
26 | * the "moves" instruction to access user space from kernel space. Other | ||
27 | * family members like ColdFire don't support this; they have only a single | ||
28 | * address space and use the usual "move" instruction for user space access. | ||
29 | * | ||
30 | * Outside of this difference, the user space access functions are the same. | ||
31 | * So let's keep the code simple and just define what we need to use. | ||
32 | */ | ||
33 | #ifdef CONFIG_CPU_HAS_ADDRESS_SPACES | ||
34 | #define MOVES "moves" | ||
35 | #else | ||
36 | #define MOVES "move" | ||
37 | #endif | ||
38 | |||
39 | /* | ||
24 | * The exception table consists of pairs of addresses: the first is the | 40 | * The exception table consists of pairs of addresses: the first is the |
25 | * address of an instruction that is allowed to fault, and the second is | 41 | * address of an instruction that is allowed to fault, and the second is |
26 | * the address at which the program should continue. No registers are | 42 | * the address at which the program should continue. No registers are |
@@ -43,7 +59,7 @@ extern int __get_user_bad(void); | |||
43 | 59 | ||
44 | #define __put_user_asm(res, x, ptr, bwl, reg, err) \ | 60 | #define __put_user_asm(res, x, ptr, bwl, reg, err) \ |
45 | asm volatile ("\n" \ | 61 | asm volatile ("\n" \ |
46 | "1: moves."#bwl" %2,%1\n" \ | 62 | "1: "MOVES"."#bwl" %2,%1\n" \ |
47 | "2:\n" \ | 63 | "2:\n" \ |
48 | " .section .fixup,\"ax\"\n" \ | 64 | " .section .fixup,\"ax\"\n" \ |
49 | " .even\n" \ | 65 | " .even\n" \ |
@@ -83,8 +99,8 @@ asm volatile ("\n" \ | |||
83 | { \ | 99 | { \ |
84 | const void __user *__pu_ptr = (ptr); \ | 100 | const void __user *__pu_ptr = (ptr); \ |
85 | asm volatile ("\n" \ | 101 | asm volatile ("\n" \ |
86 | "1: moves.l %2,(%1)+\n" \ | 102 | "1: "MOVES".l %2,(%1)+\n" \ |
87 | "2: moves.l %R2,(%1)\n" \ | 103 | "2: "MOVES".l %R2,(%1)\n" \ |
88 | "3:\n" \ | 104 | "3:\n" \ |
89 | " .section .fixup,\"ax\"\n" \ | 105 | " .section .fixup,\"ax\"\n" \ |
90 | " .even\n" \ | 106 | " .even\n" \ |
@@ -115,12 +131,12 @@ asm volatile ("\n" \ | |||
115 | #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ | 131 | #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ |
116 | type __gu_val; \ | 132 | type __gu_val; \ |
117 | asm volatile ("\n" \ | 133 | asm volatile ("\n" \ |
118 | "1: moves."#bwl" %2,%1\n" \ | 134 | "1: "MOVES"."#bwl" %2,%1\n" \ |
119 | "2:\n" \ | 135 | "2:\n" \ |
120 | " .section .fixup,\"ax\"\n" \ | 136 | " .section .fixup,\"ax\"\n" \ |
121 | " .even\n" \ | 137 | " .even\n" \ |
122 | "10: move.l %3,%0\n" \ | 138 | "10: move.l %3,%0\n" \ |
123 | " sub."#bwl" %1,%1\n" \ | 139 | " sub.l %1,%1\n" \ |
124 | " jra 2b\n" \ | 140 | " jra 2b\n" \ |
125 | " .previous\n" \ | 141 | " .previous\n" \ |
126 | "\n" \ | 142 | "\n" \ |
@@ -152,8 +168,8 @@ asm volatile ("\n" \ | |||
152 | const void *__gu_ptr = (ptr); \ | 168 | const void *__gu_ptr = (ptr); \ |
153 | u64 __gu_val; \ | 169 | u64 __gu_val; \ |
154 | asm volatile ("\n" \ | 170 | asm volatile ("\n" \ |
155 | "1: moves.l (%2)+,%1\n" \ | 171 | "1: "MOVES".l (%2)+,%1\n" \ |
156 | "2: moves.l (%2),%R1\n" \ | 172 | "2: "MOVES".l (%2),%R1\n" \ |
157 | "3:\n" \ | 173 | "3:\n" \ |
158 | " .section .fixup,\"ax\"\n" \ | 174 | " .section .fixup,\"ax\"\n" \ |
159 | " .even\n" \ | 175 | " .even\n" \ |
@@ -188,12 +204,12 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned | |||
188 | 204 | ||
189 | #define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ | 205 | #define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ |
190 | asm volatile ("\n" \ | 206 | asm volatile ("\n" \ |
191 | "1: moves."#s1" (%2)+,%3\n" \ | 207 | "1: "MOVES"."#s1" (%2)+,%3\n" \ |
192 | " move."#s1" %3,(%1)+\n" \ | 208 | " move."#s1" %3,(%1)+\n" \ |
193 | "2: moves."#s2" (%2)+,%3\n" \ | 209 | "2: "MOVES"."#s2" (%2)+,%3\n" \ |
194 | " move."#s2" %3,(%1)+\n" \ | 210 | " move."#s2" %3,(%1)+\n" \ |
195 | " .ifnc \""#s3"\",\"\"\n" \ | 211 | " .ifnc \""#s3"\",\"\"\n" \ |
196 | "3: moves."#s3" (%2)+,%3\n" \ | 212 | "3: "MOVES"."#s3" (%2)+,%3\n" \ |
197 | " move."#s3" %3,(%1)+\n" \ | 213 | " move."#s3" %3,(%1)+\n" \ |
198 | " .endif\n" \ | 214 | " .endif\n" \ |
199 | "4:\n" \ | 215 | "4:\n" \ |
@@ -269,13 +285,13 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n) | |||
269 | #define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \ | 285 | #define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \ |
270 | asm volatile ("\n" \ | 286 | asm volatile ("\n" \ |
271 | " move."#s1" (%2)+,%3\n" \ | 287 | " move."#s1" (%2)+,%3\n" \ |
272 | "11: moves."#s1" %3,(%1)+\n" \ | 288 | "11: "MOVES"."#s1" %3,(%1)+\n" \ |
273 | "12: move."#s2" (%2)+,%3\n" \ | 289 | "12: move."#s2" (%2)+,%3\n" \ |
274 | "21: moves."#s2" %3,(%1)+\n" \ | 290 | "21: "MOVES"."#s2" %3,(%1)+\n" \ |
275 | "22:\n" \ | 291 | "22:\n" \ |
276 | " .ifnc \""#s3"\",\"\"\n" \ | 292 | " .ifnc \""#s3"\",\"\"\n" \ |
277 | " move."#s3" (%2)+,%3\n" \ | 293 | " move."#s3" (%2)+,%3\n" \ |
278 | "31: moves."#s3" %3,(%1)+\n" \ | 294 | "31: "MOVES"."#s3" %3,(%1)+\n" \ |
279 | "32:\n" \ | 295 | "32:\n" \ |
280 | " .endif\n" \ | 296 | " .endif\n" \ |
281 | "4:\n" \ | 297 | "4:\n" \ |
diff --git a/arch/m68k/include/asm/ucontext.h b/arch/m68k/include/asm/ucontext.h index 00dcc5176c57..e4e22669edc0 100644 --- a/arch/m68k/include/asm/ucontext.h +++ b/arch/m68k/include/asm/ucontext.h | |||
@@ -7,11 +7,7 @@ typedef greg_t gregset_t[NGREG]; | |||
7 | 7 | ||
8 | typedef struct fpregset { | 8 | typedef struct fpregset { |
9 | int f_fpcntl[3]; | 9 | int f_fpcntl[3]; |
10 | #ifdef __mcoldfire__ | ||
11 | int f_fpregs[8][2]; | ||
12 | #else | ||
13 | int f_fpregs[8*3]; | 10 | int f_fpregs[8*3]; |
14 | #endif | ||
15 | } fpregset_t; | 11 | } fpregset_t; |
16 | 12 | ||
17 | struct mcontext { | 13 | struct mcontext { |
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile index c5696193281a..40d29a788b05 100644 --- a/arch/m68k/kernel/Makefile +++ b/arch/m68k/kernel/Makefile | |||
@@ -2,19 +2,24 @@ | |||
2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
3 | # | 3 | # |
4 | 4 | ||
5 | extra-$(CONFIG_MMU) := head.o | 5 | extra-$(CONFIG_AMIGA) := head.o |
6 | extra-$(CONFIG_ATARI) := head.o | ||
7 | extra-$(CONFIG_MAC) := head.o | ||
8 | extra-$(CONFIG_APOLLO) := head.o | ||
9 | extra-$(CONFIG_VME) := head.o | ||
10 | extra-$(CONFIG_HP300) := head.o | ||
11 | extra-$(CONFIG_Q40) := head.o | ||
12 | extra-$(CONFIG_SUN3X) := head.o | ||
6 | extra-$(CONFIG_SUN3) := sun3-head.o | 13 | extra-$(CONFIG_SUN3) := sun3-head.o |
7 | extra-y += vmlinux.lds | 14 | extra-y += vmlinux.lds |
8 | 15 | ||
9 | obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o setup.o \ | 16 | obj-y := entry.o init_task.o irq.o m68k_ksyms.o module.o process.o ptrace.o |
10 | signal.o sys_m68k.o syscalltable.o time.o traps.o | 17 | obj-y += setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o |
11 | 18 | ||
12 | obj-$(CONFIG_MMU) += ints.o vectors.o | 19 | obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o |
20 | obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o | ||
13 | 21 | ||
14 | ifndef CONFIG_MMU_SUN3 | 22 | ifndef CONFIG_MMU_SUN3 |
15 | obj-y += dma.o | 23 | obj-y += dma.o |
16 | endif | ||
17 | ifndef CONFIG_MMU | ||
18 | obj-y += init_task.o | ||
19 | endif | 24 | endif |
20 | 25 | ||
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c index 983fed9d469b..a972b00cd77d 100644 --- a/arch/m68k/kernel/asm-offsets.c +++ b/arch/m68k/kernel/asm-offsets.c | |||
@@ -24,8 +24,7 @@ int main(void) | |||
24 | /* offsets into the task struct */ | 24 | /* offsets into the task struct */ |
25 | DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); | 25 | DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); |
26 | DEFINE(TASK_MM, offsetof(struct task_struct, mm)); | 26 | DEFINE(TASK_MM, offsetof(struct task_struct, mm)); |
27 | DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info)); | 27 | DEFINE(TASK_STACK, offsetof(struct task_struct, stack)); |
28 | DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info)); | ||
29 | 28 | ||
30 | /* offsets into the thread struct */ | 29 | /* offsets into the thread struct */ |
31 | DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); | 30 | DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); |
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index 081cf96f243b..b8daf64e347d 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S | |||
@@ -1,4 +1,4 @@ | |||
1 | #ifdef CONFIG_MMU | 1 | #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) |
2 | #include "entry_mm.S" | 2 | #include "entry_mm.S" |
3 | #else | 3 | #else |
4 | #include "entry_no.S" | 4 | #include "entry_no.S" |
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S index c713f514843d..675a854966a6 100644 --- a/arch/m68k/kernel/entry_mm.S +++ b/arch/m68k/kernel/entry_mm.S | |||
@@ -99,7 +99,8 @@ do_trace_exit: | |||
99 | jra .Lret_from_exception | 99 | jra .Lret_from_exception |
100 | 100 | ||
101 | ENTRY(ret_from_signal) | 101 | ENTRY(ret_from_signal) |
102 | tstb %curptr@(TASK_INFO+TINFO_FLAGS+2) | 102 | movel %curptr@(TASK_STACK),%a1 |
103 | tstb %a1@(TINFO_FLAGS+2) | ||
103 | jge 1f | 104 | jge 1f |
104 | jbsr syscall_trace | 105 | jbsr syscall_trace |
105 | 1: RESTORE_SWITCH_STACK | 106 | 1: RESTORE_SWITCH_STACK |
@@ -120,11 +121,13 @@ ENTRY(system_call) | |||
120 | SAVE_ALL_SYS | 121 | SAVE_ALL_SYS |
121 | 122 | ||
122 | GET_CURRENT(%d1) | 123 | GET_CURRENT(%d1) |
124 | movel %d1,%a1 | ||
125 | |||
123 | | save top of frame | 126 | | save top of frame |
124 | movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0) | 127 | movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0) |
125 | 128 | ||
126 | | syscall trace? | 129 | | syscall trace? |
127 | tstb %curptr@(TASK_INFO+TINFO_FLAGS+2) | 130 | tstb %a1@(TINFO_FLAGS+2) |
128 | jmi do_trace_entry | 131 | jmi do_trace_entry |
129 | cmpl #NR_syscalls,%d0 | 132 | cmpl #NR_syscalls,%d0 |
130 | jcc badsys | 133 | jcc badsys |
@@ -133,7 +136,8 @@ syscall: | |||
133 | movel %d0,%sp@(PT_OFF_D0) | save the return value | 136 | movel %d0,%sp@(PT_OFF_D0) | save the return value |
134 | ret_from_syscall: | 137 | ret_from_syscall: |
135 | |oriw #0x0700,%sr | 138 | |oriw #0x0700,%sr |
136 | movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0 | 139 | movel %curptr@(TASK_STACK),%a1 |
140 | movew %a1@(TINFO_FLAGS+2),%d0 | ||
137 | jne syscall_exit_work | 141 | jne syscall_exit_work |
138 | 1: RESTORE_ALL | 142 | 1: RESTORE_ALL |
139 | 143 | ||
@@ -159,7 +163,8 @@ ENTRY(ret_from_exception) | |||
159 | andw #ALLOWINT,%sr | 163 | andw #ALLOWINT,%sr |
160 | 164 | ||
161 | resume_userspace: | 165 | resume_userspace: |
162 | moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0 | 166 | movel %curptr@(TASK_STACK),%a1 |
167 | moveb %a1@(TINFO_FLAGS+3),%d0 | ||
163 | jne exit_work | 168 | jne exit_work |
164 | 1: RESTORE_ALL | 169 | 1: RESTORE_ALL |
165 | 170 | ||
@@ -199,7 +204,8 @@ do_delayed_trace: | |||
199 | ENTRY(auto_inthandler) | 204 | ENTRY(auto_inthandler) |
200 | SAVE_ALL_INT | 205 | SAVE_ALL_INT |
201 | GET_CURRENT(%d0) | 206 | GET_CURRENT(%d0) |
202 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 207 | movel %d0,%a1 |
208 | addqb #1,%a1@(TINFO_PREEMPT+1) | ||
203 | | put exception # in d0 | 209 | | put exception # in d0 |
204 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 | 210 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 |
205 | subw #VEC_SPUR,%d0 | 211 | subw #VEC_SPUR,%d0 |
@@ -211,7 +217,8 @@ auto_irqhandler_fixup = . + 2 | |||
211 | addql #8,%sp | pop parameters off stack | 217 | addql #8,%sp | pop parameters off stack |
212 | 218 | ||
213 | ret_from_interrupt: | 219 | ret_from_interrupt: |
214 | subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 220 | movel %curptr@(TASK_STACK),%a1 |
221 | subqb #1,%a1@(TINFO_PREEMPT+1) | ||
215 | jeq ret_from_last_interrupt | 222 | jeq ret_from_last_interrupt |
216 | 2: RESTORE_ALL | 223 | 2: RESTORE_ALL |
217 | 224 | ||
@@ -232,7 +239,8 @@ ret_from_last_interrupt: | |||
232 | ENTRY(user_inthandler) | 239 | ENTRY(user_inthandler) |
233 | SAVE_ALL_INT | 240 | SAVE_ALL_INT |
234 | GET_CURRENT(%d0) | 241 | GET_CURRENT(%d0) |
235 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 242 | movel %d0,%a1 |
243 | addqb #1,%a1@(TINFO_PREEMPT+1) | ||
236 | | put exception # in d0 | 244 | | put exception # in d0 |
237 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 | 245 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 |
238 | user_irqvec_fixup = . + 2 | 246 | user_irqvec_fixup = . + 2 |
@@ -243,7 +251,8 @@ user_irqvec_fixup = . + 2 | |||
243 | jsr do_IRQ | process the IRQ | 251 | jsr do_IRQ | process the IRQ |
244 | addql #8,%sp | pop parameters off stack | 252 | addql #8,%sp | pop parameters off stack |
245 | 253 | ||
246 | subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 254 | movel %curptr@(TASK_STACK),%a1 |
255 | subqb #1,%a1@(TINFO_PREEMPT+1) | ||
247 | jeq ret_from_last_interrupt | 256 | jeq ret_from_last_interrupt |
248 | RESTORE_ALL | 257 | RESTORE_ALL |
249 | 258 | ||
@@ -252,13 +261,15 @@ user_irqvec_fixup = . + 2 | |||
252 | ENTRY(bad_inthandler) | 261 | ENTRY(bad_inthandler) |
253 | SAVE_ALL_INT | 262 | SAVE_ALL_INT |
254 | GET_CURRENT(%d0) | 263 | GET_CURRENT(%d0) |
255 | addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 264 | movel %d0,%a1 |
265 | addqb #1,%a1@(TINFO_PREEMPT+1) | ||
256 | 266 | ||
257 | movel %sp,%sp@- | 267 | movel %sp,%sp@- |
258 | jsr handle_badint | 268 | jsr handle_badint |
259 | addql #4,%sp | 269 | addql #4,%sp |
260 | 270 | ||
261 | subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) | 271 | movel %curptr@(TASK_STACK),%a1 |
272 | subqb #1,%a1@(TINFO_PREEMPT+1) | ||
262 | jeq ret_from_last_interrupt | 273 | jeq ret_from_last_interrupt |
263 | RESTORE_ALL | 274 | RESTORE_ALL |
264 | 275 | ||
diff --git a/arch/m68k/kernel/entry_no.S b/arch/m68k/kernel/entry_no.S index 1b4289061a64..d80cba45589f 100644 --- a/arch/m68k/kernel/entry_no.S +++ b/arch/m68k/kernel/entry_no.S | |||
@@ -44,8 +44,7 @@ | |||
44 | 44 | ||
45 | ENTRY(buserr) | 45 | ENTRY(buserr) |
46 | SAVE_ALL_INT | 46 | SAVE_ALL_INT |
47 | moveq #-1,%d0 | 47 | GET_CURRENT(%d0) |
48 | movel %d0,%sp@(PT_OFF_ORIG_D0) | ||
49 | movel %sp,%sp@- /* stack frame pointer argument */ | 48 | movel %sp,%sp@- /* stack frame pointer argument */ |
50 | jsr buserr_c | 49 | jsr buserr_c |
51 | addql #4,%sp | 50 | addql #4,%sp |
@@ -53,8 +52,7 @@ ENTRY(buserr) | |||
53 | 52 | ||
54 | ENTRY(trap) | 53 | ENTRY(trap) |
55 | SAVE_ALL_INT | 54 | SAVE_ALL_INT |
56 | moveq #-1,%d0 | 55 | GET_CURRENT(%d0) |
57 | movel %d0,%sp@(PT_OFF_ORIG_D0) | ||
58 | movel %sp,%sp@- /* stack frame pointer argument */ | 56 | movel %sp,%sp@- /* stack frame pointer argument */ |
59 | jsr trap_c | 57 | jsr trap_c |
60 | addql #4,%sp | 58 | addql #4,%sp |
@@ -65,8 +63,7 @@ ENTRY(trap) | |||
65 | .globl dbginterrupt | 63 | .globl dbginterrupt |
66 | ENTRY(dbginterrupt) | 64 | ENTRY(dbginterrupt) |
67 | SAVE_ALL_INT | 65 | SAVE_ALL_INT |
68 | moveq #-1,%d0 | 66 | GET_CURRENT(%d0) |
69 | movel %d0,%sp@(PT_OFF_ORIG_D0) | ||
70 | movel %sp,%sp@- /* stack frame pointer argument */ | 67 | movel %sp,%sp@- /* stack frame pointer argument */ |
71 | jsr dbginterrupt_c | 68 | jsr dbginterrupt_c |
72 | addql #4,%sp | 69 | addql #4,%sp |
diff --git a/arch/m68k/kernel/init_task.c b/arch/m68k/kernel/init_task.c index cbf9dc3cc51d..c744cfc6bfa1 100644 --- a/arch/m68k/kernel/init_task.c +++ b/arch/m68k/kernel/init_task.c | |||
@@ -19,7 +19,6 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | |||
19 | * | 19 | * |
20 | * All other task structs will be allocated on slabs in fork.c | 20 | * All other task structs will be allocated on slabs in fork.c |
21 | */ | 21 | */ |
22 | __asm__(".align 4"); | ||
23 | struct task_struct init_task = INIT_TASK(init_task); | 22 | struct task_struct init_task = INIT_TASK(init_task); |
24 | 23 | ||
25 | EXPORT_SYMBOL(init_task); | 24 | EXPORT_SYMBOL(init_task); |
@@ -27,7 +26,7 @@ EXPORT_SYMBOL(init_task); | |||
27 | /* | 26 | /* |
28 | * Initial thread structure. | 27 | * Initial thread structure. |
29 | * | 28 | * |
30 | * We need to make sure that this is 8192-byte aligned due to the | 29 | * We need to make sure that this is THREAD size aligned due to the |
31 | * way process stacks are handled. This is done by having a special | 30 | * way process stacks are handled. This is done by having a special |
32 | * "init_task" linker map entry.. | 31 | * "init_task" linker map entry.. |
33 | */ | 32 | */ |
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c index 1b7a14d1a000..774c1bd59c36 100644 --- a/arch/m68k/kernel/m68k_ksyms.c +++ b/arch/m68k/kernel/m68k_ksyms.c | |||
@@ -14,7 +14,7 @@ EXPORT_SYMBOL(__ashrdi3); | |||
14 | EXPORT_SYMBOL(__lshrdi3); | 14 | EXPORT_SYMBOL(__lshrdi3); |
15 | EXPORT_SYMBOL(__muldi3); | 15 | EXPORT_SYMBOL(__muldi3); |
16 | 16 | ||
17 | #if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) | 17 | #if defined(CONFIG_CPU_HAS_NO_MULDIV64) |
18 | /* | 18 | /* |
19 | * Simpler 68k and ColdFire parts also need a few other gcc functions. | 19 | * Simpler 68k and ColdFire parts also need a few other gcc functions. |
20 | */ | 20 | */ |
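Switching this block from an explicit CPU list to CONFIG_CPU_HAS_NO_MULDIV64 keeps it in step with lib/Makefile further down, which builds the helpers under the same symbol. For context, a hedged illustration of why the exports are needed at all: on cores without the full 32-bit multiply/divide instructions, gcc may lower ordinary integer arithmetic, including arithmetic inside modules, to libgcc-style helper calls, so the kernel has to provide and export its own copies:

    /* Hedged example: plain C that gcc may compile into helper calls on
     * such CPUs; a module containing it then needs the exported symbols. */
    unsigned int scale(unsigned int val, unsigned int num, unsigned int den)
    {
            return val * num / den;         /* may become __mulsi3 + __udivsi3 */
    }

    unsigned int wrap(unsigned int val, unsigned int den)
    {
            return val % den;               /* may become __umodsi3 */
    }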
diff --git a/arch/m68k/kernel/process_mm.c b/arch/m68k/kernel/process_mm.c index 1bc223aa07ec..125f34e00bf0 100644 --- a/arch/m68k/kernel/process_mm.c +++ b/arch/m68k/kernel/process_mm.c | |||
@@ -33,22 +33,6 @@ | |||
33 | #include <asm/setup.h> | 33 | #include <asm/setup.h> |
34 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
35 | 35 | ||
36 | /* | ||
37 | * Initial task/thread structure. Make this a per-architecture thing, | ||
38 | * because different architectures tend to have different | ||
39 | * alignment requirements and potentially different initial | ||
40 | * setup. | ||
41 | */ | ||
42 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | ||
43 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | ||
44 | union thread_union init_thread_union __init_task_data | ||
45 | __attribute__((aligned(THREAD_SIZE))) = | ||
46 | { INIT_THREAD_INFO(init_task) }; | ||
47 | |||
48 | /* initial task structure */ | ||
49 | struct task_struct init_task = INIT_TASK(init_task); | ||
50 | |||
51 | EXPORT_SYMBOL(init_task); | ||
52 | 36 | ||
53 | asmlinkage void ret_from_fork(void); | 37 | asmlinkage void ret_from_fork(void); |
54 | 38 | ||
@@ -188,9 +172,7 @@ void flush_thread(void) | |||
188 | 172 | ||
189 | current->thread.fs = __USER_DS; | 173 | current->thread.fs = __USER_DS; |
190 | if (!FPU_IS_EMU) | 174 | if (!FPU_IS_EMU) |
191 | asm volatile (".chip 68k/68881\n\t" | 175 | asm volatile ("frestore %0@" : : "a" (&zero) : "memory"); |
192 | "frestore %0@\n\t" | ||
193 | ".chip 68k" : : "a" (&zero)); | ||
194 | } | 176 | } |
195 | 177 | ||
196 | /* | 178 | /* |
@@ -264,11 +246,28 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
264 | /* Copy the current fpu state */ | 246 | /* Copy the current fpu state */ |
265 | asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory"); | 247 | asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory"); |
266 | 248 | ||
267 | if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) | 249 | if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) { |
268 | asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t" | 250 | if (CPU_IS_COLDFIRE) { |
269 | "fmoveml %/fpiar/%/fpcr/%/fpsr,%1" | 251 | asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t" |
270 | : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0]) | 252 | "fmovel %/fpiar,%1\n\t" |
271 | : "memory"); | 253 | "fmovel %/fpcr,%2\n\t" |
254 | "fmovel %/fpsr,%3" | ||
255 | : | ||
256 | : "m" (p->thread.fp[0]), | ||
257 | "m" (p->thread.fpcntl[0]), | ||
258 | "m" (p->thread.fpcntl[1]), | ||
259 | "m" (p->thread.fpcntl[2]) | ||
260 | : "memory"); | ||
261 | } else { | ||
262 | asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t" | ||
263 | "fmoveml %/fpiar/%/fpcr/%/fpsr,%1" | ||
264 | : | ||
265 | : "m" (p->thread.fp[0]), | ||
266 | "m" (p->thread.fpcntl[0]) | ||
267 | : "memory"); | ||
268 | } | ||
269 | } | ||
270 | |||
272 | /* Restore the state in case the fpu was busy */ | 271 | /* Restore the state in case the fpu was busy */ |
273 | asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0])); | 272 | asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0])); |
274 | } | 273 | } |
@@ -301,12 +300,28 @@ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu) | |||
301 | if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2]) | 300 | if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2]) |
302 | return 0; | 301 | return 0; |
303 | 302 | ||
304 | asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0" | 303 | if (CPU_IS_COLDFIRE) { |
305 | :: "m" (fpu->fpcntl[0]) | 304 | asm volatile ("fmovel %/fpiar,%0\n\t" |
306 | : "memory"); | 305 | "fmovel %/fpcr,%1\n\t" |
307 | asm volatile ("fmovemx %/fp0-%/fp7,%0" | 306 | "fmovel %/fpsr,%2\n\t" |
308 | :: "m" (fpu->fpregs[0]) | 307 | "fmovemd %/fp0-%/fp7,%3" |
309 | : "memory"); | 308 | : |
309 | : "m" (fpu->fpcntl[0]), | ||
310 | "m" (fpu->fpcntl[1]), | ||
311 | "m" (fpu->fpcntl[2]), | ||
312 | "m" (fpu->fpregs[0]) | ||
313 | : "memory"); | ||
314 | } else { | ||
315 | asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0" | ||
316 | : | ||
317 | : "m" (fpu->fpcntl[0]) | ||
318 | : "memory"); | ||
319 | asm volatile ("fmovemx %/fp0-%/fp7,%0" | ||
320 | : | ||
321 | : "m" (fpu->fpregs[0]) | ||
322 | : "memory"); | ||
323 | } | ||
324 | |||
310 | return 1; | 325 | return 1; |
311 | } | 326 | } |
312 | EXPORT_SYMBOL(dump_fpu); | 327 | EXPORT_SYMBOL(dump_fpu); |
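The CPU_IS_COLDFIRE branches added to copy_thread() and dump_fpu() exist because the two FPU families use different register-image formats: the V4e ColdFire FPU holds 64-bit doubles and its fmovem form only transfers data registers, while the 68881/68882 store 96-bit extended images and can move fpiar/fpcr/fpsr in one fmoveml. A hedged sketch of the two memory layouts the asm variants write (these are illustrative structs, not the kernel's):

    /* Hedged illustration of the image sizes behind the two asm paths. */
    struct fp_image_68881_sketch    { unsigned char fp[8][12]; };      /* fmovemx: 96-bit entries */
    struct fp_image_coldfire_sketch { unsigned long long fp[8]; };     /* fmovemd: 64-bit doubles */
    struct fp_cntl_sketch           { unsigned long fpiar, fpcr, fpsr; };
    /* 68881 path:   one fmovemx + one fmoveml covers everything.
     * ColdFire path: one fmovemd plus three separate fmovel instructions,
     * since ColdFire fmovem cannot move multiple control registers at once. */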
diff --git a/arch/m68k/kernel/ptrace_mm.c b/arch/m68k/kernel/ptrace_mm.c index 0b252683cefb..7bc999b73529 100644 --- a/arch/m68k/kernel/ptrace_mm.c +++ b/arch/m68k/kernel/ptrace_mm.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/ptrace.h> | 18 | #include <linux/ptrace.h> |
19 | #include <linux/user.h> | 19 | #include <linux/user.h> |
20 | #include <linux/signal.h> | 20 | #include <linux/signal.h> |
21 | #include <linux/tracehook.h> | ||
21 | 22 | ||
22 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
23 | #include <asm/page.h> | 24 | #include <asm/page.h> |
@@ -275,3 +276,20 @@ asmlinkage void syscall_trace(void) | |||
275 | current->exit_code = 0; | 276 | current->exit_code = 0; |
276 | } | 277 | } |
277 | } | 278 | } |
279 | |||
280 | #ifdef CONFIG_COLDFIRE | ||
281 | asmlinkage int syscall_trace_enter(void) | ||
282 | { | ||
283 | int ret = 0; | ||
284 | |||
285 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | ||
286 | ret = tracehook_report_syscall_entry(task_pt_regs(current)); | ||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | asmlinkage void syscall_trace_leave(void) | ||
291 | { | ||
292 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | ||
293 | tracehook_report_syscall_exit(task_pt_regs(current), 0); | ||
294 | } | ||
295 | #endif /* CONFIG_COLDFIRE */ | ||
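These are the C halves that the ColdFire entry code calls around system-call dispatch; a nonzero return from tracehook_report_syscall_entry() is the tracer asking for the call to be skipped. A hedged, self-contained sketch of the calling convention (the dispatcher and its argument passing are illustrative, not the actual entry.S code):

    extern int  syscall_trace_enter(void);
    extern void syscall_trace_leave(void);

    typedef long (*syscall_fn_sketch)(unsigned long, unsigned long, unsigned long);

    static long traced_dispatch_sketch(syscall_fn_sketch fn, unsigned long a,
                                       unsigned long b, unsigned long c)
    {
            long ret;

            if (syscall_trace_enter())      /* nonzero: skip the system call */
                    ret = -1;
            else
                    ret = fn(a, b, c);

            syscall_trace_leave();          /* report the exit to the tracer */
            return ret;
    }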
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index c3b45061dd08..d872ce4807c9 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c | |||
@@ -221,7 +221,8 @@ void __init setup_arch(char **cmdline_p) | |||
221 | #endif | 221 | #endif |
222 | 222 | ||
223 | /* The bootinfo is located right after the kernel bss */ | 223 | /* The bootinfo is located right after the kernel bss */ |
224 | m68k_parse_bootinfo((const struct bi_record *)_end); | 224 | if (!CPU_IS_COLDFIRE) |
225 | m68k_parse_bootinfo((const struct bi_record *)_end); | ||
225 | 226 | ||
226 | if (CPU_IS_040) | 227 | if (CPU_IS_040) |
227 | m68k_is040or060 = 4; | 228 | m68k_is040or060 = 4; |
@@ -235,7 +236,7 @@ void __init setup_arch(char **cmdline_p) | |||
235 | * with them, we should add a test to check_bugs() below] */ | 236 | * with them, we should add a test to check_bugs() below] */ |
236 | #ifndef CONFIG_M68KFPU_EMU_ONLY | 237 | #ifndef CONFIG_M68KFPU_EMU_ONLY |
237 | /* clear the fpu if we have one */ | 238 | /* clear the fpu if we have one */ |
238 | if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) { | 239 | if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) { |
239 | volatile int zero = 0; | 240 | volatile int zero = 0; |
240 | asm volatile ("frestore %0" : : "m" (zero)); | 241 | asm volatile ("frestore %0" : : "m" (zero)); |
241 | } | 242 | } |
@@ -258,6 +259,10 @@ void __init setup_arch(char **cmdline_p) | |||
258 | init_mm.end_data = (unsigned long)_edata; | 259 | init_mm.end_data = (unsigned long)_edata; |
259 | init_mm.brk = (unsigned long)_end; | 260 | init_mm.brk = (unsigned long)_end; |
260 | 261 | ||
262 | #if defined(CONFIG_BOOTPARAM) | ||
263 | strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE); | ||
264 | m68k_command_line[CL_SIZE - 1] = 0; | ||
265 | #endif /* CONFIG_BOOTPARAM */ | ||
261 | *cmdline_p = m68k_command_line; | 266 | *cmdline_p = m68k_command_line; |
262 | memcpy(boot_command_line, *cmdline_p, CL_SIZE); | 267 | memcpy(boot_command_line, *cmdline_p, CL_SIZE); |
263 | 268 | ||
@@ -323,6 +328,11 @@ void __init setup_arch(char **cmdline_p) | |||
323 | config_sun3x(); | 328 | config_sun3x(); |
324 | break; | 329 | break; |
325 | #endif | 330 | #endif |
331 | #ifdef CONFIG_COLDFIRE | ||
332 | case MACH_M54XX: | ||
333 | config_BSP(NULL, 0); | ||
334 | break; | ||
335 | #endif | ||
326 | default: | 336 | default: |
327 | panic("No configuration setup"); | 337 | panic("No configuration setup"); |
328 | } | 338 | } |
@@ -384,6 +394,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
384 | #define LOOP_CYCLES_68030 (8) | 394 | #define LOOP_CYCLES_68030 (8) |
385 | #define LOOP_CYCLES_68040 (3) | 395 | #define LOOP_CYCLES_68040 (3) |
386 | #define LOOP_CYCLES_68060 (1) | 396 | #define LOOP_CYCLES_68060 (1) |
397 | #define LOOP_CYCLES_COLDFIRE (2) | ||
387 | 398 | ||
388 | if (CPU_IS_020) { | 399 | if (CPU_IS_020) { |
389 | cpu = "68020"; | 400 | cpu = "68020"; |
@@ -397,6 +408,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
397 | } else if (CPU_IS_060) { | 408 | } else if (CPU_IS_060) { |
398 | cpu = "68060"; | 409 | cpu = "68060"; |
399 | clockfactor = LOOP_CYCLES_68060; | 410 | clockfactor = LOOP_CYCLES_68060; |
411 | } else if (CPU_IS_COLDFIRE) { | ||
412 | cpu = "ColdFire"; | ||
413 | clockfactor = LOOP_CYCLES_COLDFIRE; | ||
400 | } else { | 414 | } else { |
401 | cpu = "680x0"; | 415 | cpu = "680x0"; |
402 | clockfactor = 0; | 416 | clockfactor = 0; |
@@ -415,6 +429,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
415 | fpu = "68060"; | 429 | fpu = "68060"; |
416 | else if (m68k_fputype & FPU_SUNFPA) | 430 | else if (m68k_fputype & FPU_SUNFPA) |
417 | fpu = "Sun FPA"; | 431 | fpu = "Sun FPA"; |
432 | else if (m68k_fputype & FPU_COLDFIRE) | ||
433 | fpu = "ColdFire"; | ||
418 | else | 434 | else |
419 | fpu = "none"; | 435 | fpu = "none"; |
420 | #endif | 436 | #endif |
@@ -431,6 +447,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
431 | mmu = "Sun-3"; | 447 | mmu = "Sun-3"; |
432 | else if (m68k_mmutype & MMU_APOLLO) | 448 | else if (m68k_mmutype & MMU_APOLLO) |
433 | mmu = "Apollo"; | 449 | mmu = "Apollo"; |
450 | else if (m68k_mmutype & MMU_COLDFIRE) | ||
451 | mmu = "ColdFire"; | ||
434 | else | 452 | else |
435 | mmu = "unknown"; | 453 | mmu = "unknown"; |
436 | 454 | ||
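LOOP_CYCLES_COLDFIRE extends the per-CPU table that show_cpuinfo() uses to turn the calibrated delay loop into a rough clock figure: each loop iteration costs about clockfactor cycles, so iterations per second times that factor approximates the core clock. A hedged sketch of the arithmetic (the sample numbers are invented):

    /* Hedged sketch: loops_per_jiffy * HZ loop iterations run per second,
     * each burning roughly `clockfactor` cycles on the given core. */
    static unsigned long clock_estimate_hz(unsigned long loops_per_jiffy,
                                           unsigned int hz, unsigned int clockfactor)
    {
            return loops_per_jiffy * hz * clockfactor;
    }
    /* e.g. loops_per_jiffy = 83000, hz = 100, clockfactor = 2 (ColdFire)
     *      -> ~16,600,000, i.e. a reported clock of about 16.6 MHz */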
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index 2ed8c0fb1517..ca3df0dc7e88 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c | |||
@@ -47,7 +47,6 @@ EXPORT_SYMBOL(memory_end); | |||
47 | char __initdata command_line[COMMAND_LINE_SIZE]; | 47 | char __initdata command_line[COMMAND_LINE_SIZE]; |
48 | 48 | ||
49 | /* machine dependent timer functions */ | 49 | /* machine dependent timer functions */ |
50 | void (*mach_gettod)(int*, int*, int*, int*, int*, int*); | ||
51 | int (*mach_set_clock_mmss)(unsigned long); | 50 | int (*mach_set_clock_mmss)(unsigned long); |
52 | 51 | ||
53 | /* machine dependent reboot functions */ | 52 | /* machine dependent reboot functions */ |
diff --git a/arch/m68k/kernel/signal_mm.c b/arch/m68k/kernel/signal_mm.c index a0afc239304e..cb856f9da655 100644 --- a/arch/m68k/kernel/signal_mm.c +++ b/arch/m68k/kernel/signal_mm.c | |||
@@ -56,7 +56,11 @@ static const int frame_extra_sizes[16] = { | |||
56 | [1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */ | 56 | [1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */ |
57 | [2] = sizeof(((struct frame *)0)->un.fmt2), | 57 | [2] = sizeof(((struct frame *)0)->un.fmt2), |
58 | [3] = sizeof(((struct frame *)0)->un.fmt3), | 58 | [3] = sizeof(((struct frame *)0)->un.fmt3), |
59 | #ifdef CONFIG_COLDFIRE | ||
60 | [4] = 0, | ||
61 | #else | ||
59 | [4] = sizeof(((struct frame *)0)->un.fmt4), | 62 | [4] = sizeof(((struct frame *)0)->un.fmt4), |
63 | #endif | ||
60 | [5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */ | 64 | [5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */ |
61 | [6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */ | 65 | [6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */ |
62 | [7] = sizeof(((struct frame *)0)->un.fmt7), | 66 | [7] = sizeof(((struct frame *)0)->un.fmt7), |
@@ -84,7 +88,11 @@ int handle_kernel_fault(struct pt_regs *regs) | |||
84 | regs->stkadj = frame_extra_sizes[regs->format]; | 88 | regs->stkadj = frame_extra_sizes[regs->format]; |
85 | tregs = (struct pt_regs *)((long)regs + regs->stkadj); | 89 | tregs = (struct pt_regs *)((long)regs + regs->stkadj); |
86 | tregs->vector = regs->vector; | 90 | tregs->vector = regs->vector; |
91 | #ifdef CONFIG_COLDFIRE | ||
92 | tregs->format = 4; | ||
93 | #else | ||
87 | tregs->format = 0; | 94 | tregs->format = 0; |
95 | #endif | ||
88 | tregs->pc = fixup->fixup; | 96 | tregs->pc = fixup->fixup; |
89 | tregs->sr = regs->sr; | 97 | tregs->sr = regs->sr; |
90 | 98 | ||
@@ -195,7 +203,8 @@ static inline int restore_fpu_state(struct sigcontext *sc) | |||
195 | 203 | ||
196 | if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { | 204 | if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { |
197 | /* Verify the frame format. */ | 205 | /* Verify the frame format. */ |
198 | if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version)) | 206 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE) && |
207 | (sc->sc_fpstate[0] != fpu_version)) | ||
199 | goto out; | 208 | goto out; |
200 | if (CPU_IS_020_OR_030) { | 209 | if (CPU_IS_020_OR_030) { |
201 | if (m68k_fputype & FPU_68881 && | 210 | if (m68k_fputype & FPU_68881 && |
@@ -214,19 +223,43 @@ static inline int restore_fpu_state(struct sigcontext *sc) | |||
214 | sc->sc_fpstate[3] == 0x60 || | 223 | sc->sc_fpstate[3] == 0x60 || |
215 | sc->sc_fpstate[3] == 0xe0)) | 224 | sc->sc_fpstate[3] == 0xe0)) |
216 | goto out; | 225 | goto out; |
226 | } else if (CPU_IS_COLDFIRE) { | ||
227 | if (!(sc->sc_fpstate[0] == 0x00 || | ||
228 | sc->sc_fpstate[0] == 0x05 || | ||
229 | sc->sc_fpstate[0] == 0xe5)) | ||
230 | goto out; | ||
217 | } else | 231 | } else |
218 | goto out; | 232 | goto out; |
219 | 233 | ||
220 | __asm__ volatile (".chip 68k/68881\n\t" | 234 | if (CPU_IS_COLDFIRE) { |
221 | "fmovemx %0,%%fp0-%%fp1\n\t" | 235 | __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t" |
222 | "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" | 236 | "fmovel %1,%%fpcr\n\t" |
223 | ".chip 68k" | 237 | "fmovel %2,%%fpsr\n\t" |
224 | : /* no outputs */ | 238 | "fmovel %3,%%fpiar" |
225 | : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl)); | 239 | : /* no outputs */ |
240 | : "m" (sc->sc_fpregs[0]), | ||
241 | "m" (sc->sc_fpcntl[0]), | ||
242 | "m" (sc->sc_fpcntl[1]), | ||
243 | "m" (sc->sc_fpcntl[2])); | ||
244 | } else { | ||
245 | __asm__ volatile (".chip 68k/68881\n\t" | ||
246 | "fmovemx %0,%%fp0-%%fp1\n\t" | ||
247 | "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" | ||
248 | ".chip 68k" | ||
249 | : /* no outputs */ | ||
250 | : "m" (*sc->sc_fpregs), | ||
251 | "m" (*sc->sc_fpcntl)); | ||
252 | } | ||
253 | } | ||
254 | |||
255 | if (CPU_IS_COLDFIRE) { | ||
256 | __asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate)); | ||
257 | } else { | ||
258 | __asm__ volatile (".chip 68k/68881\n\t" | ||
259 | "frestore %0\n\t" | ||
260 | ".chip 68k" | ||
261 | : : "m" (*sc->sc_fpstate)); | ||
226 | } | 262 | } |
227 | __asm__ volatile (".chip 68k/68881\n\t" | ||
228 | "frestore %0\n\t" | ||
229 | ".chip 68k" : : "m" (*sc->sc_fpstate)); | ||
230 | err = 0; | 263 | err = 0; |
231 | 264 | ||
232 | out: | 265 | out: |
@@ -241,7 +274,7 @@ out: | |||
241 | static inline int rt_restore_fpu_state(struct ucontext __user *uc) | 274 | static inline int rt_restore_fpu_state(struct ucontext __user *uc) |
242 | { | 275 | { |
243 | unsigned char fpstate[FPCONTEXT_SIZE]; | 276 | unsigned char fpstate[FPCONTEXT_SIZE]; |
244 | int context_size = CPU_IS_060 ? 8 : 0; | 277 | int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0); |
245 | fpregset_t fpregs; | 278 | fpregset_t fpregs; |
246 | int err = 1; | 279 | int err = 1; |
247 | 280 | ||
@@ -260,10 +293,11 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc) | |||
260 | if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate)) | 293 | if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate)) |
261 | goto out; | 294 | goto out; |
262 | if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { | 295 | if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { |
263 | if (!CPU_IS_060) | 296 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE)) |
264 | context_size = fpstate[1]; | 297 | context_size = fpstate[1]; |
265 | /* Verify the frame format. */ | 298 | /* Verify the frame format. */ |
266 | if (!CPU_IS_060 && (fpstate[0] != fpu_version)) | 299 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE) && |
300 | (fpstate[0] != fpu_version)) | ||
267 | goto out; | 301 | goto out; |
268 | if (CPU_IS_020_OR_030) { | 302 | if (CPU_IS_020_OR_030) { |
269 | if (m68k_fputype & FPU_68881 && | 303 | if (m68k_fputype & FPU_68881 && |
@@ -282,26 +316,50 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc) | |||
282 | fpstate[3] == 0x60 || | 316 | fpstate[3] == 0x60 || |
283 | fpstate[3] == 0xe0)) | 317 | fpstate[3] == 0xe0)) |
284 | goto out; | 318 | goto out; |
319 | } else if (CPU_IS_COLDFIRE) { | ||
320 | if (!(fpstate[3] == 0x00 || | ||
321 | fpstate[3] == 0x05 || | ||
322 | fpstate[3] == 0xe5)) | ||
323 | goto out; | ||
285 | } else | 324 | } else |
286 | goto out; | 325 | goto out; |
287 | if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs, | 326 | if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs, |
288 | sizeof(fpregs))) | 327 | sizeof(fpregs))) |
289 | goto out; | 328 | goto out; |
290 | __asm__ volatile (".chip 68k/68881\n\t" | 329 | |
291 | "fmovemx %0,%%fp0-%%fp7\n\t" | 330 | if (CPU_IS_COLDFIRE) { |
292 | "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" | 331 | __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t" |
293 | ".chip 68k" | 332 | "fmovel %1,%%fpcr\n\t" |
294 | : /* no outputs */ | 333 | "fmovel %2,%%fpsr\n\t" |
295 | : "m" (*fpregs.f_fpregs), | 334 | "fmovel %3,%%fpiar" |
296 | "m" (*fpregs.f_fpcntl)); | 335 | : /* no outputs */ |
336 | : "m" (fpregs.f_fpregs[0]), | ||
337 | "m" (fpregs.f_fpcntl[0]), | ||
338 | "m" (fpregs.f_fpcntl[1]), | ||
339 | "m" (fpregs.f_fpcntl[2])); | ||
340 | } else { | ||
341 | __asm__ volatile (".chip 68k/68881\n\t" | ||
342 | "fmovemx %0,%%fp0-%%fp7\n\t" | ||
343 | "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" | ||
344 | ".chip 68k" | ||
345 | : /* no outputs */ | ||
346 | : "m" (*fpregs.f_fpregs), | ||
347 | "m" (*fpregs.f_fpcntl)); | ||
348 | } | ||
297 | } | 349 | } |
298 | if (context_size && | 350 | if (context_size && |
299 | __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1, | 351 | __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1, |
300 | context_size)) | 352 | context_size)) |
301 | goto out; | 353 | goto out; |
302 | __asm__ volatile (".chip 68k/68881\n\t" | 354 | |
303 | "frestore %0\n\t" | 355 | if (CPU_IS_COLDFIRE) { |
304 | ".chip 68k" : : "m" (*fpstate)); | 356 | __asm__ volatile ("frestore %0" : : "m" (*fpstate)); |
357 | } else { | ||
358 | __asm__ volatile (".chip 68k/68881\n\t" | ||
359 | "frestore %0\n\t" | ||
360 | ".chip 68k" | ||
361 | : : "m" (*fpstate)); | ||
362 | } | ||
305 | err = 0; | 363 | err = 0; |
306 | 364 | ||
307 | out: | 365 | out: |
@@ -336,8 +394,12 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec, | |||
336 | regs->format = formatvec >> 12; | 394 | regs->format = formatvec >> 12; |
337 | regs->vector = formatvec & 0xfff; | 395 | regs->vector = formatvec & 0xfff; |
338 | #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack)) | 396 | #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack)) |
339 | __asm__ __volatile__ | 397 | __asm__ __volatile__ ( |
340 | (" movel %0,%/a0\n\t" | 398 | #ifdef CONFIG_COLDFIRE |
399 | " movel %0,%/sp\n\t" | ||
400 | " bra ret_from_signal\n" | ||
401 | #else | ||
402 | " movel %0,%/a0\n\t" | ||
341 | " subl %1,%/a0\n\t" /* make room on stack */ | 403 | " subl %1,%/a0\n\t" /* make room on stack */ |
342 | " movel %/a0,%/sp\n\t" /* set stack pointer */ | 404 | " movel %/a0,%/sp\n\t" /* set stack pointer */ |
343 | /* move switch_stack and pt_regs */ | 405 | /* move switch_stack and pt_regs */ |
@@ -350,6 +412,7 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec, | |||
350 | "2: movel %4@+,%/a0@+\n\t" | 412 | "2: movel %4@+,%/a0@+\n\t" |
351 | " dbra %1,2b\n\t" | 413 | " dbra %1,2b\n\t" |
352 | " bral ret_from_signal\n" | 414 | " bral ret_from_signal\n" |
415 | #endif | ||
353 | : /* no outputs, it doesn't ever return */ | 416 | : /* no outputs, it doesn't ever return */ |
354 | : "a" (sw), "d" (fsize), "d" (frame_offset/4-1), | 417 | : "a" (sw), "d" (fsize), "d" (frame_offset/4-1), |
355 | "n" (frame_offset), "a" (buf + fsize/4) | 418 | "n" (frame_offset), "a" (buf + fsize/4) |
@@ -516,10 +579,15 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs) | |||
516 | return; | 579 | return; |
517 | } | 580 | } |
518 | 581 | ||
519 | __asm__ volatile (".chip 68k/68881\n\t" | 582 | if (CPU_IS_COLDFIRE) { |
520 | "fsave %0\n\t" | 583 | __asm__ volatile ("fsave %0" |
521 | ".chip 68k" | 584 | : : "m" (*sc->sc_fpstate) : "memory"); |
522 | : : "m" (*sc->sc_fpstate) : "memory"); | 585 | } else { |
586 | __asm__ volatile (".chip 68k/68881\n\t" | ||
587 | "fsave %0\n\t" | ||
588 | ".chip 68k" | ||
589 | : : "m" (*sc->sc_fpstate) : "memory"); | ||
590 | } | ||
523 | 591 | ||
524 | if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { | 592 | if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { |
525 | fpu_version = sc->sc_fpstate[0]; | 593 | fpu_version = sc->sc_fpstate[0]; |
@@ -530,21 +598,35 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs) | |||
530 | if (*(unsigned short *) sc->sc_fpstate == 0x1f38) | 598 | if (*(unsigned short *) sc->sc_fpstate == 0x1f38) |
531 | sc->sc_fpstate[0x38] |= 1 << 3; | 599 | sc->sc_fpstate[0x38] |= 1 << 3; |
532 | } | 600 | } |
533 | __asm__ volatile (".chip 68k/68881\n\t" | 601 | |
534 | "fmovemx %%fp0-%%fp1,%0\n\t" | 602 | if (CPU_IS_COLDFIRE) { |
535 | "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" | 603 | __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t" |
536 | ".chip 68k" | 604 | "fmovel %%fpcr,%1\n\t" |
537 | : "=m" (*sc->sc_fpregs), | 605 | "fmovel %%fpsr,%2\n\t" |
538 | "=m" (*sc->sc_fpcntl) | 606 | "fmovel %%fpiar,%3" |
539 | : /* no inputs */ | 607 | : "=m" (sc->sc_fpregs[0]), |
540 | : "memory"); | 608 | "=m" (sc->sc_fpcntl[0]), |
609 | "=m" (sc->sc_fpcntl[1]), | ||
610 | "=m" (sc->sc_fpcntl[2]) | ||
611 | : /* no inputs */ | ||
612 | : "memory"); | ||
613 | } else { | ||
614 | __asm__ volatile (".chip 68k/68881\n\t" | ||
615 | "fmovemx %%fp0-%%fp1,%0\n\t" | ||
616 | "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" | ||
617 | ".chip 68k" | ||
618 | : "=m" (*sc->sc_fpregs), | ||
619 | "=m" (*sc->sc_fpcntl) | ||
620 | : /* no inputs */ | ||
621 | : "memory"); | ||
622 | } | ||
541 | } | 623 | } |
542 | } | 624 | } |
543 | 625 | ||
544 | static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs) | 626 | static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs) |
545 | { | 627 | { |
546 | unsigned char fpstate[FPCONTEXT_SIZE]; | 628 | unsigned char fpstate[FPCONTEXT_SIZE]; |
547 | int context_size = CPU_IS_060 ? 8 : 0; | 629 | int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0); |
548 | int err = 0; | 630 | int err = 0; |
549 | 631 | ||
550 | if (FPU_IS_EMU) { | 632 | if (FPU_IS_EMU) { |
@@ -557,15 +639,19 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs * | |||
557 | return err; | 639 | return err; |
558 | } | 640 | } |
559 | 641 | ||
560 | __asm__ volatile (".chip 68k/68881\n\t" | 642 | if (CPU_IS_COLDFIRE) { |
561 | "fsave %0\n\t" | 643 | __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory"); |
562 | ".chip 68k" | 644 | } else { |
563 | : : "m" (*fpstate) : "memory"); | 645 | __asm__ volatile (".chip 68k/68881\n\t" |
646 | "fsave %0\n\t" | ||
647 | ".chip 68k" | ||
648 | : : "m" (*fpstate) : "memory"); | ||
649 | } | ||
564 | 650 | ||
565 | err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate); | 651 | err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate); |
566 | if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { | 652 | if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { |
567 | fpregset_t fpregs; | 653 | fpregset_t fpregs; |
568 | if (!CPU_IS_060) | 654 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE)) |
569 | context_size = fpstate[1]; | 655 | context_size = fpstate[1]; |
570 | fpu_version = fpstate[0]; | 656 | fpu_version = fpstate[0]; |
571 | if (CPU_IS_020_OR_030 && | 657 | if (CPU_IS_020_OR_030 && |
@@ -575,14 +661,27 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs * | |||
575 | if (*(unsigned short *) fpstate == 0x1f38) | 661 | if (*(unsigned short *) fpstate == 0x1f38) |
576 | fpstate[0x38] |= 1 << 3; | 662 | fpstate[0x38] |= 1 << 3; |
577 | } | 663 | } |
578 | __asm__ volatile (".chip 68k/68881\n\t" | 664 | if (CPU_IS_COLDFIRE) { |
579 | "fmovemx %%fp0-%%fp7,%0\n\t" | 665 | __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t" |
580 | "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" | 666 | "fmovel %%fpcr,%1\n\t" |
581 | ".chip 68k" | 667 | "fmovel %%fpsr,%2\n\t" |
582 | : "=m" (*fpregs.f_fpregs), | 668 | "fmovel %%fpiar,%3" |
583 | "=m" (*fpregs.f_fpcntl) | 669 | : "=m" (fpregs.f_fpregs[0]), |
584 | : /* no inputs */ | 670 | "=m" (fpregs.f_fpcntl[0]), |
585 | : "memory"); | 671 | "=m" (fpregs.f_fpcntl[1]), |
672 | "=m" (fpregs.f_fpcntl[2]) | ||
673 | : /* no inputs */ | ||
674 | : "memory"); | ||
675 | } else { | ||
676 | __asm__ volatile (".chip 68k/68881\n\t" | ||
677 | "fmovemx %%fp0-%%fp7,%0\n\t" | ||
678 | "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" | ||
679 | ".chip 68k" | ||
680 | : "=m" (*fpregs.f_fpregs), | ||
681 | "=m" (*fpregs.f_fpcntl) | ||
682 | : /* no inputs */ | ||
683 | : "memory"); | ||
684 | } | ||
586 | err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs, | 685 | err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs, |
587 | sizeof(fpregs)); | 686 | sizeof(fpregs)); |
588 | } | 687 | } |
@@ -679,8 +778,7 @@ static inline void push_cache (unsigned long vaddr) | |||
679 | "cpushl %%bc,(%0)\n\t" | 778 | "cpushl %%bc,(%0)\n\t" |
680 | ".chip 68k" | 779 | ".chip 68k" |
681 | : : "a" (temp)); | 780 | : : "a" (temp)); |
682 | } | 781 | } else if (!CPU_IS_COLDFIRE) { |
683 | else { | ||
684 | /* | 782 | /* |
685 | * 68030/68020 have no writeback cache; | 783 | * 68030/68020 have no writeback cache; |
686 | * still need to clear icache. | 784 | * still need to clear icache. |
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index a5cf40c26de5..75ab79b3bdeb 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c | |||
@@ -1,4 +1,4 @@ | |||
1 | #ifdef CONFIG_MMU | 1 | #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) |
2 | #include "time_mm.c" | 2 | #include "time_mm.c" |
3 | #else | 3 | #else |
4 | #include "time_no.c" | 4 | #include "time_no.c" |
diff --git a/arch/m68k/kernel/time_no.c b/arch/m68k/kernel/time_no.c index 6623909f70e6..3ef0f7768dcd 100644 --- a/arch/m68k/kernel/time_no.c +++ b/arch/m68k/kernel/time_no.c | |||
@@ -26,6 +26,9 @@ | |||
26 | 26 | ||
27 | #define TICK_SIZE (tick_nsec / 1000) | 27 | #define TICK_SIZE (tick_nsec / 1000) |
28 | 28 | ||
29 | /* machine dependent timer functions */ | ||
30 | void (*mach_gettod)(int*, int*, int*, int*, int*, int*); | ||
31 | |||
29 | static inline int set_rtc_mmss(unsigned long nowtime) | 32 | static inline int set_rtc_mmss(unsigned long nowtime) |
30 | { | 33 | { |
31 | if (mach_set_clock_mmss) | 34 | if (mach_set_clock_mmss) |
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 89362f2bb56a..a76452ca964e 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c | |||
@@ -706,6 +706,88 @@ create_atc_entry: | |||
706 | #endif /* CPU_M68020_OR_M68030 */ | 706 | #endif /* CPU_M68020_OR_M68030 */ |
707 | #endif /* !CONFIG_SUN3 */ | 707 | #endif /* !CONFIG_SUN3 */ |
708 | 708 | ||
709 | #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) | ||
710 | #include <asm/mcfmmu.h> | ||
711 | |||
712 | /* | ||
713 | * The following table converts the FS encoding of a ColdFire | ||
714 | * exception stack frame into the error_code value needed by | ||
715 | * do_fault. | ||
716 | */ | ||
717 | static const unsigned char fs_err_code[] = { | ||
718 | 0, /* 0000 */ | ||
719 | 0, /* 0001 */ | ||
720 | 0, /* 0010 */ | ||
721 | 0, /* 0011 */ | ||
722 | 1, /* 0100 */ | ||
723 | 0, /* 0101 */ | ||
724 | 0, /* 0110 */ | ||
725 | 0, /* 0111 */ | ||
726 | 2, /* 1000 */ | ||
727 | 3, /* 1001 */ | ||
728 | 2, /* 1010 */ | ||
729 | 0, /* 1011 */ | ||
730 | 1, /* 1100 */ | ||
731 | 1, /* 1101 */ | ||
732 | 0, /* 1110 */ | ||
733 | 0 /* 1111 */ | ||
734 | }; | ||
735 | |||
736 | static inline void access_errorcf(unsigned int fs, struct frame *fp) | ||
737 | { | ||
738 | unsigned long mmusr, addr; | ||
739 | unsigned int err_code; | ||
740 | int need_page_fault; | ||
741 | |||
742 | mmusr = mmu_read(MMUSR); | ||
743 | addr = mmu_read(MMUAR); | ||
744 | |||
745 | /* | ||
746 | * error_code: | ||
747 | * bit 0 == 0 means no page found, 1 means protection fault | ||
748 | * bit 1 == 0 means read, 1 means write | ||
749 | */ | ||
750 | switch (fs) { | ||
751 | case 5: /* 0101 TLB opword X miss */ | ||
752 | need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0); | ||
753 | addr = fp->ptregs.pc; | ||
754 | break; | ||
755 | case 6: /* 0110 TLB extension word X miss */ | ||
756 | need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1); | ||
757 | addr = fp->ptregs.pc + sizeof(long); | ||
758 | break; | ||
759 | case 10: /* 1010 TLB W miss */ | ||
760 | need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0); | ||
761 | break; | ||
762 | case 14: /* 1110 TLB R miss */ | ||
763 | need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0); | ||
764 | break; | ||
765 | default: | ||
766 | /* 0000 Normal */ | ||
767 | /* 0001 Reserved */ | ||
768 | /* 0010 Interrupt during debug service routine */ | ||
769 | /* 0011 Reserved */ | ||
770 | /* 0100 X Protection */ | ||
771 | /* 0111 IFP in emulator mode */ | ||
772 | /* 1000 W Protection*/ | ||
773 | /* 1001 Write error*/ | ||
774 | /* 1011 Reserved*/ | ||
775 | /* 1100 R Protection*/ | ||
776 | /* 1101 R Protection*/ | ||
777 | /* 1111 OEP in emulator mode*/ | ||
778 | need_page_fault = 1; | ||
779 | break; | ||
780 | } | ||
781 | |||
782 | if (need_page_fault) { | ||
783 | err_code = fs_err_code[fs]; | ||
784 | if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */ | ||
785 | err_code |= 2; /* bit1 - write, bit0 - protection */ | ||
786 | do_page_fault(&fp->ptregs, addr, err_code); | ||
787 | } | ||
788 | } | ||
789 | #endif /* CONFIG_COLDFIRE CONFIG_MMU */ | ||
790 | |||
709 | asmlinkage void buserr_c(struct frame *fp) | 791 | asmlinkage void buserr_c(struct frame *fp) |
710 | { | 792 | { |
711 | /* Only set esp0 if coming from user mode */ | 793 | /* Only set esp0 if coming from user mode */ |
@@ -716,6 +798,28 @@ asmlinkage void buserr_c(struct frame *fp) | |||
716 | printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format); | 798 | printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format); |
717 | #endif | 799 | #endif |
718 | 800 | ||
801 | #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) | ||
802 | if (CPU_IS_COLDFIRE) { | ||
803 | unsigned int fs; | ||
804 | fs = (fp->ptregs.vector & 0x3) | | ||
805 | ((fp->ptregs.vector & 0xc00) >> 8); | ||
806 | switch (fs) { | ||
807 | case 0x5: | ||
808 | case 0x6: | ||
809 | case 0x7: | ||
810 | case 0x9: | ||
811 | case 0xa: | ||
812 | case 0xd: | ||
813 | case 0xe: | ||
814 | case 0xf: | ||
815 | access_errorcf(fs, fp); | ||
816 | return; | ||
817 | default: | ||
818 | break; | ||
819 | } | ||
820 | } | ||
821 | #endif /* CONFIG_COLDFIRE && CONFIG_MMU */ | ||
822 | |||
719 | switch (fp->ptregs.format) { | 823 | switch (fp->ptregs.format) { |
720 | #if defined (CONFIG_M68060) | 824 | #if defined (CONFIG_M68060) |
721 | case 4: /* 68060 access error */ | 825 | case 4: /* 68060 access error */ |
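On ColdFire the 4-bit fault-status (FS) field is split across the saved format/vector word, so the new code in buserr_c() reassembles it from bits 1:0 and 11:10 before deciding whether access_errorcf() should handle the fault. A hedged standalone illustration of the recombination (the sample vector word is invented):

    #include <stdio.h>

    /* Rebuild FS from bits [1:0] and [11:10] of the format/vector word,
     * exactly as the buserr_c() hunk above does. */
    static unsigned int cf_fault_status(unsigned int formatvec)
    {
            return (formatvec & 0x3) | ((formatvec & 0xc00) >> 8);
    }

    int main(void)
    {
            /* hypothetical word with bits 11:10 = 0b10 and bits 1:0 = 0b10 */
            unsigned int fs = cf_fault_status(0x0802);

            printf("fs = %#x\n", fs);       /* 0xa: "TLB W miss" in access_errorcf() */
            return 0;
    }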
diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux-nommu.lds index 4e2389340837..8e66ccb0935e 100644 --- a/arch/m68k/kernel/vmlinux.lds_no.S +++ b/arch/m68k/kernel/vmlinux-nommu.lds | |||
@@ -69,6 +69,7 @@ SECTIONS { | |||
69 | SCHED_TEXT | 69 | SCHED_TEXT |
70 | LOCK_TEXT | 70 | LOCK_TEXT |
71 | *(.text..lock) | 71 | *(.text..lock) |
72 | *(.fixup) | ||
72 | 73 | ||
73 | . = ALIGN(16); /* Exception table */ | 74 | . = ALIGN(16); /* Exception table */ |
74 | __start___ex_table = .; | 75 | __start___ex_table = .; |
@@ -161,6 +162,13 @@ SECTIONS { | |||
161 | _edata = . ; | 162 | _edata = . ; |
162 | } > DATA | 163 | } > DATA |
163 | 164 | ||
165 | .m68k_fixup : { | ||
166 | __start_fixup = .; | ||
167 | *(.m68k_fixup) | ||
168 | __stop_fixup = .; | ||
169 | } > DATA | ||
170 | NOTES > DATA | ||
171 | |||
164 | .init.text : { | 172 | .init.text : { |
165 | . = ALIGN(PAGE_SIZE); | 173 | . = ALIGN(PAGE_SIZE); |
166 | __init_begin = .; | 174 | __init_begin = .; |
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index d0993594f558..63407c836826 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds | |||
@@ -31,7 +31,9 @@ SECTIONS | |||
31 | 31 | ||
32 | RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) | 32 | RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) |
33 | 33 | ||
34 | _sbss = .; | ||
34 | BSS_SECTION(0, 0, 0) | 35 | BSS_SECTION(0, 0, 0) |
36 | _ebss = .; | ||
35 | 37 | ||
36 | _edata = .; /* End of data section */ | 38 | _edata = .; /* End of data section */ |
37 | 39 | ||
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds index 8080469ee6c1..ad0f46d64c0b 100644 --- a/arch/m68k/kernel/vmlinux-sun3.lds +++ b/arch/m68k/kernel/vmlinux-sun3.lds | |||
@@ -44,7 +44,9 @@ __init_begin = .; | |||
44 | . = ALIGN(PAGE_SIZE); | 44 | . = ALIGN(PAGE_SIZE); |
45 | __init_end = .; | 45 | __init_end = .; |
46 | 46 | ||
47 | _sbss = .; | ||
47 | BSS_SECTION(0, 0, 0) | 48 | BSS_SECTION(0, 0, 0) |
49 | _ebss = .; | ||
48 | 50 | ||
49 | _end = . ; | 51 | _end = . ; |
50 | 52 | ||
diff --git a/arch/m68k/kernel/vmlinux.lds.S b/arch/m68k/kernel/vmlinux.lds.S index 030dabf0bc53..69ec79638870 100644 --- a/arch/m68k/kernel/vmlinux.lds.S +++ b/arch/m68k/kernel/vmlinux.lds.S | |||
@@ -1,5 +1,14 @@ | |||
1 | #ifdef CONFIG_MMU | 1 | #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) |
2 | #include "vmlinux.lds_mm.S" | 2 | PHDRS |
3 | { | ||
4 | text PT_LOAD FILEHDR PHDRS FLAGS (7); | ||
5 | data PT_LOAD FLAGS (7); | ||
6 | } | ||
7 | #ifdef CONFIG_SUN3 | ||
8 | #include "vmlinux-sun3.lds" | ||
3 | #else | 9 | #else |
4 | #include "vmlinux.lds_no.S" | 10 | #include "vmlinux-std.lds" |
11 | #endif | ||
12 | #else | ||
13 | #include "vmlinux-nommu.lds" | ||
5 | #endif | 14 | #endif |
diff --git a/arch/m68k/kernel/vmlinux.lds_mm.S b/arch/m68k/kernel/vmlinux.lds_mm.S deleted file mode 100644 index 99ba315bd0a8..000000000000 --- a/arch/m68k/kernel/vmlinux.lds_mm.S +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | PHDRS | ||
2 | { | ||
3 | text PT_LOAD FILEHDR PHDRS FLAGS (7); | ||
4 | data PT_LOAD FLAGS (7); | ||
5 | } | ||
6 | #ifdef CONFIG_SUN3 | ||
7 | #include "vmlinux-sun3.lds" | ||
8 | #else | ||
9 | #include "vmlinux-std.lds" | ||
10 | #endif | ||
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile index 1a1bd9067e90..a9d782d34276 100644 --- a/arch/m68k/lib/Makefile +++ b/arch/m68k/lib/Makefile | |||
@@ -6,9 +6,11 @@ | |||
6 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ | 6 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ |
7 | memcpy.o memset.o memmove.o | 7 | memcpy.o memset.o memmove.o |
8 | 8 | ||
9 | ifdef CONFIG_MMU | 9 | lib-$(CONFIG_MMU) += string.o uaccess.o |
10 | lib-y += string.o uaccess.o checksum_mm.o | 10 | lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += mulsi3.o divsi3.o udivsi3.o |
11 | else | 11 | lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += modsi3.o umodsi3.o |
12 | lib-y += mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o checksum_no.o | 12 | |
13 | ifndef CONFIG_GENERIC_CSUM | ||
14 | lib-y += checksum.o | ||
13 | endif | 15 | endif |
14 | 16 | ||
diff --git a/arch/m68k/lib/checksum_mm.c b/arch/m68k/lib/checksum.c index 6216f12a756b..6216f12a756b 100644 --- a/arch/m68k/lib/checksum_mm.c +++ b/arch/m68k/lib/checksum.c | |||
diff --git a/arch/m68k/lib/checksum_no.c b/arch/m68k/lib/checksum_no.c deleted file mode 100644 index e4c6354da765..000000000000 --- a/arch/m68k/lib/checksum_no.c +++ /dev/null | |||
@@ -1,156 +0,0 @@ | |||
1 | /* | ||
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | ||
3 | * operating system. INET is implemented using the BSD Socket | ||
4 | * interface as the means of communication with the user level. | ||
5 | * | ||
6 | * IP/TCP/UDP checksumming routines | ||
7 | * | ||
8 | * Authors: Jorge Cwik, <jorge@laser.satlink.net> | ||
9 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> | ||
10 | * Tom May, <ftom@netcom.com> | ||
11 | * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de> | ||
12 | * Lots of code moved from tcp.c and ip.c; see those files | ||
13 | * for more names. | ||
14 | * | ||
15 | * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek: | ||
16 | * Fixed some nasty bugs, causing some horrible crashes. | ||
17 | * A: At some points, the sum (%0) was used as | ||
18 | * length-counter instead of the length counter | ||
19 | * (%1). Thanks to Roman Hodek for pointing this out. | ||
20 | * B: GCC seems to mess up if one uses too many | ||
21 | * data-registers to hold input values and one tries to | ||
22 | * specify d0 and d1 as scratch registers. Letting gcc choose these | ||
23 | * registers itself solves the problem. | ||
24 | * | ||
25 | * This program is free software; you can redistribute it and/or | ||
26 | * modify it under the terms of the GNU General Public License | ||
27 | * as published by the Free Software Foundation; either version | ||
28 | * 2 of the License, or (at your option) any later version. | ||
29 | */ | ||
30 | |||
31 | /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most | ||
32 | of the assembly has to go. */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <net/checksum.h> | ||
36 | |||
37 | static inline unsigned short from32to16(unsigned long x) | ||
38 | { | ||
39 | /* add up 16-bit and 16-bit for 16+c bit */ | ||
40 | x = (x & 0xffff) + (x >> 16); | ||
41 | /* add up carry.. */ | ||
42 | x = (x & 0xffff) + (x >> 16); | ||
43 | return x; | ||
44 | } | ||
45 | |||
46 | static unsigned long do_csum(const unsigned char * buff, int len) | ||
47 | { | ||
48 | int odd, count; | ||
49 | unsigned long result = 0; | ||
50 | |||
51 | if (len <= 0) | ||
52 | goto out; | ||
53 | odd = 1 & (unsigned long) buff; | ||
54 | if (odd) { | ||
55 | result = *buff; | ||
56 | len--; | ||
57 | buff++; | ||
58 | } | ||
59 | count = len >> 1; /* nr of 16-bit words.. */ | ||
60 | if (count) { | ||
61 | if (2 & (unsigned long) buff) { | ||
62 | result += *(unsigned short *) buff; | ||
63 | count--; | ||
64 | len -= 2; | ||
65 | buff += 2; | ||
66 | } | ||
67 | count >>= 1; /* nr of 32-bit words.. */ | ||
68 | if (count) { | ||
69 | unsigned long carry = 0; | ||
70 | do { | ||
71 | unsigned long w = *(unsigned long *) buff; | ||
72 | count--; | ||
73 | buff += 4; | ||
74 | result += carry; | ||
75 | result += w; | ||
76 | carry = (w > result); | ||
77 | } while (count); | ||
78 | result += carry; | ||
79 | result = (result & 0xffff) + (result >> 16); | ||
80 | } | ||
81 | if (len & 2) { | ||
82 | result += *(unsigned short *) buff; | ||
83 | buff += 2; | ||
84 | } | ||
85 | } | ||
86 | if (len & 1) | ||
87 | result += (*buff << 8); | ||
88 | result = from32to16(result); | ||
89 | if (odd) | ||
90 | result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); | ||
91 | out: | ||
92 | return result; | ||
93 | } | ||
94 | |||
95 | #ifdef CONFIG_COLDFIRE | ||
96 | /* | ||
97 | * This is a version of ip_compute_csum() optimized for IP headers, | ||
98 | * which always checksum on 4 octet boundaries. | ||
99 | */ | ||
100 | __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | ||
101 | { | ||
102 | return (__force __sum16)~do_csum(iph,ihl*4); | ||
103 | } | ||
104 | EXPORT_SYMBOL(ip_fast_csum); | ||
105 | #endif | ||
106 | |||
107 | /* | ||
108 | * computes the checksum of a memory block at buff, length len, | ||
109 | * and adds in "sum" (32-bit) | ||
110 | * | ||
111 | * returns a 32-bit number suitable for feeding into itself | ||
112 | * or csum_tcpudp_magic | ||
113 | * | ||
114 | * this function must be called with even lengths, except | ||
115 | * for the last fragment, which may be odd | ||
116 | * | ||
117 | * it's best to have buff aligned on a 32-bit boundary | ||
118 | */ | ||
119 | __wsum csum_partial(const void *buff, int len, __wsum sum) | ||
120 | { | ||
121 | unsigned int result = do_csum(buff, len); | ||
122 | |||
123 | /* add in old sum, and carry.. */ | ||
124 | result += (__force u32)sum; | ||
125 | if ((__force u32)sum > result) | ||
126 | result += 1; | ||
127 | return (__force __wsum)result; | ||
128 | } | ||
129 | |||
130 | EXPORT_SYMBOL(csum_partial); | ||
131 | |||
132 | /* | ||
133 | * copy from fs while checksumming, otherwise like csum_partial | ||
134 | */ | ||
135 | |||
136 | __wsum | ||
137 | csum_partial_copy_from_user(const void __user *src, void *dst, | ||
138 | int len, __wsum sum, int *csum_err) | ||
139 | { | ||
140 | if (csum_err) *csum_err = 0; | ||
141 | memcpy(dst, (__force const void *)src, len); | ||
142 | return csum_partial(dst, len, sum); | ||
143 | } | ||
144 | EXPORT_SYMBOL(csum_partial_copy_from_user); | ||
145 | |||
146 | /* | ||
147 | * copy from ds while checksumming, otherwise like csum_partial | ||
148 | */ | ||
149 | |||
150 | __wsum | ||
151 | csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) | ||
152 | { | ||
153 | memcpy(dst, src, len); | ||
154 | return csum_partial(dst, len, sum); | ||
155 | } | ||
156 | EXPORT_SYMBOL(csum_partial_copy_nocheck); | ||
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c index 13854ed8cd9a..5664386338da 100644 --- a/arch/m68k/lib/uaccess.c +++ b/arch/m68k/lib/uaccess.c | |||
@@ -15,17 +15,17 @@ unsigned long __generic_copy_from_user(void *to, const void __user *from, | |||
15 | asm volatile ("\n" | 15 | asm volatile ("\n" |
16 | " tst.l %0\n" | 16 | " tst.l %0\n" |
17 | " jeq 2f\n" | 17 | " jeq 2f\n" |
18 | "1: moves.l (%1)+,%3\n" | 18 | "1: "MOVES".l (%1)+,%3\n" |
19 | " move.l %3,(%2)+\n" | 19 | " move.l %3,(%2)+\n" |
20 | " subq.l #1,%0\n" | 20 | " subq.l #1,%0\n" |
21 | " jne 1b\n" | 21 | " jne 1b\n" |
22 | "2: btst #1,%5\n" | 22 | "2: btst #1,%5\n" |
23 | " jeq 4f\n" | 23 | " jeq 4f\n" |
24 | "3: moves.w (%1)+,%3\n" | 24 | "3: "MOVES".w (%1)+,%3\n" |
25 | " move.w %3,(%2)+\n" | 25 | " move.w %3,(%2)+\n" |
26 | "4: btst #0,%5\n" | 26 | "4: btst #0,%5\n" |
27 | " jeq 6f\n" | 27 | " jeq 6f\n" |
28 | "5: moves.b (%1)+,%3\n" | 28 | "5: "MOVES".b (%1)+,%3\n" |
29 | " move.b %3,(%2)+\n" | 29 | " move.b %3,(%2)+\n" |
30 | "6:\n" | 30 | "6:\n" |
31 | " .section .fixup,\"ax\"\n" | 31 | " .section .fixup,\"ax\"\n" |
@@ -68,17 +68,17 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, | |||
68 | " tst.l %0\n" | 68 | " tst.l %0\n" |
69 | " jeq 4f\n" | 69 | " jeq 4f\n" |
70 | "1: move.l (%1)+,%3\n" | 70 | "1: move.l (%1)+,%3\n" |
71 | "2: moves.l %3,(%2)+\n" | 71 | "2: "MOVES".l %3,(%2)+\n" |
72 | "3: subq.l #1,%0\n" | 72 | "3: subq.l #1,%0\n" |
73 | " jne 1b\n" | 73 | " jne 1b\n" |
74 | "4: btst #1,%5\n" | 74 | "4: btst #1,%5\n" |
75 | " jeq 6f\n" | 75 | " jeq 6f\n" |
76 | " move.w (%1)+,%3\n" | 76 | " move.w (%1)+,%3\n" |
77 | "5: moves.w %3,(%2)+\n" | 77 | "5: "MOVES".w %3,(%2)+\n" |
78 | "6: btst #0,%5\n" | 78 | "6: btst #0,%5\n" |
79 | " jeq 8f\n" | 79 | " jeq 8f\n" |
80 | " move.b (%1)+,%3\n" | 80 | " move.b (%1)+,%3\n" |
81 | "7: moves.b %3,(%2)+\n" | 81 | "7: "MOVES".b %3,(%2)+\n" |
82 | "8:\n" | 82 | "8:\n" |
83 | " .section .fixup,\"ax\"\n" | 83 | " .section .fixup,\"ax\"\n" |
84 | " .even\n" | 84 | " .even\n" |
@@ -115,7 +115,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count) | |||
115 | return count; | 115 | return count; |
116 | 116 | ||
117 | asm volatile ("\n" | 117 | asm volatile ("\n" |
118 | "1: moves.b (%2)+,%4\n" | 118 | "1: "MOVES".b (%2)+,%4\n" |
119 | " move.b %4,(%1)+\n" | 119 | " move.b %4,(%1)+\n" |
120 | " jeq 2f\n" | 120 | " jeq 2f\n" |
121 | " subq.l #1,%3\n" | 121 | " subq.l #1,%3\n" |
@@ -152,7 +152,7 @@ long strnlen_user(const char __user *src, long n) | |||
152 | asm volatile ("\n" | 152 | asm volatile ("\n" |
153 | "1: subq.l #1,%1\n" | 153 | "1: subq.l #1,%1\n" |
154 | " jmi 3f\n" | 154 | " jmi 3f\n" |
155 | "2: moves.b (%0)+,%2\n" | 155 | "2: "MOVES".b (%0)+,%2\n" |
156 | " tst.b %2\n" | 156 | " tst.b %2\n" |
157 | " jne 1b\n" | 157 | " jne 1b\n" |
158 | " jra 4f\n" | 158 | " jra 4f\n" |
@@ -188,15 +188,15 @@ unsigned long __clear_user(void __user *to, unsigned long n) | |||
188 | asm volatile ("\n" | 188 | asm volatile ("\n" |
189 | " tst.l %0\n" | 189 | " tst.l %0\n" |
190 | " jeq 3f\n" | 190 | " jeq 3f\n" |
191 | "1: moves.l %2,(%1)+\n" | 191 | "1: "MOVES".l %2,(%1)+\n" |
192 | "2: subq.l #1,%0\n" | 192 | "2: subq.l #1,%0\n" |
193 | " jne 1b\n" | 193 | " jne 1b\n" |
194 | "3: btst #1,%4\n" | 194 | "3: btst #1,%4\n" |
195 | " jeq 5f\n" | 195 | " jeq 5f\n" |
196 | "4: moves.w %2,(%1)+\n" | 196 | "4: "MOVES".w %2,(%1)+\n" |
197 | "5: btst #0,%4\n" | 197 | "5: btst #0,%4\n" |
198 | " jeq 7f\n" | 198 | " jeq 7f\n" |
199 | "6: moves.b %2,(%1)\n" | 199 | "6: "MOVES".b %2,(%1)\n" |
200 | "7:\n" | 200 | "7:\n" |
201 | " .section .fixup,\"ax\"\n" | 201 | " .section .fixup,\"ax\"\n" |
202 | " .even\n" | 202 | " .even\n" |
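Replacing the literal moves mnemonics with a MOVES string macro lets the same fixup-wrapped copy loops assemble for both classic 68k, where moves reaches the user address space via SFC/DFC, and ColdFire, which has no separate address spaces and must use a plain move. A hedged sketch of the idea behind the macro; its real definition lives in the uaccess headers, and the Kconfig symbol used here (CONFIG_CPU_HAS_ADDRESS_SPACES) is an assumption about how it is keyed:

    /* Hedged sketch, not the literal header text. */
    #ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
    #define MOVES   "moves"         /* classic 68k: SFC/DFC-qualified user access */
    #else
    #define MOVES   "move"          /* ColdFire: user and kernel share one space */
    #endif

    /* The macro is pasted into the asm template by string concatenation:
     *   "1: "MOVES".l (%1)+,%3\n"   expands to   "1: moves.l (%1)+,%3\n"
     * (or "1: move.l (%1)+,%3\n" on ColdFire). */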
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile index 09cadf1058d5..cfbf3205724a 100644 --- a/arch/m68k/mm/Makefile +++ b/arch/m68k/mm/Makefile | |||
@@ -4,6 +4,8 @@ | |||
4 | 4 | ||
5 | obj-y := init.o | 5 | obj-y := init.o |
6 | 6 | ||
7 | obj-$(CONFIG_MMU) += cache.o fault.o hwtest.o | 7 | obj-$(CONFIG_MMU) += cache.o fault.o |
8 | obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o | 8 | obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o hwtest.o |
9 | obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o | 9 | obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o hwtest.o |
10 | obj-$(CONFIG_MMU_COLDFIRE) += kmap.o memory.o mcfmmu.o | ||
11 | |||
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c index 5437fff5fe07..95d0bf66e2e2 100644 --- a/arch/m68k/mm/cache.c +++ b/arch/m68k/mm/cache.c | |||
@@ -74,8 +74,16 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr) | |||
74 | /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */ | 74 | /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */ |
75 | void flush_icache_range(unsigned long address, unsigned long endaddr) | 75 | void flush_icache_range(unsigned long address, unsigned long endaddr) |
76 | { | 76 | { |
77 | 77 | if (CPU_IS_COLDFIRE) { | |
78 | if (CPU_IS_040_OR_060) { | 78 | unsigned long start, end; |
79 | start = address & ICACHE_SET_MASK; | ||
80 | end = endaddr & ICACHE_SET_MASK; | ||
81 | if (start > end) { | ||
82 | flush_cf_icache(0, end); | ||
83 | end = ICACHE_MAX_ADDR; | ||
84 | } | ||
85 | flush_cf_icache(start, end); | ||
86 | } else if (CPU_IS_040_OR_060) { | ||
79 | address &= PAGE_MASK; | 87 | address &= PAGE_MASK; |
80 | 88 | ||
81 | do { | 89 | do { |
@@ -100,7 +108,17 @@ EXPORT_SYMBOL(flush_icache_range); | |||
100 | void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, | 108 | void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, |
101 | unsigned long addr, int len) | 109 | unsigned long addr, int len) |
102 | { | 110 | { |
103 | if (CPU_IS_040_OR_060) { | 111 | if (CPU_IS_COLDFIRE) { |
112 | unsigned long start, end; | ||
113 | start = addr & ICACHE_SET_MASK; | ||
114 | end = (addr + len) & ICACHE_SET_MASK; | ||
115 | if (start > end) { | ||
116 | flush_cf_icache(0, end); | ||
117 | end = ICACHE_MAX_ADDR; | ||
118 | } | ||
119 | flush_cf_icache(start, end); | ||
120 | |||
121 | } else if (CPU_IS_040_OR_060) { | ||
104 | asm volatile ("nop\n\t" | 122 | asm volatile ("nop\n\t" |
105 | ".chip 68040\n\t" | 123 | ".chip 68040\n\t" |
106 | "cpushp %%bc,(%0)\n\t" | 124 | "cpushp %%bc,(%0)\n\t" |
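Both ColdFire branches mask the addresses with ICACHE_SET_MASK, which suggests flush_cf_icache() walks cache sets rather than full virtual addresses; when the masked end falls below the masked start, the range has wrapped around the index space and is flushed in two pieces. A hedged standalone sketch of that wrap handling (flush_sets(), set_mask and max stand in for flush_cf_icache(), ICACHE_SET_MASK and ICACHE_MAX_ADDR):

    /* Hedged illustration of the wrap-around handling used above. */
    static void flush_sets(unsigned long start, unsigned long end)
    {
            /* stand-in for the hardware flush loop in flush_cf_icache() */
    }

    static void flush_range_sketch(unsigned long addr, unsigned long len,
                                   unsigned long set_mask, unsigned long max)
    {
            unsigned long start = addr & set_mask;
            unsigned long end   = (addr + len) & set_mask;

            if (start > end) {              /* index wrapped past the last set */
                    flush_sets(0, end);
                    end = max;
            }
            flush_sets(start, end);
    }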
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c index bbe525434ccb..89f3b203814b 100644 --- a/arch/m68k/mm/init_mm.c +++ b/arch/m68k/mm/init_mm.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/page.h> | 24 | #include <asm/page.h> |
25 | #include <asm/pgalloc.h> | 25 | #include <asm/pgalloc.h> |
26 | #include <asm/system.h> | 26 | #include <asm/system.h> |
27 | #include <asm/traps.h> | ||
27 | #include <asm/machdep.h> | 28 | #include <asm/machdep.h> |
28 | #include <asm/io.h> | 29 | #include <asm/io.h> |
29 | #ifdef CONFIG_ATARI | 30 | #ifdef CONFIG_ATARI |
@@ -75,6 +76,38 @@ extern void init_pointer_table(unsigned long ptable); | |||
75 | 76 | ||
76 | extern pmd_t *zero_pgtable; | 77 | extern pmd_t *zero_pgtable; |
77 | 78 | ||
79 | #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) | ||
80 | #define VECTORS &vectors[0] | ||
81 | #else | ||
82 | #define VECTORS _ramvec | ||
83 | #endif | ||
84 | |||
85 | void __init print_memmap(void) | ||
86 | { | ||
87 | #define UL(x) ((unsigned long) (x)) | ||
88 | #define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10 | ||
89 | #define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20 | ||
90 | #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024) | ||
91 | |||
92 | pr_notice("Virtual kernel memory layout:\n" | ||
93 | " vector : 0x%08lx - 0x%08lx (%4ld KiB)\n" | ||
94 | " kmap : 0x%08lx - 0x%08lx (%4ld MiB)\n" | ||
95 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MiB)\n" | ||
96 | " lowmem : 0x%08lx - 0x%08lx (%4ld MiB)\n" | ||
97 | " .init : 0x%p" " - 0x%p" " (%4d KiB)\n" | ||
98 | " .text : 0x%p" " - 0x%p" " (%4d KiB)\n" | ||
99 | " .data : 0x%p" " - 0x%p" " (%4d KiB)\n" | ||
100 | " .bss : 0x%p" " - 0x%p" " (%4d KiB)\n", | ||
101 | MLK(VECTORS, VECTORS + 256), | ||
102 | MLM(KMAP_START, KMAP_END), | ||
103 | MLM(VMALLOC_START, VMALLOC_END), | ||
104 | MLM(PAGE_OFFSET, (unsigned long)high_memory), | ||
105 | MLK_ROUNDUP(__init_begin, __init_end), | ||
106 | MLK_ROUNDUP(_stext, _etext), | ||
107 | MLK_ROUNDUP(_sdata, _edata), | ||
108 | MLK_ROUNDUP(_sbss, _ebss)); | ||
109 | } | ||
110 | |||
78 | void __init mem_init(void) | 111 | void __init mem_init(void) |
79 | { | 112 | { |
80 | pg_data_t *pgdat; | 113 | pg_data_t *pgdat; |
@@ -106,7 +139,7 @@ void __init mem_init(void) | |||
106 | } | 139 | } |
107 | } | 140 | } |
108 | 141 | ||
109 | #ifndef CONFIG_SUN3 | 142 | #if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) |
110 | /* insert pointer tables allocated so far into the tablelist */ | 143 | /* insert pointer tables allocated so far into the tablelist */ |
111 | init_pointer_table((unsigned long)kernel_pg_dir); | 144 | init_pointer_table((unsigned long)kernel_pg_dir); |
112 | for (i = 0; i < PTRS_PER_PGD; i++) { | 145 | for (i = 0; i < PTRS_PER_PGD; i++) { |
@@ -125,6 +158,7 @@ void __init mem_init(void) | |||
125 | codepages << (PAGE_SHIFT-10), | 158 | codepages << (PAGE_SHIFT-10), |
126 | datapages << (PAGE_SHIFT-10), | 159 | datapages << (PAGE_SHIFT-10), |
127 | initpages << (PAGE_SHIFT-10)); | 160 | initpages << (PAGE_SHIFT-10)); |
161 | print_memmap(); | ||
128 | } | 162 | } |
129 | 163 | ||
130 | #ifdef CONFIG_BLK_DEV_INITRD | 164 | #ifdef CONFIG_BLK_DEV_INITRD |
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c index 69345849454b..1cc2bed4c3dd 100644 --- a/arch/m68k/mm/kmap.c +++ b/arch/m68k/mm/kmap.c | |||
@@ -171,7 +171,8 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla | |||
171 | break; | 171 | break; |
172 | } | 172 | } |
173 | } else { | 173 | } else { |
174 | physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY); | 174 | physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | |
175 | _PAGE_DIRTY | _PAGE_READWRITE); | ||
175 | switch (cacheflag) { | 176 | switch (cacheflag) { |
176 | case IOMAP_NOCACHE_SER: | 177 | case IOMAP_NOCACHE_SER: |
177 | case IOMAP_NOCACHE_NONSER: | 178 | case IOMAP_NOCACHE_NONSER: |
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c new file mode 100644 index 000000000000..babd5a97cdcb --- /dev/null +++ b/arch/m68k/mm/mcfmmu.c | |||
@@ -0,0 +1,198 @@ | |||
1 | /* | ||
2 | * Based upon linux/arch/m68k/mm/sun3mmu.c | ||
3 | * Based upon linux/arch/ppc/mm/mmu_context.c | ||
4 | * | ||
5 | * Implementations of mm routines specific to the Coldfire MMU. | ||
6 | * | ||
7 | * Copyright (c) 2008 Freescale Semiconductor, Inc. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <linux/bootmem.h> | ||
16 | |||
17 | #include <asm/setup.h> | ||
18 | #include <asm/page.h> | ||
19 | #include <asm/pgtable.h> | ||
20 | #include <asm/mmu_context.h> | ||
21 | #include <asm/mcf_pgalloc.h> | ||
22 | #include <asm/tlbflush.h> | ||
23 | |||
24 | #define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END)) | ||
25 | |||
26 | mm_context_t next_mmu_context; | ||
27 | unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1]; | ||
28 | atomic_t nr_free_contexts; | ||
29 | struct mm_struct *context_mm[LAST_CONTEXT+1]; | ||
30 | extern unsigned long num_pages; | ||
31 | |||
32 | void free_initmem(void) | ||
33 | { | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * ColdFire paging_init derived from sun3. | ||
38 | */ | ||
39 | void __init paging_init(void) | ||
40 | { | ||
41 | pgd_t *pg_dir; | ||
42 | pte_t *pg_table; | ||
43 | unsigned long address, size; | ||
44 | unsigned long next_pgtable, bootmem_end; | ||
45 | unsigned long zones_size[MAX_NR_ZONES]; | ||
46 | enum zone_type zone; | ||
47 | int i; | ||
48 | |||
49 | empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE); | ||
50 | memset((void *) empty_zero_page, 0, PAGE_SIZE); | ||
51 | |||
52 | pg_dir = swapper_pg_dir; | ||
53 | memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); | ||
54 | |||
55 | size = num_pages * sizeof(pte_t); | ||
56 | size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1); | ||
57 | next_pgtable = (unsigned long) alloc_bootmem_pages(size); | ||
58 | |||
59 | bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK; | ||
60 | pg_dir += PAGE_OFFSET >> PGDIR_SHIFT; | ||
61 | |||
62 | address = PAGE_OFFSET; | ||
63 | while (address < (unsigned long)high_memory) { | ||
64 | pg_table = (pte_t *) next_pgtable; | ||
65 | next_pgtable += PTRS_PER_PTE * sizeof(pte_t); | ||
66 | pgd_val(*pg_dir) = (unsigned long) pg_table; | ||
67 | pg_dir++; | ||
68 | |||
69 | /* now change pg_table to kernel virtual addresses */ | ||
70 | for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) { | ||
71 | pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT); | ||
72 | if (address >= (unsigned long) high_memory) | ||
73 | pte_val(pte) = 0; | ||
74 | |||
75 | set_pte(pg_table, pte); | ||
76 | address += PAGE_SIZE; | ||
77 | } | ||
78 | } | ||
79 | |||
80 | current->mm = NULL; | ||
81 | |||
82 | for (zone = 0; zone < MAX_NR_ZONES; zone++) | ||
83 | zones_size[zone] = 0x0; | ||
84 | zones_size[ZONE_DMA] = num_pages; | ||
85 | free_area_init(zones_size); | ||
86 | } | ||
87 | |||
88 | int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word) | ||
89 | { | ||
90 | unsigned long flags, mmuar; | ||
91 | struct mm_struct *mm; | ||
92 | pgd_t *pgd; | ||
93 | pmd_t *pmd; | ||
94 | pte_t *pte; | ||
95 | int asid; | ||
96 | |||
97 | local_irq_save(flags); | ||
98 | |||
99 | mmuar = (dtlb) ? mmu_read(MMUAR) : | ||
100 | regs->pc + (extension_word * sizeof(long)); | ||
101 | |||
102 | mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm; | ||
103 | if (!mm) { | ||
104 | local_irq_restore(flags); | ||
105 | return -1; | ||
106 | } | ||
107 | |||
108 | pgd = pgd_offset(mm, mmuar); | ||
109 | if (pgd_none(*pgd)) { | ||
110 | local_irq_restore(flags); | ||
111 | return -1; | ||
112 | } | ||
113 | |||
114 | pmd = pmd_offset(pgd, mmuar); | ||
115 | if (pmd_none(*pmd)) { | ||
116 | local_irq_restore(flags); | ||
117 | return -1; | ||
118 | } | ||
119 | |||
120 | pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar) | ||
121 | : pte_offset_map(pmd, mmuar); | ||
122 | if (pte_none(*pte) || !pte_present(*pte)) { | ||
123 | local_irq_restore(flags); | ||
124 | return -1; | ||
125 | } | ||
126 | |||
127 | if (write) { | ||
128 | if (!pte_write(*pte)) { | ||
129 | local_irq_restore(flags); | ||
130 | return -1; | ||
131 | } | ||
132 | set_pte(pte, pte_mkdirty(*pte)); | ||
133 | } | ||
134 | |||
135 | set_pte(pte, pte_mkyoung(*pte)); | ||
136 | asid = mm->context & 0xff; | ||
137 | if (!pte_dirty(*pte) && !KMAPAREA(mmuar)) | ||
138 | set_pte(pte, pte_wrprotect(*pte)); | ||
139 | |||
140 | mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | | ||
141 | (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK) | ||
142 | >> CF_PAGE_MMUTR_SHIFT) | MMUTR_V); | ||
143 | |||
144 | mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) | | ||
145 | ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X); | ||
146 | |||
147 | if (dtlb) | ||
148 | mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA); | ||
149 | else | ||
150 | mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA); | ||
151 | |||
152 | local_irq_restore(flags); | ||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Initialize the context management stuff. | ||
158 | * The following was taken from arch/ppc/mmu_context.c | ||
159 | */ | ||
160 | void __init mmu_context_init(void) | ||
161 | { | ||
162 | /* | ||
163 | * Some processors have too few contexts to reserve one for | ||
164 | * init_mm, and require using context 0 for a normal task. | ||
165 | * Other processors reserve the use of context zero for the kernel. | ||
166 | * This code assumes FIRST_CONTEXT < 32. | ||
167 | */ | ||
168 | context_map[0] = (1 << FIRST_CONTEXT) - 1; | ||
169 | next_mmu_context = FIRST_CONTEXT; | ||
170 | atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1); | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * Steal a context from a task that has one at the moment. | ||
175 | * This is only used on 8xx and 4xx and we presently assume that | ||
176 | they don't do SMP. If they do then this will have to check | ||
177 | * whether the MM we steal is in use. | ||
178 | * We also assume that this is only used on systems that don't | ||
179 | * use an MMU hash table - this is true for 8xx and 4xx. | ||
180 | * This isn't an LRU system, it just frees up each context in | ||
181 | * turn (sort-of pseudo-random replacement :). This would be the | ||
182 | * place to implement an LRU scheme if anyone was motivated to do it. | ||
183 | * -- paulus | ||
184 | */ | ||
185 | void steal_context(void) | ||
186 | { | ||
187 | struct mm_struct *mm; | ||
188 | /* | ||
189 | * free up context `next_mmu_context' | ||
190 | * if we shouldn't free context 0, don't... | ||
191 | */ | ||
192 | if (next_mmu_context < FIRST_CONTEXT) | ||
193 | next_mmu_context = FIRST_CONTEXT; | ||
194 | mm = context_mm[next_mmu_context]; | ||
195 | flush_tlb_mm(mm); | ||
196 | destroy_context(mm); | ||
197 | } | ||
198 | |||
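The mmu_context_init()/steal_context() pair above implements the ppc-derived scheme its comments describe: a bitmap records which context numbers (ASIDs) are in use, new mms take the next free slot round-robin, and when the pool runs dry the next slot in sequence is stolen from its current owner. A minimal stand-alone sketch of that bitmap allocation, using illustrative limits and helper names rather than the kernel's FIRST_CONTEXT/LAST_CONTEXT machinery, could look like this:

/*
 * Hedged sketch: round-robin context-number (ASID) allocation in the
 * style of the ppc-derived code above.  FIRST_CTX, LAST_CTX and the
 * helper names are illustrative stand-ins, not kernel symbols.
 */
#include <stdio.h>

#define FIRST_CTX 1
#define LAST_CTX  255

static unsigned long ctx_map[(LAST_CTX + 1) / (8 * sizeof(unsigned long)) + 1];
static unsigned int next_ctx = FIRST_CTX;
static int free_ctx = LAST_CTX - FIRST_CTX + 1;

/* Non-atomic test-and-set, enough for a single-threaded illustration. */
static int test_and_set(unsigned int nr, unsigned long *map)
{
	unsigned long mask = 1UL << (nr % (8 * sizeof(unsigned long)));
	unsigned long *p = map + nr / (8 * sizeof(unsigned long));
	int old = (*p & mask) != 0;

	*p |= mask;
	return old;
}

/* Hand out the next free context, wrapping past LAST_CTX. */
static unsigned int get_new_context(void)
{
	unsigned int ctx = next_ctx;

	if (free_ctx <= 0)
		return FIRST_CTX;	/* the real code would steal a context here */

	while (test_and_set(ctx, ctx_map))
		ctx = (ctx >= LAST_CTX) ? FIRST_CTX : ctx + 1;

	next_ctx = (ctx >= LAST_CTX) ? FIRST_CTX : ctx + 1;
	free_ctx--;
	return ctx;
}

int main(void)
{
	int i;

	/* reserve everything below FIRST_CTX, as context_map[0] does above */
	ctx_map[0] = (1UL << FIRST_CTX) - 1;

	for (i = 0; i < 5; i++)
		printf("allocated context %u\n", get_new_context());
	return 0;
}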
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c index 34c77ce24fba..a5dbb74fe1de 100644 --- a/arch/m68k/mm/memory.c +++ b/arch/m68k/mm/memory.c | |||
@@ -203,7 +203,9 @@ static inline void pushcl040(unsigned long paddr) | |||
203 | 203 | ||
204 | void cache_clear (unsigned long paddr, int len) | 204 | void cache_clear (unsigned long paddr, int len) |
205 | { | 205 | { |
206 | if (CPU_IS_040_OR_060) { | 206 | if (CPU_IS_COLDFIRE) { |
207 | flush_cf_bcache(0, DCACHE_MAX_ADDR); | ||
208 | } else if (CPU_IS_040_OR_060) { | ||
207 | int tmp; | 209 | int tmp; |
208 | 210 | ||
209 | /* | 211 | /* |
@@ -250,7 +252,9 @@ EXPORT_SYMBOL(cache_clear); | |||
250 | 252 | ||
251 | void cache_push (unsigned long paddr, int len) | 253 | void cache_push (unsigned long paddr, int len) |
252 | { | 254 | { |
253 | if (CPU_IS_040_OR_060) { | 255 | if (CPU_IS_COLDFIRE) { |
256 | flush_cf_bcache(0, DCACHE_MAX_ADDR); | ||
257 | } else if (CPU_IS_040_OR_060) { | ||
254 | int tmp = PAGE_SIZE; | 258 | int tmp = PAGE_SIZE; |
255 | 259 | ||
256 | /* | 260 | /* |
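The hunks above route ColdFire through a whole-cache flush (flush_cf_bcache) rather than the per-line 040/060 push loops. A compressed sketch of that dispatch, with made-up helpers standing in for the real routines, is:

/*
 * Hedged sketch of the dispatch pattern the hunks above add to
 * cache_clear()/cache_push(); the helpers are made-up stand-ins for
 * flush_cf_bcache() and the 040/060 push loops, not kernel symbols.
 */
#include <stdbool.h>
#include <stdio.h>

static bool cpu_is_coldfire = true;	/* stands in for CPU_IS_COLDFIRE */
static bool cpu_is_040_or_060;		/* stands in for CPU_IS_040_OR_060 */

static void flush_whole_bcache(void)
{
	puts("flush the entire data/branch cache");
}

static void push_cache_lines(unsigned long paddr, int len)
{
	printf("push %d bytes at %#lx line by line\n", len, paddr);
}

static void cache_push_sketch(unsigned long paddr, int len)
{
	if (cpu_is_coldfire)
		flush_whole_bcache();		/* ColdFire: one big flush */
	else if (cpu_is_040_or_060)
		push_cache_lines(paddr, len);	/* 040/060: per-line push */
	/* older CPUs take a different path in the real code */
}

int main(void)
{
	cache_push_sketch(0x1000, 4096);
	return 0;
}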
diff --git a/arch/m68k/platform/54xx/config.c b/arch/m68k/platform/54xx/config.c index 78130984db95..ee043540bfa2 100644 --- a/arch/m68k/platform/54xx/config.c +++ b/arch/m68k/platform/54xx/config.c | |||
@@ -13,11 +13,17 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/mm.h> | ||
17 | #include <linux/bootmem.h> | ||
18 | #include <asm/pgalloc.h> | ||
16 | #include <asm/machdep.h> | 19 | #include <asm/machdep.h> |
17 | #include <asm/coldfire.h> | 20 | #include <asm/coldfire.h> |
18 | #include <asm/m54xxsim.h> | 21 | #include <asm/m54xxsim.h> |
19 | #include <asm/mcfuart.h> | 22 | #include <asm/mcfuart.h> |
20 | #include <asm/m54xxgpt.h> | 23 | #include <asm/m54xxgpt.h> |
24 | #ifdef CONFIG_MMU | ||
25 | #include <asm/mmu_context.h> | ||
26 | #endif | ||
21 | 27 | ||
22 | /***************************************************************************/ | 28 | /***************************************************************************/ |
23 | 29 | ||
@@ -95,8 +101,49 @@ static void mcf54xx_reset(void) | |||
95 | 101 | ||
96 | /***************************************************************************/ | 102 | /***************************************************************************/ |
97 | 103 | ||
104 | #ifdef CONFIG_MMU | ||
105 | |||
106 | unsigned long num_pages; | ||
107 | |||
108 | static void __init mcf54xx_bootmem_alloc(void) | ||
109 | { | ||
110 | unsigned long start_pfn; | ||
111 | unsigned long memstart; | ||
112 | |||
113 | /* _rambase and _ramend will be naturally page aligned */ | ||
114 | m68k_memory[0].addr = _rambase; | ||
115 | m68k_memory[0].size = _ramend - _rambase; | ||
116 | |||
117 | /* compute total pages in system */ | ||
118 | num_pages = (_ramend - _rambase) >> PAGE_SHIFT; | ||
119 | |||
120 | /* page numbers */ | ||
121 | memstart = PAGE_ALIGN(_ramstart); | ||
122 | min_low_pfn = _rambase >> PAGE_SHIFT; | ||
123 | start_pfn = memstart >> PAGE_SHIFT; | ||
124 | max_low_pfn = _ramend >> PAGE_SHIFT; | ||
125 | high_memory = (void *)_ramend; | ||
126 | |||
127 | m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; | ||
128 | module_fixup(NULL, __start_fixup, __stop_fixup); | ||
129 | |||
130 | /* setup bootmem data */ | ||
131 | m68k_setup_node(0); | ||
132 | memstart += init_bootmem_node(NODE_DATA(0), start_pfn, | ||
133 | min_low_pfn, max_low_pfn); | ||
134 | free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart); | ||
135 | } | ||
136 | |||
137 | #endif /* CONFIG_MMU */ | ||
138 | |||
139 | /***************************************************************************/ | ||
140 | |||
98 | void __init config_BSP(char *commandp, int size) | 141 | void __init config_BSP(char *commandp, int size) |
99 | { | 142 | { |
143 | #ifdef CONFIG_MMU | ||
144 | mcf54xx_bootmem_alloc(); | ||
145 | mmu_context_init(); | ||
146 | #endif | ||
100 | mach_reset = mcf54xx_reset; | 147 | mach_reset = mcf54xx_reset; |
101 | m54xx_uarts_init(); | 148 | m54xx_uarts_init(); |
102 | } | 149 | } |
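For orientation, the pfn bookkeeping in mcf54xx_bootmem_alloc() above is plain shift arithmetic on the RAM bounds. A stand-alone example, assuming an 8 KiB page size and purely illustrative _rambase/_ramend values, works out as:

/*
 * Hedged example of the pfn arithmetic in mcf54xx_bootmem_alloc().
 * The 8 KiB page size and the _rambase/_ramend values are assumptions
 * chosen only to make the shifts concrete.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 13;		/* assumed 8 KiB MMU pages */
	unsigned long rambase = 0x40000000UL;	/* assumed start of RAM */
	unsigned long ramend  = 0x44000000UL;	/* assumed end of RAM (64 MiB) */

	unsigned long num_pages   = (ramend - rambase) >> page_shift;
	unsigned long min_low_pfn = rambase >> page_shift;
	unsigned long max_low_pfn = ramend >> page_shift;

	printf("num_pages=%lu min_low_pfn=%lu max_low_pfn=%lu\n",
	       num_pages, min_low_pfn, max_low_pfn);
	return 0;
}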
diff --git a/arch/m68k/platform/68328/Makefile b/arch/m68k/platform/68328/Makefile index e4dfd8fde068..ee61bf84d4a0 100644 --- a/arch/m68k/platform/68328/Makefile +++ b/arch/m68k/platform/68328/Makefile | |||
@@ -14,12 +14,8 @@ obj-$(CONFIG_M68328) += config.o | |||
14 | obj-$(CONFIG_ROM) += romvec.o | 14 | obj-$(CONFIG_ROM) += romvec.o |
15 | 15 | ||
16 | extra-y := head.o | 16 | extra-y := head.o |
17 | extra-$(CONFIG_M68328) += bootlogo.rh head.o | ||
18 | |||
19 | $(obj)/bootlogo.rh: $(src)/bootlogo.h | ||
20 | perl $(src)/bootlogo.pl < $(src)/bootlogo.h > $(obj)/bootlogo.rh | ||
21 | 17 | ||
22 | $(obj)/head.o: $(obj)/$(head-y) | 18 | $(obj)/head.o: $(obj)/$(head-y) |
23 | ln -sf $(head-y) $(obj)/head.o | 19 | ln -sf $(head-y) $(obj)/head.o |
24 | 20 | ||
25 | clean-files := $(obj)/bootlogo.rh $(obj)/head.o $(head-y) | 21 | clean-files := $(obj)/head.o $(head-y) |
diff --git a/arch/m68k/platform/68328/bootlogo.h b/arch/m68k/platform/68328/bootlogo.h index 67bc2c17386e..b896c933fafc 100644 --- a/arch/m68k/platform/68328/bootlogo.h +++ b/arch/m68k/platform/68328/bootlogo.h | |||
@@ -1,6 +1,6 @@ | |||
1 | #define bootlogo_width 160 | 1 | #define bootlogo_width 160 |
2 | #define bootlogo_height 160 | 2 | #define bootlogo_height 160 |
3 | static unsigned char bootlogo_bits[] = { | 3 | unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = { |
4 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x01, 0x00, 0x00, 0x00, | 4 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x01, 0x00, 0x00, 0x00, |
5 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | 5 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
6 | 0x00, 0x00, 0x40, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | 6 | 0x00, 0x00, 0x40, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
diff --git a/arch/m68k/platform/68328/bootlogo.pl b/arch/m68k/platform/68328/bootlogo.pl deleted file mode 100644 index b04ae3f50da5..000000000000 --- a/arch/m68k/platform/68328/bootlogo.pl +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | |||
2 | $_ = join("", <>); | ||
3 | |||
4 | s/(0x[0-9a-f]{2})/sprintf("0x%.2x",ord(pack("b8",unpack("B8",chr(hex($1))))))/gei; | ||
5 | |||
6 | s/^ / .byte /gm; | ||
7 | s/[,};]+$//gm; | ||
8 | s/^static.*//gm; | ||
9 | |||
10 | print $_; | ||
diff --git a/arch/m68k/platform/68328/config.c b/arch/m68k/platform/68328/config.c index a7bd21deb00f..d70bf2623db1 100644 --- a/arch/m68k/platform/68328/config.c +++ b/arch/m68k/platform/68328/config.c | |||
@@ -20,6 +20,9 @@ | |||
20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
21 | #include <asm/machdep.h> | 21 | #include <asm/machdep.h> |
22 | #include <asm/MC68328.h> | 22 | #include <asm/MC68328.h> |
23 | #if defined(CONFIG_PILOT) || defined(CONFIG_INIT_LCD) | ||
24 | #include "bootlogo.h" | ||
25 | #endif | ||
23 | 26 | ||
24 | /***************************************************************************/ | 27 | /***************************************************************************/ |
25 | 28 | ||
diff --git a/arch/m68k/platform/68328/head-pilot.S b/arch/m68k/platform/68328/head-pilot.S index aecff532b343..2ebfd6420818 100644 --- a/arch/m68k/platform/68328/head-pilot.S +++ b/arch/m68k/platform/68328/head-pilot.S | |||
@@ -24,19 +24,7 @@ | |||
24 | .global _ramstart | 24 | .global _ramstart |
25 | .global _ramend | 25 | .global _ramend |
26 | 26 | ||
27 | .global penguin_bits | 27 | .global bootlogo_bits |
28 | |||
29 | #ifdef CONFIG_PILOT | ||
30 | |||
31 | #define IMR 0xFFFFF304 | ||
32 | |||
33 | .data | ||
34 | .align 16 | ||
35 | |||
36 | penguin_bits: | ||
37 | #include "bootlogo.rh" | ||
38 | |||
39 | #endif | ||
40 | 28 | ||
41 | /*****************************************************************************/ | 29 | /*****************************************************************************/ |
42 | 30 | ||
@@ -185,9 +173,6 @@ L3: | |||
185 | moveq #79, %d7 | 173 | moveq #79, %d7 |
186 | movel %d0, _ramend | 174 | movel %d0, _ramend |
187 | 175 | ||
188 | movel %a3, %d0 | ||
189 | movel %d0, rom_length | ||
190 | |||
191 | pea 0 | 176 | pea 0 |
192 | pea env | 177 | pea env |
193 | pea %sp@(4) | 178 | pea %sp@(4) |
@@ -196,7 +181,7 @@ L3: | |||
196 | DBG_PUTC('H') | 181 | DBG_PUTC('H') |
197 | 182 | ||
198 | #ifdef CONFIG_PILOT | 183 | #ifdef CONFIG_PILOT |
199 | movel #penguin_bits, 0xFFFFFA00 | 184 | movel #bootlogo_bits, 0xFFFFFA00 |
200 | moveb #10, 0xFFFFFA05 | 185 | moveb #10, 0xFFFFFA05 |
201 | movew #160, 0xFFFFFA08 | 186 | movew #160, 0xFFFFFA08 |
202 | movew #160, 0xFFFFFA0A | 187 | movew #160, 0xFFFFFA0A |
diff --git a/arch/m68k/platform/68328/head-rom.S b/arch/m68k/platform/68328/head-rom.S index 6ec77d3ea0b3..a5ff96d0295f 100644 --- a/arch/m68k/platform/68328/head-rom.S +++ b/arch/m68k/platform/68328/head-rom.S | |||
@@ -8,7 +8,7 @@ | |||
8 | .global _ramend | 8 | .global _ramend |
9 | 9 | ||
10 | #ifdef CONFIG_INIT_LCD | 10 | #ifdef CONFIG_INIT_LCD |
11 | .global splash_bits | 11 | .global bootlogo_bits |
12 | #endif | 12 | #endif |
13 | 13 | ||
14 | .data | 14 | .data |
@@ -29,16 +29,11 @@ _ramend: | |||
29 | 29 | ||
30 | #define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE) | 30 | #define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE) |
31 | 31 | ||
32 | #ifdef CONFIG_INIT_LCD | ||
33 | splash_bits: | ||
34 | #include "bootlogo.rh" | ||
35 | #endif | ||
36 | |||
37 | .text | 32 | .text |
38 | _start: | 33 | _start: |
39 | _stext: movew #0x2700,%sr | 34 | _stext: movew #0x2700,%sr |
40 | #ifdef CONFIG_INIT_LCD | 35 | #ifdef CONFIG_INIT_LCD |
41 | movel #splash_bits, 0xfffffA00 /* LSSA */ | 36 | movel #bootlogo_bits, 0xfffffA00 /* LSSA */ |
42 | moveb #0x28, 0xfffffA05 /* LVPW */ | 37 | moveb #0x28, 0xfffffA05 /* LVPW */ |
43 | movew #0x280, 0xFFFFFa08 /* LXMAX */ | 38 | movew #0x280, 0xFFFFFa08 /* LXMAX */ |
44 | movew #0x1df, 0xFFFFFa0a /* LYMAX */ | 39 | movew #0x1df, 0xFFFFFa0a /* LYMAX */ |
diff --git a/arch/m68k/platform/coldfire/entry.S b/arch/m68k/platform/coldfire/entry.S index 3157461a8d1d..863889fc31c9 100644 --- a/arch/m68k/platform/coldfire/entry.S +++ b/arch/m68k/platform/coldfire/entry.S | |||
@@ -54,7 +54,6 @@ sw_usp: | |||
54 | .globl ret_from_signal | 54 | .globl ret_from_signal |
55 | .globl sys_call_table | 55 | .globl sys_call_table |
56 | .globl inthandler | 56 | .globl inthandler |
57 | .globl fasthandler | ||
58 | 57 | ||
59 | enosys: | 58 | enosys: |
60 | mov.l #sys_ni_syscall,%d3 | 59 | mov.l #sys_ni_syscall,%d3 |
@@ -63,6 +62,7 @@ enosys: | |||
63 | ENTRY(system_call) | 62 | ENTRY(system_call) |
64 | SAVE_ALL_SYS | 63 | SAVE_ALL_SYS |
65 | move #0x2000,%sr /* enable intrs again */ | 64 | move #0x2000,%sr /* enable intrs again */ |
65 | GET_CURRENT(%d2) | ||
66 | 66 | ||
67 | cmpl #NR_syscalls,%d0 | 67 | cmpl #NR_syscalls,%d0 |
68 | jcc enosys | 68 | jcc enosys |
@@ -166,6 +166,7 @@ Lsignal_return: | |||
166 | */ | 166 | */ |
167 | ENTRY(inthandler) | 167 | ENTRY(inthandler) |
168 | SAVE_ALL_INT | 168 | SAVE_ALL_INT |
169 | GET_CURRENT(%d2) | ||
169 | 170 | ||
170 | movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */ | 171 | movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */ |
171 | andl #0x03fc,%d0 /* mask out vector only */ | 172 | andl #0x03fc,%d0 /* mask out vector only */ |
@@ -191,7 +192,9 @@ ENTRY(resume) | |||
191 | movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */ | 192 | movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */ |
192 | RDUSP /* movel %usp,%a3 */ | 193 | RDUSP /* movel %usp,%a3 */ |
193 | movel %a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */ | 194 | movel %a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */ |
194 | 195 | #ifdef CONFIG_MMU | |
196 | movel %a1,%a2 /* set new current */ | ||
197 | #endif | ||
195 | movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */ | 198 | movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */ |
196 | WRUSP /* movel %a3,%usp */ | 199 | WRUSP /* movel %a3,%usp */ |
197 | movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */ | 200 | movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */ |
diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S index c33483824a2e..38f04a3f6207 100644 --- a/arch/m68k/platform/coldfire/head.S +++ b/arch/m68k/platform/coldfire/head.S | |||
@@ -3,7 +3,7 @@ | |||
3 | /* | 3 | /* |
4 | * head.S -- common startup code for ColdFire CPUs. | 4 | * head.S -- common startup code for ColdFire CPUs. |
5 | * | 5 | * |
6 | * (C) Copyright 1999-2010, Greg Ungerer <gerg@snapgear.com>. | 6 | * (C) Copyright 1999-2011, Greg Ungerer <gerg@snapgear.com>. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | /*****************************************************************************/ | 9 | /*****************************************************************************/ |
@@ -13,6 +13,7 @@ | |||
13 | #include <asm/asm-offsets.h> | 13 | #include <asm/asm-offsets.h> |
14 | #include <asm/coldfire.h> | 14 | #include <asm/coldfire.h> |
15 | #include <asm/mcfsim.h> | 15 | #include <asm/mcfsim.h> |
16 | #include <asm/mcfmmu.h> | ||
16 | #include <asm/thread_info.h> | 17 | #include <asm/thread_info.h> |
17 | 18 | ||
18 | /*****************************************************************************/ | 19 | /*****************************************************************************/ |
@@ -135,6 +136,14 @@ _init_sp: | |||
135 | 136 | ||
136 | __HEAD | 137 | __HEAD |
137 | 138 | ||
139 | #ifdef CONFIG_MMU | ||
140 | _start0: | ||
141 | jmp _start | ||
142 | .global kernel_pg_dir | ||
143 | .equ kernel_pg_dir,_start0 | ||
144 | .equ .,_start0+0x1000 | ||
145 | #endif | ||
146 | |||
138 | /* | 147 | /* |
139 | * This is the code's first entry point. This is where it all | 148 | * This is the code's first entry point. This is where it all |
140 | * begins... | 149 | * begins... |
@@ -143,6 +152,9 @@ __HEAD | |||
143 | _start: | 152 | _start: |
144 | nop /* filler */ | 153 | nop /* filler */ |
145 | movew #0x2700, %sr /* no interrupts */ | 154 | movew #0x2700, %sr /* no interrupts */ |
155 | movel #CACHE_INIT,%d0 /* disable cache */ | ||
156 | movec %d0,%CACR | ||
157 | nop | ||
146 | #if defined(CONFIG_UBOOT) | 158 | #if defined(CONFIG_UBOOT) |
147 | movel %sp,_init_sp /* save initial stack pointer */ | 159 | movel %sp,_init_sp /* save initial stack pointer */ |
148 | #endif | 160 | #endif |
@@ -176,9 +188,6 @@ _start: | |||
176 | * it is very similar. Define the exact settings in the headers | 188 | * it is very similar. Define the exact settings in the headers |
177 | * then the code here is the same for all. | 189 | * then the code here is the same for all. |
178 | */ | 190 | */ |
179 | movel #CACHE_INIT,%d0 /* invalidate whole cache */ | ||
180 | movec %d0,%CACR | ||
181 | nop | ||
182 | movel #ACR0_MODE,%d0 /* set RAM region for caching */ | 191 | movel #ACR0_MODE,%d0 /* set RAM region for caching */ |
183 | movec %d0,%ACR0 | 192 | movec %d0,%ACR0 |
184 | movel #ACR1_MODE,%d0 /* anything else to cache? */ | 193 | movel #ACR1_MODE,%d0 /* anything else to cache? */ |
@@ -193,6 +202,26 @@ _start: | |||
193 | movec %d0,%CACR | 202 | movec %d0,%CACR |
194 | nop | 203 | nop |
195 | 204 | ||
205 | #ifdef CONFIG_MMU | ||
206 | /* | ||
207 | * Identity mapping for the kernel region. | ||
208 | */ | ||
209 | movel #(MMUBASE+1),%d0 /* enable MMUBAR registers */ | ||
210 | movec %d0,%MMUBAR | ||
211 | movel #MMUOR_CA,%d0 /* clear TLB entries */ | ||
212 | movel %d0,MMUOR | ||
213 | movel #0,%d0 /* set ASID to 0 */ | ||
214 | movec %d0,%asid | ||
215 | |||
216 | movel #MMUCR_EN,%d0 /* Enable the identity map */ | ||
217 | movel %d0,MMUCR | ||
218 | nop /* sync i-pipeline */ | ||
219 | |||
220 | movel #_vstart,%a0 /* jump to "virtual" space */ | ||
221 | jmp %a0@ | ||
222 | _vstart: | ||
223 | #endif /* CONFIG_MMU */ | ||
224 | |||
196 | #ifdef CONFIG_ROMFS_FS | 225 | #ifdef CONFIG_ROMFS_FS |
197 | /* | 226 | /* |
198 | * Move ROM filesystem above bss :-) | 227 | * Move ROM filesystem above bss :-) |
@@ -238,6 +267,22 @@ _clear_bss: | |||
238 | lea init_thread_union,%a0 | 267 | lea init_thread_union,%a0 |
239 | lea THREAD_SIZE(%a0),%sp | 268 | lea THREAD_SIZE(%a0),%sp |
240 | 269 | ||
270 | #ifdef CONFIG_MMU | ||
271 | .global m68k_cputype | ||
272 | .global m68k_mmutype | ||
273 | .global m68k_fputype | ||
274 | .global m68k_machtype | ||
275 | movel #CPU_COLDFIRE,%d0 | ||
276 | movel %d0,m68k_cputype /* Mark us as a ColdFire */ | ||
277 | movel #MMU_COLDFIRE,%d0 | ||
278 | movel %d0,m68k_mmutype | ||
279 | movel #FPU_COLDFIRE,%d0 | ||
280 | movel %d0,m68k_fputype | ||
281 | movel #MACH_M54XX,%d0 | ||
282 | movel %d0,m68k_machtype /* Mark us as a 54xx machine */ | ||
283 | lea init_task,%a2 /* Set "current" init task */ | ||
284 | #endif | ||
285 | |||
241 | /* | 286 | /* |
242 | * Assember start up done, start code proper. | 287 | * Assember start up done, start code proper. |
243 | */ | 288 | */ |
diff --git a/arch/m68k/platform/coldfire/sltimers.c b/arch/m68k/platform/coldfire/sltimers.c index b7f822b552bb..54e1452f853a 100644 --- a/arch/m68k/platform/coldfire/sltimers.c +++ b/arch/m68k/platform/coldfire/sltimers.c | |||
@@ -98,16 +98,19 @@ static struct irqaction mcfslt_timer_irq = { | |||
98 | static cycle_t mcfslt_read_clk(struct clocksource *cs) | 98 | static cycle_t mcfslt_read_clk(struct clocksource *cs) |
99 | { | 99 | { |
100 | unsigned long flags; | 100 | unsigned long flags; |
101 | u32 cycles; | 101 | u32 cycles, scnt; |
102 | u16 scnt; | ||
103 | 102 | ||
104 | local_irq_save(flags); | 103 | local_irq_save(flags); |
105 | scnt = __raw_readl(TA(MCFSLT_SCNT)); | 104 | scnt = __raw_readl(TA(MCFSLT_SCNT)); |
106 | cycles = mcfslt_cnt; | 105 | cycles = mcfslt_cnt; |
106 | if (__raw_readl(TA(MCFSLT_SSR)) & MCFSLT_SSR_TE) { | ||
107 | cycles += mcfslt_cycles_per_jiffy; | ||
108 | scnt = __raw_readl(TA(MCFSLT_SCNT)); | ||
109 | } | ||
107 | local_irq_restore(flags); | 110 | local_irq_restore(flags); |
108 | 111 | ||
109 | /* subtract because slice timers count down */ | 112 | /* subtract because slice timers count down */ |
110 | return cycles - scnt; | 113 | return cycles + ((mcfslt_cycles_per_jiffy - 1) - scnt); |
111 | } | 114 | } |
112 | 115 | ||
113 | static struct clocksource mcfslt_clk = { | 116 | static struct clocksource mcfslt_clk = { |