diff options
author | Greg Ungerer <gerg@uclinux.org> | 2011-10-14 00:43:30 -0400 |
---|---|---|
committer | Greg Ungerer <gerg@uclinux.org> | 2011-12-29 19:20:21 -0500 |
commit | e08d703cc2ab6e47dbd10a74eb029f7dfa93d71d (patch) | |
tree | f81a41ca2a92ed39a723b024b984d220373e6e01 /arch | |
parent | 2c9b82adb5ea65745d5d73d97bb0e1cc16cba4a0 (diff) |
m68k: modify user space access functions to support ColdFire CPUs
Modify the user space access functions to support the ColdFire V4e cores
running with MMU enabled.
The ColdFire processors do not support the "moves" instruction used by
the traditional 680x0 processors for moving data into and out of another
address space. They only support the notion of a single address space,
and you use the usual "move" instruction to access that.
Create a new config symbol (CONFIG_CPU_HAS_ADDRESS_SPACES) to mark the
CPU types that support separate address spaces, and thus also support
the sfc/dfc registers and the "moves" instruction that go along with that.
The code is almost identical for user space access, so let's just use a
define to choose either the "move" or "moves" in the assembler code.
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Acked-by: Matt Waddel <mwaddel@yahoo.com>
Acked-by: Kurt Mahan <kmahan@xmission.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/m68k/Kconfig | 3 | ||||
-rw-r--r-- | arch/m68k/Kconfig.cpu | 4 | ||||
-rw-r--r-- | arch/m68k/include/asm/segment.h | 4 | ||||
-rw-r--r-- | arch/m68k/include/asm/uaccess_mm.h | 42 | ||||
-rw-r--r-- | arch/m68k/lib/uaccess.c | 22 |
5 files changed, 49 insertions, 26 deletions
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 32fd3642e71b..5f860cf67afc 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig | |||
@@ -66,6 +66,9 @@ config CPU_HAS_NO_BITFIELDS | |||
66 | config CPU_HAS_NO_MULDIV64 | 66 | config CPU_HAS_NO_MULDIV64 |
67 | bool | 67 | bool |
68 | 68 | ||
69 | config CPU_HAS_ADDRESS_SPACES | ||
70 | bool | ||
71 | |||
69 | config HZ | 72 | config HZ |
70 | int | 73 | int |
71 | default 1000 if CLEOPATRA | 74 | default 1000 if CLEOPATRA |
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index 017f4fc388d2..5ae1d63ef5e9 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu | |||
@@ -38,6 +38,7 @@ config M68020 | |||
38 | bool "68020 support" | 38 | bool "68020 support" |
39 | depends on MMU | 39 | depends on MMU |
40 | select GENERIC_ATOMIC64 | 40 | select GENERIC_ATOMIC64 |
41 | select CPU_HAS_ADDRESS_SPACES | ||
41 | help | 42 | help |
42 | If you anticipate running this kernel on a computer with a MC68020 | 43 | If you anticipate running this kernel on a computer with a MC68020 |
43 | processor, say Y. Otherwise, say N. Note that the 68020 requires a | 44 | processor, say Y. Otherwise, say N. Note that the 68020 requires a |
@@ -48,6 +49,7 @@ config M68030 | |||
48 | bool "68030 support" | 49 | bool "68030 support" |
49 | depends on MMU && !MMU_SUN3 | 50 | depends on MMU && !MMU_SUN3 |
50 | select GENERIC_ATOMIC64 | 51 | select GENERIC_ATOMIC64 |
52 | select CPU_HAS_ADDRESS_SPACES | ||
51 | help | 53 | help |
52 | If you anticipate running this kernel on a computer with a MC68030 | 54 | If you anticipate running this kernel on a computer with a MC68030 |
53 | processor, say Y. Otherwise, say N. Note that a MC68EC030 will not | 55 | processor, say Y. Otherwise, say N. Note that a MC68EC030 will not |
@@ -57,6 +59,7 @@ config M68040 | |||
57 | bool "68040 support" | 59 | bool "68040 support" |
58 | depends on MMU && !MMU_SUN3 | 60 | depends on MMU && !MMU_SUN3 |
59 | select GENERIC_ATOMIC64 | 61 | select GENERIC_ATOMIC64 |
62 | select CPU_HAS_ADDRESS_SPACES | ||
60 | help | 63 | help |
61 | If you anticipate running this kernel on a computer with a MC68LC040 | 64 | If you anticipate running this kernel on a computer with a MC68LC040 |
62 | or MC68040 processor, say Y. Otherwise, say N. Note that an | 65 | or MC68040 processor, say Y. Otherwise, say N. Note that an |
@@ -67,6 +70,7 @@ config M68060 | |||
67 | bool "68060 support" | 70 | bool "68060 support" |
68 | depends on MMU && !MMU_SUN3 | 71 | depends on MMU && !MMU_SUN3 |
69 | select GENERIC_ATOMIC64 | 72 | select GENERIC_ATOMIC64 |
73 | select CPU_HAS_ADDRESS_SPACES | ||
70 | help | 74 | help |
71 | If you anticipate running this kernel on a computer with a MC68060 | 75 | If you anticipate running this kernel on a computer with a MC68060 |
72 | processor, say Y. Otherwise, say N. | 76 | processor, say Y. Otherwise, say N. |
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h index ee959219fdfe..1a142e9ceaad 100644 --- a/arch/m68k/include/asm/segment.h +++ b/arch/m68k/include/asm/segment.h | |||
@@ -31,7 +31,7 @@ typedef struct { | |||
31 | 31 | ||
32 | static inline mm_segment_t get_fs(void) | 32 | static inline mm_segment_t get_fs(void) |
33 | { | 33 | { |
34 | #ifdef CONFIG_MMU | 34 | #ifdef CONFIG_CPU_HAS_ADDRESS_SPACES |
35 | mm_segment_t _v; | 35 | mm_segment_t _v; |
36 | __asm__ ("movec %/dfc,%0":"=r" (_v.seg):); | 36 | __asm__ ("movec %/dfc,%0":"=r" (_v.seg):); |
37 | 37 | ||
@@ -49,7 +49,7 @@ static inline mm_segment_t get_ds(void) | |||
49 | 49 | ||
50 | static inline void set_fs(mm_segment_t val) | 50 | static inline void set_fs(mm_segment_t val) |
51 | { | 51 | { |
52 | #ifdef CONFIG_MMU | 52 | #ifdef CONFIG_CPU_HAS_ADDRESS_SPACES |
53 | __asm__ __volatile__ ("movec %0,%/sfc\n\t" | 53 | __asm__ __volatile__ ("movec %0,%/sfc\n\t" |
54 | "movec %0,%/dfc\n\t" | 54 | "movec %0,%/dfc\n\t" |
55 | : /* no outputs */ : "r" (val.seg) : "memory"); | 55 | : /* no outputs */ : "r" (val.seg) : "memory"); |
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 7107f3fbdbb6..9c80cd515b20 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h | |||
@@ -21,6 +21,22 @@ static inline int access_ok(int type, const void __user *addr, | |||
21 | } | 21 | } |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * Not all varients of the 68k family support the notion of address spaces. | ||
25 | * The traditional 680x0 parts do, and they use the sfc/dfc registers and | ||
26 | * the "moves" instruction to access user space from kernel space. Other | ||
27 | * family members like ColdFire don't support this, and only have a single | ||
28 | * address space, and use the usual "move" instruction for user space access. | ||
29 | * | ||
30 | * Outside of this difference the user space access functions are the same. | ||
31 | * So lets keep the code simple and just define in what we need to use. | ||
32 | */ | ||
33 | #ifdef CONFIG_CPU_HAS_ADDRESS_SPACES | ||
34 | #define MOVES "moves" | ||
35 | #else | ||
36 | #define MOVES "move" | ||
37 | #endif | ||
38 | |||
39 | /* | ||
24 | * The exception table consists of pairs of addresses: the first is the | 40 | * The exception table consists of pairs of addresses: the first is the |
25 | * address of an instruction that is allowed to fault, and the second is | 41 | * address of an instruction that is allowed to fault, and the second is |
26 | * the address at which the program should continue. No registers are | 42 | * the address at which the program should continue. No registers are |
@@ -43,7 +59,7 @@ extern int __get_user_bad(void); | |||
43 | 59 | ||
44 | #define __put_user_asm(res, x, ptr, bwl, reg, err) \ | 60 | #define __put_user_asm(res, x, ptr, bwl, reg, err) \ |
45 | asm volatile ("\n" \ | 61 | asm volatile ("\n" \ |
46 | "1: moves."#bwl" %2,%1\n" \ | 62 | "1: "MOVES"."#bwl" %2,%1\n" \ |
47 | "2:\n" \ | 63 | "2:\n" \ |
48 | " .section .fixup,\"ax\"\n" \ | 64 | " .section .fixup,\"ax\"\n" \ |
49 | " .even\n" \ | 65 | " .even\n" \ |
@@ -83,8 +99,8 @@ asm volatile ("\n" \ | |||
83 | { \ | 99 | { \ |
84 | const void __user *__pu_ptr = (ptr); \ | 100 | const void __user *__pu_ptr = (ptr); \ |
85 | asm volatile ("\n" \ | 101 | asm volatile ("\n" \ |
86 | "1: moves.l %2,(%1)+\n" \ | 102 | "1: "MOVES".l %2,(%1)+\n" \ |
87 | "2: moves.l %R2,(%1)\n" \ | 103 | "2: "MOVES".l %R2,(%1)\n" \ |
88 | "3:\n" \ | 104 | "3:\n" \ |
89 | " .section .fixup,\"ax\"\n" \ | 105 | " .section .fixup,\"ax\"\n" \ |
90 | " .even\n" \ | 106 | " .even\n" \ |
@@ -115,12 +131,12 @@ asm volatile ("\n" \ | |||
115 | #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ | 131 | #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ |
116 | type __gu_val; \ | 132 | type __gu_val; \ |
117 | asm volatile ("\n" \ | 133 | asm volatile ("\n" \ |
118 | "1: moves."#bwl" %2,%1\n" \ | 134 | "1: "MOVES"."#bwl" %2,%1\n" \ |
119 | "2:\n" \ | 135 | "2:\n" \ |
120 | " .section .fixup,\"ax\"\n" \ | 136 | " .section .fixup,\"ax\"\n" \ |
121 | " .even\n" \ | 137 | " .even\n" \ |
122 | "10: move.l %3,%0\n" \ | 138 | "10: move.l %3,%0\n" \ |
123 | " sub."#bwl" %1,%1\n" \ | 139 | " sub.l %1,%1\n" \ |
124 | " jra 2b\n" \ | 140 | " jra 2b\n" \ |
125 | " .previous\n" \ | 141 | " .previous\n" \ |
126 | "\n" \ | 142 | "\n" \ |
@@ -152,8 +168,8 @@ asm volatile ("\n" \ | |||
152 | const void *__gu_ptr = (ptr); \ | 168 | const void *__gu_ptr = (ptr); \ |
153 | u64 __gu_val; \ | 169 | u64 __gu_val; \ |
154 | asm volatile ("\n" \ | 170 | asm volatile ("\n" \ |
155 | "1: moves.l (%2)+,%1\n" \ | 171 | "1: "MOVES".l (%2)+,%1\n" \ |
156 | "2: moves.l (%2),%R1\n" \ | 172 | "2: "MOVES".l (%2),%R1\n" \ |
157 | "3:\n" \ | 173 | "3:\n" \ |
158 | " .section .fixup,\"ax\"\n" \ | 174 | " .section .fixup,\"ax\"\n" \ |
159 | " .even\n" \ | 175 | " .even\n" \ |
@@ -188,12 +204,12 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned | |||
188 | 204 | ||
189 | #define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ | 205 | #define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ |
190 | asm volatile ("\n" \ | 206 | asm volatile ("\n" \ |
191 | "1: moves."#s1" (%2)+,%3\n" \ | 207 | "1: "MOVES"."#s1" (%2)+,%3\n" \ |
192 | " move."#s1" %3,(%1)+\n" \ | 208 | " move."#s1" %3,(%1)+\n" \ |
193 | "2: moves."#s2" (%2)+,%3\n" \ | 209 | "2: "MOVES"."#s2" (%2)+,%3\n" \ |
194 | " move."#s2" %3,(%1)+\n" \ | 210 | " move."#s2" %3,(%1)+\n" \ |
195 | " .ifnc \""#s3"\",\"\"\n" \ | 211 | " .ifnc \""#s3"\",\"\"\n" \ |
196 | "3: moves."#s3" (%2)+,%3\n" \ | 212 | "3: "MOVES"."#s3" (%2)+,%3\n" \ |
197 | " move."#s3" %3,(%1)+\n" \ | 213 | " move."#s3" %3,(%1)+\n" \ |
198 | " .endif\n" \ | 214 | " .endif\n" \ |
199 | "4:\n" \ | 215 | "4:\n" \ |
@@ -269,13 +285,13 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n) | |||
269 | #define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \ | 285 | #define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \ |
270 | asm volatile ("\n" \ | 286 | asm volatile ("\n" \ |
271 | " move."#s1" (%2)+,%3\n" \ | 287 | " move."#s1" (%2)+,%3\n" \ |
272 | "11: moves."#s1" %3,(%1)+\n" \ | 288 | "11: "MOVES"."#s1" %3,(%1)+\n" \ |
273 | "12: move."#s2" (%2)+,%3\n" \ | 289 | "12: move."#s2" (%2)+,%3\n" \ |
274 | "21: moves."#s2" %3,(%1)+\n" \ | 290 | "21: "MOVES"."#s2" %3,(%1)+\n" \ |
275 | "22:\n" \ | 291 | "22:\n" \ |
276 | " .ifnc \""#s3"\",\"\"\n" \ | 292 | " .ifnc \""#s3"\",\"\"\n" \ |
277 | " move."#s3" (%2)+,%3\n" \ | 293 | " move."#s3" (%2)+,%3\n" \ |
278 | "31: moves."#s3" %3,(%1)+\n" \ | 294 | "31: "MOVES"."#s3" %3,(%1)+\n" \ |
279 | "32:\n" \ | 295 | "32:\n" \ |
280 | " .endif\n" \ | 296 | " .endif\n" \ |
281 | "4:\n" \ | 297 | "4:\n" \ |
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c index 13854ed8cd9a..5664386338da 100644 --- a/arch/m68k/lib/uaccess.c +++ b/arch/m68k/lib/uaccess.c | |||
@@ -15,17 +15,17 @@ unsigned long __generic_copy_from_user(void *to, const void __user *from, | |||
15 | asm volatile ("\n" | 15 | asm volatile ("\n" |
16 | " tst.l %0\n" | 16 | " tst.l %0\n" |
17 | " jeq 2f\n" | 17 | " jeq 2f\n" |
18 | "1: moves.l (%1)+,%3\n" | 18 | "1: "MOVES".l (%1)+,%3\n" |
19 | " move.l %3,(%2)+\n" | 19 | " move.l %3,(%2)+\n" |
20 | " subq.l #1,%0\n" | 20 | " subq.l #1,%0\n" |
21 | " jne 1b\n" | 21 | " jne 1b\n" |
22 | "2: btst #1,%5\n" | 22 | "2: btst #1,%5\n" |
23 | " jeq 4f\n" | 23 | " jeq 4f\n" |
24 | "3: moves.w (%1)+,%3\n" | 24 | "3: "MOVES".w (%1)+,%3\n" |
25 | " move.w %3,(%2)+\n" | 25 | " move.w %3,(%2)+\n" |
26 | "4: btst #0,%5\n" | 26 | "4: btst #0,%5\n" |
27 | " jeq 6f\n" | 27 | " jeq 6f\n" |
28 | "5: moves.b (%1)+,%3\n" | 28 | "5: "MOVES".b (%1)+,%3\n" |
29 | " move.b %3,(%2)+\n" | 29 | " move.b %3,(%2)+\n" |
30 | "6:\n" | 30 | "6:\n" |
31 | " .section .fixup,\"ax\"\n" | 31 | " .section .fixup,\"ax\"\n" |
@@ -68,17 +68,17 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, | |||
68 | " tst.l %0\n" | 68 | " tst.l %0\n" |
69 | " jeq 4f\n" | 69 | " jeq 4f\n" |
70 | "1: move.l (%1)+,%3\n" | 70 | "1: move.l (%1)+,%3\n" |
71 | "2: moves.l %3,(%2)+\n" | 71 | "2: "MOVES".l %3,(%2)+\n" |
72 | "3: subq.l #1,%0\n" | 72 | "3: subq.l #1,%0\n" |
73 | " jne 1b\n" | 73 | " jne 1b\n" |
74 | "4: btst #1,%5\n" | 74 | "4: btst #1,%5\n" |
75 | " jeq 6f\n" | 75 | " jeq 6f\n" |
76 | " move.w (%1)+,%3\n" | 76 | " move.w (%1)+,%3\n" |
77 | "5: moves.w %3,(%2)+\n" | 77 | "5: "MOVES".w %3,(%2)+\n" |
78 | "6: btst #0,%5\n" | 78 | "6: btst #0,%5\n" |
79 | " jeq 8f\n" | 79 | " jeq 8f\n" |
80 | " move.b (%1)+,%3\n" | 80 | " move.b (%1)+,%3\n" |
81 | "7: moves.b %3,(%2)+\n" | 81 | "7: "MOVES".b %3,(%2)+\n" |
82 | "8:\n" | 82 | "8:\n" |
83 | " .section .fixup,\"ax\"\n" | 83 | " .section .fixup,\"ax\"\n" |
84 | " .even\n" | 84 | " .even\n" |
@@ -115,7 +115,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count) | |||
115 | return count; | 115 | return count; |
116 | 116 | ||
117 | asm volatile ("\n" | 117 | asm volatile ("\n" |
118 | "1: moves.b (%2)+,%4\n" | 118 | "1: "MOVES".b (%2)+,%4\n" |
119 | " move.b %4,(%1)+\n" | 119 | " move.b %4,(%1)+\n" |
120 | " jeq 2f\n" | 120 | " jeq 2f\n" |
121 | " subq.l #1,%3\n" | 121 | " subq.l #1,%3\n" |
@@ -152,7 +152,7 @@ long strnlen_user(const char __user *src, long n) | |||
152 | asm volatile ("\n" | 152 | asm volatile ("\n" |
153 | "1: subq.l #1,%1\n" | 153 | "1: subq.l #1,%1\n" |
154 | " jmi 3f\n" | 154 | " jmi 3f\n" |
155 | "2: moves.b (%0)+,%2\n" | 155 | "2: "MOVES".b (%0)+,%2\n" |
156 | " tst.b %2\n" | 156 | " tst.b %2\n" |
157 | " jne 1b\n" | 157 | " jne 1b\n" |
158 | " jra 4f\n" | 158 | " jra 4f\n" |
@@ -188,15 +188,15 @@ unsigned long __clear_user(void __user *to, unsigned long n) | |||
188 | asm volatile ("\n" | 188 | asm volatile ("\n" |
189 | " tst.l %0\n" | 189 | " tst.l %0\n" |
190 | " jeq 3f\n" | 190 | " jeq 3f\n" |
191 | "1: moves.l %2,(%1)+\n" | 191 | "1: "MOVES".l %2,(%1)+\n" |
192 | "2: subq.l #1,%0\n" | 192 | "2: subq.l #1,%0\n" |
193 | " jne 1b\n" | 193 | " jne 1b\n" |
194 | "3: btst #1,%4\n" | 194 | "3: btst #1,%4\n" |
195 | " jeq 5f\n" | 195 | " jeq 5f\n" |
196 | "4: moves.w %2,(%1)+\n" | 196 | "4: "MOVES".w %2,(%1)+\n" |
197 | "5: btst #0,%4\n" | 197 | "5: btst #0,%4\n" |
198 | " jeq 7f\n" | 198 | " jeq 7f\n" |
199 | "6: moves.b %2,(%1)\n" | 199 | "6: "MOVES".b %2,(%1)\n" |
200 | "7:\n" | 200 | "7:\n" |
201 | " .section .fixup,\"ax\"\n" | 201 | " .section .fixup,\"ax\"\n" |
202 | " .even\n" | 202 | " .even\n" |