author	Greg Ungerer <gerg@uclinux.org>	2011-10-14 00:43:30 -0400
committer	Greg Ungerer <gerg@uclinux.org>	2011-12-29 19:20:21 -0500
commit	e08d703cc2ab6e47dbd10a74eb029f7dfa93d71d (patch)
tree	f81a41ca2a92ed39a723b024b984d220373e6e01 /arch/m68k/include
parent	2c9b82adb5ea65745d5d73d97bb0e1cc16cba4a0 (diff)
m68k: modify user space access functions to support ColdFire CPUs
Modify the user space access functions to support the ColdFire V4e cores
running with the MMU enabled. The ColdFire processors do not support the
"moves" instruction used by the traditional 680x0 processors for moving
data into and out of another address space. They only support the notion
of a single address space, and use the usual "move" instruction to access
it.

Create a new config symbol (CONFIG_CPU_HAS_ADDRESS_SPACES) to mark the
CPU types that support separate address spaces, and thus also support the
sfc/dfc registers and the "moves" instruction that go along with that.

The code is almost identical for user space access, so let's just use a
define to choose either "move" or "moves" in the assembler code.

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Acked-by: Matt Waddel <mwaddel@yahoo.com>
Acked-by: Kurt Mahan <kmahan@xmission.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
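To make the string-pasting trick from the last paragraph concrete, here is a
minimal host-side sketch (not part of the patch); the printf demo and the
hard-coded config define are illustrative assumptions only:

#include <stdio.h>

/* Pretend we are configuring for a classic 680x0, which has separate
 * address spaces (the patch's new config symbol). */
#define CONFIG_CPU_HAS_ADDRESS_SPACES

#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
#define MOVES "moves"	/* 680x0: cross-space move driven by sfc/dfc */
#else
#define MOVES "move"	/* ColdFire: single address space, plain move */
#endif

int main(void)
{
	/* Adjacent string literals concatenate, so an asm template such
	 * as "1:	"MOVES".l	%2,%1\n" becomes "1:	moves.l	%2,%1\n"
	 * (or "1:	move.l	%2,%1\n" on ColdFire). */
	printf("%s", "1:\t" MOVES ".l\t%2,%1\n");
	return 0;
}

Because the selection happens in the preprocessor, every asm template below
stays a single source line and compiles to the right opcode for each CPU.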
Diffstat (limited to 'arch/m68k/include')
-rw-r--r--	arch/m68k/include/asm/segment.h	4
-rw-r--r--	arch/m68k/include/asm/uaccess_mm.h	42
2 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h
index ee959219fdfe..1a142e9ceaad 100644
--- a/arch/m68k/include/asm/segment.h
+++ b/arch/m68k/include/asm/segment.h
@@ -31,7 +31,7 @@ typedef struct {
 
 static inline mm_segment_t get_fs(void)
 {
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
 	mm_segment_t _v;
 	__asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
 
@@ -49,7 +49,7 @@ static inline mm_segment_t get_ds(void)
 
 static inline void set_fs(mm_segment_t val)
 {
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
 	__asm__ __volatile__ ("movec %0,%/sfc\n\t"
 			      "movec %0,%/dfc\n\t"
 			      : /* no outputs */ : "r" (val.seg) : "memory");
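For context on the segment.h hunks above, a hedged sketch of the classic
pattern get_fs()/set_fs() exist to support; read_kernel_buffer() is a
hypothetical helper, not code from this patch:

#include <linux/uaccess.h>	/* get_fs()/set_fs(), copy_from_user() */
#include <linux/errno.h>

/* Hypothetical helper: temporarily widen the access limit so the
 * user-space accessors may be used on a kernel address.  On CPUs with
 * CONFIG_CPU_HAS_ADDRESS_SPACES, set_fs() reprograms sfc/dfc via movec;
 * on ColdFire it only updates the task's address-limit field. */
static int read_kernel_buffer(void *dst, const void *src, unsigned long n)
{
	mm_segment_t old_fs = get_fs();
	unsigned long left;

	set_fs(KERNEL_DS);
	left = copy_from_user(dst, (const void __user *)src, n);
	set_fs(old_fs);		/* always restore the previous segment */

	return left ? -EFAULT : 0;
}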
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 7107f3fbdbb6..9c80cd515b20 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -21,6 +21,22 @@ static inline int access_ok(int type, const void __user *addr,
 }
 
 /*
+ * Not all variants of the 68k family support the notion of address spaces.
+ * The traditional 680x0 parts do, and they use the sfc/dfc registers and
+ * the "moves" instruction to access user space from kernel space. Other
+ * family members like ColdFire don't support this, and only have a single
+ * address space, and use the usual "move" instruction for user space access.
+ *
+ * Outside of this difference the user space access functions are the same.
+ * So let's keep the code simple and just define what we need to use.
+ */
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+#define MOVES	"moves"
+#else
+#define MOVES	"move"
+#endif
+
+/*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
  * the address at which the program should continue. No registers are
@@ -43,7 +59,7 @@ extern int __get_user_bad(void);
 
 #define __put_user_asm(res, x, ptr, bwl, reg, err) \
 asm volatile ("\n" \
-	"1: moves."#bwl" %2,%1\n" \
+	"1: "MOVES"."#bwl" %2,%1\n" \
 	"2:\n" \
 	" .section .fixup,\"ax\"\n" \
 	" .even\n" \
@@ -83,8 +99,8 @@ asm volatile ("\n" \
 { \
 	const void __user *__pu_ptr = (ptr); \
 	asm volatile ("\n" \
-		"1: moves.l %2,(%1)+\n" \
-		"2: moves.l %R2,(%1)\n" \
+		"1: "MOVES".l %2,(%1)+\n" \
+		"2: "MOVES".l %R2,(%1)\n" \
 		"3:\n" \
 		" .section .fixup,\"ax\"\n" \
 		" .even\n" \
@@ -115,12 +131,12 @@ asm volatile ("\n" \
 #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
 	type __gu_val; \
 	asm volatile ("\n" \
-		"1: moves."#bwl" %2,%1\n" \
+		"1: "MOVES"."#bwl" %2,%1\n" \
 		"2:\n" \
 		" .section .fixup,\"ax\"\n" \
 		" .even\n" \
 		"10: move.l %3,%0\n" \
-		" sub."#bwl" %1,%1\n" \
+		" sub.l %1,%1\n" \
 		" jra 2b\n" \
 		" .previous\n" \
 		"\n" \
@@ -152,8 +168,8 @@ asm volatile ("\n" \
 	const void *__gu_ptr = (ptr); \
 	u64 __gu_val; \
 	asm volatile ("\n" \
-		"1: moves.l (%2)+,%1\n" \
-		"2: moves.l (%2),%R1\n" \
+		"1: "MOVES".l (%2)+,%1\n" \
+		"2: "MOVES".l (%2),%R1\n" \
 		"3:\n" \
 		" .section .fixup,\"ax\"\n" \
 		" .even\n" \
@@ -188,12 +204,12 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned
 
 #define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
 	asm volatile ("\n" \
-		"1: moves."#s1" (%2)+,%3\n" \
+		"1: "MOVES"."#s1" (%2)+,%3\n" \
 		" move."#s1" %3,(%1)+\n" \
-		"2: moves."#s2" (%2)+,%3\n" \
+		"2: "MOVES"."#s2" (%2)+,%3\n" \
 		" move."#s2" %3,(%1)+\n" \
 		" .ifnc \""#s3"\",\"\"\n" \
-		"3: moves."#s3" (%2)+,%3\n" \
+		"3: "MOVES"."#s3" (%2)+,%3\n" \
 		" move."#s3" %3,(%1)+\n" \
 		" .endif\n" \
 		"4:\n" \
@@ -269,13 +285,13 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 #define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
 	asm volatile ("\n" \
 		" move."#s1" (%2)+,%3\n" \
-		"11: moves."#s1" %3,(%1)+\n" \
+		"11: "MOVES"."#s1" %3,(%1)+\n" \
 		"12: move."#s2" (%2)+,%3\n" \
-		"21: moves."#s2" %3,(%1)+\n" \
+		"21: "MOVES"."#s2" %3,(%1)+\n" \
 		"22:\n" \
 		" .ifnc \""#s3"\",\"\"\n" \
 		" move."#s3" (%2)+,%3\n" \
-		"31: moves."#s3" %3,(%1)+\n" \
+		"31: "MOVES"."#s3" %3,(%1)+\n" \
 		"32:\n" \
 		" .endif\n" \
 		"4:\n" \