-rw-r--r--	arch/metag/include/asm/io.h	165
-rw-r--r--	arch/metag/include/asm/uaccess.h	241
-rw-r--r--	arch/metag/lib/usercopy.c	1341
-rw-r--r--	arch/metag/mm/ioremap.c	89
-rw-r--r--	arch/metag/mm/maccess.c	68
5 files changed, 1904 insertions, 0 deletions
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h
new file mode 100644
index 000000000000..9359e5048442
--- /dev/null
+++ b/arch/metag/include/asm/io.h
@@ -0,0 +1,165 @@
1 | #ifndef _ASM_METAG_IO_H | ||
2 | #define _ASM_METAG_IO_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | #define IO_SPACE_LIMIT 0 | ||
7 | |||
8 | #define page_to_bus page_to_phys | ||
9 | #define bus_to_page phys_to_page | ||
10 | |||
11 | /* | ||
12 | * Generic I/O | ||
13 | */ | ||
14 | |||
15 | #define __raw_readb __raw_readb | ||
16 | static inline u8 __raw_readb(const volatile void __iomem *addr) | ||
17 | { | ||
18 | u8 ret; | ||
19 | asm volatile("GETB %0,[%1]" | ||
20 | : "=da" (ret) | ||
21 | : "da" (addr) | ||
22 | : "memory"); | ||
23 | return ret; | ||
24 | } | ||
25 | |||
26 | #define __raw_readw __raw_readw | ||
27 | static inline u16 __raw_readw(const volatile void __iomem *addr) | ||
28 | { | ||
29 | u16 ret; | ||
30 | asm volatile("GETW %0,[%1]" | ||
31 | : "=da" (ret) | ||
32 | : "da" (addr) | ||
33 | : "memory"); | ||
34 | return ret; | ||
35 | } | ||
36 | |||
37 | #define __raw_readl __raw_readl | ||
38 | static inline u32 __raw_readl(const volatile void __iomem *addr) | ||
39 | { | ||
40 | u32 ret; | ||
41 | asm volatile("GETD %0,[%1]" | ||
42 | : "=da" (ret) | ||
43 | : "da" (addr) | ||
44 | : "memory"); | ||
45 | return ret; | ||
46 | } | ||
47 | |||
48 | #define __raw_readq __raw_readq | ||
49 | static inline u64 __raw_readq(const volatile void __iomem *addr) | ||
50 | { | ||
51 | u64 ret; | ||
52 | asm volatile("GETL %0,%t0,[%1]" | ||
53 | : "=da" (ret) | ||
54 | : "da" (addr) | ||
55 | : "memory"); | ||
56 | return ret; | ||
57 | } | ||
58 | |||
59 | #define __raw_writeb __raw_writeb | ||
60 | static inline void __raw_writeb(u8 b, volatile void __iomem *addr) | ||
61 | { | ||
62 | asm volatile("SETB [%0],%1" | ||
63 | : | ||
64 | : "da" (addr), | ||
65 | "da" (b) | ||
66 | : "memory"); | ||
67 | } | ||
68 | |||
69 | #define __raw_writew __raw_writew | ||
70 | static inline void __raw_writew(u16 b, volatile void __iomem *addr) | ||
71 | { | ||
72 | asm volatile("SETW [%0],%1" | ||
73 | : | ||
74 | : "da" (addr), | ||
75 | "da" (b) | ||
76 | : "memory"); | ||
77 | } | ||
78 | |||
79 | #define __raw_writel __raw_writel | ||
80 | static inline void __raw_writel(u32 b, volatile void __iomem *addr) | ||
81 | { | ||
82 | asm volatile("SETD [%0],%1" | ||
83 | : | ||
84 | : "da" (addr), | ||
85 | "da" (b) | ||
86 | : "memory"); | ||
87 | } | ||
88 | |||
89 | #define __raw_writeq __raw_writeq | ||
90 | static inline void __raw_writeq(u64 b, volatile void __iomem *addr) | ||
91 | { | ||
92 | asm volatile("SETL [%0],%1,%t1" | ||
93 | : | ||
94 | : "da" (addr), | ||
95 | "da" (b) | ||
96 | : "memory"); | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * The generic io.h can define all the other generic accessors | ||
101 | */ | ||
102 | |||
103 | #include <asm-generic/io.h> | ||
104 | |||
105 | /* | ||
106 | * Despite being a 32bit architecture, Meta can do 64bit memory accesses | ||
107 | * (assuming the bus supports it). | ||
108 | */ | ||
109 | |||
110 | #define readq __raw_readq | ||
111 | #define writeq __raw_writeq | ||
112 | |||
113 | /* | ||
114 | * Meta specific I/O for accessing non-MMU areas. | ||
115 | * | ||
116 | * These can be provided with a physical address rather than an __iomem pointer | ||
117 | * and should only be used by core architecture code for accessing fixed core | ||
118 | * registers. Generic drivers should use ioremap and the generic I/O accessors. | ||
119 | */ | ||
120 | |||
121 | #define metag_in8(addr) __raw_readb((volatile void __iomem *)(addr)) | ||
122 | #define metag_in16(addr) __raw_readw((volatile void __iomem *)(addr)) | ||
123 | #define metag_in32(addr) __raw_readl((volatile void __iomem *)(addr)) | ||
124 | #define metag_in64(addr) __raw_readq((volatile void __iomem *)(addr)) | ||
125 | |||
126 | #define metag_out8(b, addr) __raw_writeb(b, (volatile void __iomem *)(addr)) | ||
127 | #define metag_out16(b, addr) __raw_writew(b, (volatile void __iomem *)(addr)) | ||
128 | #define metag_out32(b, addr) __raw_writel(b, (volatile void __iomem *)(addr)) | ||
129 | #define metag_out64(b, addr) __raw_writeq(b, (volatile void __iomem *)(addr)) | ||
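As a usage sketch only (the register address and enable bit below are invented for illustration, not taken from this patch), core architecture code would drive a fixed register roughly like this:

/* hypothetical fixed core register -- illustration only */
#define HYP_CORE_CTRL		0x04800000
#define HYP_CORE_CTRL_EN	0x1

static void hyp_core_enable(void)
{
	u32 ctrl = metag_in32(HYP_CORE_CTRL);	/* read-modify-write */

	metag_out32(ctrl | HYP_CORE_CTRL_EN, HYP_CORE_CTRL);
}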
130 | |||
131 | /* | ||
132 | * io remapping functions | ||
133 | */ | ||
134 | |||
135 | extern void __iomem *__ioremap(unsigned long offset, | ||
136 | size_t size, unsigned long flags); | ||
137 | extern void __iounmap(void __iomem *addr); | ||
138 | |||
139 | /** | ||
140 | * ioremap - map bus memory into CPU space | ||
141 | * @offset: bus address of the memory | ||
142 | * @size: size of the resource to map | ||
143 | * | ||
144 | * ioremap performs a platform specific sequence of operations to | ||
145 | * make bus memory CPU accessible via the readb/readw/readl/writeb/ | ||
146 | * writew/writel functions and the other mmio helpers. The returned | ||
147 | * address is not guaranteed to be usable directly as a virtual | ||
148 | * address. | ||
149 | */ | ||
150 | #define ioremap(offset, size) \ | ||
151 | __ioremap((offset), (size), 0) | ||
152 | |||
153 | #define ioremap_nocache(offset, size) \ | ||
154 | __ioremap((offset), (size), 0) | ||
155 | |||
156 | #define ioremap_cached(offset, size) \ | ||
157 | __ioremap((offset), (size), _PAGE_CACHEABLE) | ||
158 | |||
159 | #define ioremap_wc(offset, size) \ | ||
160 | __ioremap((offset), (size), _PAGE_WR_COMBINE) | ||
161 | |||
162 | #define iounmap(addr) \ | ||
163 | __iounmap(addr) | ||
164 | |||
165 | #endif /* _ASM_METAG_IO_H */ | ||
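A minimal driver-side sketch of the intended flow, assuming a hypothetical device base address and register layout (nothing here comes from the patch itself): map the region, use the generic accessors pulled in via asm-generic/io.h, then unmap.

static int hyp_dev_init(unsigned long phys_base)	/* hypothetical helper */
{
	void __iomem *regs = ioremap_nocache(phys_base, 0x100);

	if (!regs)
		return -ENOMEM;

	writel(1, regs + 0x10);		/* hypothetical enable register */
	(void)readl(regs + 0x14);	/* hypothetical status register */

	iounmap(regs);
	return 0;
}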
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
new file mode 100644
index 000000000000..0748b0a97986
--- /dev/null
+++ b/arch/metag/include/asm/uaccess.h
@@ -0,0 +1,241 @@
1 | #ifndef __METAG_UACCESS_H | ||
2 | #define __METAG_UACCESS_H | ||
3 | |||
4 | /* | ||
5 | * User space memory access functions | ||
6 | */ | ||
7 | #include <linux/sched.h> | ||
8 | |||
9 | #define VERIFY_READ 0 | ||
10 | #define VERIFY_WRITE 1 | ||
11 | |||
12 | /* | ||
13 | * The fs value determines whether argument validity checking should be | ||
14 | * performed or not. If get_fs() == USER_DS, checking is performed, with | ||
15 | * get_fs() == KERNEL_DS, checking is bypassed. | ||
16 | * | ||
17 | * For historical reasons, these macros are grossly misnamed. | ||
18 | */ | ||
19 | |||
20 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) | ||
21 | |||
22 | #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) | ||
23 | #define USER_DS MAKE_MM_SEG(PAGE_OFFSET) | ||
24 | |||
25 | #define get_ds() (KERNEL_DS) | ||
26 | #define get_fs() (current_thread_info()->addr_limit) | ||
27 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | ||
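The usual pattern these enable (a sketch of common usage, not code from this patch) is to temporarily lift the limit so a kernel pointer passes the checks below, then restore it:

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* ... uaccess routines may now be pointed at kernel memory ... */
	set_fs(old_fs);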
28 | |||
29 | #define segment_eq(a, b) ((a).seg == (b).seg) | ||
30 | |||
31 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) | ||
32 | /* | ||
33 | * Explicitly allow NULL pointers here. Parts of the kernel such | ||
34 | * as readv/writev use access_ok to validate pointers, but want | ||
35 | * to allow NULL pointers for various reasons. NULL pointers are | ||
36 | * safe to allow through because the first page is not mappable on | ||
37 | * Meta. | ||
38 | * | ||
39 | * We also wish to avoid letting user code access the system area | ||
40 | * and the kernel half of the address space. | ||
41 | */ | ||
42 | #define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \ | ||
43 | ((addr) > PAGE_OFFSET && \ | ||
44 | (addr) < LINCORE_BASE)) | ||
45 | |||
46 | static inline int __access_ok(unsigned long addr, unsigned long size) | ||
47 | { | ||
48 | return __kernel_ok || !__user_bad(addr, size); | ||
49 | } | ||
50 | |||
51 | #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \ | ||
52 | (unsigned long)(size)) | ||
53 | |||
54 | static inline int verify_area(int type, const void *addr, unsigned long size) | ||
55 | { | ||
56 | return access_ok(type, addr, size) ? 0 : -EFAULT; | ||
57 | } | ||
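For illustration (the buffer and length names are hypothetical), a caller that wants to validate a whole user range once and then use the unchecked helpers defined further down would do:

	if (!access_ok(VERIFY_WRITE, ubuf, len))
		return -EFAULT;
	/* ... repeated __put_user()/__copy_to_user() calls on ubuf ... */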
58 | |||
59 | /* | ||
60 | * The exception table consists of pairs of addresses: the first is the | ||
61 | * address of an instruction that is allowed to fault, and the second is | ||
62 | * the address at which the program should continue. No registers are | ||
63 | * modified, so it is entirely up to the continuation code to figure out | ||
64 | * what to do. | ||
65 | * | ||
66 | * All the routines below use bits of fixup code that are out of line | ||
67 | * with the main instruction path. This means when everything is well, | ||
68 | * we don't even have to jump over them. Further, they do not intrude | ||
69 | * on our cache or tlb entries. | ||
70 | */ | ||
71 | struct exception_table_entry { | ||
72 | unsigned long insn, fixup; | ||
73 | }; | ||
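Conceptually, the fault handler consumes these entries roughly as in the simplified sketch below; the real fixup_exception() is only declared here and lives elsewhere in the architecture code.

/* simplified sketch -- not the actual implementation */
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup)
		regs->ctx.CurrPC = fixup->fixup;	/* resume at the fixup stub */

	return fixup != NULL;
}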
74 | |||
75 | extern int fixup_exception(struct pt_regs *regs); | ||
76 | |||
77 | /* | ||
78 | * These are the main single-value transfer routines. They automatically | ||
79 | * use the right size if we just have the right pointer type. | ||
80 | */ | ||
81 | |||
82 | #define put_user(x, ptr) \ | ||
83 | __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) | ||
84 | #define __put_user(x, ptr) \ | ||
85 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) | ||
86 | |||
87 | extern void __put_user_bad(void); | ||
88 | |||
89 | #define __put_user_nocheck(x, ptr, size) \ | ||
90 | ({ \ | ||
91 | long __pu_err; \ | ||
92 | __put_user_size((x), (ptr), (size), __pu_err); \ | ||
93 | __pu_err; \ | ||
94 | }) | ||
95 | |||
96 | #define __put_user_check(x, ptr, size) \ | ||
97 | ({ \ | ||
98 | long __pu_err = -EFAULT; \ | ||
99 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | ||
100 | if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ | ||
101 | __put_user_size((x), __pu_addr, (size), __pu_err); \ | ||
102 | __pu_err; \ | ||
103 | }) | ||
104 | |||
105 | extern long __put_user_asm_b(unsigned int x, void __user *addr); | ||
106 | extern long __put_user_asm_w(unsigned int x, void __user *addr); | ||
107 | extern long __put_user_asm_d(unsigned int x, void __user *addr); | ||
108 | extern long __put_user_asm_l(unsigned long long x, void __user *addr); | ||
109 | |||
110 | #define __put_user_size(x, ptr, size, retval) \ | ||
111 | do { \ | ||
112 | retval = 0; \ | ||
113 | switch (size) { \ | ||
114 | case 1: \ | ||
115 | retval = __put_user_asm_b((unsigned int)x, ptr); break; \ | ||
116 | case 2: \ | ||
117 | retval = __put_user_asm_w((unsigned int)x, ptr); break; \ | ||
118 | case 4: \ | ||
119 | retval = __put_user_asm_d((unsigned int)x, ptr); break; \ | ||
120 | case 8: \ | ||
121 | retval = __put_user_asm_l((unsigned long long)x, ptr); break; \ | ||
122 | default: \ | ||
123 | __put_user_bad(); \ | ||
124 | } \ | ||
125 | } while (0) | ||
126 | |||
127 | #define get_user(x, ptr) \ | ||
128 | __get_user_check((x), (ptr), sizeof(*(ptr))) | ||
129 | #define __get_user(x, ptr) \ | ||
130 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) | ||
131 | |||
132 | extern long __get_user_bad(void); | ||
133 | |||
134 | #define __get_user_nocheck(x, ptr, size) \ | ||
135 | ({ \ | ||
136 | long __gu_err, __gu_val; \ | ||
137 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ | ||
138 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
139 | __gu_err; \ | ||
140 | }) | ||
141 | |||
142 | #define __get_user_check(x, ptr, size) \ | ||
143 | ({ \ | ||
144 | long __gu_err = -EFAULT, __gu_val = 0; \ | ||
145 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | ||
146 | if (access_ok(VERIFY_READ, __gu_addr, size)) \ | ||
147 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ | ||
148 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
149 | __gu_err; \ | ||
150 | }) | ||
151 | |||
152 | extern unsigned char __get_user_asm_b(const void __user *addr, long *err); | ||
153 | extern unsigned short __get_user_asm_w(const void __user *addr, long *err); | ||
154 | extern unsigned int __get_user_asm_d(const void __user *addr, long *err); | ||
155 | |||
156 | #define __get_user_size(x, ptr, size, retval) \ | ||
157 | do { \ | ||
158 | retval = 0; \ | ||
159 | switch (size) { \ | ||
160 | case 1: \ | ||
161 | x = __get_user_asm_b(ptr, &retval); break; \ | ||
162 | case 2: \ | ||
163 | x = __get_user_asm_w(ptr, &retval); break; \ | ||
164 | case 4: \ | ||
165 | x = __get_user_asm_d(ptr, &retval); break; \ | ||
166 | default: \ | ||
167 | (x) = __get_user_bad(); \ | ||
168 | } \ | ||
169 | } while (0) | ||
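Taken together, the usual calling pattern looks like the sketch below (uptr is a hypothetical int __user pointer); both macros evaluate to 0 on success and -EFAULT if the access faults:

	int val;

	if (get_user(val, uptr))	/* fetch one int from user space */
		return -EFAULT;
	val++;
	if (put_user(val, uptr))	/* write the updated value back */
		return -EFAULT;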
170 | |||
171 | /* | ||
172 | * Copy a null terminated string from userspace. | ||
173 | * | ||
174 | * Must return: | ||
175 | * -EFAULT for an exception | ||
176 | * count if we hit the buffer limit | ||
177 | * bytes copied if we hit a null byte | ||
178 | * (without the null byte) | ||
179 | */ | ||
180 | |||
181 | extern long __must_check __strncpy_from_user(char *dst, const char __user *src, | ||
182 | long count); | ||
183 | |||
184 | #define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count) | ||
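A sketch of a typical caller honouring that return convention (uname is a hypothetical user pointer):

	char name[32];
	long len = strncpy_from_user(name, uname, sizeof(name));

	if (len < 0)
		return -EFAULT;			/* faulted while reading */
	if (len == sizeof(name))
		return -ENAMETOOLONG;		/* no NUL within the limit */
	/* otherwise name[] now holds a NUL-terminated copy of len bytes */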
185 | |||
186 | /* | ||
187 | * Return the size of a string (including the ending 0) | ||
188 | * | ||
189 | * Return 0 on exception, a value greater than N if too long | ||
190 | */ | ||
191 | extern long __must_check strnlen_user(const char __user *src, long count); | ||
192 | |||
193 | #define strlen_user(str) strnlen_user(str, 32767) | ||
194 | |||
195 | extern unsigned long __must_check __copy_user_zeroing(void *to, | ||
196 | const void __user *from, | ||
197 | unsigned long n); | ||
198 | |||
199 | static inline unsigned long | ||
200 | copy_from_user(void *to, const void __user *from, unsigned long n) | ||
201 | { | ||
202 | if (access_ok(VERIFY_READ, from, n)) | ||
203 | return __copy_user_zeroing(to, from, n); | ||
204 | return n; | ||
205 | } | ||
206 | |||
207 | #define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n) | ||
208 | #define __copy_from_user_inatomic __copy_from_user | ||
209 | |||
210 | extern unsigned long __must_check __copy_user(void __user *to, | ||
211 | const void *from, | ||
212 | unsigned long n); | ||
213 | |||
214 | static inline unsigned long copy_to_user(void __user *to, const void *from, | ||
215 | unsigned long n) | ||
216 | { | ||
217 | if (access_ok(VERIFY_WRITE, to, n)) | ||
218 | return __copy_user(to, from, n); | ||
219 | return n; | ||
220 | } | ||
221 | |||
222 | #define __copy_to_user(to, from, n) __copy_user(to, from, n) | ||
223 | #define __copy_to_user_inatomic __copy_to_user | ||
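Since both copy routines return the number of bytes left uncopied, the conventional usage is simply (the struct layout and pointer names are illustrative):

	struct hyp_args args;	/* hypothetical argument block */

	if (copy_from_user(&args, uptr, sizeof(args)))
		return -EFAULT;
	/* ... act on args ... */
	if (copy_to_user(uptr, &args, sizeof(args)))
		return -EFAULT;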
224 | |||
225 | /* | ||
226 | * Zero Userspace | ||
227 | */ | ||
228 | |||
229 | extern unsigned long __must_check __do_clear_user(void __user *to, | ||
230 | unsigned long n); | ||
231 | |||
232 | static inline unsigned long clear_user(void __user *to, unsigned long n) | ||
233 | { | ||
234 | if (access_ok(VERIFY_WRITE, to, n)) | ||
235 | return __do_clear_user(to, n); | ||
236 | return n; | ||
237 | } | ||
238 | |||
239 | #define __clear_user(to, n) __do_clear_user(to, n) | ||
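clear_user() follows the same convention, e.g. for zero-padding the unused tail of a user buffer (the sizes here are hypothetical):

	if (used < len && clear_user(ubuf + used, len - used))
		return -EFAULT;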
240 | |||
241 | #endif /* _METAG_UACCESS_H */ | ||
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
new file mode 100644
index 000000000000..92f6dbb34e83
--- /dev/null
+++ b/arch/metag/lib/usercopy.c
@@ -0,0 +1,1341 @@
1 | /* | ||
2 | * User address space access functions. | ||
3 | * The non-inlined parts of asm-metag/uaccess.h are here. | ||
4 | * | ||
5 | * Copyright (C) 2006, Imagination Technologies. | ||
6 | * Copyright (C) 2000, Axis Communications AB. | ||
7 | * | ||
8 | * Written by Hans-Peter Nilsson. | ||
9 | * Pieces used from memcpy, originally by Kenny Ranerup a long time ago. | ||
10 | * Modified for Meta by Will Newton. | ||
11 | */ | ||
12 | |||
13 | #include <linux/uaccess.h> | ||
14 | #include <asm/cache.h> /* def of L1_CACHE_BYTES */ | ||
15 | |||
16 | #define USE_RAPF | ||
17 | #define RAPF_MIN_BUF_SIZE (3*L1_CACHE_BYTES) | ||
18 | |||
19 | |||
20 | /* The "double write" in this code is because the Meta will not fault | ||
21 | * immediately unless the memory pipe is forced to by e.g. a data stall or | ||
22 | * another memory op. The second write should be discarded by the write | ||
23 | * combiner so should have virtually no cost. | ||
24 | */ | ||
25 | |||
26 | #define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
27 | asm volatile ( \ | ||
28 | COPY \ | ||
29 | "1:\n" \ | ||
30 | " .section .fixup,\"ax\"\n" \ | ||
31 | " MOV D1Ar1,#0\n" \ | ||
32 | FIXUP \ | ||
33 | " MOVT D1Ar1,#HI(1b)\n" \ | ||
34 | " JUMP D1Ar1,#LO(1b)\n" \ | ||
35 | " .previous\n" \ | ||
36 | " .section __ex_table,\"a\"\n" \ | ||
37 | TENTRY \ | ||
38 | " .previous\n" \ | ||
39 | : "=r" (to), "=r" (from), "=r" (ret) \ | ||
40 | : "0" (to), "1" (from), "2" (ret) \ | ||
41 | : "D1Ar1", "memory") | ||
42 | |||
43 | |||
44 | #define __asm_copy_to_user_1(to, from, ret) \ | ||
45 | __asm_copy_user_cont(to, from, ret, \ | ||
46 | " GETB D1Ar1,[%1++]\n" \ | ||
47 | " SETB [%0],D1Ar1\n" \ | ||
48 | "2: SETB [%0++],D1Ar1\n", \ | ||
49 | "3: ADD %2,%2,#1\n", \ | ||
50 | " .long 2b,3b\n") | ||
51 | |||
52 | #define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
53 | __asm_copy_user_cont(to, from, ret, \ | ||
54 | " GETW D1Ar1,[%1++]\n" \ | ||
55 | " SETW [%0],D1Ar1\n" \ | ||
56 | "2: SETW [%0++],D1Ar1\n" COPY, \ | ||
57 | "3: ADD %2,%2,#2\n" FIXUP, \ | ||
58 | " .long 2b,3b\n" TENTRY) | ||
59 | |||
60 | #define __asm_copy_to_user_2(to, from, ret) \ | ||
61 | __asm_copy_to_user_2x_cont(to, from, ret, "", "", "") | ||
62 | |||
63 | #define __asm_copy_to_user_3(to, from, ret) \ | ||
64 | __asm_copy_to_user_2x_cont(to, from, ret, \ | ||
65 | " GETB D1Ar1,[%1++]\n" \ | ||
66 | " SETB [%0],D1Ar1\n" \ | ||
67 | "4: SETB [%0++],D1Ar1\n", \ | ||
68 | "5: ADD %2,%2,#1\n", \ | ||
69 | " .long 4b,5b\n") | ||
70 | |||
71 | #define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
72 | __asm_copy_user_cont(to, from, ret, \ | ||
73 | " GETD D1Ar1,[%1++]\n" \ | ||
74 | " SETD [%0],D1Ar1\n" \ | ||
75 | "2: SETD [%0++],D1Ar1\n" COPY, \ | ||
76 | "3: ADD %2,%2,#4\n" FIXUP, \ | ||
77 | " .long 2b,3b\n" TENTRY) | ||
78 | |||
79 | #define __asm_copy_to_user_4(to, from, ret) \ | ||
80 | __asm_copy_to_user_4x_cont(to, from, ret, "", "", "") | ||
81 | |||
82 | #define __asm_copy_to_user_5(to, from, ret) \ | ||
83 | __asm_copy_to_user_4x_cont(to, from, ret, \ | ||
84 | " GETB D1Ar1,[%1++]\n" \ | ||
85 | " SETB [%0],D1Ar1\n" \ | ||
86 | "4: SETB [%0++],D1Ar1\n", \ | ||
87 | "5: ADD %2,%2,#1\n", \ | ||
88 | " .long 4b,5b\n") | ||
89 | |||
90 | #define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
91 | __asm_copy_to_user_4x_cont(to, from, ret, \ | ||
92 | " GETW D1Ar1,[%1++]\n" \ | ||
93 | " SETW [%0],D1Ar1\n" \ | ||
94 | "4: SETW [%0++],D1Ar1\n" COPY, \ | ||
95 | "5: ADD %2,%2,#2\n" FIXUP, \ | ||
96 | " .long 4b,5b\n" TENTRY) | ||
97 | |||
98 | #define __asm_copy_to_user_6(to, from, ret) \ | ||
99 | __asm_copy_to_user_6x_cont(to, from, ret, "", "", "") | ||
100 | |||
101 | #define __asm_copy_to_user_7(to, from, ret) \ | ||
102 | __asm_copy_to_user_6x_cont(to, from, ret, \ | ||
103 | " GETB D1Ar1,[%1++]\n" \ | ||
104 | " SETB [%0],D1Ar1\n" \ | ||
105 | "6: SETB [%0++],D1Ar1\n", \ | ||
106 | "7: ADD %2,%2,#1\n", \ | ||
107 | " .long 6b,7b\n") | ||
108 | |||
109 | #define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
110 | __asm_copy_to_user_4x_cont(to, from, ret, \ | ||
111 | " GETD D1Ar1,[%1++]\n" \ | ||
112 | " SETD [%0],D1Ar1\n" \ | ||
113 | "4: SETD [%0++],D1Ar1\n" COPY, \ | ||
114 | "5: ADD %2,%2,#4\n" FIXUP, \ | ||
115 | " .long 4b,5b\n" TENTRY) | ||
116 | |||
117 | #define __asm_copy_to_user_8(to, from, ret) \ | ||
118 | __asm_copy_to_user_8x_cont(to, from, ret, "", "", "") | ||
119 | |||
120 | #define __asm_copy_to_user_9(to, from, ret) \ | ||
121 | __asm_copy_to_user_8x_cont(to, from, ret, \ | ||
122 | " GETB D1Ar1,[%1++]\n" \ | ||
123 | " SETB [%0],D1Ar1\n" \ | ||
124 | "6: SETB [%0++],D1Ar1\n", \ | ||
125 | "7: ADD %2,%2,#1\n", \ | ||
126 | " .long 6b,7b\n") | ||
127 | |||
128 | #define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
129 | __asm_copy_to_user_8x_cont(to, from, ret, \ | ||
130 | " GETW D1Ar1,[%1++]\n" \ | ||
131 | " SETW [%0],D1Ar1\n" \ | ||
132 | "6: SETW [%0++],D1Ar1\n" COPY, \ | ||
133 | "7: ADD %2,%2,#2\n" FIXUP, \ | ||
134 | " .long 6b,7b\n" TENTRY) | ||
135 | |||
136 | #define __asm_copy_to_user_10(to, from, ret) \ | ||
137 | __asm_copy_to_user_10x_cont(to, from, ret, "", "", "") | ||
138 | |||
139 | #define __asm_copy_to_user_11(to, from, ret) \ | ||
140 | __asm_copy_to_user_10x_cont(to, from, ret, \ | ||
141 | " GETB D1Ar1,[%1++]\n" \ | ||
142 | " SETB [%0],D1Ar1\n" \ | ||
143 | "8: SETB [%0++],D1Ar1\n", \ | ||
144 | "9: ADD %2,%2,#1\n", \ | ||
145 | " .long 8b,9b\n") | ||
146 | |||
147 | #define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
148 | __asm_copy_to_user_8x_cont(to, from, ret, \ | ||
149 | " GETD D1Ar1,[%1++]\n" \ | ||
150 | " SETD [%0],D1Ar1\n" \ | ||
151 | "6: SETD [%0++],D1Ar1\n" COPY, \ | ||
152 | "7: ADD %2,%2,#4\n" FIXUP, \ | ||
153 | " .long 6b,7b\n" TENTRY) | ||
154 | #define __asm_copy_to_user_12(to, from, ret) \ | ||
155 | __asm_copy_to_user_12x_cont(to, from, ret, "", "", "") | ||
156 | |||
157 | #define __asm_copy_to_user_13(to, from, ret) \ | ||
158 | __asm_copy_to_user_12x_cont(to, from, ret, \ | ||
159 | " GETB D1Ar1,[%1++]\n" \ | ||
160 | " SETB [%0],D1Ar1\n" \ | ||
161 | "8: SETB [%0++],D1Ar1\n", \ | ||
162 | "9: ADD %2,%2,#1\n", \ | ||
163 | " .long 8b,9b\n") | ||
164 | |||
165 | #define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
166 | __asm_copy_to_user_12x_cont(to, from, ret, \ | ||
167 | " GETW D1Ar1,[%1++]\n" \ | ||
168 | " SETW [%0],D1Ar1\n" \ | ||
169 | "8: SETW [%0++],D1Ar1\n" COPY, \ | ||
170 | "9: ADD %2,%2,#2\n" FIXUP, \ | ||
171 | " .long 8b,9b\n" TENTRY) | ||
172 | |||
173 | #define __asm_copy_to_user_14(to, from, ret) \ | ||
174 | __asm_copy_to_user_14x_cont(to, from, ret, "", "", "") | ||
175 | |||
176 | #define __asm_copy_to_user_15(to, from, ret) \ | ||
177 | __asm_copy_to_user_14x_cont(to, from, ret, \ | ||
178 | " GETB D1Ar1,[%1++]\n" \ | ||
179 | " SETB [%0],D1Ar1\n" \ | ||
180 | "10: SETB [%0++],D1Ar1\n", \ | ||
181 | "11: ADD %2,%2,#1\n", \ | ||
182 | " .long 10b,11b\n") | ||
183 | |||
184 | #define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
185 | __asm_copy_to_user_12x_cont(to, from, ret, \ | ||
186 | " GETD D1Ar1,[%1++]\n" \ | ||
187 | " SETD [%0],D1Ar1\n" \ | ||
188 | "8: SETD [%0++],D1Ar1\n" COPY, \ | ||
189 | "9: ADD %2,%2,#4\n" FIXUP, \ | ||
190 | " .long 8b,9b\n" TENTRY) | ||
191 | |||
192 | #define __asm_copy_to_user_16(to, from, ret) \ | ||
193 | __asm_copy_to_user_16x_cont(to, from, ret, "", "", "") | ||
194 | |||
195 | #define __asm_copy_to_user_8x64(to, from, ret) \ | ||
196 | asm volatile ( \ | ||
197 | " GETL D0Ar2,D1Ar1,[%1++]\n" \ | ||
198 | " SETL [%0],D0Ar2,D1Ar1\n" \ | ||
199 | "2: SETL [%0++],D0Ar2,D1Ar1\n" \ | ||
200 | "1:\n" \ | ||
201 | " .section .fixup,\"ax\"\n" \ | ||
202 | "3: ADD %2,%2,#8\n" \ | ||
203 | " MOVT D0Ar2,#HI(1b)\n" \ | ||
204 | " JUMP D0Ar2,#LO(1b)\n" \ | ||
205 | " .previous\n" \ | ||
206 | " .section __ex_table,\"a\"\n" \ | ||
207 | " .long 2b,3b\n" \ | ||
208 | " .previous\n" \ | ||
209 | : "=r" (to), "=r" (from), "=r" (ret) \ | ||
210 | : "0" (to), "1" (from), "2" (ret) \ | ||
211 | : "D1Ar1", "D0Ar2", "memory") | ||
212 | |||
213 | /* | ||
214 | * optimized copying loop using RAPF when 64 bit aligned | ||
215 | * | ||
216 | * n will be automatically decremented inside the loop | ||
217 | * ret will be left intact. if error occurs we will rewind | ||
218 | * so that the original non optimized code will fill up | ||
219 | * this value correctly. | ||
220 | * | ||
221 | * on fault: | ||
222 | * > n will hold total number of uncopied bytes | ||
223 | * | ||
223 | * > {'to','from'} will be rewound so that | ||
225 | * the non-optimized code will do the proper fix up | ||
226 | * | ||
227 | * DCACHE drops the cacheline which helps in reducing cache | ||
228 | * pollution. | ||
229 | * | ||
230 | * We introduce an extra SETL at the end of the loop to | ||
231 | * ensure we don't fall off the loop before we catch all | ||
232 | * errors. | ||
233 | * | ||
234 | * NOTICE: | ||
235 | * LSM_STEP in TXSTATUS must be cleared in fix up code. | ||
236 | * since we're using M{S,G}ETL, a fault might happen at | ||
237 | * any address in the middle of M{S,G}ETL causing | ||
238 | * the value of LSM_STEP to be incorrect which can | ||
239 | * cause subsequent use of M{S,G}ET{L,D} to go wrong. | ||
240 | * i.e. if LSM_STEP was 1 when a fault occurred, the | ||
241 | * next call to M{S,G}ET{L,D} will skip the first | ||
242 | * copy/get as it thinks that the first one has already | ||
243 | * been done. | ||
244 | * | ||
245 | */ | ||
246 | #define __asm_copy_user_64bit_rapf_loop( \ | ||
247 | to, from, ret, n, id, FIXUP) \ | ||
248 | asm volatile ( \ | ||
249 | ".balign 8\n" \ | ||
250 | "MOV RAPF, %1\n" \ | ||
251 | "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \ | ||
252 | "MOV D0Ar6, #0\n" \ | ||
253 | "LSR D1Ar5, %3, #6\n" \ | ||
254 | "SUB TXRPT, D1Ar5, #2\n" \ | ||
255 | "MOV RAPF, %1\n" \ | ||
256 | "$Lloop"id":\n" \ | ||
257 | "ADD RAPF, %1, #64\n" \ | ||
258 | "21:\n" \ | ||
259 | "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
260 | "22:\n" \ | ||
261 | "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
262 | "SUB %3, %3, #32\n" \ | ||
263 | "23:\n" \ | ||
264 | "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
265 | "24:\n" \ | ||
266 | "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
267 | "SUB %3, %3, #32\n" \ | ||
268 | "DCACHE [%1+#-64], D0Ar6\n" \ | ||
269 | "BR $Lloop"id"\n" \ | ||
270 | \ | ||
271 | "MOV RAPF, %1\n" \ | ||
272 | "25:\n" \ | ||
273 | "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
274 | "26:\n" \ | ||
275 | "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
276 | "SUB %3, %3, #32\n" \ | ||
277 | "27:\n" \ | ||
278 | "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
279 | "28:\n" \ | ||
280 | "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
281 | "SUB %0, %0, #8\n" \ | ||
282 | "29:\n" \ | ||
283 | "SETL [%0++], D0.7, D1.7\n" \ | ||
284 | "SUB %3, %3, #32\n" \ | ||
285 | "1:" \ | ||
286 | "DCACHE [%1+#-64], D0Ar6\n" \ | ||
287 | "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \ | ||
288 | "GETL D0FrT, D1RtP, [A0StP+#-32]\n" \ | ||
289 | "GETL D0.5, D1.5, [A0StP+#-24]\n" \ | ||
290 | "GETL D0.6, D1.6, [A0StP+#-16]\n" \ | ||
291 | "GETL D0.7, D1.7, [A0StP+#-8]\n" \ | ||
292 | "SUB A0StP, A0StP, #40\n" \ | ||
293 | " .section .fixup,\"ax\"\n" \ | ||
294 | "4:\n" \ | ||
295 | " ADD %0, %0, #8\n" \ | ||
296 | "3:\n" \ | ||
297 | " MOV D0Ar2, TXSTATUS\n" \ | ||
298 | " MOV D1Ar1, TXSTATUS\n" \ | ||
299 | " AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \ | ||
300 | " MOV TXSTATUS, D1Ar1\n" \ | ||
301 | FIXUP \ | ||
302 | " MOVT D0Ar2,#HI(1b)\n" \ | ||
303 | " JUMP D0Ar2,#LO(1b)\n" \ | ||
304 | " .previous\n" \ | ||
305 | " .section __ex_table,\"a\"\n" \ | ||
306 | " .long 21b,3b\n" \ | ||
307 | " .long 22b,3b\n" \ | ||
308 | " .long 23b,3b\n" \ | ||
309 | " .long 24b,3b\n" \ | ||
310 | " .long 25b,3b\n" \ | ||
311 | " .long 26b,3b\n" \ | ||
312 | " .long 27b,3b\n" \ | ||
313 | " .long 28b,3b\n" \ | ||
314 | " .long 29b,4b\n" \ | ||
315 | " .previous\n" \ | ||
316 | : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ | ||
317 | : "0" (to), "1" (from), "2" (ret), "3" (n) \ | ||
318 | : "D1Ar1", "D0Ar2", "memory") | ||
319 | |||
320 | /* rewind 'to' and 'from' pointers when a fault occurs | ||
321 | * | ||
322 | * Rationale: | ||
323 | * A fault always occurs on writing to user buffer. A fault | ||
324 | * is at a single address, so we need to rewind by only 8 | ||
325 | * bytes. | ||
326 | * Since we do a complete read from kernel buffer before | ||
327 | * writing, we need to rewind it also. The amount to be | ||
328 | * rewound equals the number of faulty writes in MSETL | ||
329 | * which is: [4 - (LSM_STEP-1)]*8 | ||
330 | * LSM_STEP is bits 10:8 in TXSTATUS which is already read | ||
331 | * and stored in D0Ar2 | ||
332 | * | ||
333 | * NOTE: If a fault occurs at the last operation in M{G,S}ETL | ||
334 | * LSM_STEP will be 0. ie: we do 4 writes in our case, if | ||
335 | * a fault happens at the 4th write, LSM_STEP will be 0 | ||
336 | * instead of 4. The code copes with that. | ||
337 | * | ||
338 | * n is updated by the number of successful writes, which is: | ||
339 | * n = n - (LSM_STEP-1)*8 | ||
340 | */ | ||
341 | #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ | ||
342 | __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ | ||
343 | "LSR D0Ar2, D0Ar2, #8\n" \ | ||
344 | "AND D0Ar2, D0Ar2, #0x7\n" \ | ||
345 | "ADDZ D0Ar2, D0Ar2, #4\n" \ | ||
346 | "SUB D0Ar2, D0Ar2, #1\n" \ | ||
347 | "MOV D1Ar1, #4\n" \ | ||
348 | "SUB D0Ar2, D1Ar1, D0Ar2\n" \ | ||
349 | "LSL D0Ar2, D0Ar2, #3\n" \ | ||
350 | "LSL D1Ar1, D1Ar1, #3\n" \ | ||
351 | "SUB D1Ar1, D1Ar1, D0Ar2\n" \ | ||
352 | "SUB %0, %0, #8\n" \ | ||
353 | "SUB %1, %1,D0Ar2\n" \ | ||
354 | "SUB %3, %3, D1Ar1\n") | ||
355 | |||
356 | /* | ||
357 | * optimized copying loop using RAPF when 32 bit aligned | ||
358 | * | ||
359 | * n will be automatically decremented inside the loop | ||
360 | * ret will be left intact. If an error occurs we will rewind | ||
361 | * so that the original non-optimized code will fill up | ||
362 | * this value correctly. | ||
363 | * | ||
364 | * on fault: | ||
365 | * > n will hold total number of uncopied bytes | ||
366 | * | ||
367 | * > {'to','from'} will be rewound so that | ||
368 | * the non-optimized code will do the proper fix up | ||
369 | * | ||
370 | * DCACHE drops the cacheline which helps in reducing cache | ||
371 | * pollution. | ||
372 | * | ||
373 | * We introduce an extra SETD at the end of the loop to | ||
374 | * ensure we don't fall off the loop before we catch all | ||
375 | * errors. | ||
376 | * | ||
377 | * NOTICE: | ||
378 | * LSM_STEP in TXSTATUS must be cleared in fix up code. | ||
379 | * since we're using M{S,G}ETL, a fault might happen at | ||
380 | * any address in the middle of M{S,G}ETL causing | ||
381 | * the value of LSM_STEP to be incorrect which can | ||
382 | * cause subsequent use of M{S,G}ET{L,D} to go wrong. | ||
383 | * i.e. if LSM_STEP was 1 when a fault occurred, the | ||
384 | * next call to M{S,G}ET{L,D} will skip the first | ||
385 | * copy/get as it thinks that the first one has already | ||
386 | * been done. | ||
387 | * | ||
388 | */ | ||
389 | #define __asm_copy_user_32bit_rapf_loop( \ | ||
390 | to, from, ret, n, id, FIXUP) \ | ||
391 | asm volatile ( \ | ||
392 | ".balign 8\n" \ | ||
393 | "MOV RAPF, %1\n" \ | ||
394 | "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \ | ||
395 | "MOV D0Ar6, #0\n" \ | ||
396 | "LSR D1Ar5, %3, #6\n" \ | ||
397 | "SUB TXRPT, D1Ar5, #2\n" \ | ||
398 | "MOV RAPF, %1\n" \ | ||
399 | "$Lloop"id":\n" \ | ||
400 | "ADD RAPF, %1, #64\n" \ | ||
401 | "21:\n" \ | ||
402 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
403 | "22:\n" \ | ||
404 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
405 | "SUB %3, %3, #16\n" \ | ||
406 | "23:\n" \ | ||
407 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
408 | "24:\n" \ | ||
409 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
410 | "SUB %3, %3, #16\n" \ | ||
411 | "25:\n" \ | ||
412 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
413 | "26:\n" \ | ||
414 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
415 | "SUB %3, %3, #16\n" \ | ||
416 | "27:\n" \ | ||
417 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
418 | "28:\n" \ | ||
419 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
420 | "SUB %3, %3, #16\n" \ | ||
421 | "DCACHE [%1+#-64], D0Ar6\n" \ | ||
422 | "BR $Lloop"id"\n" \ | ||
423 | \ | ||
424 | "MOV RAPF, %1\n" \ | ||
425 | "29:\n" \ | ||
426 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
427 | "30:\n" \ | ||
428 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
429 | "SUB %3, %3, #16\n" \ | ||
430 | "31:\n" \ | ||
431 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
432 | "32:\n" \ | ||
433 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
434 | "SUB %3, %3, #16\n" \ | ||
435 | "33:\n" \ | ||
436 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
437 | "34:\n" \ | ||
438 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
439 | "SUB %3, %3, #16\n" \ | ||
440 | "35:\n" \ | ||
441 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
442 | "36:\n" \ | ||
443 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
444 | "SUB %0, %0, #4\n" \ | ||
445 | "37:\n" \ | ||
446 | "SETD [%0++], D0.7\n" \ | ||
447 | "SUB %3, %3, #16\n" \ | ||
448 | "1:" \ | ||
449 | "DCACHE [%1+#-64], D0Ar6\n" \ | ||
450 | "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \ | ||
451 | "GETL D0FrT, D1RtP, [A0StP+#-32]\n" \ | ||
452 | "GETL D0.5, D1.5, [A0StP+#-24]\n" \ | ||
453 | "GETL D0.6, D1.6, [A0StP+#-16]\n" \ | ||
454 | "GETL D0.7, D1.7, [A0StP+#-8]\n" \ | ||
455 | "SUB A0StP, A0StP, #40\n" \ | ||
456 | " .section .fixup,\"ax\"\n" \ | ||
457 | "4:\n" \ | ||
458 | " ADD %0, %0, #4\n" \ | ||
459 | "3:\n" \ | ||
460 | " MOV D0Ar2, TXSTATUS\n" \ | ||
461 | " MOV D1Ar1, TXSTATUS\n" \ | ||
462 | " AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \ | ||
463 | " MOV TXSTATUS, D1Ar1\n" \ | ||
464 | FIXUP \ | ||
465 | " MOVT D0Ar2,#HI(1b)\n" \ | ||
466 | " JUMP D0Ar2,#LO(1b)\n" \ | ||
467 | " .previous\n" \ | ||
468 | " .section __ex_table,\"a\"\n" \ | ||
469 | " .long 21b,3b\n" \ | ||
470 | " .long 22b,3b\n" \ | ||
471 | " .long 23b,3b\n" \ | ||
472 | " .long 24b,3b\n" \ | ||
473 | " .long 25b,3b\n" \ | ||
474 | " .long 26b,3b\n" \ | ||
475 | " .long 27b,3b\n" \ | ||
476 | " .long 28b,3b\n" \ | ||
477 | " .long 29b,3b\n" \ | ||
478 | " .long 30b,3b\n" \ | ||
479 | " .long 31b,3b\n" \ | ||
480 | " .long 32b,3b\n" \ | ||
481 | " .long 33b,3b\n" \ | ||
482 | " .long 34b,3b\n" \ | ||
483 | " .long 35b,3b\n" \ | ||
484 | " .long 36b,3b\n" \ | ||
485 | " .long 37b,4b\n" \ | ||
486 | " .previous\n" \ | ||
487 | : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ | ||
488 | : "0" (to), "1" (from), "2" (ret), "3" (n) \ | ||
489 | : "D1Ar1", "D0Ar2", "memory") | ||
490 | |||
491 | /* rewind 'to' and 'from' pointers when a fault occurs | ||
492 | * | ||
493 | * Rationale: | ||
494 | * A fault always occurs on writing to user buffer. A fault | ||
495 | * is at a single address, so we need to rewind by only 4 | ||
496 | * bytes. | ||
497 | * Since we do a complete read from kernel buffer before | ||
498 | * writing, we need to rewind it also. The amount to be | ||
499 | * rewound equals the number of faulty writes in MSETD | ||
500 | * which is: [4 - (LSM_STEP-1)]*4 | ||
501 | * LSM_STEP is bits 10:8 in TXSTATUS which is already read | ||
502 | * and stored in D0Ar2 | ||
503 | * | ||
504 | * NOTE: If a fault occurs at the last operation in M{G,S}ETL | ||
505 | * LSM_STEP will be 0. ie: we do 4 writes in our case, if | ||
506 | * a fault happens at the 4th write, LSM_STEP will be 0 | ||
507 | * instead of 4. The code copes with that. | ||
508 | * | ||
509 | * n is updated by the number of successful writes, which is: | ||
510 | * n = n - (LSM_STEP-1)*4 | ||
511 | */ | ||
512 | #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ | ||
513 | __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ | ||
514 | "LSR D0Ar2, D0Ar2, #8\n" \ | ||
515 | "AND D0Ar2, D0Ar2, #0x7\n" \ | ||
516 | "ADDZ D0Ar2, D0Ar2, #4\n" \ | ||
517 | "SUB D0Ar2, D0Ar2, #1\n" \ | ||
518 | "MOV D1Ar1, #4\n" \ | ||
519 | "SUB D0Ar2, D1Ar1, D0Ar2\n" \ | ||
520 | "LSL D0Ar2, D0Ar2, #2\n" \ | ||
521 | "LSL D1Ar1, D1Ar1, #2\n" \ | ||
522 | "SUB D1Ar1, D1Ar1, D0Ar2\n" \ | ||
523 | "SUB %0, %0, #4\n" \ | ||
524 | "SUB %1, %1, D0Ar2\n" \ | ||
525 | "SUB %3, %3, D1Ar1\n") | ||
526 | |||
527 | unsigned long __copy_user(void __user *pdst, const void *psrc, | ||
528 | unsigned long n) | ||
529 | { | ||
530 | register char __user *dst asm ("A0.2") = pdst; | ||
531 | register const char *src asm ("A1.2") = psrc; | ||
532 | unsigned long retn = 0; | ||
533 | |||
534 | if (n == 0) | ||
535 | return 0; | ||
536 | |||
537 | if ((unsigned long) src & 1) { | ||
538 | __asm_copy_to_user_1(dst, src, retn); | ||
539 | n--; | ||
540 | } | ||
541 | if ((unsigned long) dst & 1) { | ||
542 | /* Worst case - byte copy */ | ||
543 | while (n > 0) { | ||
544 | __asm_copy_to_user_1(dst, src, retn); | ||
545 | n--; | ||
546 | } | ||
547 | } | ||
548 | if (((unsigned long) src & 2) && n >= 2) { | ||
549 | __asm_copy_to_user_2(dst, src, retn); | ||
550 | n -= 2; | ||
551 | } | ||
552 | if ((unsigned long) dst & 2) { | ||
553 | /* Second worst case - word copy */ | ||
554 | while (n >= 2) { | ||
555 | __asm_copy_to_user_2(dst, src, retn); | ||
556 | n -= 2; | ||
557 | } | ||
558 | } | ||
559 | |||
560 | #ifdef USE_RAPF | ||
561 | /* 64 bit copy loop */ | ||
562 | if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) { | ||
563 | if (n >= RAPF_MIN_BUF_SIZE) { | ||
564 | /* copy user using 64 bit rapf copy */ | ||
565 | __asm_copy_to_user_64bit_rapf_loop(dst, src, retn, | ||
566 | n, "64cu"); | ||
567 | } | ||
568 | while (n >= 8) { | ||
569 | __asm_copy_to_user_8x64(dst, src, retn); | ||
570 | n -= 8; | ||
571 | } | ||
572 | } | ||
573 | if (n >= RAPF_MIN_BUF_SIZE) { | ||
574 | /* copy user using 32 bit rapf copy */ | ||
575 | __asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu"); | ||
576 | } | ||
577 | #else | ||
578 | /* 64 bit copy loop */ | ||
579 | if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) { | ||
580 | while (n >= 8) { | ||
581 | __asm_copy_to_user_8x64(dst, src, retn); | ||
582 | n -= 8; | ||
583 | } | ||
584 | } | ||
585 | #endif | ||
586 | |||
587 | while (n >= 16) { | ||
588 | __asm_copy_to_user_16(dst, src, retn); | ||
589 | n -= 16; | ||
590 | } | ||
591 | |||
592 | while (n >= 4) { | ||
593 | __asm_copy_to_user_4(dst, src, retn); | ||
594 | n -= 4; | ||
595 | } | ||
596 | |||
597 | switch (n) { | ||
598 | case 0: | ||
599 | break; | ||
600 | case 1: | ||
601 | __asm_copy_to_user_1(dst, src, retn); | ||
602 | break; | ||
603 | case 2: | ||
604 | __asm_copy_to_user_2(dst, src, retn); | ||
605 | break; | ||
606 | case 3: | ||
607 | __asm_copy_to_user_3(dst, src, retn); | ||
608 | break; | ||
609 | } | ||
610 | |||
611 | return retn; | ||
612 | } | ||
613 | |||
614 | #define __asm_copy_from_user_1(to, from, ret) \ | ||
615 | __asm_copy_user_cont(to, from, ret, \ | ||
616 | " GETB D1Ar1,[%1++]\n" \ | ||
617 | "2: SETB [%0++],D1Ar1\n", \ | ||
618 | "3: ADD %2,%2,#1\n" \ | ||
619 | " SETB [%0++],D1Ar1\n", \ | ||
620 | " .long 2b,3b\n") | ||
621 | |||
622 | #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
623 | __asm_copy_user_cont(to, from, ret, \ | ||
624 | " GETW D1Ar1,[%1++]\n" \ | ||
625 | "2: SETW [%0++],D1Ar1\n" COPY, \ | ||
626 | "3: ADD %2,%2,#2\n" \ | ||
627 | " SETW [%0++],D1Ar1\n" FIXUP, \ | ||
628 | " .long 2b,3b\n" TENTRY) | ||
629 | |||
630 | #define __asm_copy_from_user_2(to, from, ret) \ | ||
631 | __asm_copy_from_user_2x_cont(to, from, ret, "", "", "") | ||
632 | |||
633 | #define __asm_copy_from_user_3(to, from, ret) \ | ||
634 | __asm_copy_from_user_2x_cont(to, from, ret, \ | ||
635 | " GETB D1Ar1,[%1++]\n" \ | ||
636 | "4: SETB [%0++],D1Ar1\n", \ | ||
637 | "5: ADD %2,%2,#1\n" \ | ||
638 | " SETB [%0++],D1Ar1\n", \ | ||
639 | " .long 4b,5b\n") | ||
640 | |||
641 | #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
642 | __asm_copy_user_cont(to, from, ret, \ | ||
643 | " GETD D1Ar1,[%1++]\n" \ | ||
644 | "2: SETD [%0++],D1Ar1\n" COPY, \ | ||
645 | "3: ADD %2,%2,#4\n" \ | ||
646 | " SETD [%0++],D1Ar1\n" FIXUP, \ | ||
647 | " .long 2b,3b\n" TENTRY) | ||
648 | |||
649 | #define __asm_copy_from_user_4(to, from, ret) \ | ||
650 | __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") | ||
651 | |||
652 | #define __asm_copy_from_user_5(to, from, ret) \ | ||
653 | __asm_copy_from_user_4x_cont(to, from, ret, \ | ||
654 | " GETB D1Ar1,[%1++]\n" \ | ||
655 | "4: SETB [%0++],D1Ar1\n", \ | ||
656 | "5: ADD %2,%2,#1\n" \ | ||
657 | " SETB [%0++],D1Ar1\n", \ | ||
658 | " .long 4b,5b\n") | ||
659 | |||
660 | #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
661 | __asm_copy_from_user_4x_cont(to, from, ret, \ | ||
662 | " GETW D1Ar1,[%1++]\n" \ | ||
663 | "4: SETW [%0++],D1Ar1\n" COPY, \ | ||
664 | "5: ADD %2,%2,#2\n" \ | ||
665 | " SETW [%0++],D1Ar1\n" FIXUP, \ | ||
666 | " .long 4b,5b\n" TENTRY) | ||
667 | |||
668 | #define __asm_copy_from_user_6(to, from, ret) \ | ||
669 | __asm_copy_from_user_6x_cont(to, from, ret, "", "", "") | ||
670 | |||
671 | #define __asm_copy_from_user_7(to, from, ret) \ | ||
672 | __asm_copy_from_user_6x_cont(to, from, ret, \ | ||
673 | " GETB D1Ar1,[%1++]\n" \ | ||
674 | "6: SETB [%0++],D1Ar1\n", \ | ||
675 | "7: ADD %2,%2,#1\n" \ | ||
676 | " SETB [%0++],D1Ar1\n", \ | ||
677 | " .long 6b,7b\n") | ||
678 | |||
679 | #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
680 | __asm_copy_from_user_4x_cont(to, from, ret, \ | ||
681 | " GETD D1Ar1,[%1++]\n" \ | ||
682 | "4: SETD [%0++],D1Ar1\n" COPY, \ | ||
683 | "5: ADD %2,%2,#4\n" \ | ||
684 | " SETD [%0++],D1Ar1\n" FIXUP, \ | ||
685 | " .long 4b,5b\n" TENTRY) | ||
686 | |||
687 | #define __asm_copy_from_user_8(to, from, ret) \ | ||
688 | __asm_copy_from_user_8x_cont(to, from, ret, "", "", "") | ||
689 | |||
690 | #define __asm_copy_from_user_9(to, from, ret) \ | ||
691 | __asm_copy_from_user_8x_cont(to, from, ret, \ | ||
692 | " GETB D1Ar1,[%1++]\n" \ | ||
693 | "6: SETB [%0++],D1Ar1\n", \ | ||
694 | "7: ADD %2,%2,#1\n" \ | ||
695 | " SETB [%0++],D1Ar1\n", \ | ||
696 | " .long 6b,7b\n") | ||
697 | |||
698 | #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
699 | __asm_copy_from_user_8x_cont(to, from, ret, \ | ||
700 | " GETW D1Ar1,[%1++]\n" \ | ||
701 | "6: SETW [%0++],D1Ar1\n" COPY, \ | ||
702 | "7: ADD %2,%2,#2\n" \ | ||
703 | " SETW [%0++],D1Ar1\n" FIXUP, \ | ||
704 | " .long 6b,7b\n" TENTRY) | ||
705 | |||
706 | #define __asm_copy_from_user_10(to, from, ret) \ | ||
707 | __asm_copy_from_user_10x_cont(to, from, ret, "", "", "") | ||
708 | |||
709 | #define __asm_copy_from_user_11(to, from, ret) \ | ||
710 | __asm_copy_from_user_10x_cont(to, from, ret, \ | ||
711 | " GETB D1Ar1,[%1++]\n" \ | ||
712 | "8: SETB [%0++],D1Ar1\n", \ | ||
713 | "9: ADD %2,%2,#1\n" \ | ||
714 | " SETB [%0++],D1Ar1\n", \ | ||
715 | " .long 8b,9b\n") | ||
716 | |||
717 | #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
718 | __asm_copy_from_user_8x_cont(to, from, ret, \ | ||
719 | " GETD D1Ar1,[%1++]\n" \ | ||
720 | "6: SETD [%0++],D1Ar1\n" COPY, \ | ||
721 | "7: ADD %2,%2,#4\n" \ | ||
722 | " SETD [%0++],D1Ar1\n" FIXUP, \ | ||
723 | " .long 6b,7b\n" TENTRY) | ||
724 | |||
725 | #define __asm_copy_from_user_12(to, from, ret) \ | ||
726 | __asm_copy_from_user_12x_cont(to, from, ret, "", "", "") | ||
727 | |||
728 | #define __asm_copy_from_user_13(to, from, ret) \ | ||
729 | __asm_copy_from_user_12x_cont(to, from, ret, \ | ||
730 | " GETB D1Ar1,[%1++]\n" \ | ||
731 | "8: SETB [%0++],D1Ar1\n", \ | ||
732 | "9: ADD %2,%2,#1\n" \ | ||
733 | " SETB [%0++],D1Ar1\n", \ | ||
734 | " .long 8b,9b\n") | ||
735 | |||
736 | #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
737 | __asm_copy_from_user_12x_cont(to, from, ret, \ | ||
738 | " GETW D1Ar1,[%1++]\n" \ | ||
739 | "8: SETW [%0++],D1Ar1\n" COPY, \ | ||
740 | "9: ADD %2,%2,#2\n" \ | ||
741 | " SETW [%0++],D1Ar1\n" FIXUP, \ | ||
742 | " .long 8b,9b\n" TENTRY) | ||
743 | |||
744 | #define __asm_copy_from_user_14(to, from, ret) \ | ||
745 | __asm_copy_from_user_14x_cont(to, from, ret, "", "", "") | ||
746 | |||
747 | #define __asm_copy_from_user_15(to, from, ret) \ | ||
748 | __asm_copy_from_user_14x_cont(to, from, ret, \ | ||
749 | " GETB D1Ar1,[%1++]\n" \ | ||
750 | "10: SETB [%0++],D1Ar1\n", \ | ||
751 | "11: ADD %2,%2,#1\n" \ | ||
752 | " SETB [%0++],D1Ar1\n", \ | ||
753 | " .long 10b,11b\n") | ||
754 | |||
755 | #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
756 | __asm_copy_from_user_12x_cont(to, from, ret, \ | ||
757 | " GETD D1Ar1,[%1++]\n" \ | ||
758 | "8: SETD [%0++],D1Ar1\n" COPY, \ | ||
759 | "9: ADD %2,%2,#4\n" \ | ||
760 | " SETD [%0++],D1Ar1\n" FIXUP, \ | ||
761 | " .long 8b,9b\n" TENTRY) | ||
762 | |||
763 | #define __asm_copy_from_user_16(to, from, ret) \ | ||
764 | __asm_copy_from_user_16x_cont(to, from, ret, "", "", "") | ||
765 | |||
766 | #define __asm_copy_from_user_8x64(to, from, ret) \ | ||
767 | asm volatile ( \ | ||
768 | " GETL D0Ar2,D1Ar1,[%1++]\n" \ | ||
769 | "2: SETL [%0++],D0Ar2,D1Ar1\n" \ | ||
770 | "1:\n" \ | ||
771 | " .section .fixup,\"ax\"\n" \ | ||
772 | " MOV D1Ar1,#0\n" \ | ||
773 | " MOV D0Ar2,#0\n" \ | ||
774 | "3: ADD %2,%2,#8\n" \ | ||
775 | " SETL [%0++],D0Ar2,D1Ar1\n" \ | ||
776 | " MOVT D0Ar2,#HI(1b)\n" \ | ||
777 | " JUMP D0Ar2,#LO(1b)\n" \ | ||
778 | " .previous\n" \ | ||
779 | " .section __ex_table,\"a\"\n" \ | ||
780 | " .long 2b,3b\n" \ | ||
781 | " .previous\n" \ | ||
782 | : "=a" (to), "=r" (from), "=r" (ret) \ | ||
783 | : "0" (to), "1" (from), "2" (ret) \ | ||
784 | : "D1Ar1", "D0Ar2", "memory") | ||
785 | |||
786 | /* rewind 'from' pointer when a fault occurs | ||
787 | * | ||
788 | * Rationale: | ||
789 | * A fault occurs while reading from user buffer, which is the | ||
790 | * source. Since the fault is at a single address, we only | ||
791 | * need to rewind by 8 bytes. | ||
792 | * Since we don't write to kernel buffer until we read first, | ||
793 | * the kernel buffer is at the right state and needn't be | ||
794 | * corrected. | ||
795 | */ | ||
796 | #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ | ||
797 | __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ | ||
798 | "SUB %1, %1, #8\n") | ||
799 | |||
800 | /* rewind 'from' pointer when a fault occurs | ||
801 | * | ||
802 | * Rationale: | ||
803 | * A fault occurs while reading from user buffer, which is the | ||
804 | * source. Since the fault is at a single address, we only | ||
805 | * need to rewind by 4 bytes. | ||
806 | * Since we don't write to kernel buffer until we read first, | ||
807 | * the kernel buffer is at the right state and needn't be | ||
808 | * corrected. | ||
809 | */ | ||
810 | #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ | ||
811 | __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ | ||
812 | "SUB %1, %1, #4\n") | ||
813 | |||
814 | |||
815 | /* Copy from user to kernel, zeroing the bytes that were inaccessible in | ||
816 | userland. The return-value is the number of bytes that were | ||
817 | inaccessible. */ | ||
818 | unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, | ||
819 | unsigned long n) | ||
820 | { | ||
821 | register char *dst asm ("A0.2") = pdst; | ||
822 | register const char __user *src asm ("A1.2") = psrc; | ||
823 | unsigned long retn = 0; | ||
824 | |||
825 | if (n == 0) | ||
826 | return 0; | ||
827 | |||
828 | if ((unsigned long) src & 1) { | ||
829 | __asm_copy_from_user_1(dst, src, retn); | ||
830 | n--; | ||
831 | } | ||
832 | if ((unsigned long) dst & 1) { | ||
833 | /* Worst case - byte copy */ | ||
834 | while (n > 0) { | ||
835 | __asm_copy_from_user_1(dst, src, retn); | ||
836 | n--; | ||
837 | if (retn) | ||
838 | goto copy_exception_bytes; | ||
839 | } | ||
840 | } | ||
841 | if (((unsigned long) src & 2) && n >= 2) { | ||
842 | __asm_copy_from_user_2(dst, src, retn); | ||
843 | n -= 2; | ||
844 | } | ||
845 | if ((unsigned long) dst & 2) { | ||
846 | /* Second worst case - word copy */ | ||
847 | while (n >= 2) { | ||
848 | __asm_copy_from_user_2(dst, src, retn); | ||
849 | n -= 2; | ||
850 | if (retn) | ||
851 | goto copy_exception_bytes; | ||
852 | } | ||
853 | } | ||
854 | |||
855 | /* We only need one check after the unalignment-adjustments, | ||
856 | because if both adjustments were done, either both or | ||
857 | neither reference had an exception. */ | ||
858 | if (retn != 0) | ||
859 | goto copy_exception_bytes; | ||
860 | |||
861 | #ifdef USE_RAPF | ||
862 | /* 64 bit copy loop */ | ||
863 | if (!(((unsigned long) src | (unsigned long) dst) & 7)) { | ||
864 | if (n >= RAPF_MIN_BUF_SIZE) { | ||
865 | /* Copy using fast 64bit rapf */ | ||
866 | __asm_copy_from_user_64bit_rapf_loop(dst, src, retn, | ||
867 | n, "64cuz"); | ||
868 | } | ||
869 | while (n >= 8) { | ||
870 | __asm_copy_from_user_8x64(dst, src, retn); | ||
871 | n -= 8; | ||
872 | if (retn) | ||
873 | goto copy_exception_bytes; | ||
874 | } | ||
875 | } | ||
876 | |||
877 | if (n >= RAPF_MIN_BUF_SIZE) { | ||
878 | /* Copy using fast 32bit rapf */ | ||
879 | __asm_copy_from_user_32bit_rapf_loop(dst, src, retn, | ||
880 | n, "32cuz"); | ||
881 | } | ||
882 | #else | ||
883 | /* 64 bit copy loop */ | ||
884 | if (!(((unsigned long) src | (unsigned long) dst) & 7)) { | ||
885 | while (n >= 8) { | ||
886 | __asm_copy_from_user_8x64(dst, src, retn); | ||
887 | n -= 8; | ||
888 | if (retn) | ||
889 | goto copy_exception_bytes; | ||
890 | } | ||
891 | } | ||
892 | #endif | ||
893 | |||
894 | while (n >= 4) { | ||
895 | __asm_copy_from_user_4(dst, src, retn); | ||
896 | n -= 4; | ||
897 | |||
898 | if (retn) | ||
899 | goto copy_exception_bytes; | ||
900 | } | ||
901 | |||
902 | /* If we get here, there were no memory read faults. */ | ||
903 | switch (n) { | ||
904 | /* These copies are at least "naturally aligned" (so we don't | ||
905 | have to check each byte), due to the src alignment code. | ||
906 | The *_3 case *will* get the correct count for retn. */ | ||
907 | case 0: | ||
908 | /* This case deliberately left in (if you have doubts check the | ||
909 | generated assembly code). */ | ||
910 | break; | ||
911 | case 1: | ||
912 | __asm_copy_from_user_1(dst, src, retn); | ||
913 | break; | ||
914 | case 2: | ||
915 | __asm_copy_from_user_2(dst, src, retn); | ||
916 | break; | ||
917 | case 3: | ||
918 | __asm_copy_from_user_3(dst, src, retn); | ||
919 | break; | ||
920 | } | ||
921 | |||
922 | /* If we get here, retn correctly reflects the number of failing | ||
923 | bytes. */ | ||
924 | return retn; | ||
925 | |||
926 | copy_exception_bytes: | ||
927 | /* We already have "retn" bytes cleared, and need to clear the | ||
928 | remaining "n" bytes. A non-optimized simple byte-for-byte in-line | ||
929 | memset is preferred here, since this isn't speed-critical code and | ||
930 | we'd rather have this be a leaf function than call memset. */ | ||
931 | { | ||
932 | char *endp; | ||
933 | for (endp = dst + n; dst < endp; dst++) | ||
934 | *dst = 0; | ||
935 | } | ||
936 | |||
937 | return retn + n; | ||
938 | } | ||
939 | |||
940 | #define __asm_clear_8x64(to, ret) \ | ||
941 | asm volatile ( \ | ||
942 | " MOV D0Ar2,#0\n" \ | ||
943 | " MOV D1Ar1,#0\n" \ | ||
944 | " SETL [%0],D0Ar2,D1Ar1\n" \ | ||
945 | "2: SETL [%0++],D0Ar2,D1Ar1\n" \ | ||
946 | "1:\n" \ | ||
947 | " .section .fixup,\"ax\"\n" \ | ||
948 | "3: ADD %1,%1,#8\n" \ | ||
949 | " MOVT D0Ar2,#HI(1b)\n" \ | ||
950 | " JUMP D0Ar2,#LO(1b)\n" \ | ||
951 | " .previous\n" \ | ||
952 | " .section __ex_table,\"a\"\n" \ | ||
953 | " .long 2b,3b\n" \ | ||
954 | " .previous\n" \ | ||
955 | : "=r" (to), "=r" (ret) \ | ||
956 | : "0" (to), "1" (ret) \ | ||
957 | : "D1Ar1", "D0Ar2", "memory") | ||
958 | |||
959 | /* Zero userspace. */ | ||
960 | |||
961 | #define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \ | ||
962 | asm volatile ( \ | ||
963 | " MOV D1Ar1,#0\n" \ | ||
964 | CLEAR \ | ||
965 | "1:\n" \ | ||
966 | " .section .fixup,\"ax\"\n" \ | ||
967 | FIXUP \ | ||
968 | " MOVT D1Ar1,#HI(1b)\n" \ | ||
969 | " JUMP D1Ar1,#LO(1b)\n" \ | ||
970 | " .previous\n" \ | ||
971 | " .section __ex_table,\"a\"\n" \ | ||
972 | TENTRY \ | ||
973 | " .previous" \ | ||
974 | : "=r" (to), "=r" (ret) \ | ||
975 | : "0" (to), "1" (ret) \ | ||
976 | : "D1Ar1", "memory") | ||
977 | |||
978 | #define __asm_clear_1(to, ret) \ | ||
979 | __asm_clear(to, ret, \ | ||
980 | " SETB [%0],D1Ar1\n" \ | ||
981 | "2: SETB [%0++],D1Ar1\n", \ | ||
982 | "3: ADD %1,%1,#1\n", \ | ||
983 | " .long 2b,3b\n") | ||
984 | |||
985 | #define __asm_clear_2(to, ret) \ | ||
986 | __asm_clear(to, ret, \ | ||
987 | " SETW [%0],D1Ar1\n" \ | ||
988 | "2: SETW [%0++],D1Ar1\n", \ | ||
989 | "3: ADD %1,%1,#2\n", \ | ||
990 | " .long 2b,3b\n") | ||
991 | |||
992 | #define __asm_clear_3(to, ret) \ | ||
993 | __asm_clear(to, ret, \ | ||
994 | "2: SETW [%0++],D1Ar1\n" \ | ||
995 | " SETB [%0],D1Ar1\n" \ | ||
996 | "3: SETB [%0++],D1Ar1\n", \ | ||
997 | "4: ADD %1,%1,#2\n" \ | ||
998 | "5: ADD %1,%1,#1\n", \ | ||
999 | " .long 2b,4b\n" \ | ||
1000 | " .long 3b,5b\n") | ||
1001 | |||
1002 | #define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \ | ||
1003 | __asm_clear(to, ret, \ | ||
1004 | " SETD [%0],D1Ar1\n" \ | ||
1005 | "2: SETD [%0++],D1Ar1\n" CLEAR, \ | ||
1006 | "3: ADD %1,%1,#4\n" FIXUP, \ | ||
1007 | " .long 2b,3b\n" TENTRY) | ||
1008 | |||
1009 | #define __asm_clear_4(to, ret) \ | ||
1010 | __asm_clear_4x_cont(to, ret, "", "", "") | ||
1011 | |||
1012 | #define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \ | ||
1013 | __asm_clear_4x_cont(to, ret, \ | ||
1014 | " SETD [%0],D1Ar1\n" \ | ||
1015 | "4: SETD [%0++],D1Ar1\n" CLEAR, \ | ||
1016 | "5: ADD %1,%1,#4\n" FIXUP, \ | ||
1017 | " .long 4b,5b\n" TENTRY) | ||
1018 | |||
1019 | #define __asm_clear_8(to, ret) \ | ||
1020 | __asm_clear_8x_cont(to, ret, "", "", "") | ||
1021 | |||
1022 | #define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \ | ||
1023 | __asm_clear_8x_cont(to, ret, \ | ||
1024 | " SETD [%0],D1Ar1\n" \ | ||
1025 | "6: SETD [%0++],D1Ar1\n" CLEAR, \ | ||
1026 | "7: ADD %1,%1,#4\n" FIXUP, \ | ||
1027 | " .long 6b,7b\n" TENTRY) | ||
1028 | |||
1029 | #define __asm_clear_12(to, ret) \ | ||
1030 | __asm_clear_12x_cont(to, ret, "", "", "") | ||
1031 | |||
1032 | #define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \ | ||
1033 | __asm_clear_12x_cont(to, ret, \ | ||
1034 | " SETD [%0],D1Ar1\n" \ | ||
1035 | "8: SETD [%0++],D1Ar1\n" CLEAR, \ | ||
1036 | "9: ADD %1,%1,#4\n" FIXUP, \ | ||
1037 | " .long 8b,9b\n" TENTRY) | ||
1038 | |||
1039 | #define __asm_clear_16(to, ret) \ | ||
1040 | __asm_clear_16x_cont(to, ret, "", "", "") | ||
1041 | |||
1042 | unsigned long __do_clear_user(void __user *pto, unsigned long pn) | ||
1043 | { | ||
1044 | register char __user *dst asm ("D0Re0") = pto; | ||
1045 | register unsigned long n asm ("D1Re0") = pn; | ||
1046 | register unsigned long retn asm ("D0Ar6") = 0; | ||
1047 | |||
1048 | if ((unsigned long) dst & 1) { | ||
1049 | __asm_clear_1(dst, retn); | ||
1050 | n--; | ||
1051 | } | ||
1052 | |||
1053 | if ((unsigned long) dst & 2) { | ||
1054 | __asm_clear_2(dst, retn); | ||
1055 | n -= 2; | ||
1056 | } | ||
1057 | |||
1058 | /* 64 bit copy loop */ | ||
1059 | if (!((__force unsigned long) dst & 7)) { | ||
1060 | while (n >= 8) { | ||
1061 | __asm_clear_8x64(dst, retn); | ||
1062 | n -= 8; | ||
1063 | } | ||
1064 | } | ||
1065 | |||
1066 | while (n >= 16) { | ||
1067 | __asm_clear_16(dst, retn); | ||
1068 | n -= 16; | ||
1069 | } | ||
1070 | |||
1071 | while (n >= 4) { | ||
1072 | __asm_clear_4(dst, retn); | ||
1073 | n -= 4; | ||
1074 | } | ||
1075 | |||
1076 | switch (n) { | ||
1077 | case 0: | ||
1078 | break; | ||
1079 | case 1: | ||
1080 | __asm_clear_1(dst, retn); | ||
1081 | break; | ||
1082 | case 2: | ||
1083 | __asm_clear_2(dst, retn); | ||
1084 | break; | ||
1085 | case 3: | ||
1086 | __asm_clear_3(dst, retn); | ||
1087 | break; | ||
1088 | } | ||
1089 | |||
1090 | return retn; | ||
1091 | } | ||
1092 | |||
1093 | unsigned char __get_user_asm_b(const void __user *addr, long *err) | ||
1094 | { | ||
1095 | register unsigned char x asm ("D0Re0") = 0; | ||
1096 | asm volatile ( | ||
1097 | " GETB %0,[%2]\n" | ||
1098 | "1:\n" | ||
1099 | " GETB %0,[%2]\n" | ||
1100 | "2:\n" | ||
1101 | " .section .fixup,\"ax\"\n" | ||
1102 | "3: MOV D0FrT,%3\n" | ||
1103 | " SETD [%1],D0FrT\n" | ||
1104 | " MOVT D0FrT,#HI(2b)\n" | ||
1105 | " JUMP D0FrT,#LO(2b)\n" | ||
1106 | " .previous\n" | ||
1107 | " .section __ex_table,\"a\"\n" | ||
1108 | " .long 1b,3b\n" | ||
1109 | " .previous\n" | ||
1110 | : "=r" (x) | ||
1111 | : "r" (err), "r" (addr), "P" (-EFAULT) | ||
1112 | : "D0FrT"); | ||
1113 | return x; | ||
1114 | } | ||
1115 | |||
1116 | unsigned short __get_user_asm_w(const void __user *addr, long *err) | ||
1117 | { | ||
1118 | register unsigned short x asm ("D0Re0") = 0; | ||
1119 | asm volatile ( | ||
1120 | " GETW %0,[%2]\n" | ||
1121 | "1:\n" | ||
1122 | " GETW %0,[%2]\n" | ||
1123 | "2:\n" | ||
1124 | " .section .fixup,\"ax\"\n" | ||
1125 | "3: MOV D0FrT,%3\n" | ||
1126 | " SETD [%1],D0FrT\n" | ||
1127 | " MOVT D0FrT,#HI(2b)\n" | ||
1128 | " JUMP D0FrT,#LO(2b)\n" | ||
1129 | " .previous\n" | ||
1130 | " .section __ex_table,\"a\"\n" | ||
1131 | " .long 1b,3b\n" | ||
1132 | " .previous\n" | ||
1133 | : "=r" (x) | ||
1134 | : "r" (err), "r" (addr), "P" (-EFAULT) | ||
1135 | : "D0FrT"); | ||
1136 | return x; | ||
1137 | } | ||
1138 | |||
1139 | unsigned int __get_user_asm_d(const void __user *addr, long *err) | ||
1140 | { | ||
1141 | register unsigned int x asm ("D0Re0") = 0; | ||
1142 | asm volatile ( | ||
1143 | " GETD %0,[%2]\n" | ||
1144 | "1:\n" | ||
1145 | " GETD %0,[%2]\n" | ||
1146 | "2:\n" | ||
1147 | " .section .fixup,\"ax\"\n" | ||
1148 | "3: MOV D0FrT,%3\n" | ||
1149 | " SETD [%1],D0FrT\n" | ||
1150 | " MOVT D0FrT,#HI(2b)\n" | ||
1151 | " JUMP D0FrT,#LO(2b)\n" | ||
1152 | " .previous\n" | ||
1153 | " .section __ex_table,\"a\"\n" | ||
1154 | " .long 1b,3b\n" | ||
1155 | " .previous\n" | ||
1156 | : "=r" (x) | ||
1157 | : "r" (err), "r" (addr), "P" (-EFAULT) | ||
1158 | : "D0FrT"); | ||
1159 | return x; | ||
1160 | } | ||
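/*
 * Illustrative sketch only: how a size dispatcher (for example in the arch's
 * uaccess.h) might select between the helpers above.  The macro name and its
 * exact shape are assumptions; only the __get_user_asm_*() helpers themselves
 * come from this file, and no 64-bit get helper is defined here, so other
 * sizes simply fail in this sketch.
 */
#define get_user_sketch(x, ptr)						\
({									\
	long __gu_err = 0;						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		(x) = __get_user_asm_b((ptr), &__gu_err);		\
		break;							\
	case 2:								\
		(x) = __get_user_asm_w((ptr), &__gu_err);		\
		break;							\
	case 4:								\
		(x) = __get_user_asm_d((ptr), &__gu_err);		\
		break;							\
	default:							\
		(x) = 0;						\
		__gu_err = -EFAULT;					\
		break;							\
	}								\
	__gu_err;							\
})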
1161 | |||
1162 | long __put_user_asm_b(unsigned int x, void __user *addr) | ||
1163 | { | ||
1164 | register unsigned int err asm ("D0Re0") = 0; | ||
1165 | asm volatile ( | ||
1166 | " MOV %0,#0\n" | ||
1167 | " SETB [%2],%1\n" | ||
1168 | "1:\n" | ||
1169 | " SETB [%2],%1\n" | ||
1170 | "2:\n" | ||
1171 | ".section .fixup,\"ax\"\n" | ||
1172 | "3: MOV %0,%3\n" | ||
1173 | " MOVT D0FrT,#HI(2b)\n" | ||
1174 | " JUMP D0FrT,#LO(2b)\n" | ||
1175 | ".previous\n" | ||
1176 | ".section __ex_table,\"a\"\n" | ||
1177 | " .long 1b,3b\n" | ||
1178 | ".previous" | ||
1179 | : "=r"(err) | ||
1180 | : "d" (x), "a" (addr), "P"(-EFAULT) | ||
1181 | : "D0FrT"); | ||
1182 | return err; | ||
1183 | } | ||
1184 | |||
1185 | long __put_user_asm_w(unsigned int x, void __user *addr) | ||
1186 | { | ||
1187 | register unsigned int err asm ("D0Re0") = 0; | ||
1188 | asm volatile ( | ||
1189 | " MOV %0,#0\n" | ||
1190 | " SETW [%2],%1\n" | ||
1191 | "1:\n" | ||
1192 | " SETW [%2],%1\n" | ||
1193 | "2:\n" | ||
1194 | ".section .fixup,\"ax\"\n" | ||
1195 | "3: MOV %0,%3\n" | ||
1196 | " MOVT D0FrT,#HI(2b)\n" | ||
1197 | " JUMP D0FrT,#LO(2b)\n" | ||
1198 | ".previous\n" | ||
1199 | ".section __ex_table,\"a\"\n" | ||
1200 | " .long 1b,3b\n" | ||
1201 | ".previous" | ||
1202 | : "=r"(err) | ||
1203 | : "d" (x), "a" (addr), "P"(-EFAULT) | ||
1204 | : "D0FrT"); | ||
1205 | return err; | ||
1206 | } | ||
1207 | |||
1208 | long __put_user_asm_d(unsigned int x, void __user *addr) | ||
1209 | { | ||
1210 | register unsigned int err asm ("D0Re0") = 0; | ||
1211 | asm volatile ( | ||
1212 | " MOV %0,#0\n" | ||
1213 | " SETD [%2],%1\n" | ||
1214 | "1:\n" | ||
1215 | " SETD [%2],%1\n" | ||
1216 | "2:\n" | ||
1217 | ".section .fixup,\"ax\"\n" | ||
1218 | "3: MOV %0,%3\n" | ||
1219 | " MOVT D0FrT,#HI(2b)\n" | ||
1220 | " JUMP D0FrT,#LO(2b)\n" | ||
1221 | ".previous\n" | ||
1222 | ".section __ex_table,\"a\"\n" | ||
1223 | " .long 1b,3b\n" | ||
1224 | ".previous" | ||
1225 | : "=r"(err) | ||
1226 | : "d" (x), "a" (addr), "P"(-EFAULT) | ||
1227 | : "D0FrT"); | ||
1228 | return err; | ||
1229 | } | ||
1230 | |||
1231 | long __put_user_asm_l(unsigned long long x, void __user *addr) | ||
1232 | { | ||
1233 | register unsigned int err asm ("D0Re0") = 0; | ||
1234 | asm volatile ( | ||
1235 | " MOV %0,#0\n" | ||
1236 | " SETL [%2],%1,%t1\n" | ||
1237 | "1:\n" | ||
1238 | " SETL [%2],%1,%t1\n" | ||
1239 | "2:\n" | ||
1240 | ".section .fixup,\"ax\"\n" | ||
1241 | "3: MOV %0,%3\n" | ||
1242 | " MOVT D0FrT,#HI(2b)\n" | ||
1243 | " JUMP D0FrT,#LO(2b)\n" | ||
1244 | ".previous\n" | ||
1245 | ".section __ex_table,\"a\"\n" | ||
1246 | " .long 1b,3b\n" | ||
1247 | ".previous" | ||
1248 | : "=r"(err) | ||
1249 | : "d" (x), "a" (addr), "P"(-EFAULT) | ||
1250 | : "D0FrT"); | ||
1251 | return err; | ||
1252 | } | ||
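/*
 * All of the __get_user_asm_*() and __put_user_asm_*() helpers above share
 * the same fault-handling shape: the user access sits between local labels
 * 1 and 2, the fixup at label 3 (in .fixup) records -EFAULT (into *err for
 * the get helpers, into the returned value for the put helpers) and jumps
 * back to label 2, and the ".long 1b,3b" __ex_table entry is what makes the
 * fault handler resume at that fixup instead of oopsing when the access at
 * label 1 faults.
 */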
1253 | |||
1254 | long strnlen_user(const char __user *src, long count) | ||
1255 | { | ||
1256 | long res; | ||
1257 | |||
1258 | if (!access_ok(VERIFY_READ, src, 0)) | ||
1259 | return 0; | ||
1260 | |||
1261 | asm volatile (" MOV D0Ar4, %1\n" | ||
1262 | " MOV D0Ar6, %2\n" | ||
1263 | "0:\n" | ||
1264 | " SUBS D0FrT, D0Ar6, #0\n" | ||
1265 | " SUB D0Ar6, D0Ar6, #1\n" | ||
1266 | " BLE 2f\n" | ||
1267 | " GETB D0FrT, [D0Ar4+#1++]\n" | ||
1268 | "1:\n" | ||
1269 | " TST D0FrT, #255\n" | ||
1270 | " BNE 0b\n" | ||
1271 | "2:\n" | ||
1272 | " SUB %0, %2, D0Ar6\n" | ||
1273 | "3:\n" | ||
1274 | " .section .fixup,\"ax\"\n" | ||
1275 | "4:\n" | ||
1276 | " MOV %0, #0\n" | ||
1277 | " MOVT D0FrT,#HI(3b)\n" | ||
1278 | " JUMP D0FrT,#LO(3b)\n" | ||
1279 | " .previous\n" | ||
1280 | " .section __ex_table,\"a\"\n" | ||
1281 | " .long 1b,4b\n" | ||
1282 | " .previous\n" | ||
1283 | : "=r" (res) | ||
1284 | : "r" (src), "r" (count) | ||
1285 | : "D0FrT", "D0Ar4", "D0Ar6", "cc"); | ||
1286 | |||
1287 | return res; | ||
1288 | } | ||
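/*
 * Return convention of strnlen_user() above, as computed by the asm: the
 * string length including the terminating NUL, a value greater than 'count'
 * if no NUL was found within 'count' bytes, and 0 if the range failed
 * access_ok() or the access faulted.
 */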
1289 | |||
1290 | long __strncpy_from_user(char *dst, const char __user *src, long count) | ||
1291 | { | ||
1292 | long res; | ||
1293 | |||
1294 | if (count == 0) | ||
1295 | return 0; | ||
1296 | |||
1297 | /* | ||
1298 | * Most ports use a simple byte-copy loop for this (a note dating | ||
1299 | * back to 2.4.0-test9), and so do we. | ||
1300 | * | ||
1301 | * This code is deduced from: | ||
1302 | * | ||
1303 | * char tmp2; | ||
1304 | * long tmp1, tmp3; | ||
1305 | * tmp1 = count; | ||
1306 | * while ((*dst++ = (tmp2 = *src++)) != 0 | ||
1307 | * && --tmp1) | ||
1308 | * ; | ||
1309 | * | ||
1310 | * res = count - tmp1; | ||
1311 | * | ||
1312 | * with tweaks. | ||
1313 | */ | ||
1314 | |||
1315 | asm volatile (" MOV %0,%3\n" | ||
1316 | "1:\n" | ||
1317 | " GETB D0FrT,[%2++]\n" | ||
1318 | "2:\n" | ||
1319 | " CMP D0FrT,#0\n" | ||
1320 | " SETB [%1++],D0FrT\n" | ||
1321 | " BEQ 3f\n" | ||
1322 | " SUBS %0,%0,#1\n" | ||
1323 | " BNZ 1b\n" | ||
1324 | "3:\n" | ||
1325 | " SUB %0,%3,%0\n" | ||
1326 | "4:\n" | ||
1327 | " .section .fixup,\"ax\"\n" | ||
1328 | "5:\n" | ||
1329 | " MOV %0,%7\n" | ||
1330 | " MOVT D0FrT,#HI(4b)\n" | ||
1331 | " JUMP D0FrT,#LO(4b)\n" | ||
1332 | " .previous\n" | ||
1333 | " .section __ex_table,\"a\"\n" | ||
1334 | " .long 2b,5b\n" | ||
1335 | " .previous" | ||
1336 | : "=r" (res), "=r" (dst), "=r" (src), "=r" (count) | ||
1337 | : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT) | ||
1338 | : "D0FrT", "memory", "cc"); | ||
1339 | |||
1340 | return res; | ||
1341 | } | ||
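A minimal usage sketch tying the two string helpers above together. It assumes the caller (or a non-underscored wrapper) has already validated the user pointer with access_ok(), which __strncpy_from_user() itself does not do; the function name, buffer size handling, and error codes are illustrative, not part of the patch.

/* Illustrative only: bound and copy a NUL-terminated user string. */
static long copy_name_from_user_sketch(char *kbuf, long kbuf_size,
				       const char __user *uname)
{
	long len = strnlen_user(uname, kbuf_size);

	if (len == 0)			/* faulted or not accessible */
		return -EFAULT;
	if (len > kbuf_size)		/* no NUL within kbuf_size bytes */
		return -ENAMETOOLONG;

	/* len includes the NUL, so at most kbuf_size bytes are written. */
	return __strncpy_from_user(kbuf, uname, len);
}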
diff --git a/arch/metag/mm/ioremap.c b/arch/metag/mm/ioremap.c new file mode 100644 index 000000000000..a136a435fdaa --- /dev/null +++ b/arch/metag/mm/ioremap.c | |||
@@ -0,0 +1,89 @@ | |||
1 | /* | ||
2 | * Re-map IO memory to kernel address space so that we can access it. | ||
3 | * Needed for memory-mapped I/O devices mapped outside our normal DRAM | ||
4 | * window (that is, all memory-mapped I/O devices). | ||
5 | * | ||
6 | * Copyright (C) 1995,1996 Linus Torvalds | ||
7 | * | ||
8 | * Meta port based on CRIS-port by Axis Communications AB | ||
9 | */ | ||
10 | |||
11 | #include <linux/vmalloc.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <linux/export.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/mm.h> | ||
16 | |||
17 | #include <asm/pgtable.h> | ||
18 | |||
19 | /* | ||
20 | * Remap an arbitrary physical address space into the kernel virtual | ||
21 | * address space. Needed when the kernel wants to access high addresses | ||
22 | * directly. | ||
23 | * | ||
24 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
25 | * have to convert them into an offset in a page-aligned mapping, but the | ||
26 | * caller shouldn't need to know that small detail. | ||
27 | */ | ||
28 | void __iomem *__ioremap(unsigned long phys_addr, size_t size, | ||
29 | unsigned long flags) | ||
30 | { | ||
31 | unsigned long addr; | ||
32 | struct vm_struct *area; | ||
33 | unsigned long offset, last_addr; | ||
34 | pgprot_t prot; | ||
35 | |||
36 | /* Don't allow wraparound or zero size */ | ||
37 | last_addr = phys_addr + size - 1; | ||
38 | if (!size || last_addr < phys_addr) | ||
39 | return NULL; | ||
40 | |||
41 | /* Custom region addresses are accessible and uncached by default. */ | ||
42 | if (phys_addr >= LINSYSCUSTOM_BASE && | ||
43 | phys_addr < (LINSYSCUSTOM_BASE + LINSYSCUSTOM_LIMIT)) | ||
44 | return (__force void __iomem *) phys_addr; | ||
45 | |||
46 | /* | ||
47 | * Mappings have to be page-aligned | ||
48 | */ | ||
49 | offset = phys_addr & ~PAGE_MASK; | ||
50 | phys_addr &= PAGE_MASK; | ||
51 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | ||
52 | prot = __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY | | ||
53 | _PAGE_ACCESSED | _PAGE_KERNEL | _PAGE_CACHE_WIN0 | | ||
54 | flags); | ||
55 | |||
56 | /* | ||
57 | * Ok, go for it.. | ||
58 | */ | ||
59 | area = get_vm_area(size, VM_IOREMAP); | ||
60 | if (!area) | ||
61 | return NULL; | ||
62 | area->phys_addr = phys_addr; | ||
63 | addr = (unsigned long) area->addr; | ||
64 | if (ioremap_page_range(addr, addr + size, phys_addr, prot)) { | ||
65 | vunmap((void *) addr); | ||
66 | return NULL; | ||
67 | } | ||
68 | return (__force void __iomem *) (offset + (char *)addr); | ||
69 | } | ||
70 | EXPORT_SYMBOL(__ioremap); | ||
71 | |||
72 | void __iounmap(void __iomem *addr) | ||
73 | { | ||
74 | struct vm_struct *p; | ||
75 | |||
76 | if ((__force unsigned long)addr >= LINSYSCUSTOM_BASE && | ||
77 | (__force unsigned long)addr < (LINSYSCUSTOM_BASE + | ||
78 | LINSYSCUSTOM_LIMIT)) | ||
79 | return; | ||
80 | |||
81 | p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr)); | ||
82 | if (unlikely(!p)) { | ||
83 | pr_err("iounmap: bad address %p\n", addr); | ||
84 | return; | ||
85 | } | ||
86 | |||
87 | kfree(p); | ||
88 | } | ||
89 | EXPORT_SYMBOL(__iounmap); | ||
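A short, hedged usage sketch for the pair above. The device base, size and register offset are made-up placeholders, and the cache-policy flags argument is left at 0 here rather than asserting which _PAGE_CACHE_* value a real caller (or an ioremap() wrapper) would pass.

/* Illustrative only: map a hypothetical register block, write one register,
 * then unmap it again. */
static int sketch_map_and_poke(unsigned long dev_phys, size_t dev_size)
{
	void __iomem *regs = __ioremap(dev_phys, dev_size, 0);

	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);	/* hypothetical "enable" register */
	__iounmap(regs);

	return 0;
}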
diff --git a/arch/metag/mm/maccess.c b/arch/metag/mm/maccess.c new file mode 100644 index 000000000000..eba2cfc935b1 --- /dev/null +++ b/arch/metag/mm/maccess.c | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * safe read and write memory routines callable while atomic | ||
3 | * | ||
4 | * Copyright 2012 Imagination Technologies | ||
5 | */ | ||
6 | |||
7 | #include <linux/uaccess.h> | ||
8 | #include <asm/io.h> | ||
9 | |||
10 | /* | ||
11 | * The generic probe_kernel_write() uses the user copy code, which can split | ||
12 | * the writes if the source is unaligned and repeats writes to make exceptions | ||
13 | * precise. We override it here to avoid those things happening to memory-mapped | ||
14 | * I/O, where they could have undesired effects. | ||
15 | * Due to the use of the CACHERD instruction this only works on Meta2 onwards. | ||
16 | */ | ||
17 | #ifdef CONFIG_METAG_META21 | ||
18 | long probe_kernel_write(void *dst, const void *src, size_t size) | ||
19 | { | ||
20 | unsigned long ldst = (unsigned long)dst; | ||
21 | void __iomem *iodst = (void __iomem *)dst; | ||
22 | unsigned long lsrc = (unsigned long)src; | ||
23 | const u8 *psrc = (u8 *)src; | ||
24 | unsigned int pte, i; | ||
25 | u8 bounce[8] __aligned(8); | ||
26 | |||
27 | if (!size) | ||
28 | return 0; | ||
29 | |||
30 | /* Use the write combine bit to decide if the destination is MMIO. */ | ||
31 | pte = __builtin_meta2_cacherd(dst); | ||
32 | |||
33 | /* Check the mapping is valid and writeable. */ | ||
34 | if ((pte & (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT)) | ||
35 | != (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT)) | ||
36 | return -EFAULT; | ||
37 | |||
38 | /* Fall back to generic version for cases we're not interested in. */ | ||
39 | if (pte & MMCU_ENTRY_WRC_BIT || /* write combined memory */ | ||
40 | (ldst & (size - 1)) || /* destination unaligned */ | ||
41 | size > 8 || /* more than max write size */ | ||
42 | (size & (size - 1))) /* non power of 2 size */ | ||
43 | return __probe_kernel_write(dst, src, size); | ||
44 | |||
45 | /* If src is unaligned, copy to the aligned bounce buffer first. */ | ||
46 | if (lsrc & (size - 1)) { | ||
47 | for (i = 0; i < size; ++i) | ||
48 | bounce[i] = psrc[i]; | ||
49 | psrc = bounce; | ||
50 | } | ||
51 | |||
52 | switch (size) { | ||
53 | case 1: | ||
54 | writeb(*psrc, iodst); | ||
55 | break; | ||
56 | case 2: | ||
57 | writew(*(const u16 *)psrc, iodst); | ||
58 | break; | ||
59 | case 4: | ||
60 | writel(*(const u32 *)psrc, iodst); | ||
61 | break; | ||
62 | case 8: | ||
63 | writeq(*(const u64 *)psrc, iodst); | ||
64 | break; | ||
65 | } | ||
66 | return 0; | ||
67 | } | ||
68 | #endif | ||
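Finally, a hedged sketch of the case this override exists for: a single, naturally aligned write to a possibly-unmapped MMIO register from a context that must not oops. The function name and register pointer are illustrative; only probe_kernel_write() itself is provided by the code above (on Meta2, with the generic __probe_kernel_write() as the fallback otherwise).

/* Illustrative only: attempt a 32-bit MMIO write that may legitimately fail. */
static long sketch_safe_mmio_write(void __iomem *reg, u32 val)
{
	/* Assuming reg is 32-bit aligned: the size is a power of two <= 8,
	 * so the Meta2 override issues exactly one writel() when the mapping
	 * is valid and writeable, and returns -EFAULT otherwise. */
	return probe_kernel_write((__force void *)reg, &val, sizeof(val));
}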