author     Al Viro <viro@zeniv.linux.org.uk>    2017-03-22 13:02:41 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>    2017-03-28 18:24:07 -0400
commit     3a0e75adecc8da026a5befb2c5828d08c999373c (patch)
tree       3fa4044f96db80cb151bec030505e7df78da0983
parent     0b46a94e84c1323d54f8b82eacd3143400fb9521 (diff)
xtensa: get rid of zeroing, use RAW_COPY_USER
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--  arch/xtensa/Kconfig                  1
-rw-r--r--  arch/xtensa/include/asm/uaccess.h   54
-rw-r--r--  arch/xtensa/lib/usercopy.S         116
3 files changed, 57 insertions(+), 114 deletions(-)
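
Note: with ARCH_HAS_RAW_COPY_USER selected, the architecture only provides raw_copy_from_user()/raw_copy_to_user(), which return the number of bytes left uncopied. The access_ok() checks and the zeroing of the uncopied tail on a faulting copy_from_user() are handled by the generic uaccess code, which is why the memset()-based l_fixup path in usercopy.S can go away. Roughly, as a simplified sketch of that generic behaviour (not code from this patch; names prefixed "sketch_" are illustrative, and the real helper also deals with might_fault() and instrumentation hooks):

/* Simplified sketch of the generic copy_from_user() path that
 * ARCH_HAS_RAW_COPY_USER architectures rely on: the arch hook copies
 * what it can and reports how many bytes it could NOT copy; the
 * generic code zeroes the uncopied tail so no stale kernel memory is
 * exposed to the caller.
 */
static inline unsigned long
sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;

        if (access_ok(VERIFY_READ, from, n))
                res = raw_copy_from_user(to, from, n);  /* arch hook */
        if (res)                                /* faulted part-way through */
                memset(to + (n - res), 0, res); /* zero the uncopied tail */
        return res;                             /* bytes not copied */
}
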
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index f4126cf997a4..043d37d45919 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -29,6 +29,7 @@ config XTENSA
 	select NO_BOOTMEM
 	select PERF_USE_VMALLOC
 	select VIRT_TO_BUS
+	select ARCH_HAS_RAW_COPY_USER
 	help
 	  Xtensa processors are 32-bit RISC machines designed by Tensilica
 	  primarily for embedded systems. These processors are both
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 0f338774af99..8e93ed8ad1fe 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -234,60 +234,22 @@ __asm__ __volatile__( \
  * Copy to/from user space
  */
 
-/*
- * We use a generic, arbitrary-sized copy subroutine. The Xtensa
- * architecture would cause heavy code bloat if we tried to inline
- * these functions and provide __constant_copy_* equivalents like the
- * i386 versions. __xtensa_copy_user is quite efficient. See the
- * .fixup section of __xtensa_copy_user for a discussion on the
- * X_zeroing equivalents for Xtensa.
- */
-
 extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
-#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
-
 
 static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-        return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
-{
-        return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
-{
-        prefetch(from);
-        if (access_ok(VERIFY_WRITE, to, n))
-                return __copy_user(to, from, n);
-        return n;
+        prefetchw(to);
+        return __xtensa_copy_user(to, (__force const void *)from, n);
 }
-
 static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-        prefetchw(to);
-        if (access_ok(VERIFY_READ, from, n))
-                return __copy_user(to, from, n);
-        else
-                memset(to, 0, n);
-        return n;
+        prefetchw(from);
+        return __xtensa_copy_user((__force void *)to, from, n);
 }
-
-#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
-#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
-#define __copy_to_user(to, from, n) \
-        __generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user(to, from, n) \
-        __generic_copy_from_user_nocheck((to), (from), (n))
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 /*
  * We need to return the number of bytes not cleared. Our memset()
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index 7ea4dd68893e..d9cd766bde3e 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -102,9 +102,9 @@ __xtensa_copy_user:
         bltui a4, 7, .Lbytecopy # do short copies byte by byte
 
         # copy 1 byte
-        EX(l8ui, a6, a3, 0, l_fixup)
+        EX(l8ui, a6, a3, 0, fixup)
         addi a3, a3, 1
-        EX(s8i, a6, a5, 0, s_fixup)
+        EX(s8i, a6, a5, 0, fixup)
         addi a5, a5, 1
         addi a4, a4, -1
         bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
@@ -112,11 +112,11 @@ __xtensa_copy_user:
 .Ldst2mod4: # dst 16-bit aligned
         # copy 2 bytes
         bltui a4, 6, .Lbytecopy # do short copies byte by byte
-        EX(l8ui, a6, a3, 0, l_fixup)
-        EX(l8ui, a7, a3, 1, l_fixup)
+        EX(l8ui, a6, a3, 0, fixup)
+        EX(l8ui, a7, a3, 1, fixup)
         addi a3, a3, 2
-        EX(s8i, a6, a5, 0, s_fixup)
-        EX(s8i, a7, a5, 1, s_fixup)
+        EX(s8i, a6, a5, 0, fixup)
+        EX(s8i, a7, a5, 1, fixup)
         addi a5, a5, 2
         addi a4, a4, -2
         j .Ldstaligned # dst is now aligned, return to main algorithm
@@ -135,9 +135,9 @@ __xtensa_copy_user:
         add a7, a3, a4 # a7 = end address for source
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lnextbyte:
-        EX(l8ui, a6, a3, 0, l_fixup)
+        EX(l8ui, a6, a3, 0, fixup)
         addi a3, a3, 1
-        EX(s8i, a6, a5, 0, s_fixup)
+        EX(s8i, a6, a5, 0, fixup)
         addi a5, a5, 1
 #if !XCHAL_HAVE_LOOPS
         blt a3, a7, .Lnextbyte
@@ -161,15 +161,15 @@ __xtensa_copy_user:
         add a8, a8, a3 # a8 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop1:
-        EX(l32i, a6, a3, 0, l_fixup)
-        EX(l32i, a7, a3, 4, l_fixup)
-        EX(s32i, a6, a5, 0, s_fixup)
-        EX(l32i, a6, a3, 8, l_fixup)
-        EX(s32i, a7, a5, 4, s_fixup)
-        EX(l32i, a7, a3, 12, l_fixup)
-        EX(s32i, a6, a5, 8, s_fixup)
+        EX(l32i, a6, a3, 0, fixup)
+        EX(l32i, a7, a3, 4, fixup)
+        EX(s32i, a6, a5, 0, fixup)
+        EX(l32i, a6, a3, 8, fixup)
+        EX(s32i, a7, a5, 4, fixup)
+        EX(l32i, a7, a3, 12, fixup)
+        EX(s32i, a6, a5, 8, fixup)
         addi a3, a3, 16
-        EX(s32i, a7, a5, 12, s_fixup)
+        EX(s32i, a7, a5, 12, fixup)
         addi a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
         blt a3, a8, .Loop1
@@ -177,31 +177,31 @@ __xtensa_copy_user:
 .Loop1done:
         bbci.l a4, 3, .L2
         # copy 8 bytes
-        EX(l32i, a6, a3, 0, l_fixup)
-        EX(l32i, a7, a3, 4, l_fixup)
+        EX(l32i, a6, a3, 0, fixup)
+        EX(l32i, a7, a3, 4, fixup)
         addi a3, a3, 8
-        EX(s32i, a6, a5, 0, s_fixup)
-        EX(s32i, a7, a5, 4, s_fixup)
+        EX(s32i, a6, a5, 0, fixup)
+        EX(s32i, a7, a5, 4, fixup)
         addi a5, a5, 8
 .L2:
         bbci.l a4, 2, .L3
         # copy 4 bytes
-        EX(l32i, a6, a3, 0, l_fixup)
+        EX(l32i, a6, a3, 0, fixup)
         addi a3, a3, 4
-        EX(s32i, a6, a5, 0, s_fixup)
+        EX(s32i, a6, a5, 0, fixup)
         addi a5, a5, 4
 .L3:
         bbci.l a4, 1, .L4
         # copy 2 bytes
-        EX(l16ui, a6, a3, 0, l_fixup)
+        EX(l16ui, a6, a3, 0, fixup)
         addi a3, a3, 2
-        EX(s16i, a6, a5, 0, s_fixup)
+        EX(s16i, a6, a5, 0, fixup)
         addi a5, a5, 2
 .L4:
         bbci.l a4, 0, .L5
         # copy 1 byte
-        EX(l8ui, a6, a3, 0, l_fixup)
-        EX(s8i, a6, a5, 0, s_fixup)
+        EX(l8ui, a6, a3, 0, fixup)
+        EX(s8i, a6, a5, 0, fixup)
 .L5:
         movi a2, 0 # return success for len bytes copied
         retw
@@ -217,7 +217,7 @@ __xtensa_copy_user:
         # copy 16 bytes per iteration for word-aligned dst and unaligned src
         and a10, a3, a8 # save unalignment offset for below
         sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware)
-        EX(l32i, a6, a3, 0, l_fixup) # load first word
+        EX(l32i, a6, a3, 0, fixup) # load first word
 #if XCHAL_HAVE_LOOPS
         loopnez a7, .Loop2done
 #else /* !XCHAL_HAVE_LOOPS */
@@ -226,19 +226,19 @@ __xtensa_copy_user:
         add a12, a12, a3 # a12 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop2:
-        EX(l32i, a7, a3, 4, l_fixup)
-        EX(l32i, a8, a3, 8, l_fixup)
+        EX(l32i, a7, a3, 4, fixup)
+        EX(l32i, a8, a3, 8, fixup)
         ALIGN( a6, a6, a7)
-        EX(s32i, a6, a5, 0, s_fixup)
-        EX(l32i, a9, a3, 12, l_fixup)
+        EX(s32i, a6, a5, 0, fixup)
+        EX(l32i, a9, a3, 12, fixup)
         ALIGN( a7, a7, a8)
-        EX(s32i, a7, a5, 4, s_fixup)
-        EX(l32i, a6, a3, 16, l_fixup)
+        EX(s32i, a7, a5, 4, fixup)
+        EX(l32i, a6, a3, 16, fixup)
         ALIGN( a8, a8, a9)
-        EX(s32i, a8, a5, 8, s_fixup)
+        EX(s32i, a8, a5, 8, fixup)
         addi a3, a3, 16
         ALIGN( a9, a9, a6)
-        EX(s32i, a9, a5, 12, s_fixup)
+        EX(s32i, a9, a5, 12, fixup)
         addi a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
         blt a3, a12, .Loop2
@@ -246,39 +246,39 @@ __xtensa_copy_user:
 .Loop2done:
         bbci.l a4, 3, .L12
         # copy 8 bytes
-        EX(l32i, a7, a3, 4, l_fixup)
-        EX(l32i, a8, a3, 8, l_fixup)
+        EX(l32i, a7, a3, 4, fixup)
+        EX(l32i, a8, a3, 8, fixup)
         ALIGN( a6, a6, a7)
-        EX(s32i, a6, a5, 0, s_fixup)
+        EX(s32i, a6, a5, 0, fixup)
         addi a3, a3, 8
         ALIGN( a7, a7, a8)
-        EX(s32i, a7, a5, 4, s_fixup)
+        EX(s32i, a7, a5, 4, fixup)
         addi a5, a5, 8
         mov a6, a8
 .L12:
         bbci.l a4, 2, .L13
         # copy 4 bytes
-        EX(l32i, a7, a3, 4, l_fixup)
+        EX(l32i, a7, a3, 4, fixup)
         addi a3, a3, 4
         ALIGN( a6, a6, a7)
-        EX(s32i, a6, a5, 0, s_fixup)
+        EX(s32i, a6, a5, 0, fixup)
         addi a5, a5, 4
         mov a6, a7
 .L13:
         add a3, a3, a10 # readjust a3 with correct misalignment
         bbci.l a4, 1, .L14
         # copy 2 bytes
-        EX(l8ui, a6, a3, 0, l_fixup)
-        EX(l8ui, a7, a3, 1, l_fixup)
+        EX(l8ui, a6, a3, 0, fixup)
+        EX(l8ui, a7, a3, 1, fixup)
         addi a3, a3, 2
-        EX(s8i, a6, a5, 0, s_fixup)
-        EX(s8i, a7, a5, 1, s_fixup)
+        EX(s8i, a6, a5, 0, fixup)
+        EX(s8i, a7, a5, 1, fixup)
         addi a5, a5, 2
 .L14:
         bbci.l a4, 0, .L15
         # copy 1 byte
-        EX(l8ui, a6, a3, 0, l_fixup)
-        EX(s8i, a6, a5, 0, s_fixup)
+        EX(l8ui, a6, a3, 0, fixup)
+        EX(s8i, a6, a5, 0, fixup)
 .L15:
         movi a2, 0 # return success for len bytes copied
         retw
@@ -291,30 +291,10 @@ __xtensa_copy_user:
  * bytes_copied = a5 - a2
  * retval = bytes_not_copied = original len - bytes_copied
  * retval = a11 - (a5 - a2)
- *
- * Clearing the remaining pieces of kernel memory plugs security
- * holes. This functionality is the equivalent of the *_zeroing
- * functions that some architectures provide.
  */
 
-.Lmemset:
-        .word memset
 
-s_fixup:
+fixup:
         sub a2, a5, a2 /* a2 <-- bytes copied */
         sub a2, a11, a2 /* a2 <-- bytes not copied */
         retw
-
-l_fixup:
-        sub a2, a5, a2 /* a2 <-- bytes copied */
-        sub a2, a11, a2 /* a2 <-- bytes not copied == return value */
-
-        /* void *memset(void *s, int c, size_t n); */
-        mov a6, a5 /* s */
-        movi a7, 0 /* c */
-        mov a8, a2 /* n */
-        l32r a4, .Lmemset
-        callx4 a4
-        /* Ignore memset return value in a6. */
-        /* a2 still contains bytes not copied. */
-        retw
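
For context, the unified fixup: handler above preserves the long-standing contract that the user-copy primitives return the number of bytes not copied. A hypothetical caller (not part of this patch; the helper name is illustrative) would consume that return value like this:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical helper, for illustration only: a nonzero return from
 * copy_from_user() means the copy faulted part-way through, so the
 * caller reports -EFAULT instead of trusting the partially-filled
 * buffer.
 */
static int example_fetch_from_user(void *kbuf, const void __user *ubuf,
                                   unsigned long len)
{
        if (copy_from_user(kbuf, ubuf, len))
                return -EFAULT;
        return 0;
}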