author    Chris Zankel <chris@zankel.net>  2008-11-06 09:40:46 -0500
committer Chris Zankel <chris@zankel.net>  2008-11-06 13:25:09 -0500
commit    367b8112fe2ea5c39a7bb4d263dcdd9b612fae18 (patch)
tree      9f3349189718dd2c5678faf0ab38f389786b6925 /include/asm-xtensa/uaccess.h
parent    206ead28377fee86b129637edada8c77816cc0d6 (diff)
xtensa: move header files to arch/xtensa/include

Move all header files for xtensa to arch/xtensa/include, and move the platform
and variant header files to the appropriate arch/xtensa/platforms/ and
arch/xtensa/variants/ directories. Moving the files also gets rid of all uses
of symlinks in the Makefile. This has already been completed for the majority
of the architectures; xtensa is one of the six still missing.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Chris Zankel <chris@zankel.net>
Diffstat (limited to 'include/asm-xtensa/uaccess.h')
-rw-r--r--  include/asm-xtensa/uaccess.h | 500 ---------------------------------
1 file changed, 0 insertions(+), 500 deletions(-)
diff --git a/include/asm-xtensa/uaccess.h b/include/asm-xtensa/uaccess.h
deleted file mode 100644
index b8528426ab1f..000000000000
--- a/include/asm-xtensa/uaccess.h
+++ /dev/null
@@ -1,500 +0,0 @@
/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic accessing functions to the user memory
 * space for the kernel. This header file provides functions such as:
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/errno.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#ifdef __ASSEMBLY__

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/types.h>

/*
 * These assembly macros mirror the C macros that follow below. They
 * should always have identical functionality. See
 * arch/xtensa/kernel/sys.S for usage.
 */

#define KERNEL_DS	0
#define USER_DS		1

#define get_ds		(KERNEL_DS)

/*
 * get_fs reads current->thread.current_ds into a register.
 * On Entry:
 *	<ad>	anything
 *	<sp>	stack
 * On Exit:
 *	<ad>	contains current->thread.current_ds
 */
	.macro	get_fs	ad, sp
	GET_CURRENT(\ad,\sp)
	l32i	\ad, \ad, THREAD_CURRENT_DS
	.endm

/*
 * set_fs sets current->thread.current_ds to some value.
 * On Entry:
 *	<at>	anything (temp register)
 *	<av>	value to write
 *	<sp>	stack
 * On Exit:
 *	<at>	destroyed (actually, current)
 *	<av>	preserved, value to write
 */
	.macro	set_fs	at, av, sp
	GET_CURRENT(\at,\sp)
	s32i	\av, \at, THREAD_CURRENT_DS
	.endm

/*
 * kernel_ok determines whether we should bypass addr/size checking.
 * See the equivalent C-macro version below for clarity.
 * On success, kernel_ok branches to a label indicated by parameter
 * <success>. This implies that the macro falls through to the next
 * instruction on an error.
 *
 * Note that while this macro can be used independently, we designed
 * it for optimal use in the access_ok macro below (i.e., we fall
 * through on error).
 *
 * On Entry:
 *	<at>		anything (temp register)
 *	<success>	label to branch to on success; implies
 *			fall-through macro on error
 *	<sp>		stack pointer
 * On Exit:
 *	<at>		destroyed (actually, current->thread.current_ds)
 */

#if ((KERNEL_DS != 0) || (USER_DS == 0))
# error Assembly macro kernel_ok fails
#endif
	.macro	kernel_ok  at, sp, success
	get_fs	\at, \sp
	beqz	\at, \success
	.endm

/*
 * user_ok determines whether the access to user-space memory is allowed.
 * See the equivalent C-macro version below for clarity.
 *
 * On error, user_ok branches to a label indicated by parameter
 * <error>. This implies that the macro falls through to the next
 * instruction on success.
 *
 * Note that while this macro can be used independently, we designed
 * it for optimal use in the access_ok macro below (i.e., we fall
 * through on success).
 *
 * On Entry:
 *	<aa>	register containing memory address
 *	<as>	register containing memory size
 *	<at>	temp register
 *	<error>	label to branch to on error; implies fall-through
 *		macro on success
 * On Exit:
 *	<aa>	preserved
 *	<as>	preserved
 *	<at>	destroyed (actually, (TASK_SIZE + 1 - size))
 */
	.macro	user_ok	aa, as, at, error
	movi	\at, __XTENSA_UL_CONST(TASK_SIZE)
	bgeu	\as, \at, \error
	sub	\at, \at, \as
	bgeu	\aa, \at, \error
	.endm

/*
 * access_ok determines whether a memory access is allowed. See the
 * equivalent C-macro version below for clarity.
 *
 * On error, access_ok branches to a label indicated by parameter
 * <error>. This implies that the macro falls through to the next
 * instruction on success.
 *
 * Note that we assume success is the common case, and we optimize the
 * branch fall-through case on success.
 *
 * On Entry:
 *	<aa>	register containing memory address
 *	<as>	register containing memory size
 *	<at>	temp register
 *	<sp>	stack pointer
 *	<error>	label to branch to on error; implies fall-through
 *		macro on success
 * On Exit:
 *	<aa>	preserved
 *	<as>	preserved
 *	<at>	destroyed
 */
	.macro	access_ok  aa, as, at, sp, error
	kernel_ok  \at, \sp, .Laccess_ok_\@
	user_ok    \aa, \as, \at, \error
.Laccess_ok_\@:
	.endm
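
/*
 * Illustrative sketch only (added for exposition; not from the original
 * header or from arch/xtensa/kernel/sys.S): how an assembly caller might
 * validate a user buffer with access_ok. The register choices (a2 =
 * address, a3 = size, a4 = scratch, a1 = stack pointer) and the
 * .Lexample_efault label are assumptions, not kernel code.
 */
#if 0
	access_ok  a2, a3, a4, a1, .Lexample_efault
	/* falls through to here when the access is allowed */
	/* ... perform the user access ... */
.Lexample_efault:
	movi	a2, -EFAULT		/* report failure to the caller */
#endif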

#else /* __ASSEMBLY__ not defined */

#include <linux/sched.h>
#include <asm/types.h>

/*
 * The fs value determines whether argument validity checking should
 * be performed or not. If get_fs() == USER_DS, checking is
 * performed; if get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are
 * grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0 })
#define USER_DS		((mm_segment_t) { 1 })

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	(current->thread.current_ds = (val))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
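
/*
 * Illustrative sketch only (added for exposition, not part of the
 * original header): how the segment machinery above is typically used.
 * The function names and the example logic are hypothetical.
 */
#if 0
static int example_check_user_buffer(const void *buf, unsigned long len)
{
	/* With get_fs() == USER_DS, access_ok() checks buf against TASK_SIZE. */
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;
	return 0;
}

static void example_kernel_access(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* __kernel_ok: the range check is bypassed */
	/* ... user-access routines may now be handed kernel pointers ... */
	set_fs(old_fs);		/* always restore the saved segment */
}
#endif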

/*
 * These are the main single-value transfer routines. They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly. We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact. Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Be careful not to:
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that check must have been done previously with a
 * separate "access_ok()" call (this is used when we make multiple
 * accesses to the same area of user memory).
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
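
/*
 * Illustrative sketch only (added for exposition, not part of the
 * original header): the checked get_user() verifies the address itself,
 * while __get_user() relies on an earlier access_ok() call, e.g. when
 * reading several fields of one user structure. The struct and function
 * names are hypothetical.
 */
#if 0
struct example_pair { int a; int b; };

static long example_fetch_pair(struct example_pair *dst,
			       const struct example_pair *src)
{
	long err;

	/* Single value: get_user() performs its own access_ok(). */
	err = get_user(dst->a, &src->a);
	if (err)
		return err;

	/* Several accesses to one area: check once, then use __get_user(). */
	if (!access_ok(VERIFY_READ, src, sizeof(*src)))
		return -EFAULT;
	err = __get_user(dst->a, &src->a);
	err |= __get_user(dst->b, &src->b);
	return err ? -EFAULT : 0;
}
#endif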


extern long __put_user_bad(void);

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	if (access_ok(VERIFY_WRITE,__pu_addr,size))		\
		__put_user_size((x),__pu_addr,(size),__pu_err);	\
	__pu_err;						\
})

#define __put_user_size(x,ptr,size,retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb);  break;	\
	case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break;	\
	case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break;	\
	case 8: {							\
			__typeof__(*ptr) __v64 = x;			\
			retval = __copy_to_user(ptr,&__v64,8);		\
			break;						\
		}							\
	default: __put_user_bad();					\
	}								\
} while (0)


/*
 * Consider a case in which a single user load/store would cause both
 * an unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable ptr to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses. We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel. Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels and operands such as 0f, 1f, %0, %2, and %3
 * must stay in sync.
 */
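
/*
 * Illustrative sketch only (added for exposition, not part of the
 * original header): since user unaligned accesses are prohibited, a
 * get_user() through a misaligned pointer is expected to fail with
 * -EFAULT (via __check_align_4) rather than fault in the kernel. The
 * function name is hypothetical.
 */
#if 0
static long example_misaligned_read(const char *ubuf)
{
	u32 v;

	/* ubuf + 1 is not 4-byte aligned; __check_align_4 sets -EFAULT. */
	return get_user(v, (const u32 *)(ubuf + 1));
}
#endif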

#define __check_align_1  ""

#define __check_align_2				\
	"   _bbci.l %3,  0, 1f		\n"	\
	"   movi    %0, %4		\n"	\
	"   _j      2f			\n"

#define __check_align_4				\
	"   _bbsi.l %3,  0, 0f		\n"	\
	"   _bbci.l %3,  1, 1f		\n"	\
	"0: movi    %0, %4		\n"	\
	"   _j      2f			\n"


/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __put_user_asm(x, addr, err, align, insn, cb)	\
	__asm__ __volatile__(				\
		__check_align_##align			\
		"1: "insn"  %2, %3, 0		\n"	\
		"2:				\n"	\
		"   .section  .fixup,\"ax\"	\n"	\
		"   .align 4			\n"	\
		"4:				\n"	\
		"   .long  2b			\n"	\
		"5:				\n"	\
		"   l32r   %1, 4b		\n"	\
		"   movi   %0, %4		\n"	\
		"   jx     %1			\n"	\
		"   .previous			\n"	\
		"   .section  __ex_table,\"a\"	\n"	\
		"   .long  1b, 5b		\n"	\
		"   .previous"				\
		:"=r" (err), "=r" (cb)			\
		:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_check(x,ptr,size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (access_ok(VERIFY_READ,__gu_addr,size))			\
		__get_user_size(__gu_val,__gu_addr,(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb);  break;	\
	case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break;	\
	case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb);  break;	\
	case 8: retval = __copy_from_user(&x,ptr,8); break;		\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)


/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x, addr, err, align, insn, cb)	\
	__asm__ __volatile__(				\
		__check_align_##align			\
		"1: "insn"  %2, %3, 0		\n"	\
		"2:				\n"	\
		"   .section  .fixup,\"ax\"	\n"	\
		"   .align 4			\n"	\
		"4:				\n"	\
		"   .long  2b			\n"	\
		"5:				\n"	\
		"   l32r   %1, 4b		\n"	\
		"   movi   %2, 0		\n"	\
		"   movi   %0, %4		\n"	\
		"   jx     %1			\n"	\
		"   .previous			\n"	\
		"   .section  __ex_table,\"a\"	\n"	\
		"   .long  1b, 5b		\n"	\
		"   .previous"				\
		:"=r" (err), "=r" (cb), "=r" (x)	\
		:"r" (addr), "i" (-EFAULT), "0" (err))


/*
 * Copy to/from user space
 */

/*
 * We use a generic, arbitrary-sized copy subroutine. Inlining these
 * functions and providing __constant_copy_* equivalents like the i386
 * versions would cause heavy code bloat on Xtensa.
 * __xtensa_copy_user is quite efficient. See the .fixup section of
 * __xtensa_copy_user for a discussion on the X_zeroing equivalents
 * for Xtensa.
 */

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size)


static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to,from,n);
}

static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to,from,n);
}

static inline unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
{
	prefetch(from);
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_user(to,from,n);
	return n;
}

static inline unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
	prefetchw(to);
	if (access_ok(VERIFY_READ, from, n))
		return __copy_user(to,from,n);
	else
		memset(to, 0, n);
	return n;
}

#define copy_to_user(to,from,n)   __generic_copy_to_user((to),(from),(n))
#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
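
/*
 * Illustrative sketch only (added for exposition, not part of the
 * original header): copy_from_user() returns the number of bytes that
 * could NOT be copied, so zero means success. The function name is
 * hypothetical.
 */
#if 0
static long example_copy_in(void *kbuf, const void *ubuf, unsigned long len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;	/* some bytes were left uncopied */
	return 0;
}
#endif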


/*
 * We need to return the number of bytes not cleared. Our memset()
 * returns zero if a problem occurs while accessing user-space memory.
 * In that event, we return the full size (no memory was cleared);
 * otherwise, we return zero for success.
 */

static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
	if (!memset(addr, 0, size))
		return size;
	return 0;
}

static inline unsigned long
clear_user(void *addr, unsigned long size)
{
	if (access_ok(VERIFY_WRITE, addr, size))
		return __xtensa_clear_user(addr, size);
	return size ? -EFAULT : 0;
}

#define __clear_user __xtensa_clear_user
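
/*
 * Illustrative sketch only (added for exposition, not part of the
 * original header): zeroing a user buffer with clear_user(); a non-zero
 * return is the number of bytes not cleared. The function name is
 * hypothetical.
 */
#if 0
static long example_zero_user(void *ubuf, unsigned long len)
{
	return clear_user(ubuf, len) ? -EFAULT : 0;
}
#endif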


extern long __strncpy_user(char *, const char *, long);
#define __strncpy_from_user __strncpy_user

static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}
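
/*
 * Illustrative sketch only (added for exposition, not part of the
 * original header): copying a NUL-terminated string from user space
 * into a fixed kernel buffer. The names and the 32-byte size are
 * hypothetical; strncpy_from_user() returns the string length, or
 * -EFAULT on error.
 */
#if 0
static long example_get_name(char kname[32], const char *uname)
{
	long len = strncpy_from_user(kname, uname, 31);

	if (len < 0)
		return len;	/* -EFAULT */
	kname[len] = '\0';	/* ensure termination on truncation */
	return len;
}
#endif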


#define strlen_user(str) strnlen_user((str), TASK_SIZE - 1)

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *, long);

static inline long strnlen_user(const char *str, long len)
{
	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len);
}
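
/*
 * Illustrative sketch only (added for exposition, not part of the
 * original header): strnlen_user() returns the string length INCLUDING
 * the terminating NUL, or 0 on error, unlike strlen(). The function
 * name is hypothetical.
 */
#if 0
static long example_user_strlen(const char *ustr)
{
	long n = strnlen_user(ustr, TASK_SIZE - 1);

	return n ? n - 1 : -EFAULT;	/* convert to a strlen()-style count */
}
#endif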


struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if the exception is not found, and fixup.unit otherwise. */

extern unsigned long search_exception_table(unsigned long addr);
extern void sort_exception_table(void);

/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc) \
({ \
	fixup_unit; \
})

#endif	/* __ASSEMBLY__ */
#endif	/* _XTENSA_UACCESS_H */