author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-alpha/uaccess.h
tags		Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-alpha/uaccess.h')
-rw-r--r--	include/asm-alpha/uaccess.h	517
1 files changed, 517 insertions, 0 deletions
diff --git a/include/asm-alpha/uaccess.h b/include/asm-alpha/uaccess.h
new file mode 100644
index 000000000000..4c39ee750f38
--- /dev/null
+++ b/include/asm-alpha/uaccess.h
@@ -0,0 +1,517 @@
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs()	(current_thread_info()->addr_limit)
#define get_ds()	(KERNEL_DS)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
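
/*
 * Illustrative sketch (not part of the original file): the classic
 * widen-then-restore idiom these macros exist for.  The function name
 * below is hypothetical.
 */
#if 0	/* example only */
static long example_kernel_get(int *kptr, int *valp)
{
        mm_segment_t old_fs = get_fs();
        long err;

        set_fs(KERNEL_DS);              /* mask becomes 0: no limit */
        err = get_user(*valp, (int __user *)kptr);
        set_fs(old_fs);                 /* always restore */
        return err;
}
#endif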
31 | |||
32 | /* | ||
33 | * Is a address valid? This does a straightforward calculation rather | ||
34 | * than tests. | ||
35 | * | ||
36 | * Address valid if: | ||
37 | * - "addr" doesn't have any high-bits set | ||
38 | * - AND "size" doesn't have any high-bits set | ||
39 | * - AND "addr+size" doesn't have any high-bits set | ||
40 | * - OR we are in kernel mode. | ||
41 | */ | ||
42 | #define __access_ok(addr,size,segment) \ | ||
43 | (((segment).seg & (addr | size | (addr+size))) == 0) | ||
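
/*
 * A worked example (illustrative, not from the original file):
 * USER_DS.seg is -0x40000000000UL, i.e. the mask 0xfffffc0000000000.
 * For a user pointer addr = 0x120000000 with size = 0x100:
 *
 *	addr | size | (addr+size) == 0x120000100   (no masked bits set)
 *	0xfffffc0000000000 & 0x120000100 == 0      --> access allowed
 *
 * A kernel pointer such as 0xfffffc0000310000 leaves bits inside the
 * mask and is rejected, while under KERNEL_DS the mask is 0 and every
 * address passes -- a single AND-and-compare instead of range tests.
 */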
44 | |||
45 | #define access_ok(type,addr,size) \ | ||
46 | ({ \ | ||
47 | __chk_user_ptr(addr); \ | ||
48 | __access_ok(((unsigned long)(addr)),(size),get_fs()); \ | ||
49 | }) | ||
50 | |||
51 | /* this function will go away soon - use access_ok() instead */ | ||
52 | extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size) | ||
53 | { | ||
54 | return access_ok(type,addr,size) ? 0 : -EFAULT; | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * These are the main single-value transfer routines. They automatically | ||
59 | * use the right size if we just have the right pointer type. | ||
60 | * | ||
61 | * As the alpha uses the same address space for kernel and user | ||
62 | * data, we can just do these as direct assignments. (Of course, the | ||
63 | * exception handling means that it's no longer "just"...) | ||
64 | * | ||
65 | * Careful to not | ||
66 | * (a) re-use the arguments for side effects (sizeof/typeof is ok) | ||
67 | * (b) require any knowledge of processes at this stage | ||
68 | */ | ||
69 | #define put_user(x,ptr) \ | ||
70 | __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs()) | ||
71 | #define get_user(x,ptr) \ | ||
72 | __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs()) | ||
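
/*
 * Illustrative use (a sketch, not part of the original header): callers
 * transfer one scalar at a time and must test the error return.  The
 * function name is hypothetical.
 */
#if 0	/* example only */
static long example_bump_counter(int __user *up)
{
        int v;

        if (get_user(v, up))            /* -EFAULT on a bad pointer */
                return -EFAULT;
        return put_user(v + 1, up);     /* 0 on success, -EFAULT otherwise */
}
#endif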
73 | |||
74 | /* | ||
75 | * The "__xxx" versions do not do address space checking, useful when | ||
76 | * doing multiple accesses to the same area (the programmer has to do the | ||
77 | * checks by hand with "access_ok()") | ||
78 | */ | ||
79 | #define __put_user(x,ptr) \ | ||
80 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | ||
81 | #define __get_user(x,ptr) \ | ||
82 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | ||
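
/*
 * Sketch (illustrative only): one access_ok() check amortised over
 * several unchecked accesses.  Names are hypothetical.
 */
#if 0	/* example only */
static long example_read_pair(const int __user *up, int *a, int *b)
{
        if (!access_ok(VERIFY_READ, up, 2 * sizeof(int)))
                return -EFAULT;
        if (__get_user(*a, up) || __get_user(*b, up + 1))
                return -EFAULT;
        return 0;
}
#endif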
83 | |||
84 | /* | ||
85 | * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to | ||
86 | * encode the bits we need for resolving the exception. See the | ||
87 | * more extensive comments with fixup_inline_exception below for | ||
88 | * more information. | ||
89 | */ | ||
90 | |||
91 | extern void __get_user_unknown(void); | ||
92 | |||
93 | #define __get_user_nocheck(x,ptr,size) \ | ||
94 | ({ \ | ||
95 | long __gu_err = 0; \ | ||
96 | unsigned long __gu_val; \ | ||
97 | __chk_user_ptr(ptr); \ | ||
98 | switch (size) { \ | ||
99 | case 1: __get_user_8(ptr); break; \ | ||
100 | case 2: __get_user_16(ptr); break; \ | ||
101 | case 4: __get_user_32(ptr); break; \ | ||
102 | case 8: __get_user_64(ptr); break; \ | ||
103 | default: __get_user_unknown(); break; \ | ||
104 | } \ | ||
105 | (x) = (__typeof__(*(ptr))) __gu_val; \ | ||
106 | __gu_err; \ | ||
107 | }) | ||
108 | |||
109 | #define __get_user_check(x,ptr,size,segment) \ | ||
110 | ({ \ | ||
111 | long __gu_err = -EFAULT; \ | ||
112 | unsigned long __gu_val = 0; \ | ||
113 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | ||
114 | if (__access_ok((unsigned long)__gu_addr,size,segment)) { \ | ||
115 | __gu_err = 0; \ | ||
116 | switch (size) { \ | ||
117 | case 1: __get_user_8(__gu_addr); break; \ | ||
118 | case 2: __get_user_16(__gu_addr); break; \ | ||
119 | case 4: __get_user_32(__gu_addr); break; \ | ||
120 | case 8: __get_user_64(__gu_addr); break; \ | ||
121 | default: __get_user_unknown(); break; \ | ||
122 | } \ | ||
123 | } \ | ||
124 | (x) = (__typeof__(*(ptr))) __gu_val; \ | ||
125 | __gu_err; \ | ||
126 | }) | ||
127 | |||
128 | struct __large_struct { unsigned long buf[100]; }; | ||
129 | #define __m(x) (*(struct __large_struct __user *)(x)) | ||
130 | |||
131 | #define __get_user_64(addr) \ | ||
132 | __asm__("1: ldq %0,%2\n" \ | ||
133 | "2:\n" \ | ||
134 | ".section __ex_table,\"a\"\n" \ | ||
135 | " .long 1b - .\n" \ | ||
136 | " lda %0, 2b-1b(%1)\n" \ | ||
137 | ".previous" \ | ||
138 | : "=r"(__gu_val), "=r"(__gu_err) \ | ||
139 | : "m"(__m(addr)), "1"(__gu_err)) | ||
140 | |||
141 | #define __get_user_32(addr) \ | ||
142 | __asm__("1: ldl %0,%2\n" \ | ||
143 | "2:\n" \ | ||
144 | ".section __ex_table,\"a\"\n" \ | ||
145 | " .long 1b - .\n" \ | ||
146 | " lda %0, 2b-1b(%1)\n" \ | ||
147 | ".previous" \ | ||
148 | : "=r"(__gu_val), "=r"(__gu_err) \ | ||
149 | : "m"(__m(addr)), "1"(__gu_err)) | ||
150 | |||
151 | #ifdef __alpha_bwx__ | ||
152 | /* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ | ||
153 | |||
154 | #define __get_user_16(addr) \ | ||
155 | __asm__("1: ldwu %0,%2\n" \ | ||
156 | "2:\n" \ | ||
157 | ".section __ex_table,\"a\"\n" \ | ||
158 | " .long 1b - .\n" \ | ||
159 | " lda %0, 2b-1b(%1)\n" \ | ||
160 | ".previous" \ | ||
161 | : "=r"(__gu_val), "=r"(__gu_err) \ | ||
162 | : "m"(__m(addr)), "1"(__gu_err)) | ||
163 | |||
164 | #define __get_user_8(addr) \ | ||
165 | __asm__("1: ldbu %0,%2\n" \ | ||
166 | "2:\n" \ | ||
167 | ".section __ex_table,\"a\"\n" \ | ||
168 | " .long 1b - .\n" \ | ||
169 | " lda %0, 2b-1b(%1)\n" \ | ||
170 | ".previous" \ | ||
171 | : "=r"(__gu_val), "=r"(__gu_err) \ | ||
172 | : "m"(__m(addr)), "1"(__gu_err)) | ||
173 | #else | ||
174 | /* Unfortunately, we can't get an unaligned access trap for the sub-word | ||
175 | load, so we have to do a general unaligned operation. */ | ||
176 | |||
177 | #define __get_user_16(addr) \ | ||
178 | { \ | ||
179 | long __gu_tmp; \ | ||
180 | __asm__("1: ldq_u %0,0(%3)\n" \ | ||
181 | "2: ldq_u %1,1(%3)\n" \ | ||
182 | " extwl %0,%3,%0\n" \ | ||
183 | " extwh %1,%3,%1\n" \ | ||
184 | " or %0,%1,%0\n" \ | ||
185 | "3:\n" \ | ||
186 | ".section __ex_table,\"a\"\n" \ | ||
187 | " .long 1b - .\n" \ | ||
188 | " lda %0, 3b-1b(%2)\n" \ | ||
189 | " .long 2b - .\n" \ | ||
190 | " lda %0, 3b-2b(%2)\n" \ | ||
191 | ".previous" \ | ||
192 | : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \ | ||
193 | : "r"(addr), "2"(__gu_err)); \ | ||
194 | } | ||
195 | |||
196 | #define __get_user_8(addr) \ | ||
197 | __asm__("1: ldq_u %0,0(%2)\n" \ | ||
198 | " extbl %0,%2,%0\n" \ | ||
199 | "2:\n" \ | ||
200 | ".section __ex_table,\"a\"\n" \ | ||
201 | " .long 1b - .\n" \ | ||
202 | " lda %0, 2b-1b(%1)\n" \ | ||
203 | ".previous" \ | ||
204 | : "=&r"(__gu_val), "=r"(__gu_err) \ | ||
205 | : "r"(addr), "1"(__gu_err)) | ||
206 | #endif | ||
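
/*
 * C-level sketch (illustrative, not part of the original file) of what
 * the non-BWX 16-bit sequence computes: the two ldq_u loads can never
 * take an unaligned trap, and extwl/extwh/or reassemble the halfword.
 * The net effect, in portable terms, is a little-endian byte compose:
 */
#if 0	/* example only */
static unsigned long example_unaligned_ldwu(const unsigned char *p)
{
        /* the asm does this with aligned quadword loads only */
        return (unsigned long)p[0] | ((unsigned long)p[1] << 8);
}
#endif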
207 | |||
208 | extern void __put_user_unknown(void); | ||
209 | |||
210 | #define __put_user_nocheck(x,ptr,size) \ | ||
211 | ({ \ | ||
212 | long __pu_err = 0; \ | ||
213 | __chk_user_ptr(ptr); \ | ||
214 | switch (size) { \ | ||
215 | case 1: __put_user_8(x,ptr); break; \ | ||
216 | case 2: __put_user_16(x,ptr); break; \ | ||
217 | case 4: __put_user_32(x,ptr); break; \ | ||
218 | case 8: __put_user_64(x,ptr); break; \ | ||
219 | default: __put_user_unknown(); break; \ | ||
220 | } \ | ||
221 | __pu_err; \ | ||
222 | }) | ||
223 | |||
224 | #define __put_user_check(x,ptr,size,segment) \ | ||
225 | ({ \ | ||
226 | long __pu_err = -EFAULT; \ | ||
227 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | ||
228 | if (__access_ok((unsigned long)__pu_addr,size,segment)) { \ | ||
229 | __pu_err = 0; \ | ||
230 | switch (size) { \ | ||
231 | case 1: __put_user_8(x,__pu_addr); break; \ | ||
232 | case 2: __put_user_16(x,__pu_addr); break; \ | ||
233 | case 4: __put_user_32(x,__pu_addr); break; \ | ||
234 | case 8: __put_user_64(x,__pu_addr); break; \ | ||
235 | default: __put_user_unknown(); break; \ | ||
236 | } \ | ||
237 | } \ | ||
238 | __pu_err; \ | ||
239 | }) | ||
240 | |||
241 | /* | ||
242 | * The "__put_user_xx()" macros tell gcc they read from memory | ||
243 | * instead of writing: this is because they do not write to | ||
244 | * any memory gcc knows about, so there are no aliasing issues | ||
245 | */ | ||
246 | #define __put_user_64(x,addr) \ | ||
247 | __asm__ __volatile__("1: stq %r2,%1\n" \ | ||
248 | "2:\n" \ | ||
249 | ".section __ex_table,\"a\"\n" \ | ||
250 | " .long 1b - .\n" \ | ||
251 | " lda $31,2b-1b(%0)\n" \ | ||
252 | ".previous" \ | ||
253 | : "=r"(__pu_err) \ | ||
254 | : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) | ||
255 | |||
256 | #define __put_user_32(x,addr) \ | ||
257 | __asm__ __volatile__("1: stl %r2,%1\n" \ | ||
258 | "2:\n" \ | ||
259 | ".section __ex_table,\"a\"\n" \ | ||
260 | " .long 1b - .\n" \ | ||
261 | " lda $31,2b-1b(%0)\n" \ | ||
262 | ".previous" \ | ||
263 | : "=r"(__pu_err) \ | ||
264 | : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) | ||
265 | |||
266 | #ifdef __alpha_bwx__ | ||
267 | /* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ | ||
268 | |||
269 | #define __put_user_16(x,addr) \ | ||
270 | __asm__ __volatile__("1: stw %r2,%1\n" \ | ||
271 | "2:\n" \ | ||
272 | ".section __ex_table,\"a\"\n" \ | ||
273 | " .long 1b - .\n" \ | ||
274 | " lda $31,2b-1b(%0)\n" \ | ||
275 | ".previous" \ | ||
276 | : "=r"(__pu_err) \ | ||
277 | : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) | ||
278 | |||
279 | #define __put_user_8(x,addr) \ | ||
280 | __asm__ __volatile__("1: stb %r2,%1\n" \ | ||
281 | "2:\n" \ | ||
282 | ".section __ex_table,\"a\"\n" \ | ||
283 | " .long 1b - .\n" \ | ||
284 | " lda $31,2b-1b(%0)\n" \ | ||
285 | ".previous" \ | ||
286 | : "=r"(__pu_err) \ | ||
287 | : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) | ||
288 | #else | ||
289 | /* Unfortunately, we can't get an unaligned access trap for the sub-word | ||
290 | write, so we have to do a general unaligned operation. */ | ||
291 | |||
292 | #define __put_user_16(x,addr) \ | ||
293 | { \ | ||
294 | long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \ | ||
295 | __asm__ __volatile__( \ | ||
296 | "1: ldq_u %2,1(%5)\n" \ | ||
297 | "2: ldq_u %1,0(%5)\n" \ | ||
298 | " inswh %6,%5,%4\n" \ | ||
299 | " inswl %6,%5,%3\n" \ | ||
300 | " mskwh %2,%5,%2\n" \ | ||
301 | " mskwl %1,%5,%1\n" \ | ||
302 | " or %2,%4,%2\n" \ | ||
303 | " or %1,%3,%1\n" \ | ||
304 | "3: stq_u %2,1(%5)\n" \ | ||
305 | "4: stq_u %1,0(%5)\n" \ | ||
306 | "5:\n" \ | ||
307 | ".section __ex_table,\"a\"\n" \ | ||
308 | " .long 1b - .\n" \ | ||
309 | " lda $31, 5b-1b(%0)\n" \ | ||
310 | " .long 2b - .\n" \ | ||
311 | " lda $31, 5b-2b(%0)\n" \ | ||
312 | " .long 3b - .\n" \ | ||
313 | " lda $31, 5b-3b(%0)\n" \ | ||
314 | " .long 4b - .\n" \ | ||
315 | " lda $31, 5b-4b(%0)\n" \ | ||
316 | ".previous" \ | ||
317 | : "=r"(__pu_err), "=&r"(__pu_tmp1), \ | ||
318 | "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ | ||
319 | "=&r"(__pu_tmp4) \ | ||
320 | : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \ | ||
321 | } | ||
322 | |||
323 | #define __put_user_8(x,addr) \ | ||
324 | { \ | ||
325 | long __pu_tmp1, __pu_tmp2; \ | ||
326 | __asm__ __volatile__( \ | ||
327 | "1: ldq_u %1,0(%4)\n" \ | ||
328 | " insbl %3,%4,%2\n" \ | ||
329 | " mskbl %1,%4,%1\n" \ | ||
330 | " or %1,%2,%1\n" \ | ||
331 | "2: stq_u %1,0(%4)\n" \ | ||
332 | "3:\n" \ | ||
333 | ".section __ex_table,\"a\"\n" \ | ||
334 | " .long 1b - .\n" \ | ||
335 | " lda $31, 3b-1b(%0)\n" \ | ||
336 | " .long 2b - .\n" \ | ||
337 | " lda $31, 3b-2b(%0)\n" \ | ||
338 | ".previous" \ | ||
339 | : "=r"(__pu_err), \ | ||
340 | "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ | ||
341 | : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ | ||
342 | } | ||
343 | #endif | ||
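
/*
 * Sketch (illustrative, not part of the original file) of the non-BWX
 * 16-bit store: an aligned read-modify-write of the quadword covering
 * the target, with inswl/mskwl doing the byte merge.  Simplified here
 * to the case where the halfword does not cross a quadword boundary:
 */
#if 0	/* example only */
static void example_unaligned_stw(unsigned long addr, unsigned long val)
{
        unsigned long *q = (unsigned long *)(addr & ~7UL);
        unsigned long shift = (addr & 7) * 8;

        /* mskwl clears the target bytes, inswl positions the new ones,
           or merges them -- then a single aligned store */
        *q = (*q & ~(0xffffUL << shift)) | ((val & 0xffff) << shift);
}
#endif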
344 | |||
345 | |||
346 | /* | ||
347 | * Complex access routines | ||
348 | */ | ||
349 | |||
350 | /* This little bit of silliness is to get the GP loaded for a function | ||
351 | that ordinarily wouldn't. Otherwise we could have it done by the macro | ||
352 | directly, which can be optimized the linker. */ | ||
353 | #ifdef MODULE | ||
354 | #define __module_address(sym) "r"(sym), | ||
355 | #define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym | ||
356 | #else | ||
357 | #define __module_address(sym) | ||
358 | #define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp" | ||
359 | #endif | ||
360 | |||
361 | extern void __copy_user(void); | ||
362 | |||
363 | extern inline long | ||
364 | __copy_tofrom_user_nocheck(void *to, const void *from, long len) | ||
365 | { | ||
366 | register void * __cu_to __asm__("$6") = to; | ||
367 | register const void * __cu_from __asm__("$7") = from; | ||
368 | register long __cu_len __asm__("$0") = len; | ||
369 | |||
370 | __asm__ __volatile__( | ||
371 | __module_call(28, 3, __copy_user) | ||
372 | : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) | ||
373 | : __module_address(__copy_user) | ||
374 | "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) | ||
375 | : "$1","$2","$3","$4","$5","$28","memory"); | ||
376 | |||
377 | return __cu_len; | ||
378 | } | ||
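
/*
 * __copy_user is a hand-written assembly routine with a private calling
 * convention: arguments pinned in $6/$7/$0 as above, and the count of
 * bytes NOT copied returned in $0 -- which is why __cu_len serves as
 * both input and result.  $28 holds the return address for the call.
 */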
379 | |||
380 | extern inline long | ||
381 | __copy_tofrom_user(void *to, const void *from, long len, const void __user *validate) | ||
382 | { | ||
383 | if (__access_ok((unsigned long)validate, len, get_fs())) | ||
384 | len = __copy_tofrom_user_nocheck(to, from, len); | ||
385 | return len; | ||
386 | } | ||
387 | |||
388 | #define __copy_to_user(to,from,n) \ | ||
389 | ({ \ | ||
390 | __chk_user_ptr(to); \ | ||
391 | __copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \ | ||
392 | }) | ||
393 | #define __copy_from_user(to,from,n) \ | ||
394 | ({ \ | ||
395 | __chk_user_ptr(from); \ | ||
396 | __copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \ | ||
397 | }) | ||
398 | |||
399 | #define __copy_to_user_inatomic __copy_to_user | ||
400 | #define __copy_from_user_inatomic __copy_from_user | ||
401 | |||
402 | |||
403 | extern inline long | ||
404 | copy_to_user(void __user *to, const void *from, long n) | ||
405 | { | ||
406 | return __copy_tofrom_user((__force void *)to, from, n, to); | ||
407 | } | ||
408 | |||
409 | extern inline long | ||
410 | copy_from_user(void *to, const void __user *from, long n) | ||
411 | { | ||
412 | return __copy_tofrom_user(to, (__force void *)from, n, from); | ||
413 | } | ||
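
/*
 * Illustrative caller pattern (not part of the original file): the
 * return value is the number of bytes left uncopied, not an errno.
 */
#if 0	/* example only */
static long example_fetch(void *kbuf, const void __user *ubuf, long n)
{
        if (copy_from_user(kbuf, ubuf, n))      /* nonzero: partial copy */
                return -EFAULT;
        return 0;
}
#endif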
414 | |||
415 | extern void __do_clear_user(void); | ||
416 | |||
417 | extern inline long | ||
418 | __clear_user(void __user *to, long len) | ||
419 | { | ||
420 | register void __user * __cl_to __asm__("$6") = to; | ||
421 | register long __cl_len __asm__("$0") = len; | ||
422 | __asm__ __volatile__( | ||
423 | __module_call(28, 2, __do_clear_user) | ||
424 | : "=r"(__cl_len), "=r"(__cl_to) | ||
425 | : __module_address(__do_clear_user) | ||
426 | "0"(__cl_len), "1"(__cl_to) | ||
427 | : "$1","$2","$3","$4","$5","$28","memory"); | ||
428 | return __cl_len; | ||
429 | } | ||
430 | |||
431 | extern inline long | ||
432 | clear_user(void __user *to, long len) | ||
433 | { | ||
434 | if (__access_ok((unsigned long)to, len, get_fs())) | ||
435 | len = __clear_user(to, len); | ||
436 | return len; | ||
437 | } | ||
438 | |||
439 | #undef __module_address | ||
440 | #undef __module_call | ||
441 | |||
442 | /* Returns: -EFAULT if exception before terminator, N if the entire | ||
443 | buffer filled, else strlen. */ | ||
444 | |||
445 | extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len); | ||
446 | |||
447 | extern inline long | ||
448 | strncpy_from_user(char *to, const char __user *from, long n) | ||
449 | { | ||
450 | long ret = -EFAULT; | ||
451 | if (__access_ok((unsigned long)from, 0, get_fs())) | ||
452 | ret = __strncpy_from_user(to, from, n); | ||
453 | return ret; | ||
454 | } | ||
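
/*
 * Sketch of the return convention (illustrative; names hypothetical):
 */
#if 0	/* example only */
	char name[32];
	long n = strncpy_from_user(name, uname, sizeof(name));

	if (n < 0)
		return n;		/* -EFAULT: faulted before the NUL */
	if (n == sizeof(name))
		return -ENAMETOOLONG;	/* buffer filled, NUL not found */
	/* else n == strlen(name) and the copy is NUL-terminated */
#endif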
455 | |||
456 | /* Returns: 0 if bad, string length+1 (memory size) of string if ok */ | ||
457 | extern long __strlen_user(const char __user *); | ||
458 | |||
459 | extern inline long strlen_user(const char __user *str) | ||
460 | { | ||
461 | return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0; | ||
462 | } | ||
463 | |||
464 | /* Returns: 0 if exception before NUL or reaching the supplied limit (N), | ||
465 | * a value greater than N if the limit would be exceeded, else strlen. */ | ||
466 | extern long __strnlen_user(const char __user *, long); | ||
467 | |||
468 | extern inline long strnlen_user(const char __user *str, long n) | ||
469 | { | ||
470 | return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0; | ||
471 | } | ||
472 | |||
473 | /* | ||
474 | * About the exception table: | ||
475 | * | ||
476 | * - insn is a 32-bit pc-relative offset from the faulting insn. | ||
477 | * - nextinsn is a 16-bit offset off of the faulting instruction | ||
478 | * (not off of the *next* instruction as branches are). | ||
479 | * - errreg is the register in which to place -EFAULT. | ||
480 | * - valreg is the final target register for the load sequence | ||
481 | * and will be zeroed. | ||
482 | * | ||
483 | * Either errreg or valreg may be $31, in which case nothing happens. | ||
484 | * | ||
485 | * The exception fixup information "just so happens" to be arranged | ||
486 | * as in a MEM format instruction. This lets us emit our three | ||
487 | * values like so: | ||
488 | * | ||
489 | * lda valreg, nextinsn(errreg) | ||
490 | * | ||
491 | */ | ||
492 | |||
493 | struct exception_table_entry | ||
494 | { | ||
495 | signed int insn; | ||
496 | union exception_fixup { | ||
497 | unsigned unit; | ||
498 | struct { | ||
499 | signed int nextinsn : 16; | ||
500 | unsigned int errreg : 5; | ||
501 | unsigned int valreg : 5; | ||
502 | } bits; | ||
503 | } fixup; | ||
504 | }; | ||
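
/*
 * Layout sketch (illustrative, not from the original file): an Alpha
 * MEM-format instruction packs
 *
 *	opcode<31:26> | Ra<25:21> | Rb<20:16> | disp<15:0>
 *
 * so "lda valreg, nextinsn(errreg)" drops Ra onto valreg, Rb onto
 * errreg and the displacement onto nextinsn -- exactly the little-endian
 * bitfield order declared above.
 */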
505 | |||
506 | /* Returns the new pc */ | ||
507 | #define fixup_exception(map_reg, fixup, pc) \ | ||
508 | ({ \ | ||
509 | if ((fixup)->fixup.bits.valreg != 31) \ | ||
510 | map_reg((fixup)->fixup.bits.valreg) = 0; \ | ||
511 | if ((fixup)->fixup.bits.errreg != 31) \ | ||
512 | map_reg((fixup)->fixup.bits.errreg) = -EFAULT; \ | ||
513 | (pc) + (fixup)->fixup.bits.nextinsn; \ | ||
514 | }) | ||
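
/*
 * Worked example (illustrative): in __get_user_32 above, label 1 is the
 * ldl and label 2 the instruction after it, so "lda %0, 2b-1b(%1)"
 * encodes valreg = the register holding __gu_val, errreg = the one
 * holding __gu_err, and nextinsn = 4.  On a fault, fixup_exception()
 * zeroes __gu_val, writes -EFAULT into __gu_err and resumes at the
 * faulting pc + 4, i.e. at label 2.
 */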
515 | |||
516 | |||
517 | #endif /* __ALPHA_UACCESS_H */ | ||