Diffstat (limited to 'include/asm-parisc/uaccess.h')
-rw-r--r--  include/asm-parisc/uaccess.h  122
1 file changed, 39 insertions(+), 83 deletions(-)
diff --git a/include/asm-parisc/uaccess.h b/include/asm-parisc/uaccess.h
index d973e8b3466c..4878b9501f24 100644
--- a/include/asm-parisc/uaccess.h
+++ b/include/asm-parisc/uaccess.h
@@ -4,7 +4,6 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
 #include <asm/page.h>
 #include <asm/system.h>
 #include <asm/cache.h>
@@ -43,16 +42,18 @@ static inline long access_ok(int type, const void __user * addr,
 #define put_user __put_user
 #define get_user __get_user
 
-#if BITS_PER_LONG == 32
+#if !defined(CONFIG_64BIT)
 #define LDD_KERNEL(ptr) __get_kernel_bad();
 #define LDD_USER(ptr) __get_user_bad();
 #define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr)
 #define STD_USER(x, ptr) __put_user_asm64(x,ptr)
+#define ASM_WORD_INSN ".word\t"
 #else
 #define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr)
 #define LDD_USER(ptr) __get_user_asm("ldd",ptr)
 #define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr)
 #define STD_USER(x, ptr) __put_user_asm("std",x,ptr)
+#define ASM_WORD_INSN ".dword\t"
 #endif
 
 /*
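Note on the ASM_WORD_INSN addition above: exception-table entries are consumed by C code as pairs of longs, so the directive that emits them must match the kernel's pointer width -- .word under !CONFIG_64BIT, .dword otherwise. A sketch of the C-side view the entries must line up with (the fixup field appears in the next hunk; the insn field name is assumed from this header's struct, illustration only):

    /* Each asm-emitted entry must be exactly two pointer-sized words,
     * because the fault handler walks __ex_table as an array of these. */
    struct exception_table_entry {
    	unsigned long insn;	/* address of the faulting instruction */
    	long fixup;		/* address of the fixup routine */
    };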
@@ -66,6 +67,11 @@ struct exception_table_entry {
 	long fixup; /* fixup routine */
 };
 
+#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
+	".section __ex_table,\"aw\"\n" \
+	ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \
+	".previous\n"
+
 /*
  * The page fault handler stores, in a per-cpu area, the following information
  * if a fixup routine is available.
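Because fault_addr and except_addr are stringified with the # operator, the new macro expands to string literals that concatenate into the surrounding inline asm. As a sketch, on a 32-bit kernel ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1) expands to:

    /* Adjacent literals merge into one string handed to the assembler. */
    ".section __ex_table,\"aw\"\n"
    ".word\t" "1b" ", " "fixup_get_user_skip_1" "\n\t"
    ".previous\n"

so the assembler appends one (fault address, fixup address) pair to the __ex_table section and then restores the previous section; on 64-bit kernels ASM_WORD_INSN swaps in ".dword\t" for pointer-sized entries.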
@@ -104,43 +110,19 @@ struct exception_data {
 	__gu_err; \
 })
 
-#ifdef __LP64__
-#define __get_kernel_asm(ldx,ptr) \
-	__asm__("\n1:\t" ldx "\t0(%2),%0\n" \
-		"\t.section __ex_table,\"aw\"\n" \
-		"\t.dword\t1b,fixup_get_user_skip_1\n" \
-		"\t.previous" \
-		: "=r"(__gu_val), "=r"(__gu_err) \
-		: "r"(ptr), "1"(__gu_err) \
-		: "r1");
-
-#define __get_user_asm(ldx,ptr) \
-	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n" \
-		"\t.section __ex_table,\"aw\"\n" \
-		"\t.dword\t1b,fixup_get_user_skip_1\n" \
-		"\t.previous" \
-		: "=r"(__gu_val), "=r"(__gu_err) \
-		: "r"(ptr), "1"(__gu_err) \
-		: "r1");
-#else
 #define __get_kernel_asm(ldx,ptr) \
-	__asm__("\n1:\t" ldx "\t0(%2),%0\n" \
-		"\t.section __ex_table,\"aw\"\n" \
-		"\t.word\t1b,fixup_get_user_skip_1\n" \
-		"\t.previous" \
+	__asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \
+		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
 		: "=r"(__gu_val), "=r"(__gu_err) \
 		: "r"(ptr), "1"(__gu_err) \
 		: "r1");
 
 #define __get_user_asm(ldx,ptr) \
-	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n" \
-		"\t.section __ex_table,\"aw\"\n" \
-		"\t.word\t1b,fixup_get_user_skip_1\n" \
-		"\t.previous" \
+	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \
+		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\
 		: "=r"(__gu_val), "=r"(__gu_err) \
 		: "r"(ptr), "1"(__gu_err) \
 		: "r1");
-#endif /* !__LP64__ */
 
 #define __put_user(x,ptr) \
 ({ \
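With the table emission factored into ASM_EXCEPTIONTABLE_ENTRY, the duplicated __LP64__ branches above collapse into one definition per accessor; ASM_WORD_INSN supplies the only part that differed. A hedged sketch of what the unified macro now expands to on a 32-bit kernel, for __get_user_asm("ldw",ptr) (approximate, not verbatim preprocessor output):

    __asm__("\n1:\tldw\t0(%%sr3,%2),%0\n\t"	/* user load via space register sr3 */
    	".section __ex_table,\"aw\"\n"
    	".word\t" "1b" ", " "fixup_get_user_skip_1" "\n\t"	/* fault at 1: -> fixup */
    	".previous\n"
    	: "=r"(__gu_val), "=r"(__gu_err)
    	: "r"(ptr), "1"(__gu_err)
    	: "r1");

If the load at label 1 faults, the fixup routine skips the faulting instruction and reports the failure through __gu_err instead of oopsing; the kernel variant is identical except for the %%sr3 user-space qualifier.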
@@ -179,80 +161,54 @@ struct exception_data {
  * r8/r9 are already listed as err/val.
  */
 
-#ifdef __LP64__
 #define __put_kernel_asm(stx,x,ptr) \
 	__asm__ __volatile__ ( \
-		"\n1:\t" stx "\t%2,0(%1)\n" \
-		"\t.section __ex_table,\"aw\"\n" \
-		"\t.dword\t1b,fixup_put_user_skip_1\n" \
-		"\t.previous" \
+		"\n1:\t" stx "\t%2,0(%1)\n\t" \
+		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
 		: "=r"(__pu_err) \
 		: "r"(ptr), "r"(x), "0"(__pu_err) \
 		: "r1")
 
 #define __put_user_asm(stx,x,ptr) \
 	__asm__ __volatile__ ( \
-		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n" \
-		"\t.section __ex_table,\"aw\"\n" \
-		"\t.dword\t1b,fixup_put_user_skip_1\n" \
-		"\t.previous" \
-		: "=r"(__pu_err) \
-		: "r"(ptr), "r"(x), "0"(__pu_err) \
-		: "r1")
-#else
-#define __put_kernel_asm(stx,x,ptr) \
-	__asm__ __volatile__ ( \
-		"\n1:\t" stx "\t%2,0(%1)\n" \
-		"\t.section __ex_table,\"aw\"\n" \
-		"\t.word\t1b,fixup_put_user_skip_1\n" \
-		"\t.previous" \
+		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \
+		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
 		: "=r"(__pu_err) \
 		: "r"(ptr), "r"(x), "0"(__pu_err) \
 		: "r1")
 
-#define __put_user_asm(stx,x,ptr) \
-	__asm__ __volatile__ ( \
-		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n" \
-		"\t.section __ex_table,\"aw\"\n" \
-		"\t.word\t1b,fixup_put_user_skip_1\n" \
-		"\t.previous" \
-		: "=r"(__pu_err) \
-		: "r"(ptr), "r"(x), "0"(__pu_err) \
-		: "r1")
 
+#if !defined(CONFIG_64BIT)
+
 #define __put_kernel_asm64(__val,ptr) do { \
 	u64 __val64 = (u64)(__val); \
 	u32 hi = (__val64) >> 32; \
 	u32 lo = (__val64) & 0xffffffff; \
 	__asm__ __volatile__ ( \
-		"\n1:\tstw %2,0(%1)\n" \
-		"\n2:\tstw %3,4(%1)\n" \
-		"\t.section __ex_table,\"aw\"\n" \
-		"\t.word\t1b,fixup_put_user_skip_2\n" \
-		"\t.word\t2b,fixup_put_user_skip_1\n" \
-		"\t.previous" \
+		"\n1:\tstw %2,0(%1)" \
+		"\n2:\tstw %3,4(%1)\n\t" \
+		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
+		ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
 		: "=r"(__pu_err) \
 		: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
 		: "r1"); \
 } while (0)
 
 #define __put_user_asm64(__val,ptr) do { \
-	u64 __val64 = (u64)__val; \
+	u64 __val64 = (u64)(__val); \
 	u32 hi = (__val64) >> 32; \
 	u32 lo = (__val64) & 0xffffffff; \
 	__asm__ __volatile__ ( \
-		"\n1:\tstw %2,0(%%sr3,%1)\n" \
-		"\n2:\tstw %3,4(%%sr3,%1)\n" \
-		"\t.section __ex_table,\"aw\"\n" \
-		"\t.word\t1b,fixup_get_user_skip_2\n" \
-		"\t.word\t2b,fixup_get_user_skip_1\n" \
-		"\t.previous" \
+		"\n1:\tstw %2,0(%%sr3,%1)" \
+		"\n2:\tstw %3,4(%%sr3,%1)\n\t" \
+		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
+		ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
 		: "=r"(__pu_err) \
 		: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
 		: "r1"); \
 } while (0)
 
-#endif /* !__LP64__ */
+#endif /* !defined(CONFIG_64BIT) */
 
 
 /*
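The 64-bit store path above (32-bit kernels only) needs two stw instructions and therefore two table entries: a fault at label 1 must skip both stores (fixup_put_user_skip_2), a fault at label 2 only the second (fixup_put_user_skip_1). The new side also fixes an old copy-paste slip: __put_user_asm64 previously pointed its entries at the fixup_get_user_* routines. A hypothetical caller, for illustration only (the helper name is invented; put_user itself is the real API):

    /* Store an 8-byte value to user space.  On 32-bit parisc this
     * routes through STD_USER() -> __put_user_asm64(): two stw
     * stores, each covered by its own exception-table entry, so a
     * fault at either one returns -EFAULT instead of oopsing. */
    static int store_u64_to_user(u64 __user *up, u64 val)
    {
    	return put_user(val, up);	/* 0 on success, -EFAULT on fault */
    }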