author     Linus Torvalds <torvalds@linux-foundation.org>  2015-12-17 12:45:09 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-12-17 12:45:09 -0500
commit     11f1a4b9755f5dbc3e822a96502ebe9b044b14d8 (patch)
tree       f03420a16608e4a107e03b556ce468741b3a4c82
parent     a5e90b1b075f89f084047628d4ef181aded0bbfb (diff)
x86: reorganize SMAP handling in user space accesses
This reorganizes how we do the stac/clac instructions in the user access code.
Instead of adding the instructions directly to the same inline asm that does the
actual user level access and exception handling, add them at a higher level.

This is mainly preparation for the next step, where we will expose an interface
to allow users to mark several accesses together as being user space accesses,
but it does already clean up some code:

 - the inlined trivial cases of copy_in_user() now do stac/clac just once over
   the accesses: they used to do one pair around the user space read, and
   another pair around the write-back.

 - the {get,put}_user_ex() macros that are used with the catch/try handling
   don't do any stac/clac at all, because that happens in the try/catch
   surrounding them.

Other than those two cleanups that happened naturally from the re-organization,
this should not make any difference. Yet.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  arch/x86/include/asm/uaccess.h     | 53
-rw-r--r--  arch/x86/include/asm/uaccess_64.h  | 94
2 files changed, 101 insertions, 46 deletions
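Before the diff: the change is easier to read with the intended usage pattern in
mind. The sketch below is not taken from the patch; stac()/clac() are stubbed and
raw_access_sketch() is a hypothetical stand-in for one __get_user_asm/__put_user_asm
access. Only the bracketing shape mirrors what the commit message describes: a single
__uaccess_begin()/__uaccess_end() pair now covers a whole group of user accesses.

/*
 * Sketch only (not from the patch): illustrates the __uaccess_begin()/
 * __uaccess_end() bracketing.  In the kernel, stac()/clac() emit the
 * STAC/CLAC instructions behind the SMAP alternatives; here they are
 * stubs so the example builds stand-alone.
 */
#include <string.h>

static inline void stac(void) { /* kernel: asm volatile("stac") under SMAP */ }
static inline void clac(void) { /* kernel: asm volatile("clac") under SMAP */ }

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/* Hypothetical stand-in for a single inline-asm user access. */
static inline int raw_access_sketch(void *dst, const void *src, unsigned int size)
{
        memcpy(dst, src, size);
        return 0;
}

/*
 * copy_in_user()-style helper after the patch: one begin/end pair around
 * both the user-space read and the write-back, instead of a stac/clac
 * pair hidden inside each inline asm.
 */
static inline int copy_in_user_sketch(void *dst, const void *src)
{
        unsigned long tmp;
        int ret;

        __uaccess_begin();
        ret = raw_access_sketch(&tmp, src, sizeof(tmp));
        if (!ret)
                ret = raw_access_sketch(dst, &tmp, sizeof(tmp));
        __uaccess_end();
        return ret;
}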
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 09b1b0ab94b7..cc228f4713da 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -134,6 +134,9 @@ extern int __get_user_4(void);
 extern int __get_user_8(void);
 extern int __get_user_bad(void);
 
+#define __uaccess_begin() stac()
+#define __uaccess_end() clac()
+
 /*
  * This is a type: either unsigned long, if the argument fits into
  * that type, or otherwise unsigned long long.
@@ -193,10 +196,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
 #ifdef CONFIG_X86_32
 #define __put_user_asm_u64(x, addr, err, errret) \
-        asm volatile(ASM_STAC "\n" \
+        asm volatile("\n" \
                      "1: movl %%eax,0(%2)\n" \
                      "2: movl %%edx,4(%2)\n" \
-                     "3: " ASM_CLAC "\n" \
+                     "3:" \
                      ".section .fixup,\"ax\"\n" \
                      "4: movl %3,%0\n" \
                      " jmp 3b\n" \
@@ -207,10 +210,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
                      : "A" (x), "r" (addr), "i" (errret), "0" (err))
 
 #define __put_user_asm_ex_u64(x, addr) \
-        asm volatile(ASM_STAC "\n" \
+        asm volatile("\n" \
                      "1: movl %%eax,0(%1)\n" \
                      "2: movl %%edx,4(%1)\n" \
-                     "3: " ASM_CLAC "\n" \
+                     "3:" \
                      _ASM_EXTABLE_EX(1b, 2b) \
                      _ASM_EXTABLE_EX(2b, 3b) \
                      : : "A" (x), "r" (addr))
@@ -304,6 +307,10 @@ do { \
         } \
 } while (0)
 
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
 #define __put_user_size_ex(x, ptr, size) \
 do { \
         __chk_user_ptr(ptr); \
@@ -358,9 +365,9 @@ do { \
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-        asm volatile(ASM_STAC "\n" \
+        asm volatile("\n" \
                      "1: mov"itype" %2,%"rtype"1\n" \
-                     "2: " ASM_CLAC "\n" \
+                     "2:\n" \
                      ".section .fixup,\"ax\"\n" \
                      "3: mov %3,%0\n" \
                      " xor"itype" %"rtype"1,%"rtype"1\n" \
@@ -370,6 +377,10 @@ do { \
                      : "=r" (err), ltype(x) \
                      : "m" (__m(addr)), "i" (errret), "0" (err))
 
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
 #define __get_user_size_ex(x, ptr, size) \
 do { \
         __chk_user_ptr(ptr); \
@@ -400,7 +411,9 @@ do { \
 #define __put_user_nocheck(x, ptr, size) \
 ({ \
         int __pu_err; \
+        __uaccess_begin(); \
         __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
+        __uaccess_end(); \
         __builtin_expect(__pu_err, 0); \
 })
 
@@ -408,7 +421,9 @@ do { \
 ({ \
         int __gu_err; \
         unsigned long __gu_val; \
+        __uaccess_begin(); \
         __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+        __uaccess_end(); \
         (x) = (__force __typeof__(*(ptr)))__gu_val; \
         __builtin_expect(__gu_err, 0); \
 })
@@ -423,9 +438,9 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-        asm volatile(ASM_STAC "\n" \
+        asm volatile("\n" \
                      "1: mov"itype" %"rtype"1,%2\n" \
-                     "2: " ASM_CLAC "\n" \
+                     "2:\n" \
                      ".section .fixup,\"ax\"\n" \
                      "3: mov %3,%0\n" \
                      " jmp 2b\n" \
@@ -445,11 +460,11 @@ struct __large_struct { unsigned long buf[100]; };
  */
 #define uaccess_try do { \
         current_thread_info()->uaccess_err = 0; \
-        stac(); \
+        __uaccess_begin(); \
         barrier();
 
 #define uaccess_catch(err) \
-        clac(); \
+        __uaccess_end(); \
         (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
 } while (0)
 
@@ -547,12 +562,13 @@ extern void __cmpxchg_wrong_size(void)
         __typeof__(ptr) __uval = (uval); \
         __typeof__(*(ptr)) __old = (old); \
         __typeof__(*(ptr)) __new = (new); \
+        __uaccess_begin(); \
         switch (size) { \
         case 1: \
         { \
-                asm volatile("\t" ASM_STAC "\n" \
+                asm volatile("\n" \
                              "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
-                             "2:\t" ASM_CLAC "\n" \
+                             "2:\n" \
                              "\t.section .fixup, \"ax\"\n" \
                              "3:\tmov %3, %0\n" \
                              "\tjmp 2b\n" \
@@ -566,9 +582,9 @@ extern void __cmpxchg_wrong_size(void)
         } \
         case 2: \
         { \
-                asm volatile("\t" ASM_STAC "\n" \
+                asm volatile("\n" \
                              "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
-                             "2:\t" ASM_CLAC "\n" \
+                             "2:\n" \
                              "\t.section .fixup, \"ax\"\n" \
                              "3:\tmov %3, %0\n" \
                              "\tjmp 2b\n" \
@@ -582,9 +598,9 @@ extern void __cmpxchg_wrong_size(void)
         } \
         case 4: \
         { \
-                asm volatile("\t" ASM_STAC "\n" \
+                asm volatile("\n" \
                              "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
-                             "2:\t" ASM_CLAC "\n" \
+                             "2:\n" \
                              "\t.section .fixup, \"ax\"\n" \
                              "3:\tmov %3, %0\n" \
                              "\tjmp 2b\n" \
@@ -601,9 +617,9 @@ extern void __cmpxchg_wrong_size(void)
         if (!IS_ENABLED(CONFIG_X86_64)) \
                 __cmpxchg_wrong_size(); \
         \
-        asm volatile("\t" ASM_STAC "\n" \
+        asm volatile("\n" \
                      "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
-                     "2:\t" ASM_CLAC "\n" \
+                     "2:\n" \
                      "\t.section .fixup, \"ax\"\n" \
                      "3:\tmov %3, %0\n" \
                      "\tjmp 2b\n" \
@@ -618,6 +634,7 @@ extern void __cmpxchg_wrong_size(void)
         default: \
                 __cmpxchg_wrong_size(); \
         } \
+        __uaccess_end(); \
         *__uval = __old; \
         __ret; \
 })
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index f2f9b39b274a..b89c34c4019b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -56,35 +56,49 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
         if (!__builtin_constant_p(size))
                 return copy_user_generic(dst, (__force void *)src, size);
         switch (size) {
-        case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+        case 1:
+                __uaccess_begin();
+                __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
+                __uaccess_end();
                 return ret;
-        case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+        case 2:
+                __uaccess_begin();
+                __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
+                __uaccess_end();
                 return ret;
-        case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+        case 4:
+                __uaccess_begin();
+                __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
+                __uaccess_end();
                 return ret;
-        case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+        case 8:
+                __uaccess_begin();
+                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
+                __uaccess_end();
                 return ret;
         case 10:
+                __uaccess_begin();
                 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                                ret, "q", "", "=r", 10);
-                if (unlikely(ret))
-                        return ret;
-                __get_user_asm(*(u16 *)(8 + (char *)dst),
-                               (u16 __user *)(8 + (char __user *)src),
-                               ret, "w", "w", "=r", 2);
+                if (likely(!ret))
+                        __get_user_asm(*(u16 *)(8 + (char *)dst),
+                                       (u16 __user *)(8 + (char __user *)src),
+                                       ret, "w", "w", "=r", 2);
+                __uaccess_end();
                 return ret;
         case 16:
+                __uaccess_begin();
                 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                                ret, "q", "", "=r", 16);
-                if (unlikely(ret))
-                        return ret;
-                __get_user_asm(*(u64 *)(8 + (char *)dst),
-                               (u64 __user *)(8 + (char __user *)src),
-                               ret, "q", "", "=r", 8);
+                if (likely(!ret))
+                        __get_user_asm(*(u64 *)(8 + (char *)dst),
+                                       (u64 __user *)(8 + (char __user *)src),
+                                       ret, "q", "", "=r", 8);
+                __uaccess_end();
                 return ret;
         default:
                 return copy_user_generic(dst, (__force void *)src, size);
@@ -106,35 +120,51 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
         if (!__builtin_constant_p(size))
                 return copy_user_generic((__force void *)dst, src, size);
         switch (size) {
-        case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+        case 1:
+                __uaccess_begin();
+                __put_user_asm(*(u8 *)src, (u8 __user *)dst,
                               ret, "b", "b", "iq", 1);
+                __uaccess_end();
                 return ret;
-        case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+        case 2:
+                __uaccess_begin();
+                __put_user_asm(*(u16 *)src, (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
+                __uaccess_end();
                 return ret;
-        case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+        case 4:
+                __uaccess_begin();
+                __put_user_asm(*(u32 *)src, (u32 __user *)dst,
                               ret, "l", "k", "ir", 4);
+                __uaccess_end();
                 return ret;
-        case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+        case 8:
+                __uaccess_begin();
+                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 8);
+                __uaccess_end();
                 return ret;
         case 10:
+                __uaccess_begin();
                 __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                                ret, "q", "", "er", 10);
-                if (unlikely(ret))
-                        return ret;
-                asm("":::"memory");
-                __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
-                               ret, "w", "w", "ir", 2);
+                if (likely(!ret)) {
+                        asm("":::"memory");
+                        __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+                                       ret, "w", "w", "ir", 2);
+                }
+                __uaccess_end();
                 return ret;
         case 16:
+                __uaccess_begin();
                 __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                                ret, "q", "", "er", 16);
-                if (unlikely(ret))
-                        return ret;
-                asm("":::"memory");
-                __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
-                               ret, "q", "", "er", 8);
+                if (likely(!ret)) {
+                        asm("":::"memory");
+                        __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+                                       ret, "q", "", "er", 8);
+                }
+                __uaccess_end();
                 return ret;
         default:
                 return copy_user_generic((__force void *)dst, src, size);
@@ -160,39 +190,47 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
         switch (size) {
         case 1: {
                 u8 tmp;
+                __uaccess_begin();
                 __get_user_asm(tmp, (u8 __user *)src,
                                ret, "b", "b", "=q", 1);
                 if (likely(!ret))
                         __put_user_asm(tmp, (u8 __user *)dst,
                                        ret, "b", "b", "iq", 1);
+                __uaccess_end();
                 return ret;
         }
         case 2: {
                 u16 tmp;
+                __uaccess_begin();
                 __get_user_asm(tmp, (u16 __user *)src,
                                ret, "w", "w", "=r", 2);
                 if (likely(!ret))
                         __put_user_asm(tmp, (u16 __user *)dst,
                                        ret, "w", "w", "ir", 2);
+                __uaccess_end();
                 return ret;
         }
 
         case 4: {
                 u32 tmp;
+                __uaccess_begin();
                 __get_user_asm(tmp, (u32 __user *)src,
                                ret, "l", "k", "=r", 4);
                 if (likely(!ret))
                         __put_user_asm(tmp, (u32 __user *)dst,
                                        ret, "l", "k", "ir", 4);
+                __uaccess_end();
                 return ret;
         }
         case 8: {
                 u64 tmp;
+                __uaccess_begin();
                 __get_user_asm(tmp, (u64 __user *)src,
                                ret, "q", "", "=r", 8);
                 if (likely(!ret))
                         __put_user_asm(tmp, (u64 __user *)dst,
                                        ret, "q", "", "er", 8);
+                __uaccess_end();
                 return ret;
         }
         default:
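For the *_ex() side mentioned in the commit message, the stac/clac pair now lives in
uaccess_try/uaccess_catch rather than in the accessors themselves. Below is a hedged,
stand-alone sketch of that shape; uaccess_err_sketch, get_user_ex_sketch() and the
*_sketch macros are simplified placeholders, not the kernel's definitions.

#include <errno.h>

/*
 * Sketch only: the try/catch bracketing after this patch.  The kernel
 * keeps uaccess_err in thread_info and maps __uaccess_begin/end to
 * stac()/clac(); both are simplified here so the example builds alone.
 */
static int uaccess_err_sketch;

static inline void uaccess_begin_sketch(void) { /* kernel: stac() */ }
static inline void uaccess_end_sketch(void)   { /* kernel: clac() */ }

#define uaccess_try_sketch do {                         \
        uaccess_err_sketch = 0;                         \
        uaccess_begin_sketch();

#define uaccess_catch_sketch(err)                       \
        uaccess_end_sketch();                           \
        (err) |= (uaccess_err_sketch ? -EFAULT : 0);    \
} while (0)

/* A *_ex()-style access: no stac/clac of its own, the try/catch does it. */
#define get_user_ex_sketch(x, ptr)      ((x) = *(ptr))

static int read_one_sketch(const int *uptr, int *out)
{
        int err = 0;

        uaccess_try_sketch {
                get_user_ex_sketch(*out, uptr);
        } uaccess_catch_sketch(err);

        return err;
}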