about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-02-23 17:58:52 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-02-23 19:25:20 -0500
commitde9e478b9d49f3a0214310d921450cf5bb4a21e6 (patch)
treec93f837c7541bd021b72a492b2ddd701ef796b3d
parent4de8ebeff8ddefaceeb7fc6a9b1a514fc9624509 (diff)
x86: fix SMAP in 32-bit environments
In commit 11f1a4b9755f ("x86: reorganize SMAP handling in user space accesses") I changed how the stac/clac instructions were generated around the user space accesses, which then made it possible to do batched accesses efficiently for user string copies etc. However, in doing so, I completely spaced out, and didn't even think about the 32-bit case. And nobody really even seemed to notice, because SMAP doesn't even exist until modern Skylake processors, and you'd have to be crazy to run 32-bit kernels on a modern CPU. Which brings us to Andy Lutomirski. He actually tested the 32-bit kernel on new hardware, and noticed that it doesn't work. My bad. The trivial fix is to add the required uaccess begin/end markers around the raw accesses in <asm/uaccess_32.h>. I feel a bit bad about this patch, just because that header file really should be cleaned up to avoid all the duplicated code in it, and this commit just expands on the problem. But this just fixes the bug without any bigger cleanup surgery. Reported-and-tested-by: Andy Lutomirski <luto@kernel.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  arch/x86/include/asm/uaccess_32.h  26
1 file changed, 26 insertions, 0 deletions
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index f5dcb5204dcd..3fe0eac59462 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -48,20 +48,28 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
48 48
49 switch (n) { 49 switch (n) {
50 case 1: 50 case 1:
51 __uaccess_begin();
51 __put_user_size(*(u8 *)from, (u8 __user *)to, 52 __put_user_size(*(u8 *)from, (u8 __user *)to,
52 1, ret, 1); 53 1, ret, 1);
54 __uaccess_end();
53 return ret; 55 return ret;
54 case 2: 56 case 2:
57 __uaccess_begin();
55 __put_user_size(*(u16 *)from, (u16 __user *)to, 58 __put_user_size(*(u16 *)from, (u16 __user *)to,
56 2, ret, 2); 59 2, ret, 2);
60 __uaccess_end();
57 return ret; 61 return ret;
58 case 4: 62 case 4:
63 __uaccess_begin();
59 __put_user_size(*(u32 *)from, (u32 __user *)to, 64 __put_user_size(*(u32 *)from, (u32 __user *)to,
60 4, ret, 4); 65 4, ret, 4);
66 __uaccess_end();
61 return ret; 67 return ret;
62 case 8: 68 case 8:
69 __uaccess_begin();
63 __put_user_size(*(u64 *)from, (u64 __user *)to, 70 __put_user_size(*(u64 *)from, (u64 __user *)to,
64 8, ret, 8); 71 8, ret, 8);
72 __uaccess_end();
65 return ret; 73 return ret;
66 } 74 }
67 } 75 }
@@ -103,13 +111,19 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
103 111
104 switch (n) { 112 switch (n) {
105 case 1: 113 case 1:
114 __uaccess_begin();
106 __get_user_size(*(u8 *)to, from, 1, ret, 1); 115 __get_user_size(*(u8 *)to, from, 1, ret, 1);
116 __uaccess_end();
107 return ret; 117 return ret;
108 case 2: 118 case 2:
119 __uaccess_begin();
109 __get_user_size(*(u16 *)to, from, 2, ret, 2); 120 __get_user_size(*(u16 *)to, from, 2, ret, 2);
121 __uaccess_end();
110 return ret; 122 return ret;
111 case 4: 123 case 4:
124 __uaccess_begin();
112 __get_user_size(*(u32 *)to, from, 4, ret, 4); 125 __get_user_size(*(u32 *)to, from, 4, ret, 4);
126 __uaccess_end();
113 return ret; 127 return ret;
114 } 128 }
115 } 129 }
@@ -148,13 +162,19 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
148 162
149 switch (n) { 163 switch (n) {
150 case 1: 164 case 1:
165 __uaccess_begin();
151 __get_user_size(*(u8 *)to, from, 1, ret, 1); 166 __get_user_size(*(u8 *)to, from, 1, ret, 1);
167 __uaccess_end();
152 return ret; 168 return ret;
153 case 2: 169 case 2:
170 __uaccess_begin();
154 __get_user_size(*(u16 *)to, from, 2, ret, 2); 171 __get_user_size(*(u16 *)to, from, 2, ret, 2);
172 __uaccess_end();
155 return ret; 173 return ret;
156 case 4: 174 case 4:
175 __uaccess_begin();
157 __get_user_size(*(u32 *)to, from, 4, ret, 4); 176 __get_user_size(*(u32 *)to, from, 4, ret, 4);
177 __uaccess_end();
158 return ret; 178 return ret;
159 } 179 }
160 } 180 }
@@ -170,13 +190,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
170 190
171 switch (n) { 191 switch (n) {
172 case 1: 192 case 1:
193 __uaccess_begin();
173 __get_user_size(*(u8 *)to, from, 1, ret, 1); 194 __get_user_size(*(u8 *)to, from, 1, ret, 1);
195 __uaccess_end();
174 return ret; 196 return ret;
175 case 2: 197 case 2:
198 __uaccess_begin();
176 __get_user_size(*(u16 *)to, from, 2, ret, 2); 199 __get_user_size(*(u16 *)to, from, 2, ret, 2);
200 __uaccess_end();
177 return ret; 201 return ret;
178 case 4: 202 case 4:
203 __uaccess_begin();
179 __get_user_size(*(u32 *)to, from, 4, ret, 4); 204 __get_user_size(*(u32 *)to, from, 4, ret, 4);
205 __uaccess_end();
180 return ret; 206 return ret;
181 } 207 }
182 } 208 }