aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJames Morse <james.morse@arm.com>2018-10-10 11:55:44 -0400
committerCatalin Marinas <catalin.marinas@arm.com>2018-10-10 12:52:08 -0400
commit3b82a6ea23277f56c4f005872bb73c8ce29779d7 (patch)
tree9b9a4e0faa0221fbb22aee5a214424ba784d0e63
parent742fafa50b62cb9f379ba14a13443f52afdc0c5d (diff)
Revert "arm64: uaccess: implement unsafe accessors"
This reverts commit a1f33941f7e103bcf471eaf8461b212223c642d6. The unsafe accessors allow the PAN enable/disable calls to be made once for a group of accesses. Adding these means we can now have sequences that look like this: | user_access_begin(); | unsafe_put_user(static-value, x, err); | unsafe_put_user(helper-that-sleeps(), x, err); | user_access_end(); Calling schedule() without taking an exception doesn't switch the PSTATE or TTBRs. We can switch out of a uaccess-enabled region, and run other code with uaccess enabled for a different thread. We can also switch from uaccess-disabled code back into this region, meaning the unsafe_put_user()s will fault. For software-PAN, threads that do this will get stuck as handle_mm_fault() will determine the page has already been mapped in, but we fault again as the page tables aren't loaded. To solve this we need code in __switch_to() that saves/restores the PAN state. Acked-by: Julien Thierry <julien.thierry@arm.com> Acked-by: Mark Rutland <mark.rutland@arm.com> Signed-off-by: James Morse <james.morse@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--arch/arm64/include/asm/uaccess.h61
1 file changed, 15 insertions, 46 deletions
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 8ac6e34922e7..07c34087bd5e 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -276,9 +276,11 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
276 : "+r" (err), "=&r" (x) \ 276 : "+r" (err), "=&r" (x) \
277 : "r" (addr), "i" (-EFAULT)) 277 : "r" (addr), "i" (-EFAULT))
278 278
279#define __get_user_err_unsafe(x, ptr, err) \ 279#define __get_user_err(x, ptr, err) \
280do { \ 280do { \
281 unsigned long __gu_val; \ 281 unsigned long __gu_val; \
282 __chk_user_ptr(ptr); \
283 uaccess_enable_not_uao(); \
282 switch (sizeof(*(ptr))) { \ 284 switch (sizeof(*(ptr))) { \
283 case 1: \ 285 case 1: \
284 __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ 286 __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
@@ -299,24 +301,17 @@ do { \
299 default: \ 301 default: \
300 BUILD_BUG(); \ 302 BUILD_BUG(); \
301 } \ 303 } \
302 (x) = (__force __typeof__(*(ptr)))__gu_val; \
303} while (0)
304
305#define __get_user_err_check(x, ptr, err) \
306do { \
307 __chk_user_ptr(ptr); \
308 uaccess_enable_not_uao(); \
309 __get_user_err_unsafe((x), (ptr), (err)); \
310 uaccess_disable_not_uao(); \ 304 uaccess_disable_not_uao(); \
305 (x) = (__force __typeof__(*(ptr)))__gu_val; \
311} while (0) 306} while (0)
312 307
313#define __get_user_err(x, ptr, err, accessor) \ 308#define __get_user_check(x, ptr, err) \
314({ \ 309({ \
315 __typeof__(*(ptr)) __user *__p = (ptr); \ 310 __typeof__(*(ptr)) __user *__p = (ptr); \
316 might_fault(); \ 311 might_fault(); \
317 if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ 312 if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
318 __p = uaccess_mask_ptr(__p); \ 313 __p = uaccess_mask_ptr(__p); \
319 accessor((x), __p, (err)); \ 314 __get_user_err((x), __p, (err)); \
320 } else { \ 315 } else { \
321 (x) = 0; (err) = -EFAULT; \ 316 (x) = 0; (err) = -EFAULT; \
322 } \ 317 } \
@@ -324,14 +319,14 @@ do { \
324 319
325#define __get_user_error(x, ptr, err) \ 320#define __get_user_error(x, ptr, err) \
326({ \ 321({ \
327 __get_user_err((x), (ptr), (err), __get_user_err_check); \ 322 __get_user_check((x), (ptr), (err)); \
328 (void)0; \ 323 (void)0; \
329}) 324})
330 325
331#define __get_user(x, ptr) \ 326#define __get_user(x, ptr) \
332({ \ 327({ \
333 int __gu_err = 0; \ 328 int __gu_err = 0; \
334 __get_user_err((x), (ptr), __gu_err, __get_user_err_check); \ 329 __get_user_check((x), (ptr), __gu_err); \
335 __gu_err; \ 330 __gu_err; \
336}) 331})
337 332
@@ -351,9 +346,11 @@ do { \
351 : "+r" (err) \ 346 : "+r" (err) \
352 : "r" (x), "r" (addr), "i" (-EFAULT)) 347 : "r" (x), "r" (addr), "i" (-EFAULT))
353 348
354#define __put_user_err_unsafe(x, ptr, err) \ 349#define __put_user_err(x, ptr, err) \
355do { \ 350do { \
356 __typeof__(*(ptr)) __pu_val = (x); \ 351 __typeof__(*(ptr)) __pu_val = (x); \
352 __chk_user_ptr(ptr); \
353 uaccess_enable_not_uao(); \
357 switch (sizeof(*(ptr))) { \ 354 switch (sizeof(*(ptr))) { \
358 case 1: \ 355 case 1: \
359 __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ 356 __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
@@ -374,24 +371,16 @@ do { \
374 default: \ 371 default: \
375 BUILD_BUG(); \ 372 BUILD_BUG(); \
376 } \ 373 } \
377} while (0)
378
379
380#define __put_user_err_check(x, ptr, err) \
381do { \
382 __chk_user_ptr(ptr); \
383 uaccess_enable_not_uao(); \
384 __put_user_err_unsafe((x), (ptr), (err)); \
385 uaccess_disable_not_uao(); \ 374 uaccess_disable_not_uao(); \
386} while (0) 375} while (0)
387 376
388#define __put_user_err(x, ptr, err, accessor) \ 377#define __put_user_check(x, ptr, err) \
389({ \ 378({ \
390 __typeof__(*(ptr)) __user *__p = (ptr); \ 379 __typeof__(*(ptr)) __user *__p = (ptr); \
391 might_fault(); \ 380 might_fault(); \
392 if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ 381 if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
393 __p = uaccess_mask_ptr(__p); \ 382 __p = uaccess_mask_ptr(__p); \
394 accessor((x), __p, (err)); \ 383 __put_user_err((x), __p, (err)); \
395 } else { \ 384 } else { \
396 (err) = -EFAULT; \ 385 (err) = -EFAULT; \
397 } \ 386 } \
@@ -399,39 +388,19 @@ do { \
399 388
400#define __put_user_error(x, ptr, err) \ 389#define __put_user_error(x, ptr, err) \
401({ \ 390({ \
402 __put_user_err((x), (ptr), (err), __put_user_err_check); \ 391 __put_user_check((x), (ptr), (err)); \
403 (void)0; \ 392 (void)0; \
404}) 393})
405 394
406#define __put_user(x, ptr) \ 395#define __put_user(x, ptr) \
407({ \ 396({ \
408 int __pu_err = 0; \ 397 int __pu_err = 0; \
409 __put_user_err((x), (ptr), __pu_err, __put_user_err_check); \ 398 __put_user_check((x), (ptr), __pu_err); \
410 __pu_err; \ 399 __pu_err; \
411}) 400})
412 401
413#define put_user __put_user 402#define put_user __put_user
414 403
415
416#define user_access_begin() uaccess_enable_not_uao()
417#define user_access_end() uaccess_disable_not_uao()
418
419#define unsafe_get_user(x, ptr, err) \
420do { \
421 int __gu_err = 0; \
422 __get_user_err((x), (ptr), __gu_err, __get_user_err_unsafe); \
423 if (__gu_err != 0) \
424 goto err; \
425} while (0)
426
427#define unsafe_put_user(x, ptr, err) \
428do { \
429 int __pu_err = 0; \
430 __put_user_err((x), (ptr), __pu_err, __put_user_err_unsafe); \
431 if (__pu_err != 0) \
432 goto err; \
433} while (0)
434
435extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); 404extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
436#define raw_copy_from_user(to, from, n) \ 405#define raw_copy_from_user(to, from, n) \
437({ \ 406({ \