author		Linus Torvalds <torvalds@linux-foundation.org>	2017-05-01 17:41:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-01 17:41:04 -0400
commit		5db6db0d400edd8bec274e34960cfa22838e1df5 (patch)
tree		3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /arch/tile
parent		5fab10041b4389b61de7e7a49893190bae686241 (diff)
parent		2fefc97b2180518bac923fba3f79fdca1f41dc15 (diff)
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull uaccess unification updates from Al Viro:
 "This is the uaccess unification pile. It's _not_ the end of uaccess
  work, but the next batch of that will go into the next cycle.

  This one mostly takes copy_from_user() and friends out of arch/* and
  gets the zero-padding behaviour in sync for all architectures.

  Dealing with the nocache/writethrough mess is for the next cycle;
  fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
  sold on access_ok() in there, BTW; just not in this pile), same for
  reducing __copy_... callsites, strn*... stuff, etc. - there will be a
  pile about as large as this one in the next merge window.

  This one sat in -next for weeks. -3KLoC"

* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
  HAVE_ARCH_HARDENED_USERCOPY is unconditional now
  CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
  m32r: switch to RAW_COPY_USER
  hexagon: switch to RAW_COPY_USER
  microblaze: switch to RAW_COPY_USER
  get rid of padding, switch to RAW_COPY_USER
  ia64: get rid of copy_in_user()
  ia64: sanitize __access_ok()
  ia64: get rid of 'segment' argument of __do_{get,put}_user()
  ia64: get rid of 'segment' argument of __{get,put}_user_check()
  ia64: add extable.h
  powerpc: get rid of zeroing, switch to RAW_COPY_USER
  esas2r: don't open-code memdup_user()
  alpha: fix stack smashing in old_adjtimex(2)
  don't open-code kernel_setsockopt()
  mips: switch to RAW_COPY_USER
  mips: get rid of tail-zeroing in primitives
  mips: make copy_from_user() zero tail explicitly
  mips: clean and reorder the forest of macros...
  mips: consolidate __invoke_... wrappers
  ...
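Background, not part of this merge and simplified: under CONFIG_ARCH_HAS_RAW_COPY_USER an architecture only has to provide raw_copy_from_user()/raw_copy_to_user(), which return the number of bytes left uncopied, and the generic layer performs the access_ok() check and the zero-padding of the tail once for all architectures. A minimal sketch of the read direction (the function name here is illustrative; it assumes the access_ok(VERIFY_READ, ...) calling convention of this kernel era):

#include <linux/kernel.h>	/* might_fault(), likely()/unlikely() */
#include <linux/string.h>	/* memset() */
#include <linux/uaccess.h>	/* access_ok(), raw_copy_from_user() */

/*
 * Sketch only: roughly what the generic _copy_from_user() does once an
 * architecture has switched to RAW_COPY_USER.
 */
static inline unsigned long
sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);	/* bytes NOT copied */
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero-pad the tail once, here */
	return res;
}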
Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/include/asm/Kbuild     |   1
-rw-r--r--  arch/tile/include/asm/uaccess.h  | 166
-rw-r--r--  arch/tile/lib/exports.c          |   7
-rw-r--r--  arch/tile/lib/memcpy_32.S        |  41
-rw-r--r--  arch/tile/lib/memcpy_user_64.c   |  15
5 files changed, 28 insertions, 202 deletions
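As further background before the per-file diffs (again illustrative, not part of this commit): the write direction follows the same pattern on top of raw_copy_to_user(), which tile declares in the uaccess.h hunk below; no zero-padding is needed when copying toward user space.

#include <linux/kernel.h>
#include <linux/uaccess.h>

/* Sketch only: a generic-style copy_to_user() built on the arch primitive. */
static inline unsigned long
sketch_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (likely(access_ok(VERIFY_WRITE, to, n)))
		n = raw_copy_to_user(to, from, n);	/* returns bytes NOT copied */
	return n;
}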
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index aa48b6eaff2d..24c44e93804d 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += clkdev.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += hw_irq.h
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index a77369e91e54..a803f6bb4d92 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -18,15 +18,11 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm-generic/uaccess-unaligned.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
 /*
  * The fs value determines whether argument validity checking should be
  * performed or not. If get_fs() == USER_DS, checking is performed, with
@@ -102,24 +98,7 @@ int __range_ok(unsigned long addr, unsigned long size);
 	likely(__range_ok((unsigned long)(addr), (size)) == 0); \
 })
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
 
 /*
  * This is a type: either unsigned long, if the argument fits into
@@ -334,145 +313,16 @@ extern int __put_user_bad(void)
 	((x) = 0, -EFAULT); \
 })
 
-/**
- * __copy_to_user() - copy data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * An alternate version - __copy_to_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable().
- */
-extern unsigned long __must_check __copy_to_user_inatomic(
-	void __user *to, const void *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	might_fault();
-	return __copy_to_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
-	return n;
-}
-
-/**
- * __copy_from_user() - copy data from user space, with less checking.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable(). This version
- * does *NOT* pad with zeros.
- */
-extern unsigned long __must_check __copy_from_user_inatomic(
-	void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_from_user_zeroing(
-	void *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	return __copy_from_user_zeroing(to, from, n);
-}
-
-static inline unsigned long __must_check
-_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
-}
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static inline unsigned long __must_check copy_from_user(void *to,
-					  const void __user *from,
-					  unsigned long n)
-{
-	int sz = __compiletime_object_size(to);
-
-	if (likely(sz == -1 || sz >= n))
-		n = _copy_from_user(to, from, n);
-	else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
+extern unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 #ifdef __tilegx__
-/**
- * __copy_in_user() - copy data within user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to user space. Caller must check
- * the specified blocks with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-extern unsigned long __copy_in_user_inatomic(
+extern unsigned long raw_copy_in_user(
 	void __user *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	return __copy_in_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
-		n = __copy_in_user(to, from, n);
-	return n;
-}
 #endif
 
 
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index c5369fe643c7..ecce8e177e3f 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount);
 
 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(__copy_to_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_zeroing);
+EXPORT_SYMBOL(raw_copy_to_user);
+EXPORT_SYMBOL(raw_copy_from_user);
 #ifdef __tilegx__
-EXPORT_SYMBOL(__copy_in_user_inatomic);
+EXPORT_SYMBOL(raw_copy_in_user);
 #endif
 
 /* hypervisor glue */
diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S
index a2771ae5da53..270f1267cd18 100644
--- a/arch/tile/lib/memcpy_32.S
+++ b/arch/tile/lib/memcpy_32.S
@@ -24,7 +24,6 @@
 
 #define IS_MEMCPY 0
 #define IS_COPY_FROM_USER 1
-#define IS_COPY_FROM_USER_ZEROING 2
 #define IS_COPY_TO_USER -1
 
 	.section .text.memcpy_common, "ax"
@@ -42,40 +41,31 @@
 	9
 
 
-/* __copy_from_user_inatomic takes the kernel target address in r0,
+/* raw_copy_from_user takes the kernel target address in r0,
  * the user source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_from_user_inatomic)
-.type __copy_from_user_inatomic, @function
-	FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
+ENTRY(raw_copy_from_user)
+.type raw_copy_from_user, @function
+	FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \
 		.text.memcpy_common, \
-		.Lend_memcpy_common - __copy_from_user_inatomic)
+		.Lend_memcpy_common - raw_copy_from_user)
 	{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
-	.size __copy_from_user_inatomic, . - __copy_from_user_inatomic
+	.size raw_copy_from_user, . - raw_copy_from_user
 
-/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
- * any uncopiable bytes are zeroed in the target.
- */
-ENTRY(__copy_from_user_zeroing)
-.type __copy_from_user_zeroing, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
-	{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
-	.size __copy_from_user_zeroing, . - __copy_from_user_zeroing
-
-/* __copy_to_user_inatomic takes the user target address in r0,
+/* raw_copy_to_user takes the user target address in r0,
  * the kernel source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_to_user_inatomic)
-.type __copy_to_user_inatomic, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
+ENTRY(raw_copy_to_user)
+.type raw_copy_to_user, @function
+	FEEDBACK_REENTER(raw_copy_from_user)
 	{ movei r29, IS_COPY_TO_USER; j memcpy_common }
-	.size __copy_to_user_inatomic, . - __copy_to_user_inatomic
+	.size raw_copy_to_user, . - raw_copy_to_user
 
 ENTRY(memcpy)
 .type memcpy, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
+	FEEDBACK_REENTER(raw_copy_from_user)
 	{ movei r29, IS_MEMCPY }
 	.size memcpy, . - memcpy
 	/* Fall through */
@@ -520,12 +510,7 @@ copy_from_user_fixup_loop:
 	{ bnzt r2, copy_from_user_fixup_loop }
 
 .Lcopy_from_user_fixup_zero_remainder:
-	{ bbs r29, 2f } /* low bit set means IS_COPY_FROM_USER */
-	/* byte-at-a-time loop faulted, so zero the rest. */
-	{ move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
-1:	{ sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
-	{ bnzt r3, 1b }
-2:	move lr, r27
+	move lr, r27
 	{ move r0, r2; jrp lr }
 
 copy_to_user_fixup_loop:
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 97bbb6060b25..a3fea9fd973e 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -51,7 +51,7 @@
 		__v; \
 	})
 
-#define USERCOPY_FUNC __copy_to_user_inatomic
+#define USERCOPY_FUNC raw_copy_to_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))
@@ -62,7 +62,7 @@
 #define LD8 LD
 #include "memcpy_64.c"
 
-#define USERCOPY_FUNC __copy_from_user_inatomic
+#define USERCOPY_FUNC raw_copy_from_user
 #define ST1 ST
 #define ST2 ST
 #define ST4 ST
@@ -73,7 +73,7 @@
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"
 
-#define USERCOPY_FUNC __copy_in_user_inatomic
+#define USERCOPY_FUNC raw_copy_in_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))
@@ -83,12 +83,3 @@
 #define LD4(p) _LD((p), ld4u)
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"
-
-unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
-				       unsigned long n)
-{
-	unsigned long rc = __copy_from_user_inatomic(to, from, n);
-	if (unlikely(rc))
-		memset(to + n - rc, 0, rc);
-	return rc;
-}