diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-05-01 17:41:04 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-05-01 17:41:04 -0400 |
commit | 5db6db0d400edd8bec274e34960cfa22838e1df5 (patch) | |
tree | 3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /lib/iov_iter.c | |
parent | 5fab10041b4389b61de7e7a49893190bae686241 (diff) | |
parent | 2fefc97b2180518bac923fba3f79fdca1f41dc15 (diff) |
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
"This is the uaccess unification pile. It's _not_ the end of uaccess
work, but the next batch of that will go into the next cycle. This one
mostly takes copy_from_user() and friends out of arch/* and gets the
zero-padding behaviour in sync for all architectures.
Dealing with the nocache/writethrough mess is for the next cycle;
fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
sold on access_ok() in there, BTW; just not in this pile), same for
reducing __copy_... callsites, strn*... stuff, etc. - there will be a
pile about as large as this one in the next merge window.
This one sat in -next for weeks. -3KLoC"
* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
HAVE_ARCH_HARDENED_USERCOPY is unconditional now
CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
m32r: switch to RAW_COPY_USER
hexagon: switch to RAW_COPY_USER
microblaze: switch to RAW_COPY_USER
get rid of padding, switch to RAW_COPY_USER
ia64: get rid of copy_in_user()
ia64: sanitize __access_ok()
ia64: get rid of 'segment' argument of __do_{get,put}_user()
ia64: get rid of 'segment' argument of __{get,put}_user_check()
ia64: add extable.h
powerpc: get rid of zeroing, switch to RAW_COPY_USER
esas2r: don't open-code memdup_user()
alpha: fix stack smashing in old_adjtimex(2)
don't open-code kernel_setsockopt()
mips: switch to RAW_COPY_USER
mips: get rid of tail-zeroing in primitives
mips: make copy_from_user() zero tail explicitly
mips: clean and reorder the forest of macros...
mips: consolidate __invoke_... wrappers
...
Diffstat (limited to 'lib/iov_iter.c')
-rw-r--r-- | lib/iov_iter.c | 6 |
1 files changed, 3 insertions, 3 deletions
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index cc001a542cb5..4952311422c1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -413,7 +413,7 @@ void iov_iter_init(struct iov_iter *i, int direction, | |||
413 | size_t count) | 413 | size_t count) |
414 | { | 414 | { |
415 | /* It will get better. Eventually... */ | 415 | /* It will get better. Eventually... */ |
416 | if (segment_eq(get_fs(), KERNEL_DS)) { | 416 | if (uaccess_kernel()) { |
417 | direction |= ITER_KVEC; | 417 | direction |= ITER_KVEC; |
418 | i->type = direction; | 418 | i->type = direction; |
419 | i->kvec = (struct kvec *)iov; | 419 | i->kvec = (struct kvec *)iov; |
@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) | |||
604 | return 0; | 604 | return 0; |
605 | } | 605 | } |
606 | iterate_and_advance(i, bytes, v, | 606 | iterate_and_advance(i, bytes, v, |
607 | __copy_from_user_nocache((to += v.iov_len) - v.iov_len, | 607 | __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len, |
608 | v.iov_base, v.iov_len), | 608 | v.iov_base, v.iov_len), |
609 | memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, | 609 | memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, |
610 | v.bv_offset, v.bv_len), | 610 | v.bv_offset, v.bv_len), |
@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) | |||
625 | if (unlikely(i->count < bytes)) | 625 | if (unlikely(i->count < bytes)) |
626 | return false; | 626 | return false; |
627 | iterate_all_kinds(i, bytes, v, ({ | 627 | iterate_all_kinds(i, bytes, v, ({ |
628 | if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len, | 628 | if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len, |
629 | v.iov_base, v.iov_len)) | 629 | v.iov_base, v.iov_len)) |
630 | return false; | 630 | return false; |
631 | 0;}), | 631 | 0;}), |