summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2018-05-03 20:06:31 -0400
committerIngo Molnar <mingo@kernel.org>2018-05-15 02:32:42 -0400
commit8780356ef630aa577fd4daa49e49b79674711fae (patch)
tree104e73665ca067e220a59a70daee5539e50e0c99
parent12c89130a56ae8e8d85db753d70333c4ee0ea835 (diff)
x86/asm/memcpy_mcsafe: Define copy_to_iter_mcsafe()
Use the updated memcpy_mcsafe() implementation to define copy_to_user_mcsafe() and copy_to_iter_mcsafe(). The most significant difference from typical copy_to_iter() is that the ITER_KVEC and ITER_BVEC iterator types can fail to complete a full transfer. Signed-off-by: Dan Williams <dan.j.williams@intel.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Borislav Petkov <bp@alien8.de> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: hch@lst.de Cc: linux-fsdevel@vger.kernel.org Cc: linux-nvdimm@lists.01.org Link: http://lkml.kernel.org/r/152539239150.31796.9189779163576449784.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/uaccess_64.h11
-rw-r--r--include/linux/uio.h15
-rw-r--r--lib/iov_iter.c61
4 files changed, 88 insertions, 0 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c07f492b871a..6ca22706cd64 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -60,6 +60,7 @@ config X86
60 select ARCH_HAS_PMEM_API if X86_64 60 select ARCH_HAS_PMEM_API if X86_64
61 select ARCH_HAS_REFCOUNT 61 select ARCH_HAS_REFCOUNT
62 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 62 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
63 select ARCH_HAS_UACCESS_MCSAFE if X86_64
63 select ARCH_HAS_SET_MEMORY 64 select ARCH_HAS_SET_MEMORY
64 select ARCH_HAS_SG_CHAIN 65 select ARCH_HAS_SG_CHAIN
65 select ARCH_HAS_STRICT_KERNEL_RWX 66 select ARCH_HAS_STRICT_KERNEL_RWX
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index c63efc07891f..62acb613114b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -47,6 +47,17 @@ copy_user_generic(void *to, const void *from, unsigned len)
47} 47}
48 48
49static __always_inline __must_check unsigned long 49static __always_inline __must_check unsigned long
50copy_to_user_mcsafe(void *to, const void *from, unsigned len)
51{
52 unsigned long ret;
53
54 __uaccess_begin();
55 ret = memcpy_mcsafe(to, from, len);
56 __uaccess_end();
57 return ret;
58}
59
60static __always_inline __must_check unsigned long
50raw_copy_from_user(void *dst, const void __user *src, unsigned long size) 61raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
51{ 62{
52 int ret = 0; 63 int ret = 0;
diff --git a/include/linux/uio.h b/include/linux/uio.h
index e67e12adb136..f5766e853a77 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -154,6 +154,12 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
154#define _copy_from_iter_flushcache _copy_from_iter_nocache 154#define _copy_from_iter_flushcache _copy_from_iter_nocache
155#endif 155#endif
156 156
157#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
158size_t _copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i);
159#else
160#define _copy_to_iter_mcsafe _copy_to_iter
161#endif
162
157static __always_inline __must_check 163static __always_inline __must_check
158size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) 164size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
159{ 165{
@@ -163,6 +169,15 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
163 return _copy_from_iter_flushcache(addr, bytes, i); 169 return _copy_from_iter_flushcache(addr, bytes, i);
164} 170}
165 171
172static __always_inline __must_check
173size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
174{
175 if (unlikely(!check_copy_size(addr, bytes, false)))
176 return 0;
177 else
178 return _copy_to_iter_mcsafe(addr, bytes, i);
179}
180
166size_t iov_iter_zero(size_t bytes, struct iov_iter *); 181size_t iov_iter_zero(size_t bytes, struct iov_iter *);
167unsigned long iov_iter_alignment(const struct iov_iter *i); 182unsigned long iov_iter_alignment(const struct iov_iter *i);
168unsigned long iov_iter_gap_alignment(const struct iov_iter *i); 183unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 970212670b6a..70ebc8ede143 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -573,6 +573,67 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
573} 573}
574EXPORT_SYMBOL(_copy_to_iter); 574EXPORT_SYMBOL(_copy_to_iter);
575 575
576#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
577static int copyout_mcsafe(void __user *to, const void *from, size_t n)
578{
579 if (access_ok(VERIFY_WRITE, to, n)) {
580 kasan_check_read(from, n);
581 n = copy_to_user_mcsafe((__force void *) to, from, n);
582 }
583 return n;
584}
585
586static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
587 const char *from, size_t len)
588{
589 unsigned long ret;
590 char *to;
591
592 to = kmap_atomic(page);
593 ret = memcpy_mcsafe(to + offset, from, len);
594 kunmap_atomic(to);
595
596 return ret;
597}
598
599size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
600{
601 const char *from = addr;
602 unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
603
604 if (unlikely(i->type & ITER_PIPE)) {
605 WARN_ON(1);
606 return 0;
607 }
608 if (iter_is_iovec(i))
609 might_fault();
610 iterate_and_advance(i, bytes, v,
611 copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
612 ({
613 rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
614 (from += v.bv_len) - v.bv_len, v.bv_len);
615 if (rem) {
616 curr_addr = (unsigned long) from;
617 bytes = curr_addr - s_addr - rem;
618 return bytes;
619 }
620 }),
621 ({
622 rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
623 v.iov_len);
624 if (rem) {
625 curr_addr = (unsigned long) from;
626 bytes = curr_addr - s_addr - rem;
627 return bytes;
628 }
629 })
630 )
631
632 return bytes;
633}
634EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
635#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
636
576size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) 637size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
577{ 638{
578 char *to = addr; 639 char *to = addr;